diff --git a/.github/workflows/sqlx.yml b/.github/workflows/sqlx.yml index c00149afaa..c0bf3a2792 100644 --- a/.github/workflows/sqlx.yml +++ b/.github/workflows/sqlx.yml @@ -14,6 +14,7 @@ jobs: - uses: actions/checkout@v4 - run: cargo fmt --all -- --check + check: name: Check runs-on: ubuntu-22.04 @@ -40,6 +41,25 @@ jobs: --no-default-features --lib --tests --features offline,all-databases,migrate,runtime-${{ matrix.runtime }}-${{ matrix.tls }} + - name: Run clippy for core with all features + run: | + cargo clippy --manifest-path sqlx-core/Cargo.toml \ + --no-default-features \ + --features offline,all-databases,all-types,migrate,runtime-${{ matrix.runtime }}-${{ matrix.tls }} \ + -- -D warnings + - name: Run clippy for root with all features + run: | + cargo clippy \ + --no-default-features \ + --features offline,all-databases,all-types,migrate,runtime-${{ matrix.runtime }}-${{ matrix.tls }},macros \ + -- -D warnings + - name: Run clippy for all targets + run: | + cargo clippy \ + --no-default-features \ + --all-targets \ + --features offline,all-databases,migrate,runtime-${{ matrix.runtime }}-${{ matrix.tls }} \ + -- -D warnings test: name: Unit Test @@ -98,6 +118,7 @@ jobs: name: cargo-sqlx-${{ matrix.target }} path: ${{ matrix.bin }} + sqlite: name: SQLite runs-on: ubuntu-22.04 @@ -110,6 +131,12 @@ jobs: - uses: actions/checkout@v4 - run: mkdir /tmp/sqlite3-lib && wget -O /tmp/sqlite3-lib/ipaddr.so https://github.com/nalgeon/sqlean/releases/download/0.15.2/ipaddr.so - uses: Swatinem/rust-cache@v2 + - name: Run clippy for sqlite + run: | + cargo clippy \ + --no-default-features \ + --features sqlite,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }},macros,migrate \ + -- -D warnings - run: cargo test --no-default-features @@ -153,6 +180,15 @@ jobs: args: > --features postgres,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }} + - name: Run clippy for postgres + run: | + cargo clippy \ + --no-default-features \ + --features postgres,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }},macros,migrate \ + -- -D warnings + env: + RUSTFLAGS: --cfg postgres_${{ matrix.postgres }} + - run: | docker compose -f tests/docker-compose.yml run -d -p 5432:5432 --name postgres_${{ matrix.postgres }} postgres_${{ matrix.postgres }} docker exec postgres_${{ matrix.postgres }} bash -c "until pg_isready; do sleep 1; done" @@ -224,6 +260,13 @@ jobs: args: > --features mysql,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }} + - name: Run clippy for mysql + run: | + cargo clippy \ + --no-default-features \ + --features mysql,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }},macros,migrate \ + -- -D warnings + - run: docker compose -f tests/docker-compose.yml run -d -p 3306:3306 mysql_${{ matrix.mysql }} - run: sleep 60 @@ -275,6 +318,13 @@ jobs: args: > --features mysql,runtime-${{ matrix.runtime }}-${{ matrix.tls }} + - name: Run clippy for mariadb + run: | + cargo clippy \ + --no-default-features \ + --features mysql,runtime-${{ matrix.runtime }}-${{ matrix.tls }},macros,migrate \ + -- -D warnings + - run: docker compose -f tests/docker-compose.yml run -d -p 3306:3306 mariadb_${{ matrix.mariadb }} - run: sleep 30 @@ -315,6 +365,13 @@ jobs: args: > --features mssql,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }} + - name: Run clippy for mssql + run: | + cargo clippy \ + --no-default-features \ + --features mssql,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }},macros,migrate \ + -- -D warnings + - run: docker compose -f tests/docker-compose.yml run -d 
-p 1433:1433 mssql_${{ matrix.mssql }} - run: sleep 80 # MSSQL takes a "bit" to startup diff --git a/Cargo.toml b/Cargo.toml index 69e3fda77c..1afcdeefb3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,6 +18,9 @@ members = [ "examples/sqlite/todos", ] +[workspace.lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(postgres_14)', 'cfg(postgres_9_6)'] } + [package] name = "sqlx-oldapi" version = "0.6.48" diff --git a/sqlx-core/src/any/connection/executor.rs b/sqlx-core/src/any/connection/executor.rs index 560d818010..3eb67c139e 100644 --- a/sqlx-core/src/any/connection/executor.rs +++ b/sqlx-core/src/any/connection/executor.rs @@ -14,13 +14,13 @@ use futures_util::{StreamExt, TryStreamExt}; impl<'c> Executor<'c> for &'c mut AnyConnection { type Database = Any; - fn fetch_many<'e, 'q: 'e, E: 'q>( + fn fetch_many<'e, 'q: 'e, E>( self, mut query: E, ) -> BoxStream<'e, Result, Error>> where 'c: 'e, - E: Execute<'q, Self::Database>, + E: Execute<'q, Self::Database> + 'q, { let arguments = query.take_arguments(); let query = query.sql(); @@ -52,13 +52,13 @@ impl<'c> Executor<'c> for &'c mut AnyConnection { } } - fn fetch_optional<'e, 'q: 'e, E: 'q>( + fn fetch_optional<'e, 'q: 'e, E>( self, mut query: E, ) -> BoxFuture<'e, Result, Error>> where 'c: 'e, - E: Execute<'q, Self::Database>, + E: Execute<'q, Self::Database> + 'q, { let arguments = query.take_arguments(); let query = query.sql(); diff --git a/sqlx-core/src/any/connection/mod.rs b/sqlx-core/src/any/connection/mod.rs index f057551db5..33bc7d983f 100644 --- a/sqlx-core/src/any/connection/mod.rs +++ b/sqlx-core/src/any/connection/mod.rs @@ -35,6 +35,7 @@ pub struct AnyConnection(pub(super) AnyConnectionKind); #[derive(Debug)] // Used internally in `sqlx-macros` #[doc(hidden)] +#[allow(clippy::large_enum_variant)] pub enum AnyConnectionKind { #[cfg(feature = "postgres")] Postgres(postgres::PgConnection), diff --git a/sqlx-core/src/any/statement.rs b/sqlx-core/src/any/statement.rs index 0c283c2e5e..17db4f46c4 100644 --- a/sqlx-core/src/any/statement.rs +++ b/sqlx-core/src/any/statement.rs @@ -33,7 +33,7 @@ impl<'q> Statement<'q> for AnyStatement<'q> { fn parameters(&self) -> Option> { match &self.parameters { - Some(Either::Left(types)) => Some(Either::Left(&types)), + Some(Either::Left(types)) => Some(Either::Left(types)), Some(Either::Right(count)) => Some(Either::Right(*count)), None => None, } @@ -54,7 +54,7 @@ where statement .column_names .get(*self) + .copied() .ok_or_else(|| Error::ColumnNotFound((*self).into())) - .map(|v| *v) } } diff --git a/sqlx-core/src/arguments.rs b/sqlx-core/src/arguments.rs index 46acd3f76b..2261be7fa7 100644 --- a/sqlx-core/src/arguments.rs +++ b/sqlx-core/src/arguments.rs @@ -29,6 +29,7 @@ pub trait IntoArguments<'q, DB: HasArguments<'q>>: Sized + Send { // NOTE: required due to lack of lazy normalization #[allow(unused_macros)] +#[allow(clippy::needless_doctest_main)] macro_rules! impl_into_arguments_for_arguments { ($Arguments:path) => { impl<'q> diff --git a/sqlx-core/src/connection.rs b/sqlx-core/src/connection.rs index c92145eb92..ae0ae0d649 100644 --- a/sqlx-core/src/connection.rs +++ b/sqlx-core/src/connection.rs @@ -119,7 +119,7 @@ pub trait Connection: Send { { let options = url.parse(); - Box::pin(async move { Ok(Self::connect_with(&options?).await?) }) + Box::pin(async move { Self::connect_with(&options?).await }) } /// Establish a new database connection with the provided options. 
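Aside: the `[workspace.lints.rust] unexpected_cfgs` entry added above whitelists the custom `postgres_14` / `postgres_9_6` cfgs that CI injects through `RUSTFLAGS: --cfg postgres_${{ matrix.postgres }}`. The sketch below is a hypothetical, stand-alone illustration of how code can branch on such a cfg without tripping rustc's unknown-cfg warning; the function names are made up and are not part of this patch.

```rust
// Hypothetical illustration only: branch on the version cfgs that CI passes
// via `RUSTFLAGS: --cfg postgres_14` (or `--cfg postgres_9_6`). The
// `check-cfg` list in `[workspace.lints.rust] unexpected_cfgs` tells rustc
// these names are expected, so using them raises no `unexpected_cfgs` warning.
#[cfg(postgres_14)]
fn expected_server_major() -> u32 {
    14
}

#[cfg(all(postgres_9_6, not(postgres_14)))]
fn expected_server_major() -> u32 {
    9
}

#[cfg(not(any(postgres_14, postgres_9_6)))]
fn expected_server_major() -> u32 {
    0 // not pinned by CI
}

fn main() {
    println!("CI targets PostgreSQL major {}", expected_server_major());
}
```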
diff --git a/sqlx-core/src/executor.rs b/sqlx-core/src/executor.rs index 2b0e27c219..96c318e017 100644 --- a/sqlx-core/src/executor.rs +++ b/sqlx-core/src/executor.rs @@ -26,25 +26,25 @@ pub trait Executor<'c>: Send + Debug + Sized { type Database: Database; /// Execute the query and return the total number of rows affected. - fn execute<'e, 'q: 'e, E: 'q>( + fn execute<'e, 'q: 'e, E>( self, query: E, ) -> BoxFuture<'e, Result<::QueryResult, Error>> where 'c: 'e, - E: Execute<'q, Self::Database>, + E: Execute<'q, Self::Database> + 'q, { self.execute_many(query).try_collect().boxed() } /// Execute multiple queries and return the rows affected from each query, in a stream. - fn execute_many<'e, 'q: 'e, E: 'q>( + fn execute_many<'e, 'q: 'e, E>( self, query: E, ) -> BoxStream<'e, Result<::QueryResult, Error>> where 'c: 'e, - E: Execute<'q, Self::Database>, + E: Execute<'q, Self::Database> + 'q, { self.fetch_many(query) .try_filter_map(|step| async move { @@ -57,13 +57,13 @@ pub trait Executor<'c>: Send + Debug + Sized { } /// Execute the query and return the generated results as a stream. - fn fetch<'e, 'q: 'e, E: 'q>( + fn fetch<'e, 'q: 'e, E>( self, query: E, ) -> BoxStream<'e, Result<::Row, Error>> where 'c: 'e, - E: Execute<'q, Self::Database>, + E: Execute<'q, Self::Database> + 'q, { self.fetch_many(query) .try_filter_map(|step| async move { @@ -77,7 +77,7 @@ pub trait Executor<'c>: Send + Debug + Sized { /// Execute multiple queries and return the generated results as a stream /// from each query, in a stream. - fn fetch_many<'e, 'q: 'e, E: 'q>( + fn fetch_many<'e, 'q: 'e, E>( self, query: E, ) -> BoxStream< @@ -89,28 +89,28 @@ pub trait Executor<'c>: Send + Debug + Sized { > where 'c: 'e, - E: Execute<'q, Self::Database>; + E: Execute<'q, Self::Database> + 'q; /// Execute the query and return all the generated results, collected into a [`Vec`]. - fn fetch_all<'e, 'q: 'e, E: 'q>( + fn fetch_all<'e, 'q: 'e, E>( self, query: E, ) -> BoxFuture<'e, Result::Row>, Error>> where 'c: 'e, - E: Execute<'q, Self::Database>, + E: Execute<'q, Self::Database> + 'q, { self.fetch(query).try_collect().boxed() } /// Execute the query and returns exactly one row. - fn fetch_one<'e, 'q: 'e, E: 'q>( + fn fetch_one<'e, 'q: 'e, E>( self, query: E, ) -> BoxFuture<'e, Result<::Row, Error>> where 'c: 'e, - E: Execute<'q, Self::Database>, + E: Execute<'q, Self::Database> + 'q, { self.fetch_optional(query) .and_then(|row| match row { @@ -121,13 +121,13 @@ pub trait Executor<'c>: Send + Debug + Sized { } /// Execute the query and returns at most one row. - fn fetch_optional<'e, 'q: 'e, E: 'q>( + fn fetch_optional<'e, 'q: 'e, E>( self, query: E, ) -> BoxFuture<'e, Result::Row>, Error>> where 'c: 'e, - E: Execute<'q, Self::Database>; + E: Execute<'q, Self::Database> + 'q; /// Prepare the SQL query to inspect the type information of its parameters /// and results. 
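Aside: the recurring change in `executor.rs` above (mirrored in every backend's `Executor` impl later in this patch) moves the `E: 'q` lifetime bound off the type-parameter list and into the `where` clause as `E: Execute<'q, Self::Database> + 'q`. The two spellings impose the same requirement. The sketch below is a stand-alone, simplified illustration: `Execute`, `RawSql`, `fetch_old`, and `fetch_new` are made-up stand-ins, not the sqlx definitions (the real trait also carries a database type parameter).

```rust
use std::future::Future;
use std::pin::Pin;

// Local alias standing in for `futures_core::future::BoxFuture`.
type BoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + 'a>>;

// Simplified stand-in for sqlx's `Execute<'q, DB>` trait.
trait Execute<'q>: Send {
    fn sql(&self) -> &'q str;
}

struct RawSql<'q>(&'q str);

impl<'q> Execute<'q> for RawSql<'q> {
    fn sql(&self) -> &'q str {
        self.0
    }
}

// Before: the lifetime bound rides on the type-parameter list (`E: 'q`).
fn fetch_old<'q, E: 'q>(query: E) -> BoxFuture<'q, usize>
where
    E: Execute<'q>,
{
    Box::pin(async move { query.sql().len() })
}

// After: the same requirement expressed entirely in the `where` clause,
// which is the shape this patch gives every `Executor` method.
fn fetch_new<'q, E>(query: E) -> BoxFuture<'q, usize>
where
    E: Execute<'q> + 'q,
{
    Box::pin(async move { query.sql().len() })
}

fn main() {
    let _old = fetch_old(RawSql("SELECT 1"));
    let _new = fetch_new(RawSql("SELECT 1"));
}
```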
diff --git a/sqlx-core/src/ext/ustr.rs b/sqlx-core/src/ext/ustr.rs index 27dd8fe6af..204c33a8bb 100644 --- a/sqlx-core/src/ext/ustr.rs +++ b/sqlx-core/src/ext/ustr.rs @@ -103,6 +103,6 @@ impl serde::Serialize for UStr { where S: serde::Serializer, { - serializer.serialize_str(&self) + serializer.serialize_str(self) } } diff --git a/sqlx-core/src/io/buf.rs b/sqlx-core/src/io/buf.rs index 7aa3289ecd..2446ce61e7 100644 --- a/sqlx-core/src/io/buf.rs +++ b/sqlx-core/src/io/buf.rs @@ -22,7 +22,7 @@ pub trait BufExt: Buf { impl BufExt for Bytes { fn get_bytes_nul(&mut self) -> Result { let nul = - memchr(b'\0', &self).ok_or_else(|| err_protocol!("expected NUL in byte sequence"))?; + memchr(b'\0', self).ok_or_else(|| err_protocol!("expected NUL in byte sequence"))?; let v = self.slice(0..nul); @@ -40,7 +40,7 @@ impl BufExt for Bytes { fn get_str_nul(&mut self) -> Result { self.get_bytes_nul().and_then(|bytes| { - from_utf8(&*bytes) + from_utf8(&bytes) .map(ToOwned::to_owned) .map_err(|err| err_protocol!("{}", err)) }) diff --git a/sqlx-core/src/lib.rs b/sqlx-core/src/lib.rs index 1056fbde1d..8489b1127d 100644 --- a/sqlx-core/src/lib.rs +++ b/sqlx-core/src/lib.rs @@ -3,12 +3,8 @@ #![recursion_limit = "512"] #![warn(future_incompatible, rust_2018_idioms)] #![allow(clippy::needless_doctest_main, clippy::type_complexity, dead_code)] -#![deny( - clippy::cast_possible_truncation, - clippy::cast_possible_wrap, - clippy::cast_precision_loss, - clippy::cast_sign_loss -)] +// Note: Cast warnings are allowed on a case-by-case basis with explicit #[allow(...)] +// This ensures we're aware of potential issues with numeric conversions // See `clippy.toml` at the workspace root #![deny(clippy::disallowed_methods)] // diff --git a/sqlx-core/src/logger.rs b/sqlx-core/src/logger.rs index df36ff73aa..6cfd80d215 100644 --- a/sqlx-core/src/logger.rs +++ b/sqlx-core/src/logger.rs @@ -47,7 +47,7 @@ impl<'q> QueryLogger<'q> { .to_level() .filter(|lvl| log::log_enabled!(target: "sqlx::query", *lvl)) { - let mut summary = parse_query_summary(&self.sql); + let mut summary = parse_query_summary(self.sql); let sql = if summary != self.sql { summary.push_str(" …"); @@ -99,16 +99,11 @@ impl<'q, O: Debug + Hash + Eq, R: Debug, P: Debug> QueryPlanLogger<'q, O, R, P> } pub(crate) fn log_enabled(&self) -> bool { - if let Some(_lvl) = self - .settings + self.settings .statements_level .to_level() .filter(|lvl| log::log_enabled!(target: "sqlx::explain", *lvl)) - { - return true; - } else { - return false; - } + .is_some() } pub(crate) fn add_result(&mut self, result: R) { @@ -126,7 +121,7 @@ impl<'q, O: Debug + Hash + Eq, R: Debug, P: Debug> QueryPlanLogger<'q, O, R, P> .to_level() .filter(|lvl| log::log_enabled!(target: "sqlx::explain", *lvl)) { - let mut summary = parse_query_summary(&self.sql); + let mut summary = parse_query_summary(self.sql); let sql = if summary != self.sql { summary.push_str(" …"); diff --git a/sqlx-core/src/mssql/arguments.rs b/sqlx-core/src/mssql/arguments.rs index f8983ca2de..98e6de84be 100644 --- a/sqlx-core/src/mssql/arguments.rs +++ b/sqlx-core/src/mssql/arguments.rs @@ -86,7 +86,7 @@ impl MssqlArguments { // @p1 int, @p2 nvarchar(10), ... 
if !declarations.is_empty() { - declarations.push_str(","); + declarations.push(','); } declarations.push_str(name); diff --git a/sqlx-core/src/mssql/column.rs b/sqlx-core/src/mssql/column.rs index a6bdbe823c..c6db78cc78 100644 --- a/sqlx-core/src/mssql/column.rs +++ b/sqlx-core/src/mssql/column.rs @@ -33,7 +33,7 @@ impl Column for MssqlColumn { } fn name(&self) -> &str { - &*self.name + &self.name } fn type_info(&self) -> &MssqlTypeInfo { diff --git a/sqlx-core/src/mssql/connection/establish.rs b/sqlx-core/src/mssql/connection/establish.rs index 6e1c238274..d1fbb2175d 100644 --- a/sqlx-core/src/mssql/connection/establish.rs +++ b/sqlx-core/src/mssql/connection/establish.rs @@ -43,8 +43,7 @@ impl MssqlConnection { stream.setup_encryption().await?; } (Encrypt::Required, Encrypt::Off | Encrypt::NotSupported) => { - return Err(Error::Tls(Box::new(std::io::Error::new( - std::io::ErrorKind::Other, + return Err(Error::Tls(Box::new(std::io::Error::other( "TLS encryption required but not supported by server", )))); } @@ -73,7 +72,7 @@ impl MssqlConnection { server_name: &options.server_name, client_interface_name: &options.client_interface_name, language: &options.language, - database: &*options.database, + database: &options.database, client_id: [0; 6], }; diff --git a/sqlx-core/src/mssql/connection/executor.rs b/sqlx-core/src/mssql/connection/executor.rs index 25e1480d05..53e431eabd 100644 --- a/sqlx-core/src/mssql/connection/executor.rs +++ b/sqlx-core/src/mssql/connection/executor.rs @@ -71,13 +71,13 @@ impl MssqlConnection { impl<'c> Executor<'c> for &'c mut MssqlConnection { type Database = Mssql; - fn fetch_many<'e, 'q: 'e, E: 'q>( + fn fetch_many<'e, 'q: 'e, E>( self, mut query: E, ) -> BoxStream<'e, Result, Error>> where 'c: 'e, - E: Execute<'q, Self::Database>, + E: Execute<'q, Self::Database> + 'q, { let sql = query.sql(); let arguments = query.take_arguments(); @@ -135,13 +135,13 @@ impl<'c> Executor<'c> for &'c mut MssqlConnection { }) } - fn fetch_optional<'e, 'q: 'e, E: 'q>( + fn fetch_optional<'e, 'q: 'e, E>( self, query: E, ) -> BoxFuture<'e, Result, Error>> where 'c: 'e, - E: Execute<'q, Self::Database>, + E: Execute<'q, Self::Database> + 'q, { let mut s = self.fetch_many(query); diff --git a/sqlx-core/src/mssql/connection/prepare.rs b/sqlx-core/src/mssql/connection/prepare.rs index 50d33983dd..db96af33cd 100644 --- a/sqlx-core/src/mssql/connection/prepare.rs +++ b/sqlx-core/src/mssql/connection/prepare.rs @@ -28,7 +28,7 @@ pub(crate) async fn prepare( for m in PARAMS_RE.captures_iter(sql) { if !params.is_empty() { - params.push_str(","); + params.push(','); } params.push_str(&m[0]); diff --git a/sqlx-core/src/mssql/connection/stream.rs b/sqlx-core/src/mssql/connection/stream.rs index 2d8d5b43f6..b19c26578d 100644 --- a/sqlx-core/src/mssql/connection/stream.rs +++ b/sqlx-core/src/mssql/connection/stream.rs @@ -142,7 +142,7 @@ impl MssqlStream { // TDS communicates in streams of packets that are themselves streams of messages pub(super) async fn recv_message(&mut self) -> Result { loop { - while self.response.as_ref().map_or(false, |r| !r.1.is_empty()) { + while self.response.as_ref().is_some_and(|r| !r.1.is_empty()) { let buf = if let Some((_, buf)) = self.response.as_mut() { buf } else { diff --git a/sqlx-core/src/mssql/connection/tls_prelogin_stream_wrapper.rs b/sqlx-core/src/mssql/connection/tls_prelogin_stream_wrapper.rs index 29c3ccf1da..ada4a613ed 100644 --- a/sqlx-core/src/mssql/connection/tls_prelogin_stream_wrapper.rs +++ 
b/sqlx-core/src/mssql/connection/tls_prelogin_stream_wrapper.rs @@ -35,7 +35,6 @@ use std::task::{self, ready, Poll}; /// /// This allows us to use standard TLS libraries while still conforming to the TDS protocol /// requirements for the PRELOGIN phase. - const HEADER_BYTES: usize = 8; pub(crate) struct TlsPreloginWrapper { @@ -101,14 +100,17 @@ impl AsyncRead for TlsPreloginWrapper< let read = header_buf.filled().len(); if read == 0 { - return Poll::Ready(Ok(PollReadOut::default())); + #[cfg(feature = "_rt-async-std")] + return Poll::Ready(Ok(0)); + #[cfg(feature = "_rt-tokio")] + return Poll::Ready(Ok(())); } inner.header_pos += read; } let header: PacketHeader = Decode::decode(Bytes::copy_from_slice(&inner.header_buf)) - .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?; + .map_err(io::Error::other)?; inner.read_remaining = usize::from(header.length) - HEADER_BYTES; @@ -122,6 +124,7 @@ impl AsyncRead for TlsPreloginWrapper< let max_read = std::cmp::min(inner.read_remaining, buf.remaining()); let mut limited_buf = buf.take(max_read); + #[allow(clippy::let_unit_value)] let res = ready!(Pin::new(&mut inner.stream).poll_read(cx, &mut limited_buf))?; let read = limited_buf.filled().len(); @@ -153,14 +156,17 @@ impl AsyncRead for TlsPreloginWrapper< let read = ready!(Pin::new(&mut inner.stream).poll_read(cx, header_buf))?; if read == 0 { - return Poll::Ready(Ok(PollReadOut::default())); + #[cfg(feature = "_rt-async-std")] + return Poll::Ready(Ok(0)); + #[cfg(feature = "_rt-tokio")] + return Poll::Ready(Ok(())); } inner.header_pos += read; } let header: PacketHeader = Decode::decode(Bytes::copy_from_slice(&inner.header_buf)) - .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?; + .map_err(io::Error::other)?; inner.read_remaining = usize::from(header.length) - HEADER_BYTES; diff --git a/sqlx-core/src/mssql/io/buf_mut.rs b/sqlx-core/src/mssql/io/buf_mut.rs index 01cc757b9f..724e91b525 100644 --- a/sqlx-core/src/mssql/io/buf_mut.rs +++ b/sqlx-core/src/mssql/io/buf_mut.rs @@ -5,8 +5,8 @@ pub trait MssqlBufMutExt { impl MssqlBufMutExt for Vec { fn put_utf16_str(&mut self, s: &str) { - let mut enc = s.encode_utf16(); - while let Some(ch) = enc.next() { + let enc = s.encode_utf16(); + for ch in enc { self.extend_from_slice(&ch.to_le_bytes()); } } diff --git a/sqlx-core/src/mssql/options/mod.rs b/sqlx-core/src/mssql/options/mod.rs index 70ebdbbbd3..3e90e070e1 100644 --- a/sqlx-core/src/mssql/options/mod.rs +++ b/sqlx-core/src/mssql/options/mod.rs @@ -134,6 +134,7 @@ impl MssqlConnectOptions { /// Size in bytes of TDS packets to exchange with the server. 
/// Returns an error if the size is smaller than 512 bytes + #[allow(clippy::result_large_err)] pub fn requested_packet_size(mut self, size: u32) -> Result { if size < 512 { Err(self) diff --git a/sqlx-core/src/mssql/options/parse.rs b/sqlx-core/src/mssql/options/parse.rs index dd5e6549c3..fb6d921c98 100644 --- a/sqlx-core/src/mssql/options/parse.rs +++ b/sqlx-core/src/mssql/options/parse.rs @@ -60,7 +60,7 @@ impl FromStr for MssqlConnectOptions { let username = url.username(); if !username.is_empty() { options = options.username( - &*percent_decode_str(username) + &percent_decode_str(username) .decode_utf8() .map_err(Error::config)?, ); @@ -68,7 +68,7 @@ impl FromStr for MssqlConnectOptions { if let Some(password) = url.password() { options = options.password( - &*percent_decode_str(password) + &percent_decode_str(password) .decode_utf8() .map_err(Error::config)?, ); @@ -82,7 +82,7 @@ impl FromStr for MssqlConnectOptions { for (key, value) in url.query_pairs() { match key.as_ref() { "instance" => { - options = options.instance(&*value); + options = options.instance(&value); } "encrypt" => { match value.to_lowercase().as_str() { @@ -104,7 +104,7 @@ impl FromStr for MssqlConnectOptions { options = options.trust_server_certificate(trust); } "hostname_in_certificate" => { - options = options.hostname_in_certificate(&*value); + options = options.hostname_in_certificate(&value); } "packet_size" => { let size = value.parse().map_err(Error::config)?; @@ -116,11 +116,11 @@ impl FromStr for MssqlConnectOptions { options = options.client_program_version(value.parse().map_err(Error::config)?) } "client_pid" => options = options.client_pid(value.parse().map_err(Error::config)?), - "hostname" => options = options.hostname(&*value), - "app_name" => options = options.app_name(&*value), - "server_name" => options = options.server_name(&*value), - "client_interface_name" => options = options.client_interface_name(&*value), - "language" => options = options.language(&*value), + "hostname" => options = options.hostname(&value), + "app_name" => options = options.app_name(&value), + "server_name" => options = options.server_name(&value), + "client_interface_name" => options = options.client_interface_name(&value), + "language" => options = options.language(&value), _ => { return Err(Error::config(MssqlInvalidOption(key.into()))); } diff --git a/sqlx-core/src/mssql/protocol/login.rs b/sqlx-core/src/mssql/protocol/login.rs index a001e00acc..323e24d3e5 100644 --- a/sqlx-core/src/mssql/protocol/login.rs +++ b/sqlx-core/src/mssql/protocol/login.rs @@ -97,9 +97,8 @@ impl Encode<'_> for Login7<'_> { // password buffer starting with the position pointed to by ibPassword or // ibChangePassword, the client SHOULD first swap the four high bits with // the four low bits and then do a bit-XOR with 0xA5 (10100101). - for i in password_start..buf.len() { - let b = buf[i]; - buf[i] = ((b << 4) & 0xf0 | (b >> 4) & 0x0f) ^ 0xa5; + for b in &mut buf[password_start..] 
{ + *b = ((*b << 4) & 0xf0 | (*b >> 4) & 0x0f) ^ 0xa5; } // [AppName] The client application name @@ -143,6 +142,7 @@ impl Encode<'_> for Login7<'_> { } } +#[allow(clippy::ptr_arg)] fn write_offset(buf: &mut Vec, offsets: &mut usize, beg: usize) { // The offset must be relative to the beginning of the packet payload, after // the packet header diff --git a/sqlx-core/src/mssql/protocol/pre_login.rs b/sqlx-core/src/mssql/protocol/pre_login.rs index da46f1b630..99d2c70ee2 100644 --- a/sqlx-core/src/mssql/protocol/pre_login.rs +++ b/sqlx-core/src/mssql/protocol/pre_login.rs @@ -90,8 +90,7 @@ impl<'de> Decode<'de> for PreLogin { return Err(err_protocol!( "PRELOGIN: unexpected login option token: 0x{:02?}", token - ) - .into()); + )); } } } @@ -195,6 +194,7 @@ enum PreLoginOptionToken { } impl PreLoginOptionToken { + #[allow(clippy::ptr_arg)] fn put(self, buf: &mut Vec, pos: &mut usize, offset: &mut u16, len: u16) { buf[*pos] = self as u8; *pos += 1; diff --git a/sqlx-core/src/mssql/protocol/row.rs b/sqlx-core/src/mssql/protocol/row.rs index 1094019ab2..33e64fba1c 100644 --- a/sqlx-core/src/mssql/protocol/row.rs +++ b/sqlx-core/src/mssql/protocol/row.rs @@ -20,7 +20,7 @@ impl Row { let mut column_types = Vec::with_capacity(columns.len()); let nulls = if nullable { - buf.get_bytes((columns.len() + 7) / 8) + buf.get_bytes(columns.len().div_ceil(8)) } else { Bytes::from_static(b"") }; diff --git a/sqlx-core/src/mssql/protocol/type_info.rs b/sqlx-core/src/mssql/protocol/type_info.rs index 6b17358cda..8c7212e421 100644 --- a/sqlx-core/src/mssql/protocol/type_info.rs +++ b/sqlx-core/src/mssql/protocol/type_info.rs @@ -143,7 +143,7 @@ impl TypeInfo { } // Baltic locales - 0x0425 | 0x0427 | 0x0426 => encoding_rs::WINDOWS_1257, + 0x0425..=0x0427 => encoding_rs::WINDOWS_1257, // Greek 0x0408 => encoding_rs::WINDOWS_1253, @@ -204,9 +204,9 @@ impl TypeInfo { let scale = buf.get_u8(); let mut size = match scale { - 0 | 1 | 2 => 3, - 3 | 4 => 4, - 5 | 6 | 7 => 5, + 0..=2 => 3, + 3..=4 => 4, + 5..=7 => 5, scale => { return Err(err_protocol!("invalid scale {} for type {:?}", scale, ty)); diff --git a/sqlx-core/src/mssql/types/float.rs b/sqlx-core/src/mssql/types/float.rs index a4aad77bc9..089273a03e 100644 --- a/sqlx-core/src/mssql/types/float.rs +++ b/sqlx-core/src/mssql/types/float.rs @@ -29,6 +29,7 @@ impl Encode<'_, Mssql> for f32 { impl Decode<'_, Mssql> for f32 { fn decode(value: MssqlValueRef<'_>) -> Result { let as_f64 = >::decode(value)?; + #[allow(clippy::cast_possible_truncation)] Ok(as_f64 as f32) } } @@ -81,7 +82,9 @@ impl Decode<'_, Mssql> for f64 { DataType::MoneyN | DataType::Money | DataType::SmallMoney => { let numerator = decode_money_bytes(value.as_bytes()?)?; let denominator = 10_000; + #[allow(clippy::cast_precision_loss)] let integer_part = (numerator / denominator) as f64; + #[allow(clippy::cast_precision_loss)] let fractional_part = (numerator % denominator) as f64 / denominator as f64; Ok(integer_part + fractional_part) } diff --git a/sqlx-core/src/mysql/arguments.rs b/sqlx-core/src/mysql/arguments.rs index 3d8dcce86f..a5d4417bf2 100644 --- a/sqlx-core/src/mysql/arguments.rs +++ b/sqlx-core/src/mysql/arguments.rs @@ -31,6 +31,11 @@ impl MySqlArguments { pub fn len(&self) -> usize { self.types.len() } + + #[doc(hidden)] + pub fn is_empty(&self) -> bool { + self.types.is_empty() + } } impl<'q> Arguments<'q> for MySqlArguments { diff --git a/sqlx-core/src/mysql/column.rs b/sqlx-core/src/mysql/column.rs index ecbe8ca774..ab40c47da8 100644 --- a/sqlx-core/src/mysql/column.rs +++ 
b/sqlx-core/src/mysql/column.rs @@ -24,7 +24,7 @@ impl Column for MySqlColumn { } fn name(&self) -> &str { - &*self.name + &self.name } fn type_info(&self) -> &MySqlTypeInfo { diff --git a/sqlx-core/src/mysql/connection/auth.rs b/sqlx-core/src/mysql/connection/auth.rs index 192e5be7fe..038d4d28d9 100644 --- a/sqlx-core/src/mysql/connection/auth.rs +++ b/sqlx-core/src/mysql/connection/auth.rs @@ -76,7 +76,7 @@ fn scramble_sha1(password: &str, nonce: &Chain) -> Vec { let mut pw_hash = ctx.finalize_reset(); - ctx.update(&pw_hash); + ctx.update(pw_hash); let pw_hash_hash = ctx.finalize_reset(); @@ -100,7 +100,7 @@ fn scramble_sha256(password: &str, nonce: &Chain) -> Vec { let mut pw_hash = ctx.finalize_reset(); - ctx.update(&pw_hash); + ctx.update(pw_hash); let pw_hash_hash = ctx.finalize_reset(); @@ -141,10 +141,10 @@ async fn encrypt_rsa<'s>( let (a, b) = (nonce.first_ref(), nonce.last_ref()); let mut nonce = Vec::with_capacity(a.len() + b.len()); - nonce.extend_from_slice(&*a); - nonce.extend_from_slice(&*b); + nonce.extend_from_slice(a); + nonce.extend_from_slice(b); - xor_eq(&mut pass, &*nonce); + xor_eq(&mut pass, &nonce); // client sends an RSA encrypted password let pkey = parse_rsa_pub_key(rsa_pub_key)?; @@ -179,5 +179,5 @@ fn parse_rsa_pub_key(key: &[u8]) -> Result { // we are receiving a PKCS#8 RSA Public Key at all // times from MySQL - RsaPublicKey::from_public_key_pem(&pem).map_err(Error::protocol) + RsaPublicKey::from_public_key_pem(pem).map_err(Error::protocol) } diff --git a/sqlx-core/src/mysql/connection/executor.rs b/sqlx-core/src/mysql/connection/executor.rs index 8d73794f57..10220b77b9 100644 --- a/sqlx-core/src/mysql/connection/executor.rs +++ b/sqlx-core/src/mysql/connection/executor.rs @@ -25,7 +25,7 @@ use futures_util::{pin_mut, TryStreamExt}; use std::{borrow::Cow, sync::Arc}; impl MySqlConnection { - async fn get_or_prepare<'c>( + async fn get_or_prepare( &mut self, sql: &str, persistent: bool, @@ -213,13 +213,13 @@ impl MySqlConnection { impl<'c> Executor<'c> for &'c mut MySqlConnection { type Database = MySql; - fn fetch_many<'e, 'q: 'e, E: 'q>( + fn fetch_many<'e, 'q: 'e, E>( self, mut query: E, ) -> BoxStream<'e, Result, Error>> where 'c: 'e, - E: Execute<'q, Self::Database>, + E: Execute<'q, Self::Database> + 'q, { let sql = query.sql(); let arguments = query.take_arguments(); @@ -237,13 +237,13 @@ impl<'c> Executor<'c> for &'c mut MySqlConnection { }) } - fn fetch_optional<'e, 'q: 'e, E: 'q>( + fn fetch_optional<'e, 'q: 'e, E>( self, query: E, ) -> BoxFuture<'e, Result, Error>> where 'c: 'e, - E: Execute<'q, Self::Database>, + E: Execute<'q, Self::Database> + 'q, { let mut s = self.fetch_many(query); @@ -289,7 +289,7 @@ impl<'c> Executor<'c> for &'c mut MySqlConnection { let (_, metadata) = self.get_or_prepare(sql, false).await?; - let columns = (&*metadata.columns).clone(); + let columns = (*metadata.columns).clone(); let nullable = columns .iter() @@ -335,7 +335,7 @@ fn recv_next_result_column(def: &ColumnDefinition, ordinal: usize) -> Result UStr::new(name), }; - let type_info = MySqlTypeInfo::from_column(&def); + let type_info = MySqlTypeInfo::from_column(def); Ok(MySqlColumn { name, diff --git a/sqlx-core/src/mysql/connection/stream.rs b/sqlx-core/src/mysql/connection/stream.rs index dd9a1235b8..657cbeaca5 100644 --- a/sqlx-core/src/mysql/connection/stream.rs +++ b/sqlx-core/src/mysql/connection/stream.rs @@ -149,7 +149,7 @@ impl MySqlStream { // TODO: packet joining if payload - .get(0) + .first() .ok_or(err_protocol!("Packet empty"))? 
.eq(&0xff) { diff --git a/sqlx-core/src/mysql/migrate.rs b/sqlx-core/src/mysql/migrate.rs index 3a23616950..fe657d8db5 100644 --- a/sqlx-core/src/mysql/migrate.rs +++ b/sqlx-core/src/mysql/migrate.rs @@ -192,11 +192,11 @@ CREATE TABLE IF NOT EXISTS _sqlx_migrations ( .map_err(MigrateError::AccessMigrationMetadata)?; if let Some(checksum) = checksum { - return if checksum == &*migration.checksum { + if checksum == *migration.checksum { Ok(()) } else { Err(MigrateError::VersionMismatch(migration.version)) - }; + } } else { Err(MigrateError::VersionMissing(migration.version)) } @@ -320,7 +320,7 @@ CREATE TABLE IF NOT EXISTS _sqlx_migrations ( async fn current_database(conn: &mut MySqlConnection) -> Result { // language=MySQL - Ok(query_scalar("SELECT DATABASE()").fetch_one(conn).await?) + query_scalar("SELECT DATABASE()").fetch_one(conn).await } // inspired from rails: https://github.com/rails/rails/blob/6e49cc77ab3d16c06e12f93158eaf3e507d4120e/activerecord/lib/active_record/migration.rb#L1308 diff --git a/sqlx-core/src/mysql/options/parse.rs b/sqlx-core/src/mysql/options/parse.rs index b1a2e98485..4854642402 100644 --- a/sqlx-core/src/mysql/options/parse.rs +++ b/sqlx-core/src/mysql/options/parse.rs @@ -22,7 +22,7 @@ impl FromStr for MySqlConnectOptions { let username = url.username(); if !username.is_empty() { options = options.username( - &*percent_decode_str(username) + &percent_decode_str(username) .decode_utf8() .map_err(Error::config)?, ); @@ -30,7 +30,7 @@ impl FromStr for MySqlConnectOptions { if let Some(password) = url.password() { options = options.password( - &*percent_decode_str(password) + &percent_decode_str(password) .decode_utf8() .map_err(Error::config)?, ); @@ -52,11 +52,11 @@ impl FromStr for MySqlConnectOptions { } "charset" => { - options = options.charset(&*value); + options = options.charset(&value); } "collation" => { - options = options.collation(&*value); + options = options.collation(&value); } "statement-cache-capacity" => { diff --git a/sqlx-core/src/mysql/options/ssl_mode.rs b/sqlx-core/src/mysql/options/ssl_mode.rs index a9abf08441..14493599a7 100644 --- a/sqlx-core/src/mysql/options/ssl_mode.rs +++ b/sqlx-core/src/mysql/options/ssl_mode.rs @@ -4,7 +4,7 @@ use std::str::FromStr; /// Options for controlling the desired security state of the connection to the MySQL server. /// /// It is used by the [`ssl_mode`](super::MySqlConnectOptions::ssl_mode) method. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, Default)] pub enum MySqlSslMode { /// Establish an unencrypted connection. Disabled, @@ -13,6 +13,7 @@ pub enum MySqlSslMode { /// back to an unencrypted connection if an encrypted connection cannot be established. /// /// This is the default if `ssl_mode` is not specified. + #[default] Preferred, /// Establish an encrypted connection if the server supports encrypted connections. 
@@ -30,12 +31,6 @@ pub enum MySqlSslMode { VerifyIdentity, } -impl Default for MySqlSslMode { - fn default() -> Self { - MySqlSslMode::Preferred - } -} - impl FromStr for MySqlSslMode { type Err = Error; diff --git a/sqlx-core/src/mysql/protocol/auth.rs b/sqlx-core/src/mysql/protocol/auth.rs index 261a8817ab..d62615da7b 100644 --- a/sqlx-core/src/mysql/protocol/auth.rs +++ b/sqlx-core/src/mysql/protocol/auth.rs @@ -3,6 +3,7 @@ use std::str::FromStr; use crate::error::Error; #[derive(Debug, Copy, Clone)] +#[allow(clippy::enum_variant_names)] pub enum AuthPlugin { MySqlNativePassword, CachingSha2Password, diff --git a/sqlx-core/src/mysql/protocol/connect/handshake.rs b/sqlx-core/src/mysql/protocol/connect/handshake.rs index e0a4497bbd..841617a87c 100644 --- a/sqlx-core/src/mysql/protocol/connect/handshake.rs +++ b/sqlx-core/src/mysql/protocol/connect/handshake.rs @@ -61,7 +61,7 @@ impl Decode<'_> for Handshake { } let auth_plugin_data_2 = if capabilities.contains(Capabilities::SECURE_CONNECTION) { - let len = ((auth_plugin_data_len as isize) - 9).max(12) as usize; + let len = std::cmp::max((auth_plugin_data_len as isize) - 9, 12) as usize; let v = buf.get_bytes(len); buf.advance(1); // NUL-terminator diff --git a/sqlx-core/src/mysql/protocol/response/err.rs b/sqlx-core/src/mysql/protocol/response/err.rs index e365a8e0a7..3933621ab9 100644 --- a/sqlx-core/src/mysql/protocol/response/err.rs +++ b/sqlx-core/src/mysql/protocol/response/err.rs @@ -30,7 +30,7 @@ impl Decode<'_, Capabilities> for ErrPacket { if capabilities.contains(Capabilities::PROTOCOL_41) { // If the next byte is '#' then we have a SQL STATE - if buf.get(0) == Some(&0x23) { + if buf.first() == Some(&0x23) { buf.advance(1); sql_state = Some(buf.get_str(5)?); } diff --git a/sqlx-core/src/mysql/protocol/row.rs b/sqlx-core/src/mysql/protocol/row.rs index f027dada5f..6e353693f6 100644 --- a/sqlx-core/src/mysql/protocol/row.rs +++ b/sqlx-core/src/mysql/protocol/row.rs @@ -12,6 +12,6 @@ impl Row { pub(crate) fn get(&self, index: usize) -> Option<&[u8]> { self.values[index] .as_ref() - .map(|col| &self.storage[(col.start as usize)..(col.end as usize)]) + .map(|col| &self.storage[col.start..col.end]) } } diff --git a/sqlx-core/src/mysql/row.rs b/sqlx-core/src/mysql/row.rs index f910ded68d..7dc4907318 100644 --- a/sqlx-core/src/mysql/row.rs +++ b/sqlx-core/src/mysql/row.rs @@ -45,8 +45,8 @@ impl ColumnIndex for &'_ str { fn index(&self, row: &MySqlRow) -> Result { row.column_names .get(*self) + .copied() .ok_or_else(|| Error::ColumnNotFound((*self).into())) - .map(|v| *v) } } diff --git a/sqlx-core/src/mysql/statement.rs b/sqlx-core/src/mysql/statement.rs index b6de92fa49..2a8539348b 100644 --- a/sqlx-core/src/mysql/statement.rs +++ b/sqlx-core/src/mysql/statement.rs @@ -53,8 +53,8 @@ impl ColumnIndex> for &'_ str { .metadata .column_names .get(*self) + .copied() .ok_or_else(|| Error::ColumnNotFound((*self).into())) - .map(|v| *v) } } diff --git a/sqlx-core/src/mysql/testing/mod.rs b/sqlx-core/src/mysql/testing/mod.rs index 5f476e74a2..19fc5248a4 100644 --- a/sqlx-core/src/mysql/testing/mod.rs +++ b/sqlx-core/src/mysql/testing/mod.rs @@ -45,7 +45,7 @@ impl TestSupport for MySql { .await?; query("delete from _sqlx_test_databases where db_id = ?") - .bind(&db_id) + .bind(db_id) .execute(&mut conn) .await?; @@ -127,7 +127,7 @@ async fn test_context(args: &TestArgs) -> Result, Error> { } query("insert into _sqlx_test_databases(test_path) values (?)") - .bind(&args.test_path) + .bind(args.test_path) .execute(&mut conn) .await?; @@ -199,7 
+199,8 @@ async fn do_cleanup(conn: &mut MySqlConnection) -> Result { separated.push_bind(db_id); } - drop(separated); + // Finalize the separated query builder + let _ = separated; query.push(")").build().execute(&mut *conn).await?; diff --git a/sqlx-core/src/mysql/transaction.rs b/sqlx-core/src/mysql/transaction.rs index 97cb121d0e..8334eb246f 100644 --- a/sqlx-core/src/mysql/transaction.rs +++ b/sqlx-core/src/mysql/transaction.rs @@ -60,7 +60,7 @@ impl TransactionManager for MySqlTransactionManager { conn.stream.waiting.push_back(Waiting::Result); conn.stream.sequence_id = 0; conn.stream - .write_packet(Query(&*rollback_ansi_transaction_sql(depth))); + .write_packet(Query(&rollback_ansi_transaction_sql(depth))); conn.transaction_depth = depth - 1; } diff --git a/sqlx-core/src/mysql/types/chrono.rs b/sqlx-core/src/mysql/types/chrono.rs index fe1a3756bd..2f5a724a58 100644 --- a/sqlx-core/src/mysql/types/chrono.rs +++ b/sqlx-core/src/mysql/types/chrono.rs @@ -25,7 +25,7 @@ impl Type for DateTime { /// Note: assumes the connection's `time_zone` is set to `+00:00` (UTC). impl Encode<'_, MySql> for DateTime { fn encode_by_ref(&self, buf: &mut Vec) -> IsNull { - Encode::::encode(&self.naive_utc(), buf) + Encode::::encode(self.naive_utc(), buf) } } @@ -40,7 +40,7 @@ impl<'r> Decode<'r, MySql> for DateTime { /// Note: assumes the connection's `time_zone` is set to `+00:00` (UTC). impl Encode<'_, MySql> for DateTime { fn encode_by_ref(&self, buf: &mut Vec) -> IsNull { - Encode::::encode(&self.naive_utc(), buf) + Encode::::encode(self.naive_utc(), buf) } } @@ -242,7 +242,7 @@ fn encode_date(date: &NaiveDate, buf: &mut Vec) { } fn decode_date(mut buf: &[u8]) -> Option { - if buf.len() == 0 { + if buf.is_empty() { // MySQL specifies that if there are no bytes, this is all zeros None } else { @@ -257,7 +257,7 @@ fn encode_time(time: &NaiveTime, include_micros: bool, buf: &mut Vec) { buf.push(time.second() as u8); if include_micros { - buf.extend(&((time.nanosecond() / 1000) as u32).to_le_bytes()); + buf.extend(&(time.nanosecond() / 1000).to_le_bytes()); } } @@ -274,5 +274,5 @@ fn decode_time(len: u8, mut buf: &[u8]) -> NaiveTime { }; NaiveTime::from_hms_micro_opt(hour as u32, minute as u32, seconds as u32, micros as u32) - .unwrap_or_else(NaiveTime::default) + .unwrap_or_default() } diff --git a/sqlx-core/src/mysql/types/float.rs b/sqlx-core/src/mysql/types/float.rs index b6d723142d..99b49349c3 100644 --- a/sqlx-core/src/mysql/types/float.rs +++ b/sqlx-core/src/mysql/types/float.rs @@ -51,6 +51,7 @@ impl Encode<'_, MySql> for f64 { impl Decode<'_, MySql> for f32 { fn decode(value: MySqlValueRef<'_>) -> Result { let as_f64 = >::decode(value)?; + #[allow(clippy::cast_possible_truncation)] Ok(as_f64 as f32) } } @@ -72,8 +73,7 @@ impl Decode<'_, MySql> for f64 { } _ => { let str_val = value.as_str()?; - let parsed = str_val.parse()?; - parsed + str_val.parse()? 
} }) } diff --git a/sqlx-core/src/mysql/types/json.rs b/sqlx-core/src/mysql/types/json.rs index 86dce1ad8c..cfa32aad21 100644 --- a/sqlx-core/src/mysql/types/json.rs +++ b/sqlx-core/src/mysql/types/json.rs @@ -41,7 +41,7 @@ where fn decode(value: MySqlValueRef<'r>) -> Result { let string_value = <&str as Decode>::decode(value)?; - serde_json::from_str(&string_value) + serde_json::from_str(string_value) .map(Json) .map_err(Into::into) } diff --git a/sqlx-core/src/mysql/types/time.rs b/sqlx-core/src/mysql/types/time.rs index 638e68fcac..6f93abc955 100644 --- a/sqlx-core/src/mysql/types/time.rs +++ b/sqlx-core/src/mysql/types/time.rs @@ -26,7 +26,7 @@ impl Encode<'_, MySql> for OffsetDateTime { let utc_dt = self.to_offset(UtcOffset::UTC); let primitive_dt = PrimitiveDateTime::new(utc_dt.date(), utc_dt.time()); - Encode::::encode(&primitive_dt, buf) + Encode::::encode(primitive_dt, buf) } } @@ -238,8 +238,8 @@ fn decode_date(buf: &[u8]) -> Result, BoxDynError> { Date::from_calendar_date( LittleEndian::read_u16(buf) as i32, - time::Month::try_from(buf[2] as u8)?, - buf[3] as u8, + time::Month::try_from(buf[2])?, + buf[3], ) .map_err(Into::into) .map(Some) @@ -251,7 +251,7 @@ fn encode_time(time: &Time, include_micros: bool, buf: &mut Vec) { buf.push(time.second()); if include_micros { - buf.extend(&((time.nanosecond() / 1000) as u32).to_le_bytes()); + buf.extend(&(time.nanosecond() / 1000).to_le_bytes()); } } diff --git a/sqlx-core/src/mysql/value.rs b/sqlx-core/src/mysql/value.rs index f926f6d767..2d4e614e1f 100644 --- a/sqlx-core/src/mysql/value.rs +++ b/sqlx-core/src/mysql/value.rs @@ -93,7 +93,7 @@ impl<'r> ValueRef<'r> for MySqlValueRef<'r> { #[inline] fn is_null(&self) -> bool { - is_null(self.value.as_deref(), &self.type_info) + is_null(self.value, &self.type_info) } } @@ -125,7 +125,7 @@ fn is_null(value: Option<&[u8]>, ty: &MySqlTypeInfo) -> bool { if matches!( ty.r#type, ColumnType::Date | ColumnType::Timestamp | ColumnType::Datetime - ) && (value.get(0) == Some(&0) + ) && (value.first() == Some(&0) || value == b"0000-00-00" || value == b"0000-00-00 00:00:00") { diff --git a/sqlx-core/src/net/tls/mod.rs b/sqlx-core/src/net/tls/mod.rs index cea69374f2..869b48bee2 100644 --- a/sqlx-core/src/net/tls/mod.rs +++ b/sqlx-core/src/net/tls/mod.rs @@ -98,7 +98,7 @@ where S: AsyncRead + AsyncWrite + Unpin, { Raw(S), - Tls(TlsStream), + Tls(Box>), Upgrading, } @@ -135,36 +135,34 @@ where .map_err(|err| Error::Tls(err.into()))? 
.to_owned(); - *self = MaybeTlsStream::Tls(connector.connect(host, stream).await?); + *self = MaybeTlsStream::Tls(Box::new(connector.connect(host, stream).await?)); Ok(()) } pub fn downgrade(&mut self) -> Result<(), Error> { match replace(self, MaybeTlsStream::Upgrading) { - MaybeTlsStream::Tls(stream) => { + MaybeTlsStream::Tls(boxed_stream) => { #[cfg(feature = "_tls-rustls")] { - let raw = stream.into_inner().0; + let raw = boxed_stream.into_inner().0; *self = MaybeTlsStream::Raw(raw); - return Ok(()); + Ok(()) } #[cfg(feature = "_tls-native-tls")] { - let _ = stream; // Use the variable to avoid warning - return Err(Error::tls("No way to downgrade a native-tls stream, use rustls instead, or never disable tls")); + let _ = boxed_stream; // Use the variable to avoid warning + Err(Error::tls("No way to downgrade a native-tls stream, use rustls instead, or never disable tls")) } } MaybeTlsStream::Raw(stream) => { *self = MaybeTlsStream::Raw(stream); - return Ok(()); + Ok(()) } - MaybeTlsStream::Upgrading => { - return Err(Error::Io(io::ErrorKind::ConnectionAborted.into())); - } + MaybeTlsStream::Upgrading => Err(Error::Io(io::ErrorKind::ConnectionAborted.into())), } } } @@ -218,7 +216,7 @@ where ) -> Poll> { match &mut *self { MaybeTlsStream::Raw(s) => Pin::new(s).poll_read(cx, buf), - MaybeTlsStream::Tls(s) => Pin::new(s).poll_read(cx, buf), + MaybeTlsStream::Tls(s) => Pin::new(&mut **s).poll_read(cx, buf), MaybeTlsStream::Upgrading => Poll::Ready(Err(io::ErrorKind::ConnectionAborted.into())), } @@ -236,7 +234,7 @@ where ) -> Poll> { match &mut *self { MaybeTlsStream::Raw(s) => Pin::new(s).poll_write(cx, buf), - MaybeTlsStream::Tls(s) => Pin::new(s).poll_write(cx, buf), + MaybeTlsStream::Tls(s) => Pin::new(&mut **s).poll_write(cx, buf), MaybeTlsStream::Upgrading => Poll::Ready(Err(io::ErrorKind::ConnectionAborted.into())), } @@ -245,7 +243,7 @@ where fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { match &mut *self { MaybeTlsStream::Raw(s) => Pin::new(s).poll_flush(cx), - MaybeTlsStream::Tls(s) => Pin::new(s).poll_flush(cx), + MaybeTlsStream::Tls(s) => Pin::new(&mut **s).poll_flush(cx), MaybeTlsStream::Upgrading => Poll::Ready(Err(io::ErrorKind::ConnectionAborted.into())), } @@ -255,7 +253,7 @@ where fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { match &mut *self { MaybeTlsStream::Raw(s) => Pin::new(s).poll_shutdown(cx), - MaybeTlsStream::Tls(s) => Pin::new(s).poll_shutdown(cx), + MaybeTlsStream::Tls(s) => Pin::new(&mut **s).poll_shutdown(cx), MaybeTlsStream::Upgrading => Poll::Ready(Err(io::ErrorKind::ConnectionAborted.into())), } @@ -265,7 +263,7 @@ where fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { match &mut *self { MaybeTlsStream::Raw(s) => Pin::new(s).poll_close(cx), - MaybeTlsStream::Tls(s) => Pin::new(s).poll_close(cx), + MaybeTlsStream::Tls(s) => Pin::new(&mut **s).poll_close(cx), MaybeTlsStream::Upgrading => Poll::Ready(Err(io::ErrorKind::ConnectionAborted.into())), } diff --git a/sqlx-core/src/net/tls/rustls.rs b/sqlx-core/src/net/tls/rustls.rs index b97a06bc2d..cda639ed27 100644 --- a/sqlx-core/src/net/tls/rustls.rs +++ b/sqlx-core/src/net/tls/rustls.rs @@ -72,14 +72,14 @@ pub async fn configure_tls_connector( .with_custom_certificate_verifier(Arc::new(DummyTlsVerifier)) } else { let mut cert_store = RootCertStore { - roots: webpki_roots::TLS_SERVER_ROOTS.iter().cloned().collect(), + roots: webpki_roots::TLS_SERVER_ROOTS.to_vec(), }; if let Some(ca) = tls_config.root_cert_path { let 
path_description = ca.to_string(); let data = ca.data().await.map_err(|e| RustlsError::ParsePemCert { file_description: path_description.clone(), - source: std::io::Error::new(std::io::ErrorKind::Other, e), + source: std::io::Error::other(e), })?; let mut cursor = Cursor::new(data); @@ -116,13 +116,13 @@ pub async fn configure_tls_connector( let cert_chain = certs_from_pem(cert_path.data().await.map_err(|e| { RustlsError::ParseClientCert { file_description: cert_file_desc.clone(), - source: std::io::Error::new(std::io::ErrorKind::Other, e), + source: std::io::Error::other(e), } })?)?; let key_der = private_key_from_pem(key_path.data().await.map_err(|e| { RustlsError::ParseClientKey { file_description: key_file_desc.clone(), - source: std::io::Error::new(std::io::ErrorKind::Other, e), + source: std::io::Error::other(e), } })?)?; config diff --git a/sqlx-core/src/pool/executor.rs b/sqlx-core/src/pool/executor.rs index 97c8905bab..c2e212550b 100644 --- a/sqlx-core/src/pool/executor.rs +++ b/sqlx-core/src/pool/executor.rs @@ -15,12 +15,12 @@ where { type Database = DB; - fn fetch_many<'e, 'q: 'e, E: 'q>( + fn fetch_many<'e, 'q: 'e, E>( self, query: E, ) -> BoxStream<'e, Result, Error>> where - E: Execute<'q, Self::Database>, + E: Execute<'q, Self::Database> + 'q, { let pool = self.clone(); @@ -36,12 +36,12 @@ where }) } - fn fetch_optional<'e, 'q: 'e, E: 'q>( + fn fetch_optional<'e, 'q: 'e, E>( self, query: E, ) -> BoxFuture<'e, Result, Error>> where - E: Execute<'q, Self::Database>, + E: Execute<'q, Self::Database> + 'q, { let pool = self.clone(); @@ -77,7 +77,7 @@ macro_rules! impl_executor_for_pool_connection { type Database = $DB; #[inline] - fn fetch_many<'e, 'q: 'e, E: 'q>( + fn fetch_many<'e, 'q: 'e, E>( self, query: E, ) -> futures_core::stream::BoxStream< @@ -89,19 +89,19 @@ macro_rules! impl_executor_for_pool_connection { > where 'c: 'e, - E: crate::executor::Execute<'q, $DB>, + E: crate::executor::Execute<'q, $DB> + 'q, { (**self).fetch_many(query) } #[inline] - fn fetch_optional<'e, 'q: 'e, E: 'q>( + fn fetch_optional<'e, 'q: 'e, E>( self, query: E, ) -> futures_core::future::BoxFuture<'e, Result, crate::error::Error>> where 'c: 'e, - E: crate::executor::Execute<'q, $DB>, + E: crate::executor::Execute<'q, $DB> + 'q, { (**self).fetch_optional(query) } diff --git a/sqlx-core/src/pool/inner.rs b/sqlx-core/src/pool/inner.rs index 398ece1f2a..5c5ab1c171 100644 --- a/sqlx-core/src/pool/inner.rs +++ b/sqlx-core/src/pool/inner.rs @@ -142,7 +142,9 @@ impl PoolInner { if parent_close_event.as_mut().poll(cx).is_ready() { // Propagate the parent's close event to the child. - let _ = self.close(); + // Note: We can't await the close future here, but it's fine + // because close() is designed to be fire-and-forget + drop(self.close()); return Poll::Ready(Err(Error::PoolClosed)); } @@ -197,7 +199,7 @@ impl PoolInner { let Floating { inner: idle, guard } = floating.into_idle(); - if !self.idle_conns.push(idle).is_ok() { + if self.idle_conns.push(idle).is_err() { panic!("BUG: connection queue overflow in release()"); } @@ -287,10 +289,10 @@ impl PoolInner { } let mut backoff = Duration::from_millis(10); - let max_backoff = deadline_as_timeout::(deadline)? / 5; + let max_backoff = deadline_as_timeout(deadline)? 
/ 5; loop { - let timeout = deadline_as_timeout::(deadline)?; + let timeout = deadline_as_timeout(deadline)?; // result here is `Result, TimeoutError>` // if this block does not return, sleep for the backoff timeout and try again @@ -403,14 +405,14 @@ impl Drop for PoolInner { fn is_beyond_max_lifetime(live: &Live, options: &PoolOptions) -> bool { options .max_lifetime - .map_or(false, |max| live.created_at.elapsed() > max) + .is_some_and(|max| live.created_at.elapsed() > max) } /// Returns `true` if the connection has exceeded `options.idle_timeout` if set, `false` otherwise. fn is_beyond_idle_timeout(idle: &Idle, options: &PoolOptions) -> bool { options .idle_timeout - .map_or(false, |timeout| idle.idle_since.elapsed() > timeout) + .is_some_and(|timeout| idle.idle_since.elapsed() > timeout) } async fn check_idle_conn( @@ -458,7 +460,7 @@ async fn check_idle_conn( } fn spawn_maintenance_tasks(pool: &Arc>) { - let pool = Arc::clone(&pool); + let pool = Arc::clone(pool); let period = match (pool.options.max_lifetime, pool.options.idle_timeout) { (Some(it), None) | (None, Some(it)) => it, diff --git a/sqlx-core/src/pool/mod.rs b/sqlx-core/src/pool/mod.rs index 576f2ea3b5..b8b3922d8c 100644 --- a/sqlx-core/src/pool/mod.rs +++ b/sqlx-core/src/pool/mod.rs @@ -353,7 +353,7 @@ impl Pool { /// Retrieves a connection and immediately begins a new transaction. pub async fn begin(&self) -> Result, Error> { - Ok(Transaction::begin(MaybePoolConnection::PoolConnection(self.acquire().await?)).await?) + Transaction::begin(MaybePoolConnection::PoolConnection(self.acquire().await?)).await } /// Attempts to retrieve a connection and immediately begins a new transaction if successful. @@ -598,7 +598,7 @@ impl FusedFuture for CloseEvent { /// get the time between the deadline and now and use that as our timeout /// /// returns `Error::PoolTimedOut` if the deadline is in the past -fn deadline_as_timeout(deadline: Instant) -> Result { +fn deadline_as_timeout(deadline: Instant) -> Result { deadline .checked_duration_since(Instant::now()) .ok_or(Error::PoolTimedOut) diff --git a/sqlx-core/src/postgres/column.rs b/sqlx-core/src/postgres/column.rs index 559fd6947a..bab4da5159 100644 --- a/sqlx-core/src/postgres/column.rs +++ b/sqlx-core/src/postgres/column.rs @@ -24,7 +24,7 @@ impl Column for PgColumn { } fn name(&self) -> &str { - &*self.name + &self.name } fn type_info(&self) -> &PgTypeInfo { diff --git a/sqlx-core/src/postgres/connection/describe.rs b/sqlx-core/src/postgres/connection/describe.rs index dda7ada4a8..e7b4b371be 100644 --- a/sqlx-core/src/postgres/connection/describe.rs +++ b/sqlx-core/src/postgres/connection/describe.rs @@ -192,7 +192,9 @@ impl PgConnection { .fetch_one(&mut *self) .await?; + #[allow(clippy::cast_sign_loss)] let typ_type = TypType::try_from(typ_type as u8); + #[allow(clippy::cast_sign_loss)] let category = TypCategory::try_from(category as u8); match (typ_type, category) { @@ -385,6 +387,7 @@ SELECT oid FROM pg_catalog.pg_type WHERE typname ILIKE $1 bind + 2 ); + #[allow(clippy::cast_possible_truncation, clippy::cast_possible_wrap)] args.add(i as i32); args.add(column.relation_id); args.add(column.relation_attribute_no); diff --git a/sqlx-core/src/postgres/connection/executor.rs b/sqlx-core/src/postgres/connection/executor.rs index 8785648dea..92db108d20 100644 --- a/sqlx-core/src/postgres/connection/executor.rs +++ b/sqlx-core/src/postgres/connection/executor.rs @@ -48,7 +48,7 @@ async fn prepare( // next we send the PARSE command to the server conn.stream.write(Parse { - 
param_types: &*param_types, + param_types: ¶m_types, query: sql, statement: id, }); @@ -160,7 +160,7 @@ impl PgConnection { self.pending_ready_for_query_count += 1; } - async fn get_or_prepare<'a>( + async fn get_or_prepare( &mut self, sql: &str, parameters: &[PgTypeInfo], @@ -226,8 +226,9 @@ impl PgConnection { portal: None, statement, formats: &[PgValueFormat::Binary], + #[allow(clippy::cast_possible_truncation, clippy::cast_possible_wrap)] num_params: arguments.types.len() as i16, - params: &*arguments.buffer, + params: &arguments.buffer, result_formats: &[PgValueFormat::Binary], }); @@ -360,13 +361,13 @@ impl PgConnection { impl<'c> Executor<'c> for &'c mut PgConnection { type Database = Postgres; - fn fetch_many<'e, 'q: 'e, E: 'q>( + fn fetch_many<'e, 'q: 'e, E>( self, mut query: E, ) -> BoxStream<'e, Result, Error>> where 'c: 'e, - E: Execute<'q, Self::Database>, + E: Execute<'q, Self::Database> + 'q, { let sql = query.sql(); let metadata = query.statement().map(|s| Arc::clone(&s.metadata)); @@ -385,13 +386,13 @@ impl<'c> Executor<'c> for &'c mut PgConnection { }) } - fn fetch_optional<'e, 'q: 'e, E: 'q>( + fn fetch_optional<'e, 'q: 'e, E>( self, mut query: E, ) -> BoxFuture<'e, Result, Error>> where 'c: 'e, - E: Execute<'q, Self::Database>, + E: Execute<'q, Self::Database> + 'q, { let sql = query.sql(); let metadata = query.statement().map(|s| Arc::clone(&s.metadata)); diff --git a/sqlx-core/src/postgres/connection/sasl.rs b/sqlx-core/src/postgres/connection/sasl.rs index 5e1939c3dc..1852ddbe3a 100644 --- a/sqlx-core/src/postgres/connection/sasl.rs +++ b/sqlx-core/src/postgres/connection/sasl.rs @@ -107,7 +107,7 @@ pub(crate) async fn authenticate( let client_key = mac.finalize().into_bytes(); // StoredKey := H(ClientKey) - let stored_key = Sha256::digest(&client_key); + let stored_key = Sha256::digest(client_key); // client-final-message-without-proof let client_final_message_wo_proof = format!( @@ -126,7 +126,7 @@ pub(crate) async fn authenticate( // ClientSignature := HMAC(StoredKey, AuthMessage) let mut mac = Hmac::::new_from_slice(&stored_key).map_err(Error::protocol)?; - mac.update(&auth_message.as_bytes()); + mac.update(auth_message.as_bytes()); let client_signature = mac.finalize().into_bytes(); @@ -145,7 +145,7 @@ pub(crate) async fn authenticate( // ServerSignature := HMAC(ServerKey, AuthMessage) let mut mac = Hmac::::new_from_slice(&server_key).map_err(Error::protocol)?; - mac.update(&auth_message.as_bytes()); + mac.update(auth_message.as_bytes()); // client-final-message = client-final-message-without-proof "," proof let client_final_message = format!( @@ -183,10 +183,10 @@ fn gen_nonce() -> String { // ;; a valid "value". 
let nonce: String = std::iter::repeat(()) .map(|()| { - let mut c = rng.gen_range(0x21..0x7F) as u8; + let mut c = rng.gen_range(0x21u8..0x7Fu8); while c == 0x2C { - c = rng.gen_range(0x21..0x7F) as u8; + c = rng.gen_range(0x21u8..0x7Fu8); } c @@ -203,7 +203,7 @@ fn gen_nonce() -> String { fn hi<'a>(s: &'a str, salt: &'a [u8], iter_count: u32) -> Result<[u8; 32], Error> { let mut mac = Hmac::::new_from_slice(s.as_bytes()).map_err(Error::protocol)?; - mac.update(&salt); + mac.update(salt); mac.update(&1u32.to_be_bytes()); let mut u = mac.finalize().into_bytes(); diff --git a/sqlx-core/src/postgres/connection/stream.rs b/sqlx-core/src/postgres/connection/stream.rs index 59b5289b8e..6cbeeda6eb 100644 --- a/sqlx-core/src/postgres/connection/stream.rs +++ b/sqlx-core/src/postgres/connection/stream.rs @@ -203,7 +203,7 @@ fn parse_server_version(s: &str) -> Option { break; } } - _ if ch.is_digit(10) => { + _ if ch.is_ascii_digit() => { if chs.peek().is_none() { if let Ok(num) = u32::from_str(&s[from..]) { parts.push(num); diff --git a/sqlx-core/src/postgres/copy.rs b/sqlx-core/src/postgres/copy.rs index 2719f7ae54..1438352b6b 100644 --- a/sqlx-core/src/postgres/copy.rs +++ b/sqlx-core/src/postgres/copy.rs @@ -42,7 +42,7 @@ impl PgConnection { /// /// 1. by closing the connection, or: /// 2. by using another connection to kill the server process that is sending the data as shown - /// [in this StackOverflow answer](https://stackoverflow.com/a/35319598). + /// [in this StackOverflow answer](https://stackoverflow.com/a/35319598). /// /// If you don't read the stream to completion, the next time the connection is used it will /// need to read and discard all the remaining queued data, which could take some time. @@ -90,7 +90,7 @@ impl Pool { /// /// 1. by closing the connection, or: /// 2. by using another connection to kill the server process that is sending the data as shown - /// [in this StackOverflow answer](https://stackoverflow.com/a/35319598). + /// [in this StackOverflow answer](https://stackoverflow.com/a/35319598). /// /// If you don't read the stream to completion, the next time the connection is used it will /// need to read and discard all the remaining queued data, which could take some time. @@ -148,8 +148,10 @@ impl> PgCopyIn { /// Returns the number of columns expected in the input. 
pub fn num_columns(&self) -> usize { + #[allow(clippy::cast_sign_loss)] + let num_columns = self.response.num_columns as usize; assert_eq!( - self.response.num_columns as usize, + num_columns, self.response.format_codes.len(), "num_columns does not match format_codes.len()" ); @@ -204,9 +206,9 @@ impl> PgCopyIn { let stream = &mut buf_stream.stream; // ensures the buffer isn't left in an inconsistent state - let mut guard = BufGuard(&mut buf_stream.wbuf); + let guard = BufGuard(&mut buf_stream.wbuf); - let buf: &mut Vec = &mut guard.0; + let buf: &mut Vec = guard.0; buf.push(b'd'); // CopyData format code buf.resize(5, 0); // reserve space for the length diff --git a/sqlx-core/src/postgres/io/buf_mut.rs b/sqlx-core/src/postgres/io/buf_mut.rs index d0b710293d..356a18417e 100644 --- a/sqlx-core/src/postgres/io/buf_mut.rs +++ b/sqlx-core/src/postgres/io/buf_mut.rs @@ -25,6 +25,7 @@ impl PgBufMutExt for Vec { f(self); // now calculate the size of what we wrote and set the length value + #[allow(clippy::cast_possible_truncation, clippy::cast_possible_wrap)] let size = (self.len() - offset) as i32; self[offset..(offset + 4)].copy_from_slice(&size.to_be_bytes()); } diff --git a/sqlx-core/src/postgres/listener.rs b/sqlx-core/src/postgres/listener.rs index 079de6e992..6da884ad2a 100644 --- a/sqlx-core/src/postgres/listener.rs +++ b/sqlx-core/src/postgres/listener.rs @@ -336,13 +336,13 @@ impl Drop for PgListener { impl<'c> Executor<'c> for &'c mut PgListener { type Database = Postgres; - fn fetch_many<'e, 'q: 'e, E: 'q>( + fn fetch_many<'e, 'q: 'e, E>( self, query: E, ) -> BoxStream<'e, Result, Error>> where 'c: 'e, - E: Execute<'q, Self::Database>, + E: Execute<'q, Self::Database> + 'q, { futures_util::stream::once(async move { // need some basic type annotation to help the compiler a bit @@ -353,13 +353,10 @@ impl<'c> Executor<'c> for &'c mut PgListener { .boxed() } - fn fetch_optional<'e, 'q: 'e, E: 'q>( - self, - query: E, - ) -> BoxFuture<'e, Result, Error>> + fn fetch_optional<'e, 'q: 'e, E>(self, query: E) -> BoxFuture<'e, Result, Error>> where 'c: 'e, - E: Execute<'q, Self::Database>, + E: Execute<'q, Self::Database> + 'q, { async move { self.connection().await?.fetch_optional(query).await }.boxed() } diff --git a/sqlx-core/src/postgres/message/authentication.rs b/sqlx-core/src/postgres/message/authentication.rs index 4fb1119b79..5793c6f40e 100644 --- a/sqlx-core/src/postgres/message/authentication.rs +++ b/sqlx-core/src/postgres/message/authentication.rs @@ -162,8 +162,8 @@ impl Decode<'_> for AuthenticationSaslContinue { Ok(Self { iterations, salt, - nonce: from_utf8(&*nonce).map_err(Error::protocol)?.to_owned(), - message: from_utf8(&*buf).map_err(Error::protocol)?.to_owned(), + nonce: from_utf8(&nonce).map_err(Error::protocol)?.to_owned(), + message: from_utf8(&buf).map_err(Error::protocol)?.to_owned(), }) } } diff --git a/sqlx-core/src/postgres/message/bind.rs b/sqlx-core/src/postgres/message/bind.rs index ef250350bf..2d2fcd7734 100644 --- a/sqlx-core/src/postgres/message/bind.rs +++ b/sqlx-core/src/postgres/message/bind.rs @@ -42,6 +42,7 @@ impl Encode<'_> for Bind<'_> { buf.put_statement_name(self.statement); + #[allow(clippy::cast_possible_truncation, clippy::cast_possible_wrap)] buf.extend(&(self.formats.len() as i16).to_be_bytes()); for &format in self.formats { diff --git a/sqlx-core/src/postgres/message/data_row.rs b/sqlx-core/src/postgres/message/data_row.rs index 0f86388995..2cb1e6adf2 100644 --- a/sqlx-core/src/postgres/message/data_row.rs +++ 
b/sqlx-core/src/postgres/message/data_row.rs @@ -42,7 +42,7 @@ impl Decode<'_> for DataRow { if let Ok(length) = u32::try_from(length) { values.push(Some(offset..(offset + length))); - offset += length as u32; + offset += length; } else { values.push(None); } diff --git a/sqlx-core/src/postgres/message/parse.rs b/sqlx-core/src/postgres/message/parse.rs index 82fadd7e18..ba85901a58 100644 --- a/sqlx-core/src/postgres/message/parse.rs +++ b/sqlx-core/src/postgres/message/parse.rs @@ -1,5 +1,3 @@ -use std::i16; - use crate::io::{BufMutExt, Encode}; use crate::postgres::io::PgBufMutExt; use crate::postgres::types::Oid; diff --git a/sqlx-core/src/postgres/options/parse.rs b/sqlx-core/src/postgres/options/parse.rs index 30a5cf75ec..904b5bc809 100644 --- a/sqlx-core/src/postgres/options/parse.rs +++ b/sqlx-core/src/postgres/options/parse.rs @@ -28,7 +28,7 @@ impl FromStr for PgConnectOptions { let username = url.username(); if !username.is_empty() { options = options.username( - &*percent_decode_str(username) + &percent_decode_str(username) .decode_utf8() .map_err(Error::config)?, ); @@ -36,7 +36,7 @@ impl FromStr for PgConnectOptions { if let Some(password) = url.password() { options = options.password( - &*percent_decode_str(password) + &percent_decode_str(password) .decode_utf8() .map_err(Error::config)?, ); @@ -48,18 +48,18 @@ impl FromStr for PgConnectOptions { } for (key, value) in url.query_pairs().into_iter() { - match &*key { + match &key as &str { "sslmode" | "ssl-mode" => { options = options.ssl_mode(value.parse().map_err(Error::config)?); } "sslrootcert" | "ssl-root-cert" | "ssl-ca" => { - options = options.ssl_root_cert(&*value); + options = options.ssl_root_cert(value.as_ref()); } - "sslcert" | "ssl-cert" => options = options.ssl_client_cert(&*value), + "sslcert" | "ssl-cert" => options = options.ssl_client_cert(value.as_ref()), - "sslkey" | "ssl-key" => options = options.ssl_client_key(&*value), + "sslkey" | "ssl-key" => options = options.ssl_client_key(value.as_ref()), "statement-cache-capacity" => { options = @@ -68,31 +68,31 @@ impl FromStr for PgConnectOptions { "host" => { if value.starts_with("/") { - options = options.socket(&*value); + options = options.socket(value.as_ref()); } else { - options = options.host(&*value); + options = options.host(value.as_ref()); } } "hostaddr" => { value.parse::().map_err(Error::config)?; - options = options.host(&*value) + options = options.host(value.as_ref()) } "port" => options = options.port(value.parse().map_err(Error::config)?), - "dbname" => options = options.database(&*value), + "dbname" => options = options.database(value.as_ref()), - "user" => options = options.username(&*value), + "user" => options = options.username(value.as_ref()), - "password" => options = options.password(&*value), + "password" => options = options.password(value.as_ref()), - "application_name" => options = options.application_name(&*value), + "application_name" => options = options.application_name(value.as_ref()), "options" => { if let Some(options) = options.options.as_mut() { options.push(' '); - options.push_str(&*value); + options.push_str(value.as_ref()); } else { options.options = Some(value.to_string()); } @@ -100,7 +100,7 @@ impl FromStr for PgConnectOptions { k if k.starts_with("options[") => { if let Some(key) = k.strip_prefix("options[").unwrap().strip_suffix(']') { - options = options.options([(key, &*value)]); + options = options.options([(key, value.as_ref())]); } } diff --git a/sqlx-core/src/postgres/options/ssl_mode.rs 
b/sqlx-core/src/postgres/options/ssl_mode.rs index 60125e4670..92890d92c2 100644 --- a/sqlx-core/src/postgres/options/ssl_mode.rs +++ b/sqlx-core/src/postgres/options/ssl_mode.rs @@ -4,7 +4,7 @@ use std::str::FromStr; /// Options for controlling the level of protection provided for PostgreSQL SSL connections. /// /// It is used by the [`ssl_mode`](super::PgConnectOptions::ssl_mode) method. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, Default)] pub enum PgSslMode { /// Only try a non-SSL connection. Disable, @@ -13,6 +13,7 @@ pub enum PgSslMode { Allow, /// First try an SSL connection; if that fails, try a non-SSL connection. + #[default] Prefer, /// Only try an SSL connection. If a root CA file is present, verify the connection @@ -28,12 +29,6 @@ pub enum PgSslMode { VerifyFull, } -impl Default for PgSslMode { - fn default() -> Self { - PgSslMode::Prefer - } -} - impl FromStr for PgSslMode { type Err = Error; diff --git a/sqlx-core/src/postgres/row.rs b/sqlx-core/src/postgres/row.rs index 1b7bdf7e38..3fd9cdc00a 100644 --- a/sqlx-core/src/postgres/row.rs +++ b/sqlx-core/src/postgres/row.rs @@ -46,7 +46,7 @@ impl ColumnIndex for &'_ str { .column_names .get(*self) .ok_or_else(|| Error::ColumnNotFound((*self).into())) - .map(|v| *v) + .copied() } } diff --git a/sqlx-core/src/postgres/statement.rs b/sqlx-core/src/postgres/statement.rs index 4c01b91563..f1eecef01c 100644 --- a/sqlx-core/src/postgres/statement.rs +++ b/sqlx-core/src/postgres/statement.rs @@ -54,7 +54,7 @@ impl ColumnIndex> for &'_ str { .column_names .get(*self) .ok_or_else(|| Error::ColumnNotFound((*self).into())) - .map(|v| *v) + .copied() } } diff --git a/sqlx-core/src/postgres/type_info.rs b/sqlx-core/src/postgres/type_info.rs index c6a96b58ee..2855416aab 100644 --- a/sqlx-core/src/postgres/type_info.rs +++ b/sqlx-core/src/postgres/type_info.rs @@ -540,7 +540,7 @@ impl PgType { PgType::Money => "MONEY", PgType::MoneyArray => "MONEY[]", PgType::Void => "VOID", - PgType::Custom(ty) => &*ty.name, + PgType::Custom(ty) => &ty.name, PgType::DeclareWithOid(_) => "?", PgType::DeclareWithName(name) => name, } @@ -640,7 +640,7 @@ impl PgType { PgType::Money => "money", PgType::MoneyArray => "_money", PgType::Void => "void", - PgType::Custom(ty) => &*ty.name, + PgType::Custom(ty) => &ty.name, PgType::DeclareWithOid(_) => "?", PgType::DeclareWithName(name) => name, } diff --git a/sqlx-core/src/postgres/types/array.rs b/sqlx-core/src/postgres/types/array.rs index a863d8d92c..96dc82219d 100644 --- a/sqlx-core/src/postgres/types/array.rs +++ b/sqlx-core/src/postgres/types/array.rs @@ -94,7 +94,7 @@ where T: Encode<'q, Postgres> + Type, { fn encode_by_ref(&self, buf: &mut PgArgumentBuffer) -> IsNull { - let type_info = if self.len() < 1 { + let type_info = if self.is_empty() { T::type_info() } else { self[0].produces().unwrap_or_else(T::type_info) diff --git a/sqlx-core/src/postgres/types/bit_vec.rs b/sqlx-core/src/postgres/types/bit_vec.rs index fb6f353c71..d5a341adee 100644 --- a/sqlx-core/src/postgres/types/bit_vec.rs +++ b/sqlx-core/src/postgres/types/bit_vec.rs @@ -65,7 +65,7 @@ impl Decode<'_, Postgres> for BitVec { }; // The smallest amount of data we can read is one byte - let bytes_len = (len + 7) / 8; + let bytes_len = len.div_ceil(8); if bytes.remaining() != bytes_len { Err(io::Error::new( diff --git a/sqlx-core/src/postgres/types/float.rs b/sqlx-core/src/postgres/types/float.rs index 1d8d684ef9..5943f258e5 100644 --- a/sqlx-core/src/postgres/types/float.rs +++ b/sqlx-core/src/postgres/types/float.rs @@ -66,7 
+66,7 @@ impl Decode<'_, Postgres> for f64 { Ok(match value.format() { PgValueFormat::Binary => { if value.type_info == PgTypeInfo::NUMERIC { - return Ok(PgNumeric::decode(value.as_bytes()?)?.try_into()?); + return PgNumeric::decode(value.as_bytes()?)?.try_into(); } let buf = value.as_bytes()?; match buf.len() { diff --git a/sqlx-core/src/postgres/types/interval.rs b/sqlx-core/src/postgres/types/interval.rs index cb9eca4a44..a5633b90b0 100644 --- a/sqlx-core/src/postgres/types/interval.rs +++ b/sqlx-core/src/postgres/types/interval.rs @@ -182,7 +182,7 @@ impl TryFrom for PgInterval { /// This returns an error if there is a loss of precision using nanoseconds or if there is a /// microsecond overflow. fn try_from(value: std::time::Duration) -> Result { - if value.as_nanos() % 1000 != 0 { + if !value.as_nanos().is_multiple_of(1000) { return Err("PostgreSQL `INTERVAL` does not support nanoseconds precision".into()); } diff --git a/sqlx-core/src/postgres/types/lquery.rs b/sqlx-core/src/postgres/types/lquery.rs index 93492b95f2..5c1d7ffac7 100644 --- a/sqlx-core/src/postgres/types/lquery.rs +++ b/sqlx-core/src/postgres/types/lquery.rs @@ -75,7 +75,7 @@ impl PgLQuery { } /// creates lquery from an iterator with checking labels - pub fn from_iter(levels: I) -> Result + pub fn from_level_iter(levels: I) -> Result where S: Into, I: IntoIterator, @@ -104,7 +104,7 @@ impl FromStr for PgLQuery { Ok(Self { levels: s .split('.') - .map(|s| PgLQueryLevel::from_str(s)) + .map(PgLQueryLevel::from_str) .collect::>()?, }) } @@ -245,12 +245,12 @@ impl FromStr for PgLQueryLevel { b'!' => Ok(PgLQueryLevel::NotNonStar( s[1..] .split('|') - .map(|s| PgLQueryVariant::from_str(s)) + .map(PgLQueryVariant::from_str) .collect::, PgLQueryParseError>>()?, )), _ => Ok(PgLQueryLevel::NonStar( s.split('|') - .map(|s| PgLQueryVariant::from_str(s)) + .map(PgLQueryVariant::from_str) .collect::, PgLQueryParseError>>()?, )), } @@ -263,10 +263,10 @@ impl FromStr for PgLQueryVariant { fn from_str(s: &str) -> Result { let mut label_length = s.len(); - let mut rev_iter = s.bytes().rev(); + let rev_iter = s.bytes().rev(); let mut modifiers = PgLQueryVariantFlag::default(); - while let Some(b) = rev_iter.next() { + for b in rev_iter { match b { b'@' => modifiers.insert(PgLQueryVariantFlag::IN_CASE), b'*' => modifiers.insert(PgLQueryVariantFlag::ANY_END), @@ -307,8 +307,8 @@ impl Display for PgLQueryLevel { PgLQueryLevel::Star(Some(at_least), _) => write!(f, "*{{{},}}", at_least), PgLQueryLevel::Star(_, Some(at_most)) => write!(f, "*{{,{}}}", at_most), PgLQueryLevel::Star(_, _) => write!(f, "*"), - PgLQueryLevel::NonStar(variants) => write_variants(f, &variants, false), - PgLQueryLevel::NotNonStar(variants) => write_variants(f, &variants, true), + PgLQueryLevel::NonStar(variants) => write_variants(f, variants, false), + PgLQueryLevel::NotNonStar(variants) => write_variants(f, variants, true), } } } diff --git a/sqlx-core/src/postgres/types/ltree.rs b/sqlx-core/src/postgres/types/ltree.rs index a1e7c32563..237f5e0968 100644 --- a/sqlx-core/src/postgres/types/ltree.rs +++ b/sqlx-core/src/postgres/types/ltree.rs @@ -103,6 +103,7 @@ impl PgLTree { } /// creates ltree from an iterator with checking labels + #[allow(clippy::should_implement_trait)] pub fn from_iter(labels: I) -> Result where String: From, @@ -142,7 +143,7 @@ impl FromStr for PgLTree { Ok(Self { labels: s .split('.') - .map(|s| PgLTreeLabel::new(s)) + .map(PgLTreeLabel::new) .collect::, Self::Err>>()?, }) } diff --git a/sqlx-core/src/postgres/types/range.rs 
b/sqlx-core/src/postgres/types/range.rs index 367893119b..5290fd55df 100644 --- a/sqlx-core/src/postgres/types/range.rs +++ b/sqlx-core/src/postgres/types/range.rs @@ -447,7 +447,7 @@ where } count += 1; - if !(element.is_empty() && !quoted) { + if !element.is_empty() || quoted { let value = Some(T::decode(PgValueRef { type_info: T::type_info(), format: PgValueFormat::Text, @@ -518,7 +518,7 @@ fn range_compatible>(ty: &PgTypeInfo) -> bool { // we require the declared type to be a _range_ with an // element type that is acceptable if let PgTypeKind::Range(element) = &ty.kind() { - return E::compatible(&element); + return E::compatible(element); } false diff --git a/sqlx-core/src/postgres/types/str.rs b/sqlx-core/src/postgres/types/str.rs index 7cecefa885..d6a36ecb7b 100644 --- a/sqlx-core/src/postgres/types/str.rs +++ b/sqlx-core/src/postgres/types/str.rs @@ -101,7 +101,7 @@ impl Encode<'_, Postgres> for String { impl<'r> Decode<'r, Postgres> for &'r str { fn decode(value: PgValueRef<'r>) -> Result { - Ok(value.as_str()?) + value.as_str() } } diff --git a/sqlx-core/src/postgres/types/time/mod.rs b/sqlx-core/src/postgres/types/time/mod.rs index 1d3dc128e6..7bc38a15e2 100644 --- a/sqlx-core/src/postgres/types/time/mod.rs +++ b/sqlx-core/src/postgres/types/time/mod.rs @@ -1,6 +1,6 @@ mod date; mod datetime; -mod time; +mod time_impl; #[rustfmt::skip] const PG_EPOCH: ::time::Date = ::time::macros::date!(2000-1-1); diff --git a/sqlx-core/src/postgres/types/time/time.rs b/sqlx-core/src/postgres/types/time/time_impl.rs similarity index 100% rename from sqlx-core/src/postgres/types/time/time.rs rename to sqlx-core/src/postgres/types/time/time_impl.rs diff --git a/sqlx-core/src/postgres/types/time_tz.rs b/sqlx-core/src/postgres/types/time_tz.rs index 8ab9525be6..2e87fc480d 100644 --- a/sqlx-core/src/postgres/types/time_tz.rs +++ b/sqlx-core/src/postgres/types/time_tz.rs @@ -99,23 +99,8 @@ mod chrono { tmp.push_str("2001-07-08 "); tmp.push_str(s); - let dt = 'out: loop { - let mut err = None; - - for fmt in &["%Y-%m-%d %H:%M:%S%.f%#z", "%Y-%m-%d %H:%M:%S%.f"] { - match DateTime::parse_from_str(&tmp, fmt) { - Ok(dt) => { - break 'out dt; - } - - Err(error) => { - err = Some(error); - } - } - } - - return Err(err.unwrap().into()); - }; + let dt = DateTime::parse_from_str(&tmp, "%Y-%m-%d %H:%M:%S%.f%#z") + .or_else(|_| DateTime::parse_from_str(&tmp, "%Y-%m-%d %H:%M:%S%.f"))?; let time = dt.time(); let offset = *dt.offset(); diff --git a/sqlx-core/src/query.rs b/sqlx-core/src/query.rs index 51aea9cf3b..1a19359644 100644 --- a/sqlx-core/src/query.rs +++ b/sqlx-core/src/query.rs @@ -44,14 +44,14 @@ where #[inline] fn sql(&self) -> &'q str { match self.statement { - Either::Right(ref statement) => statement.sql(), + Either::Right(statement) => statement.sql(), Either::Left(sql) => sql, } } fn statement(&self) -> Option<&>::Statement> { match self.statement { - Either::Right(ref statement) => Some(&statement), + Either::Right(statement) => Some(statement), Either::Left(_) => None, } } @@ -293,7 +293,7 @@ where let mut f = self.mapper; Map { inner: self.inner, - mapper: move |row| f(row).and_then(|o| g(o)), + mapper: move |row| f(row).and_then(&mut g), } } diff --git a/sqlx-core/src/query_builder.rs b/sqlx-core/src/query_builder.rs index 82e2c3070f..c233b6f9df 100644 --- a/sqlx-core/src/query_builder.rs +++ b/sqlx-core/src/query_builder.rs @@ -162,7 +162,6 @@ where /// assert!(sql.ends_with("in (?, ?) 
")); /// # } /// ``` - pub fn separated<'qb, Sep>(&'qb mut self, separator: Sep) -> Separated<'qb, 'args, DB, Sep> where 'args: 'qb, diff --git a/sqlx-core/src/sqlite/column.rs b/sqlx-core/src/sqlite/column.rs index f543257dd1..84d7da4239 100644 --- a/sqlx-core/src/sqlite/column.rs +++ b/sqlx-core/src/sqlite/column.rs @@ -20,7 +20,7 @@ impl Column for SqliteColumn { } fn name(&self) -> &str { - &*self.name + &self.name } fn type_info(&self) -> &SqliteTypeInfo { diff --git a/sqlx-core/src/sqlite/connection/establish.rs b/sqlx-core/src/sqlite/connection/establish.rs index 9cdf110b78..faa9816aeb 100644 --- a/sqlx-core/src/sqlite/connection/establish.rs +++ b/sqlx-core/src/sqlite/connection/establish.rs @@ -28,7 +28,7 @@ enum SqliteLoadExtensionMode { } impl SqliteLoadExtensionMode { - fn as_int(self) -> c_int { + fn as_int(&self) -> c_int { match self { SqliteLoadExtensionMode::Enable => 1, SqliteLoadExtensionMode::DisableAll => 0, diff --git a/sqlx-core/src/sqlite/connection/execute.rs b/sqlx-core/src/sqlite/connection/execute.rs index b753fa3f0a..807ef184d0 100644 --- a/sqlx-core/src/sqlite/connection/execute.rs +++ b/sqlx-core/src/sqlite/connection/execute.rs @@ -68,10 +68,10 @@ impl Iterator for ExecuteIter<'_> { fn next(&mut self) -> Option { let statement = if self.goto_next { - let mut statement = match self.statement.prepare_next(self.handle) { + let statement = match self.statement.prepare_next(self.handle) { Ok(Some(statement)) => statement, Ok(None) => return None, - Err(e) => return Some(Err(e.into())), + Err(e) => return Some(Err(e)), }; self.goto_next = false; @@ -83,7 +83,7 @@ impl Iterator for ExecuteIter<'_> { statement.handle.clear_bindings(); - match bind(&mut statement.handle, &self.args, self.args_used) { + match bind(statement.handle, &self.args, self.args_used) { Ok(args_used) => self.args_used += args_used, Err(e) => return Some(Err(e)), } @@ -98,9 +98,9 @@ impl Iterator for ExecuteIter<'_> { self.logger.increment_rows_returned(); Some(Ok(Either::Right(SqliteRow::current( - &statement.handle, - &statement.columns, - &statement.column_names, + statement.handle, + statement.columns, + statement.column_names, )))) } Ok(false) => { diff --git a/sqlx-core/src/sqlite/connection/executor.rs b/sqlx-core/src/sqlite/connection/executor.rs index 69ed29b92d..67767e4cdd 100644 --- a/sqlx-core/src/sqlite/connection/executor.rs +++ b/sqlx-core/src/sqlite/connection/executor.rs @@ -12,13 +12,13 @@ use futures_util::{TryFutureExt, TryStreamExt}; impl<'c> Executor<'c> for &'c mut SqliteConnection { type Database = Sqlite; - fn fetch_many<'e, 'q: 'e, E: 'q>( + fn fetch_many<'e, 'q: 'e, E>( self, mut query: E, ) -> BoxStream<'e, Result, Error>> where 'c: 'e, - E: Execute<'q, Self::Database>, + E: Execute<'q, Self::Database> + 'q, { let sql = query.sql(); let arguments = query.take_arguments(); @@ -32,13 +32,13 @@ impl<'c> Executor<'c> for &'c mut SqliteConnection { ) } - fn fetch_optional<'e, 'q: 'e, E: 'q>( + fn fetch_optional<'e, 'q: 'e, E>( self, mut query: E, ) -> BoxFuture<'e, Result, Error>> where 'c: 'e, - E: Execute<'q, Self::Database>, + E: Execute<'q, Self::Database> + 'q, { let sql = query.sql(); let arguments = query.take_arguments(); diff --git a/sqlx-core/src/sqlite/connection/explain.rs b/sqlx-core/src/sqlite/connection/explain.rs index ace42616f8..832926e427 100644 --- a/sqlx-core/src/sqlite/connection/explain.rs +++ b/sqlx-core/src/sqlite/connection/explain.rs @@ -194,7 +194,7 @@ impl CursorDataType { ) } - fn from_dense_record(record: &Vec) -> Self { + fn 
from_dense_record(record: &[ColumnType]) -> Self { Self::Normal((0..).zip(record.iter().copied()).collect()) } @@ -203,7 +203,7 @@ impl CursorDataType { Self::Normal(record) => { let mut rowdata = vec![ColumnType::default(); record.len()]; for (idx, col) in record.iter() { - rowdata[*idx as usize] = col.clone(); + rowdata[*idx as usize] = *col; } rowdata } @@ -306,7 +306,7 @@ fn root_block_columns( ); } - return Ok(row_info); + Ok(row_info) } #[derive(Debug, Clone, PartialEq)] @@ -666,7 +666,7 @@ pub(super) fn explain( state.r.insert( p2, RegDataType::Single(ColumnType { - datatype: opcode_to_type(&opcode), + datatype: opcode_to_type(opcode), nullable: Some(false), }), ); @@ -784,8 +784,7 @@ pub(super) fn explain( while let Some(state) = result_states.pop() { // find the datatype info from each ResultRow execution if let Some(result) = state.result { - let mut idx = 0; - for (this_type, this_nullable) in result { + for (idx, (this_type, this_nullable)) in result.into_iter().enumerate() { if output.len() == idx { output.push(this_type); } else if output[idx].is_none() @@ -804,7 +803,6 @@ pub(super) fn explain( } else { nullable[idx] = this_nullable; } - idx += 1; } } } diff --git a/sqlx-core/src/sqlite/connection/worker.rs b/sqlx-core/src/sqlite/connection/worker.rs index 9bf2d825d1..218d101fc6 100644 --- a/sqlx-core/src/sqlite/connection/worker.rs +++ b/sqlx-core/src/sqlite/connection/worker.rs @@ -124,12 +124,11 @@ impl ConnectionWorker { for cmd in command_rx { match cmd { Command::Prepare { query, tx } => { - tx.send(prepare(&mut conn, &query).map(|prepared| { + tx.send(prepare(&mut conn, &query).inspect(|_| { update_cached_statements_size( &conn, &shared.cached_statements_size, ); - prepared })) .ok(); } diff --git a/sqlx-core/src/sqlite/options/auto_vacuum.rs b/sqlx-core/src/sqlite/options/auto_vacuum.rs index 65d0c4613e..258569a188 100644 --- a/sqlx-core/src/sqlite/options/auto_vacuum.rs +++ b/sqlx-core/src/sqlite/options/auto_vacuum.rs @@ -1,8 +1,9 @@ use crate::error::Error; use std::str::FromStr; -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub enum SqliteAutoVacuum { + #[default] None, Full, Incremental, @@ -18,12 +19,6 @@ impl SqliteAutoVacuum { } } -impl Default for SqliteAutoVacuum { - fn default() -> Self { - SqliteAutoVacuum::None - } -} - impl FromStr for SqliteAutoVacuum { type Err = Error; diff --git a/sqlx-core/src/sqlite/options/journal_mode.rs b/sqlx-core/src/sqlite/options/journal_mode.rs index d0424adbb4..933cc26aa7 100644 --- a/sqlx-core/src/sqlite/options/journal_mode.rs +++ b/sqlx-core/src/sqlite/options/journal_mode.rs @@ -4,12 +4,13 @@ use std::str::FromStr; /// Refer to [SQLite documentation] for the meaning of the database journaling mode. 
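// Hedged sketch (not part of the patch), showing the pattern applied to
// SqliteAutoVacuum above and to the other option enums that follow: since
// Rust 1.62, `Default` can be derived for an enum by marking one variant with
// `#[default]`, which replaces the hand-written `impl Default` blocks removed
// throughout this patch.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
enum AutoVacuumSketch {
    #[default]
    None,
    Full,
    Incremental,
}

fn default_is_none() -> bool {
    AutoVacuumSketch::default() == AutoVacuumSketch::None
}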
/// /// [SQLite documentation]: https://www.sqlite.org/pragma.html#pragma_journal_mode -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub enum SqliteJournalMode { Delete, Truncate, Persist, Memory, + #[default] Wal, Off, } @@ -27,12 +28,6 @@ impl SqliteJournalMode { } } -impl Default for SqliteJournalMode { - fn default() -> Self { - SqliteJournalMode::Wal - } -} - impl FromStr for SqliteJournalMode { type Err = Error; diff --git a/sqlx-core/src/sqlite/options/locking_mode.rs b/sqlx-core/src/sqlite/options/locking_mode.rs index 4f0900af69..d488043c39 100644 --- a/sqlx-core/src/sqlite/options/locking_mode.rs +++ b/sqlx-core/src/sqlite/options/locking_mode.rs @@ -4,8 +4,9 @@ use std::str::FromStr; /// Refer to [SQLite documentation] for the meaning of the connection locking mode. /// /// [SQLite documentation]: https://www.sqlite.org/pragma.html#pragma_locking_mode -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub enum SqliteLockingMode { + #[default] Normal, Exclusive, } @@ -19,12 +20,6 @@ impl SqliteLockingMode { } } -impl Default for SqliteLockingMode { - fn default() -> Self { - SqliteLockingMode::Normal - } -} - impl FromStr for SqliteLockingMode { type Err = Error; diff --git a/sqlx-core/src/sqlite/options/synchronous.rs b/sqlx-core/src/sqlite/options/synchronous.rs index cd37682d4f..adaec0206b 100644 --- a/sqlx-core/src/sqlite/options/synchronous.rs +++ b/sqlx-core/src/sqlite/options/synchronous.rs @@ -4,10 +4,11 @@ use std::str::FromStr; /// Refer to [SQLite documentation] for the meaning of various synchronous settings. /// /// [SQLite documentation]: https://www.sqlite.org/pragma.html#pragma_synchronous -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub enum SqliteSynchronous { Off, Normal, + #[default] Full, Extra, } @@ -23,12 +24,6 @@ impl SqliteSynchronous { } } -impl Default for SqliteSynchronous { - fn default() -> Self { - SqliteSynchronous::Full - } -} - impl FromStr for SqliteSynchronous { type Err = Error; diff --git a/sqlx-core/src/sqlite/row.rs b/sqlx-core/src/sqlite/row.rs index 6caefd52b2..2877aefcfc 100644 --- a/sqlx-core/src/sqlite/row.rs +++ b/sqlx-core/src/sqlite/row.rs @@ -74,8 +74,8 @@ impl ColumnIndex for &'_ str { fn index(&self, row: &SqliteRow) -> Result { row.column_names .get(*self) + .copied() .ok_or_else(|| Error::ColumnNotFound((*self).into())) - .map(|v| *v) } } diff --git a/sqlx-core/src/sqlite/statement/virtual.rs b/sqlx-core/src/sqlite/statement/virtual.rs index 18a0e8804d..705a56b92e 100644 --- a/sqlx-core/src/sqlite/statement/virtual.rs +++ b/sqlx-core/src/sqlite/statement/virtual.rs @@ -11,10 +11,10 @@ use libsqlite3_sys::{ sqlite3, sqlite3_prepare_v3, sqlite3_stmt, SQLITE_OK, SQLITE_PREPARE_PERSISTENT, }; use smallvec::SmallVec; +use std::cmp; use std::os::raw::c_char; use std::ptr::{null, null_mut, NonNull}; use std::sync::Arc; -use std::{cmp, i32}; // A virtual statement consists of *zero* or more raw SQLite3 statements. We chop up a SQL statement // on `;` to support multiple statements in one query. 
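// Hedged sketch (not part of the patch): the `.copied()` calls introduced in
// the row/statement `ColumnIndex` impls replace the `.map(|v| *v)` closures
// when resolving a column name to its index. The types here are illustrative
// stand-ins for the real row structures.
use std::collections::HashMap;

fn column_index(column_names: &HashMap<String, usize>, name: &str) -> Result<usize, String> {
    column_names
        .get(name)
        .copied()
        .ok_or_else(|| format!("column not found: {name}"))
}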
@@ -72,7 +72,7 @@ impl VirtualStatement { } } - if query.len() > i32::max_value() as usize { + if query.len() > i32::MAX as usize { return Err(err_protocol!( "query string must be smaller than {} bytes", i32::MAX @@ -191,7 +191,7 @@ fn prepare( conn, query_ptr, query_len, - flags as u32, + flags, &mut statement_handle, &mut tail, ) diff --git a/sqlx-core/src/sqlite/types/float.rs b/sqlx-core/src/sqlite/types/float.rs index d8b2c3bd17..2b26332471 100644 --- a/sqlx-core/src/sqlite/types/float.rs +++ b/sqlx-core/src/sqlite/types/float.rs @@ -21,6 +21,7 @@ impl<'q> Encode<'q, Sqlite> for f32 { impl<'r> Decode<'r, Sqlite> for f32 { fn decode(value: SqliteValueRef<'r>) -> Result { + #[allow(clippy::cast_possible_truncation)] Ok(value.double() as f32) } } diff --git a/sqlx-core/src/transaction.rs b/sqlx-core/src/transaction.rs index 76d5ac85ba..4fe555b22b 100644 --- a/sqlx-core/src/transaction.rs +++ b/sqlx-core/src/transaction.rs @@ -103,7 +103,7 @@ macro_rules! impl_executor_for_transaction { { type Database = $DB; - fn fetch_many<'e, 'q: 'e, E: 'q>( + fn fetch_many<'e, 'q: 'e, E>( self, query: E, ) -> futures_core::stream::BoxStream< @@ -115,18 +115,18 @@ macro_rules! impl_executor_for_transaction { > where 't: 'e, - E: crate::executor::Execute<'q, Self::Database>, + E: crate::executor::Execute<'q, Self::Database> + 'q, { (&mut **self).fetch_many(query) } - fn fetch_optional<'e, 'q: 'e, E: 'q>( + fn fetch_optional<'e, 'q: 'e, E>( self, query: E, ) -> futures_core::future::BoxFuture<'e, Result, crate::error::Error>> where 't: 'e, - E: crate::executor::Execute<'q, Self::Database>, + E: crate::executor::Execute<'q, Self::Database> + 'q, { (&mut **self).fetch_optional(query) } diff --git a/sqlx-macros/src/database/mod.rs b/sqlx-macros/src/database/mod.rs index cbc0c5dd61..05cb7a7bff 100644 --- a/sqlx-macros/src/database/mod.rs +++ b/sqlx-macros/src/database/mod.rs @@ -10,8 +10,10 @@ pub enum ParamChecking { pub trait DatabaseExt: Database { const DATABASE_PATH: &'static str; const ROW_PATH: &'static str; + #[allow(dead_code)] const NAME: &'static str; + #[allow(dead_code)] const PARAM_CHECKING: ParamChecking; fn db_path() -> syn::Path { diff --git a/sqlx-macros/src/query/data.rs b/sqlx-macros/src/query/data.rs index 7f8a4bb9b7..2294f6ad83 100644 --- a/sqlx-macros/src/query/data.rs +++ b/sqlx-macros/src/query/data.rs @@ -80,6 +80,7 @@ pub mod offline { } #[derive(serde::Deserialize)] + #[allow(dead_code)] pub struct DynQueryData { #[serde(skip)] pub db_name: String, @@ -153,6 +154,7 @@ pub mod offline { where Describe: serde::Serialize + serde::de::DeserializeOwned, { + #[allow(dead_code)] pub fn from_dyn_data(dyn_data: DynQueryData) -> crate::Result { assert!(!dyn_data.db_name.is_empty()); assert!(!dyn_data.hash.is_empty()); diff --git a/sqlx-macros/src/query/mod.rs b/sqlx-macros/src/query/mod.rs index 7ba3bd2cc4..14a21a6a83 100644 --- a/sqlx-macros/src/query/mod.rs +++ b/sqlx-macros/src/query/mod.rs @@ -49,7 +49,7 @@ impl Metadata { let cargo = env("CARGO").expect("`CARGO` must be set"); let output = Command::new(&cargo) - .args(&["metadata", "--format-version=1", "--no-deps"]) + .args(["metadata", "--format-version=1", "--no-deps"]) .current_dir(&self.manifest_dir) .env_remove("__CARGO_FIX_PLZ") .output() @@ -77,9 +77,7 @@ static METADATA: Lazy = Lazy::new(|| { .into(); #[cfg(feature = "offline")] - let package_name: String = env("CARGO_PKG_NAME") - .expect("`CARGO_PKG_NAME` must be set") - .into(); + let package_name: String = env("CARGO_PKG_NAME").expect("`CARGO_PKG_NAME` must be set"); 
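// Hedged sketch (not part of the patch): the `virtual.rs` hunk above swaps the
// legacy `i32::max_value()` call for the `i32::MAX` associated constant; the
// surrounding length check amounts to the following. The explicit clippy
// allows mirror the ones added in `buf_mut.rs` and `bind.rs` for intentional
// narrowing casts.
fn query_len_as_i32(query: &str) -> Result<i32, String> {
    if query.len() > i32::MAX as usize {
        return Err(format!(
            "query string must be smaller than {} bytes",
            i32::MAX
        ));
    }
    #[allow(clippy::cast_possible_truncation, clippy::cast_possible_wrap)]
    Ok(query.len() as i32)
}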
#[cfg(feature = "offline")] let target_dir = env("CARGO_TARGET_DIR").map_or_else(|_| "target".into(), |dir| dir.into()); @@ -154,7 +152,7 @@ pub fn expand_input(input: QueryMacroInput) -> crate::Result { offline: false, database_url: Some(db_url), .. - } => expand_from_db(input, &db_url), + } => expand_from_db(input, db_url), #[cfg(feature = "offline")] _ => { @@ -246,9 +244,7 @@ fn expand_from_db(input: QueryMacroInput, db_url: &str) -> crate::Result { - return Err(format!("Missing expansion needed for: {:?}", item).into()); - } + item => Err(format!("Missing expansion needed for: {:?}", item).into()), } }) } diff --git a/sqlx-macros/src/test_attr.rs b/sqlx-macros/src/test_attr.rs index c006ff7294..a960dadb84 100644 --- a/sqlx-macros/src/test_attr.rs +++ b/sqlx-macros/src/test_attr.rs @@ -1,13 +1,16 @@ +#![allow(unused_imports)] use proc_macro2::{Span, TokenStream}; use quote::quote; use syn::punctuated::Punctuated; use syn::{LitStr, Token}; +#[allow(dead_code)] struct Args { fixtures: Vec, migrations: MigrationsOpt, } +#[allow(dead_code)] enum MigrationsOpt { InferredPath, ExplicitPath(LitStr), diff --git a/src/macros/mod.rs b/src/macros/mod.rs index 5f33d70a74..0b79fcc28a 100644 --- a/src/macros/mod.rs +++ b/src/macros/mod.rs @@ -42,8 +42,8 @@ /// † Only callable if the query returns no columns; otherwise it's assumed the query *may* return at least one row. /// ## Requirements /// * The `DATABASE_URL` environment variable must be set at build-time to point to a database -/// server with the schema that the query string will be checked against. All variants of `query!()` -/// use [dotenv]1 so this can be in a `.env` file instead. +/// server with the schema that the query string will be checked against. All variants of `query!()` +/// use [dotenv]1 so this can be in a `.env` file instead. /// /// * Or, `sqlx-data.json` must exist at the workspace root. See [Offline Mode](#offline-mode-requires-the-offline-feature) /// below. 
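// Hedged usage sketch for the requirement documented above (the `users` table
// and its column are hypothetical, not from the patch): with `DATABASE_URL`
// set at build time, or placed in a `.env` file, `query!` checks the SQL
// against the live schema and types the result row at compile time.
async fn count_users(pool: &sqlx_oldapi::PgPool) -> Result<Option<i64>, sqlx_oldapi::Error> {
    let row = sqlx_oldapi::query!("SELECT COUNT(*) AS count FROM users")
        .fetch_one(pool)
        .await?;
    Ok(row.count)
}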
diff --git a/tests/any/pool.rs b/tests/any/pool.rs index 0237d32ba6..2197cb023f 100644 --- a/tests/any/pool.rs +++ b/tests/any/pool.rs @@ -1,5 +1,8 @@ +#[allow(unused_imports)] use sqlx_oldapi::any::{AnyConnectOptions, AnyPoolOptions}; +#[allow(unused_imports)] use sqlx_oldapi::Executor; +#[allow(unused_imports)] use std::sync::atomic::AtomicI32; use std::sync::{ atomic::{AtomicUsize, Ordering}, diff --git a/tests/mssql/mssql.rs b/tests/mssql/mssql.rs index 7453601d51..90cc8c81d4 100644 --- a/tests/mssql/mssql.rs +++ b/tests/mssql/mssql.rs @@ -1,11 +1,14 @@ use futures::TryStreamExt; use sqlx_core::mssql::MssqlRow; +#[allow(unused_imports)] use sqlx_oldapi::mssql::{Mssql, MssqlPoolOptions}; -use sqlx_oldapi::{ - Column, Connection, Execute, Executor, MssqlConnection, Row, Statement, TypeInfo, -}; +#[allow(unused_imports)] +use sqlx_oldapi::Execute; +use sqlx_oldapi::{Column, Connection, Executor, MssqlConnection, Row, Statement, TypeInfo}; use sqlx_test::new; +#[allow(unused_imports)] use std::sync::atomic::{AtomicI32, Ordering}; +#[allow(unused_imports)] use std::time::Duration; #[sqlx_macros::test] @@ -530,7 +533,7 @@ CREATE TABLE #qb_test ( name: String, } - let items_to_insert = vec![ + let items_to_insert = [ TestItem { id: 1, name: "Alice".to_string(), diff --git a/tests/mssql/types.rs b/tests/mssql/types.rs index e8af00396f..fc531b9cea 100644 --- a/tests/mssql/types.rs +++ b/tests/mssql/types.rs @@ -99,9 +99,9 @@ test_type!(numeric(Mssql, "CAST(0.0000000000000001 AS NUMERIC(38,16))" == 0.0000000000000001_f64, "CAST(939399419.1225182 AS NUMERIC(15,2))" == 939399419.12_f64, "CAST(939399419.1225182 AS DECIMAL(15,2))" == 939399419.12_f64, - "CAST(123456789.0123456789 AS NUMERIC(38,10))" == 123_456_789.012_345_678_9_f64, - "CAST(123456789.0123456789012 AS NUMERIC(38,13))" == 123_456_789.012_345_678_901_2_f64, - "CAST(123456789.012345678901234 AS NUMERIC(38,15))" == 123_456_789.012_345_678_901_234_f64, + "CAST(123456789.0123456789 AS NUMERIC(38,10))" == 123_456_789.012_345_67_f64, + "CAST(123456789.0123456789012 AS NUMERIC(38,13))" == 123_456_789.012_345_67_f64, + "CAST(123456789.012345678901234 AS NUMERIC(38,15))" == 123_456_789.012_345_67_f64, "CAST(1.0000000000000001 AS NUMERIC(18,16))" == 1.0000000000000001_f64, "CAST(0.99999999999999 AS NUMERIC(18,14))" == 0.99999999999999_f64, "CAST(2.00000000000001 AS NUMERIC(18,14))" == 2.00000000000001_f64, @@ -265,8 +265,8 @@ mod decimal { "CAST('-1.0000' AS MONEY)" == -1.0000, "CAST('2.15' AS MONEY)" == 2.15, "CAST('214748.3647' AS SMALLMONEY)" == 214748.3647, - "CAST('922337203685477.5807' AS MONEY)" == 922337203685477.5807, - "CAST('-922337203685477.5808' AS MONEY)" == -922337203685477.5808, + "CAST('922337203685477.5807' AS MONEY)" == 922_337_203_685_477.6, + "CAST('-922337203685477.5808' AS MONEY)" == -922_337_203_685_477.6, "CAST('214748.3647' AS SMALLMONEY)" == 214748.3647, "CAST('-214748.3648' AS SMALLMONEY)" == -214748.3648, )); diff --git a/tests/mysql/types.rs b/tests/mysql/types.rs index cd6059f2bf..c155ef74bb 100644 --- a/tests/mysql/types.rs +++ b/tests/mysql/types.rs @@ -1,3 +1,5 @@ +#![allow(clippy::approx_constant)] + extern crate time_ as time; #[cfg(feature = "decimal")] @@ -88,7 +90,7 @@ mod chrono { test_type!(chrono_timestamp>(MySql, "TIMESTAMP '2019-01-02 05:10:20.115100'" - == DateTime::::from_utc( + == DateTime::::from_naive_utc_and_offset( NaiveDateTime::parse_from_str("2019-01-02 05:10:20.115100", "%Y-%m-%d %H:%M:%S%.f").unwrap(), Utc, ) @@ -96,7 +98,7 @@ mod chrono { test_type!(chrono_fixed_offset>(MySql, "TIMESTAMP 
'2019-01-02 05:10:20.115100'" - == DateTime::::from_utc( + == DateTime::::from_naive_utc_and_offset( NaiveDateTime::parse_from_str("2019-01-02 05:10:20.115100", "%Y-%m-%d %H:%M:%S%.f").unwrap(), Utc, ) @@ -331,7 +333,7 @@ CREATE TEMPORARY TABLE with_bits ( // as bool let row = conn.fetch_one("SELECT value_1 FROM with_bits").await?; let v1: bool = row.try_get(0)?; - assert_eq!(v1, true); + assert!(v1); // switch the bit sqlx_oldapi::query("UPDATE with_bits SET value_1 = NOT value_1") @@ -340,7 +342,7 @@ CREATE TEMPORARY TABLE with_bits ( let row = conn.fetch_one("SELECT value_1 FROM with_bits").await?; let v1: bool = row.try_get(0)?; - assert_eq!(v1, false); + assert!(!v1); Ok(()) } diff --git a/tests/postgres/derives.rs b/tests/postgres/derives.rs index 190771a655..a6b31dadd3 100644 --- a/tests/postgres/derives.rs +++ b/tests/postgres/derives.rs @@ -129,11 +129,6 @@ struct InventoryItem { #[sqlx(type_name = "float_range")] struct FloatRange(PgRange); -// Custom domain type -#[derive(sqlx_oldapi::Type, Debug)] -#[sqlx(type_name = "int4rangeL0pC")] -struct RangeInclusive(PgRange); - test_type!(transparent(Postgres, "0" == Transparent(0), "23523" == Transparent(23523) diff --git a/tests/postgres/macros.rs b/tests/postgres/macros.rs index 3eb0c36ba5..42918c55ac 100644 --- a/tests/postgres/macros.rs +++ b/tests/postgres/macros.rs @@ -1,3 +1,4 @@ +#[allow(unused_imports)] use sqlx_oldapi as sqlx; use sqlx_oldapi::{Connection, PgConnection, Postgres, Transaction}; use sqlx_test::new; diff --git a/tests/postgres/types.rs b/tests/postgres/types.rs index 238991d26f..bbc66b969d 100644 --- a/tests/postgres/types.rs +++ b/tests/postgres/types.rs @@ -1,3 +1,4 @@ +#![allow(unexpected_cfgs)] extern crate time_ as time; use std::ops::Bound; @@ -5,6 +6,8 @@ use std::ops::Bound; use sqlx_oldapi::postgres::types::{Oid, PgInterval, PgMoney, PgRange}; use sqlx_oldapi::postgres::Postgres; use sqlx_test::{test_decode_type, test_prepared_type, test_type}; + +#[allow(unused_imports)] use std::str::FromStr; test_type!(null>(Postgres, @@ -286,7 +289,7 @@ mod chrono { test_type!(chrono_date_time_tz_utc>(Postgres, "TIMESTAMPTZ '2019-01-02 05:10:20.115100'" - == DateTime::::from_utc( + == DateTime::::from_naive_utc_and_offset( NaiveDate::from_ymd_opt(2019, 1, 2).unwrap().and_hms_micro_opt(5, 10, 20, 115100).unwrap(), Utc, ) @@ -300,7 +303,7 @@ mod chrono { test_type!(chrono_date_time_tz_vec>>(Postgres, "array['2019-01-02 05:10:20.115100']::timestamptz[]" == vec![ - DateTime::::from_utc( + DateTime::::from_naive_utc_and_offset( NaiveDate::from_ymd_opt(2019, 1, 2).unwrap().and_hms_micro_opt(5, 10, 20, 115100).unwrap(), Utc, ) diff --git a/tests/sqlite/types.rs b/tests/sqlite/types.rs index 3dad6d8f29..121f4ba2fb 100644 --- a/tests/sqlite/types.rs +++ b/tests/sqlite/types.rs @@ -1,8 +1,11 @@ #![allow(clippy::approx_constant)] extern crate time_ as time; -use sqlx_core::row::Row; +#[allow(unused_imports)] use sqlx_oldapi::sqlite::{Sqlite, SqliteRow}; +#[allow(unused_imports)] +use sqlx_oldapi::Row; +#[allow(unused_imports)] use sqlx_test::new; use sqlx_test::test_type;
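// Hedged sketch (not part of the patch): newer chrono releases (0.4.27+)
// deprecate `DateTime::from_utc` in favour of `from_naive_utc_and_offset`,
// which is why the chrono-based tests above were updated. Constructing the
// same timestamp by hand:
use chrono::{DateTime, NaiveDate, Utc};

fn sample_timestamp() -> DateTime<Utc> {
    let naive = NaiveDate::from_ymd_opt(2019, 1, 2)
        .unwrap()
        .and_hms_micro_opt(5, 10, 20, 115_100)
        .unwrap();
    DateTime::<Utc>::from_naive_utc_and_offset(naive, Utc)
}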