Skip to content

Commit 9d615fc

Browse files
authored
Merge pull request #7 from postgresml/montana/a
Cleanup lints for rust 1.84.0
2 parents 4004fec + d4a5744 commit 9d615fc

File tree

8 files changed

+18
-20
lines changed

8 files changed

+18
-20
lines changed

Cargo.toml

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -42,7 +42,7 @@ approx = "0.4"
4242

4343
ndarray = { version = "0.15", features = ["approx"] }
4444
ndarray-linalg = { version = "0.16", optional = true }
45-
sprs = { version = "0.11", default-features = false }
45+
sprs = { version = "=0.11.1", default-features = false }
4646

4747
thiserror = "1.0"
4848

algorithms/linfa-ftrl/Cargo.toml

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -24,7 +24,7 @@ version = "1.0"
2424
features = ["derive"]
2525

2626
[dependencies]
27-
ndarray = { version = "0.15.4", features = ["serde"] }
27+
ndarray = { version = "0.15", features = ["serde"] }
2828
ndarray-rand = "0.14.0"
2929
argmin = { version = "0.9.0", default-features = false }
3030
argmin-math = { version = "0.3", features = ["ndarray_v0_15-nolinalg"] }

algorithms/linfa-kernel/Cargo.toml

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -26,7 +26,7 @@ features = ["std", "derive"]
2626
[dependencies]
2727
ndarray = "0.15"
2828
num-traits = "0.2"
29-
sprs = { version="0.11", default-features = false }
29+
sprs = { version="=0.11.1", default-features = false }
3030

3131
linfa = { version = "0.7.0", path = "../.." }
3232
linfa-nn = { version = "0.7.0", path = "../linfa-nn" }

algorithms/linfa-preprocessing/Cargo.toml

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -29,7 +29,7 @@ ndarray-rand = { version = "0.14" }
2929
unicode-normalization = "0.1.8"
3030
regex = "1.4.5"
3131
encoding = "0.2"
32-
sprs = { version = "0.11.0", default-features = false }
32+
sprs = { version = "=0.11.1", default-features = false }
3333

3434
serde_regex = { version = "1.1", optional = true }
3535

src/correlation.rs

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -128,7 +128,7 @@ impl<F: Float> PearsonCorrelation<F> {
128128
///
129129
/// * `dataset`: Data for the correlation analysis
130130
/// * `num_iter`: optionally number of iterations of the p-value test, if none then no p-value
131-
/// are calculate
131+
/// are calculated
132132
///
133133
/// # Example
134134
///
@@ -153,7 +153,6 @@ impl<F: Float> PearsonCorrelation<F> {
153153
/// lamotrigine +0.47 (0.14)
154154
/// blood sugar level
155155
/// ```
156-
157156
pub fn from_dataset<D: Data<Elem = F>, T>(
158157
dataset: &DatasetBase<ArrayBase<D, Ix2>, T>,
159158
num_iter: Option<usize>,

src/dataset/impl_dataset.rs

Lines changed: 10 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -208,7 +208,7 @@ where
208208
/// println!("{} => {}", x, y);
209209
/// }
210210
/// ```
211-
pub fn sample_iter(&'a self) -> Iter<'a, '_, F, T::Elem, T::Ix> {
211+
pub fn sample_iter(&'a self) -> Iter<'a, 'a, F, T::Elem, T::Ix> {
212212
Iter::new(self.records.view(), self.targets.as_targets())
213213
}
214214
}
@@ -232,7 +232,7 @@ where
232232
///
233233
/// This iterator produces dataset views with only a single feature, while the set of targets remain
234234
/// complete. It can be useful to compare each feature individual to all targets.
235-
pub fn feature_iter(&'a self) -> DatasetIter<'a, '_, ArrayBase<D, Ix2>, T> {
235+
pub fn feature_iter(&'a self) -> DatasetIter<'a, 'a, ArrayBase<D, Ix2>, T> {
236236
DatasetIter::new(self, true)
237237
}
238238

@@ -241,7 +241,7 @@ where
241241
/// This functions creates an iterator which produces dataset views complete records, but only
242242
/// a single target each. Useful to train multiple single target models for a multi-target
243243
/// dataset.
244-
pub fn target_iter(&'a self) -> DatasetIter<'a, '_, ArrayBase<D, Ix2>, T> {
244+
pub fn target_iter(&'a self) -> DatasetIter<'a, 'a, ArrayBase<D, Ix2>, T> {
245245
DatasetIter::new(self, false)
246246
}
247247
}
@@ -318,7 +318,7 @@ impl<L: Label, T: Labels<Elem = L>, R: Records> Labels for DatasetBase<R, T> {
318318
}
319319

320320
#[allow(clippy::type_complexity)]
321-
impl<'a, 'b: 'a, F, L: Label, T, D> DatasetBase<ArrayBase<D, Ix2>, T>
321+
impl<F, L: Label, T, D> DatasetBase<ArrayBase<D, Ix2>, T>
322322
where
323323
D: Data<Elem = F>,
324324
T: AsSingleTargets<Elem = L> + Labels<Elem = L>,
@@ -680,8 +680,8 @@ where
680680
/// - `k`: the number of folds to apply to the dataset
681681
/// - `params`: the desired parameters for the fittable algorithm at hand
682682
/// - `fit_closure`: a closure of the type `(params, training_data) -> fitted_model`
683-
/// that will be used to produce the trained model for each fold. The training data given in input
684-
/// won't outlive the closure.
683+
/// that will be used to produce the trained model for each fold. The training data given in
684+
/// input won't outlive the closure.
685685
///
686686
/// ## Returns
687687
///
@@ -732,7 +732,7 @@ where
732732
&'a mut self,
733733
k: usize,
734734
fit_closure: C,
735-
) -> impl Iterator<Item = (O, DatasetBase<ArrayView2<F>, ArrayView<E, I>>)> {
735+
) -> impl Iterator<Item = (O, DatasetBase<ArrayView2<'a, F>, ArrayView<'a, E, I>>)> {
736736
assert!(k > 0);
737737
assert!(k <= self.nsamples());
738738
let samples_count = self.nsamples();
@@ -794,9 +794,9 @@ where
794794
/// - `k`: the number of folds to apply
795795
/// - `parameters`: a list of models to compare
796796
/// - `eval`: closure used to evaluate the performance of each trained model. This closure is
797-
/// called on the model output and validation targets of each fold and outputs the performance
798-
/// score for each target. For single-target dataset the signature is `(Array1, Array1) ->
799-
/// Array0`. For multi-target dataset the signature is `(Array2, Array2) -> Array1`.
797+
/// called on the model output and validation targets of each fold and outputs the performance
798+
/// score for each target. For single-target dataset the signature is `(Array1, Array1) ->
799+
/// Array0`. For multi-target dataset the signature is `(Array2, Array2) -> Array1`.
800800
///
801801
/// ### Returns
802802
///

src/dataset/mod.rs

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -161,7 +161,7 @@ impl Deref for Pr {
161161
/// # Fields
162162
///
163163
/// * `records`: a two-dimensional matrix with dimensionality (nsamples, nfeatures), in case of
164-
/// kernel methods a quadratic matrix with dimensionality (nsamples, nsamples), which may be sparse
164+
/// kernel methods a quadratic matrix with dimensionality (nsamples, nsamples), which may be sparse
165165
/// * `targets`: a two-/one-dimension matrix with dimensionality (nsamples, ntargets)
166166
/// * `weights`: optional weights for each sample with dimensionality (nsamples)
167167
/// * `feature_names`: optional descriptive feature names with dimensionality (nfeatures)
@@ -170,7 +170,7 @@ impl Deref for Pr {
170170
///
171171
/// * `R: Records`: generic over feature matrices or kernel matrices
172172
/// * `T`: generic over any `ndarray` matrix which can be used as targets. The `AsTargets` trait
173-
/// bound is omitted here to avoid some repetition in implementation `src/dataset/impl_dataset.rs`
173+
/// bound is omitted here to avoid some repetition in implementation `src/dataset/impl_dataset.rs`
174174
#[derive(Debug, Clone, PartialEq)]
175175
pub struct DatasetBase<R, T>
176176
where

src/metrics_clustering.rs

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -63,9 +63,8 @@ impl<F: Float> DistanceCount<F> {
6363
}
6464

6565
impl<
66-
'a,
6766
F: Float,
68-
L: 'a + Label,
67+
L: Label,
6968
D: Data<Elem = F>,
7069
T: AsSingleTargets<Elem = L> + Labels<Elem = L>,
7170
> SilhouetteScore<F> for DatasetBase<ArrayBase<D, Ix2>, T>

0 commit comments

Comments (0)