diff --git a/.github/workflows/checking.yml b/.github/workflows/checking.yml index 1a18fe81f..424a514e2 100644 --- a/.github/workflows/checking.yml +++ b/.github/workflows/checking.yml @@ -10,7 +10,7 @@ jobs: fail-fast: false matrix: toolchain: - - 1.70.0 + - 1.71.1 - stable - nightly os: diff --git a/.github/workflows/codequality.yml b/.github/workflows/codequality.yml index 641069350..e6eb8d4f8 100644 --- a/.github/workflows/codequality.yml +++ b/.github/workflows/codequality.yml @@ -10,7 +10,7 @@ jobs: strategy: matrix: toolchain: - - 1.70.0 + - 1.71.1 - stable steps: @@ -46,7 +46,7 @@ jobs: run: echo "::set-output name=version::$(cargo --version | cut -d ' ' -f 2)" shell: bash - - uses: actions/cache@v2 + - uses: actions/cache@v4 id: tarpaulin-cache with: path: | @@ -61,6 +61,8 @@ jobs: run: | cargo tarpaulin --verbose --timeout 120 --out Xml --all --release - name: Upload to codecov.io - uses: codecov/codecov-action@v1 + uses: codecov/codecov-action@v4 with: + token: ${{ secrets.CODECOV_TOKEN }} fail_ci_if_error: true + verbose: true diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 22f075b94..7ed3f16d7 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -10,7 +10,7 @@ jobs: fail-fast: false matrix: toolchain: - - 1.70.0 + - 1.71.1 - stable os: - ubuntu-latest @@ -35,7 +35,7 @@ jobs: fail-fast: false matrix: toolchain: - - 1.70.0 + - 1.71.1 - stable os: - ubuntu-latest diff --git a/algorithms/linfa-bayes/README.md b/algorithms/linfa-bayes/README.md index 17003a0db..2fc57679d 100644 --- a/algorithms/linfa-bayes/README.md +++ b/algorithms/linfa-bayes/README.md @@ -10,8 +10,8 @@ `linfa-bayes` currently provides an implementation of the following methods: -- Gaussian Naive Bayes ([`GaussianNb`](crate::GaussianNb)) -- Multinomial Naive Nayes ([`MultinomialNb`](crate::MultinomialNb)) +- Gaussian Naive Bayes ([`GaussianNb`]) +- Multinomial Naive Bayes ([`MultinomialNb`]) ## Examples diff --git a/algorithms/linfa-bayes/src/base_nb.rs b/algorithms/linfa-bayes/src/base_nb.rs index 143223aa1..d54fdc44b 100644 --- a/algorithms/linfa-bayes/src/base_nb.rs +++ b/algorithms/linfa-bayes/src/base_nb.rs @@ -79,7 +79,8 @@ pub fn filter( let index = y .into_iter() .enumerate() - .filter_map(|(i, y)| (*ycondition == *y).then(|| i)) + .filter(|(_, y)| (*ycondition == **y)) + .map(|(i, _)| i) .collect::<Vec<_>>(); // We subset x to only records corresponding to the class represented in `ycondition` diff --git a/algorithms/linfa-bayes/src/gaussian_nb.rs b/algorithms/linfa-bayes/src/gaussian_nb.rs index eeaec2841..b89b55d3a 100644 --- a/algorithms/linfa-bayes/src/gaussian_nb.rs +++ b/algorithms/linfa-bayes/src/gaussian_nb.rs @@ -133,7 +133,7 @@ where } } -impl<'a, F, L> GaussianNbValidParams<F, L> +impl<F, L> GaussianNbValidParams<F, L> where F: Float, { @@ -259,7 +259,7 @@ impl<F: Float, L: Label + Ord> GaussianNb<F, L> { } } -impl<'a, F, L> NaiveBayes<'a, F, L> for GaussianNb<F, L> +impl<F, L> NaiveBayes<'_, F, L> for GaussianNb<F, L> where F: Float, L: Label + Ord, diff --git a/algorithms/linfa-bayes/src/multinomial_nb.rs b/algorithms/linfa-bayes/src/multinomial_nb.rs index 106a0c1d3..1fc852d26 100644 --- a/algorithms/linfa-bayes/src/multinomial_nb.rs +++ b/algorithms/linfa-bayes/src/multinomial_nb.rs @@ -229,7 +229,7 @@ impl<F: Float, L: Label + Ord> MultinomialNb<F, L> { } } -impl<'a, F, L> NaiveBayes<'a, F, L> for MultinomialNb<F, L> +impl<F, L> NaiveBayes<'_, F, L> for MultinomialNb<F, L> where F: Float, L: Label + Ord, diff --git a/algorithms/linfa-clustering/Cargo.toml b/algorithms/linfa-clustering/Cargo.toml index c01d71211..8c3dbfd93 100644 ---
a/algorithms/linfa-clustering/Cargo.toml +++ b/algorithms/linfa-clustering/Cargo.toml @@ -24,6 +24,7 @@ categories = ["algorithms", "mathematics", "science"] [features] default = [] +blas = [] serde = ["serde_crate", "ndarray/serde", "linfa-nn/serde"] [dependencies.serde_crate] diff --git a/algorithms/linfa-clustering/src/dbscan/algorithm.rs b/algorithms/linfa-clustering/src/dbscan/algorithm.rs index 6063f07bf..b5baaebe8 100644 --- a/algorithms/linfa-clustering/src/dbscan/algorithm.rs +++ b/algorithms/linfa-clustering/src/dbscan/algorithm.rs @@ -38,10 +38,8 @@ use linfa::{traits::Transformer, DatasetBase}; /// The algorithm iterates over each point in the dataset and for every point /// not yet assigned to a cluster: /// - Find all points within the neighborhood of size `tolerance` -/// - If the number of points in the neighborhood is below a minimum size label -/// as noise -/// - Otherwise label the point with the cluster ID and repeat with each of the -/// neighbours +/// - If the number of points in the neighborhood is below a minimum size, label as noise +/// - Otherwise label the point with the cluster ID and repeat with each of the neighbours /// /// ## Tutorial /// diff --git a/algorithms/linfa-clustering/src/gaussian_mixture/algorithm.rs b/algorithms/linfa-clustering/src/gaussian_mixture/algorithm.rs index c0f7a1c04..5d3da7524 100644 --- a/algorithms/linfa-clustering/src/gaussian_mixture/algorithm.rs +++ b/algorithms/linfa-clustering/src/gaussian_mixture/algorithm.rs @@ -211,6 +211,7 @@ impl<F: Float> GaussianMixtureModel<F> { self.means() } + #[allow(clippy::type_complexity)] fn estimate_gaussian_parameters<D: Data<Elem = F>>( observations: &ArrayBase<D, Ix2>, resp: &Array2<F>, @@ -505,9 +506,8 @@ mod tests { } pub struct MultivariateNormal { - pub mean: Array1<f64>, - pub covariance: Array2<f64>, - /// Lower triangular matrix (Cholesky decomposition of the coviariance matrix) + mean: Array1<f64>, + /// Lower triangular matrix (Cholesky decomposition of the covariance matrix) lower: Array2<f64>, } impl MultivariateNormal { @@ -515,7 +515,6 @@ let lower = covariance.cholesky()?; Ok(MultivariateNormal { mean: mean.to_owned(), - covariance: covariance.to_owned(), lower, }) } diff --git a/algorithms/linfa-clustering/src/k_means/algorithm.rs b/algorithms/linfa-clustering/src/k_means/algorithm.rs index bd8297326..f8a94c479 100644 --- a/algorithms/linfa-clustering/src/k_means/algorithm.rs +++ b/algorithms/linfa-clustering/src/k_means/algorithm.rs @@ -766,7 +766,7 @@ mod tests { &mut rng, ); - let expected_memberships = (0..n_centroids).into_iter().collect::>(); + let expected_memberships = (0..n_centroids).collect::>(); assert_eq!( calc_memberships!(L2Dist, centroids, centroids), expected_memberships diff --git a/algorithms/linfa-clustering/src/k_means/init.rs b/algorithms/linfa-clustering/src/k_means/init.rs index e5c40dd6c..723bd1d8f 100644 --- a/algorithms/linfa-clustering/src/k_means/init.rs +++ b/algorithms/linfa-clustering/src/k_means/init.rs @@ -164,7 +164,7 @@ fn k_means_para>( let next_candidates_idx = sample_subsequent_candidates::( &dists, F::cast(candidates_per_round), - rng.gen_range(0..std::u64::MAX), + rng.gen_range(0..u64::MAX), ); // Append the newly generated candidates to the current candidates, breaking out of the loop @@ -191,6 +191,7 @@ fn k_means_para>( /// Generate candidate centroids by sampling each observation in parallel using a seedable RNG in /// every thread. Average number of generated candidates should equal `multiplier`.
+#[allow(clippy::extra_unused_type_parameters)] fn sample_subsequent_candidates( dists: &Array1, multiplier: F, diff --git a/algorithms/linfa-clustering/src/lib.rs b/algorithms/linfa-clustering/src/lib.rs index 40a5f3534..a9aa72858 100644 --- a/algorithms/linfa-clustering/src/lib.rs +++ b/algorithms/linfa-clustering/src/lib.rs @@ -16,7 +16,7 @@ //! * [K-Means](KMeans) //! * [DBSCAN](Dbscan) //! * [Approximated DBSCAN](AppxDbscan) (Currently an alias for DBSCAN, due to its superior -//! performance) +//! performance) //! * [Gaussian-Mixture-Model](GaussianMixtureModel) //! * [OPTICS](OpticsAnalysis) //! diff --git a/algorithms/linfa-clustering/src/optics/algorithm.rs b/algorithms/linfa-clustering/src/optics/algorithm.rs index 56c9fb700..14bfec00a 100644 --- a/algorithms/linfa-clustering/src/optics/algorithm.rs +++ b/algorithms/linfa-clustering/src/optics/algorithm.rs @@ -84,6 +84,7 @@ impl PartialEq for Sample { } } +#[allow(clippy::non_canonical_partial_ord_impl)] impl PartialOrd for Sample { fn partial_cmp(&self, other: &Self) -> Option { self.reachability_distance diff --git a/algorithms/linfa-elasticnet/examples/elasticnet_cv.rs b/algorithms/linfa-elasticnet/examples/elasticnet_cv.rs index e6e4f0771..e2aab1bd6 100644 --- a/algorithms/linfa-elasticnet/examples/elasticnet_cv.rs +++ b/algorithms/linfa-elasticnet/examples/elasticnet_cv.rs @@ -6,7 +6,7 @@ fn main() -> Result<()> { let mut dataset = linfa_datasets::diabetes(); // parameters to compare - let ratios = vec![0.1, 0.2, 0.5, 0.7, 1.0]; + let ratios = &[0.1, 0.2, 0.5, 0.7, 1.0]; // create a model for each parameter let models = ratios diff --git a/algorithms/linfa-elasticnet/src/lib.rs b/algorithms/linfa-elasticnet/src/lib.rs index 80a7e24a6..057e6aa33 100644 --- a/algorithms/linfa-elasticnet/src/lib.rs +++ b/algorithms/linfa-elasticnet/src/lib.rs @@ -35,8 +35,7 @@ pub use hyperparams::{ /// /// See also: /// * [Talk on Fast Regularization Paths](https://web.stanford.edu/~hastie/TALKS/glmnet.pdf) -/// * [Regularization Paths for Generalized Linear Models via Coordinate -/// Descent](http://www.jstatsoft.org/v33/i01/paper) +/// * [Regularization Paths for Generalized Linear Models via Coordinate Descent](http://www.jstatsoft.org/v33/i01/paper) #[derive(Debug, Clone)] pub struct ElasticNet { hyperplane: Array1, diff --git a/algorithms/linfa-ftrl/examples/winequality_ftrl.rs b/algorithms/linfa-ftrl/examples/winequality_ftrl.rs index cb9541cbf..16bea2bf1 100644 --- a/algorithms/linfa-ftrl/examples/winequality_ftrl.rs +++ b/algorithms/linfa-ftrl/examples/winequality_ftrl.rs @@ -6,7 +6,7 @@ use rand::{rngs::SmallRng, SeedableRng}; fn main() -> Result<()> { // Read the data let (train, valid) = linfa_datasets::winequality() - .map_targets(|v| if *v > 6 { true } else { false }) + .map_targets(|v| *v > 6) .split_with_ratio(0.9); let params = Ftrl::params() diff --git a/algorithms/linfa-ftrl/src/algorithm.rs b/algorithms/linfa-ftrl/src/algorithm.rs index 525a689ae..7b99b05cc 100644 --- a/algorithms/linfa-ftrl/src/algorithm.rs +++ b/algorithms/linfa-ftrl/src/algorithm.rs @@ -10,7 +10,7 @@ use rand::Rng; /// Simplified `Result` using [`FtrlError`](crate::FtrlError) as error type pub type Result = std::result::Result; -impl<'a, F, R, D, T> FitWith<'a, ArrayBase, T, FtrlError> for FtrlValidParams +impl FitWith<'_, ArrayBase, T, FtrlError> for FtrlValidParams where F: Float, R: Rng + Clone, @@ -255,7 +255,7 @@ mod test { let gradient: f64 = 0.5; let n: f64 = 0.11; let alpha = 0.5; - let expected_result = (((0.11 + 0.25) as f64).sqrt() - (0.11 as 
f64).sqrt()) / 0.5; + let expected_result = ((0.11f64 + 0.25).sqrt() - 0.11f64.sqrt()) / 0.5; let result = calculate_weight_in_average(n, gradient, alpha); assert_abs_diff_eq!(result, expected_result) } @@ -302,7 +302,7 @@ mod test { let sigma = model.calculate_sigma(gradient.view()); model.update_params(gradient.clone(), sigma.clone()); let expected_z = initial_z + &gradient - sigma * weights; - let expected_n = initial_n + &gradient.mapv(|grad| (grad as f64).powf(2.)); + let expected_n = initial_n + &gradient.mapv(|grad: f64| grad.powf(2.)); assert_abs_diff_eq!(model.z(), &expected_z, epsilon = 1e-1); assert_abs_diff_eq!(model.n(), &expected_n, epsilon = 1e-1) } diff --git a/algorithms/linfa-ftrl/src/lib.rs b/algorithms/linfa-ftrl/src/lib.rs index 050acfe40..739ffa9b3 100644 --- a/algorithms/linfa-ftrl/src/lib.rs +++ b/algorithms/linfa-ftrl/src/lib.rs @@ -49,7 +49,7 @@ impl Ftrl { /// The description can be found [here](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/41159.pdf) /// /// It requires data preprocessing done in the separate step. - + /// /// Create default hyperparameters. Random number generator will default to rand_xoshiro::Xoshiro256Plus pub fn params() -> FtrlParams { FtrlParams::default_with_rng(Xoshiro256Plus::seed_from_u64(42)) diff --git a/algorithms/linfa-hierarchical/examples/irisflower.rs b/algorithms/linfa-hierarchical/examples/irisflower.rs index 94a4fa9d5..99ecf570c 100644 --- a/algorithms/linfa-hierarchical/examples/irisflower.rs +++ b/algorithms/linfa-hierarchical/examples/irisflower.rs @@ -17,7 +17,7 @@ fn main() -> Result<(), Box> { .transform(kernel)?; for (id, target) in kernel.targets().iter().zip(dataset.targets().into_iter()) { - let name = match *target as usize { + let name = match *target { 0 => "setosa", 1 => "versicolor", 2 => "virginica", diff --git a/algorithms/linfa-hierarchical/src/lib.rs b/algorithms/linfa-hierarchical/src/lib.rs index bacfff1fc..eae797956 100644 --- a/algorithms/linfa-hierarchical/src/lib.rs +++ b/algorithms/linfa-hierarchical/src/lib.rs @@ -65,9 +65,9 @@ impl ParamGuard for HierarchicalCluster { fn check_ref(&self) -> std::result::Result<&Self::Checked, Self::Error> { match self.0.stopping { - Criterion::NumClusters(x) if x == 0 => Err( - HierarchicalError::InvalidStoppingCondition(self.0.stopping.clone()), - ), + Criterion::NumClusters(0) => Err(HierarchicalError::InvalidStoppingCondition( + self.0.stopping.clone(), + )), Criterion::Distance(x) if x.is_negative() || x.is_nan() || x.is_infinite() => Err( HierarchicalError::InvalidStoppingCondition(self.0.stopping.clone()), ), diff --git a/algorithms/linfa-kernel/Cargo.toml b/algorithms/linfa-kernel/Cargo.toml index 4646cea27..f755de63f 100644 --- a/algorithms/linfa-kernel/Cargo.toml +++ b/algorithms/linfa-kernel/Cargo.toml @@ -26,7 +26,7 @@ features = ["std", "derive"] [dependencies] ndarray = "0.15" num-traits = "0.2" -sprs = { version="0.11", default-features = false } +sprs = { version = "=0.11.1", default-features = false } linfa = { version = "0.7.0", path = "../.." 
} linfa-nn = { version = "0.7.0", path = "../linfa-nn" } diff --git a/algorithms/linfa-kernel/src/inner.rs b/algorithms/linfa-kernel/src/inner.rs index a0bdfb036..3c48aaa2e 100644 --- a/algorithms/linfa-kernel/src/inner.rs +++ b/algorithms/linfa-kernel/src/inner.rs @@ -102,7 +102,7 @@ impl Inner for CsMat { } } -impl<'a, F: Float> Inner for CsMatView<'a, F> { +impl Inner for CsMatView<'_, F> { type Elem = F; fn dot(&self, rhs: &ArrayView2) -> Array2 { diff --git a/algorithms/linfa-kernel/src/lib.rs b/algorithms/linfa-kernel/src/lib.rs index fffe1b9b9..b2361ab8c 100644 --- a/algorithms/linfa-kernel/src/lib.rs +++ b/algorithms/linfa-kernel/src/lib.rs @@ -223,7 +223,7 @@ impl<'a, F: Float> Kernel { } } -impl<'a, F: Float> KernelView<'a, F> { +impl KernelView<'_, F> { pub fn to_owned(&self) -> Kernel { Kernel { inner: match &self.inner { @@ -383,7 +383,7 @@ impl<'a, F: Float, N: NearestNeighbour> Transformer<&ArrayView2<'a, F>, Kernel +impl Transformer, T>, DatasetBase, T>> for KernelParams { /// Builds a new Dataset with the kernel as the records and the same targets as the input one. @@ -398,7 +398,7 @@ impl<'a, F: Float, T: AsTargets, N: NearestNeighbour> /// /// A new dataset with: /// - records: a kernel build from `x.records()` according to the parameters on which - /// this method is called + /// this method is called /// - targets: same as `x.targets()` /// /// ## Panics @@ -425,7 +425,7 @@ impl<'a, F: Float, L: 'a, T: AsTargets + FromTargetArray<'a>, N: Neare /// /// A new dataset with: /// - records: a kernel build from `x.records()` according to the parameters on which - /// this method is called + /// this method is called /// - targets: same as `x.targets()` /// /// ## Panics @@ -460,7 +460,7 @@ impl< /// /// A new dataset with: /// - records: a kernel build from `x.records()` according to the parameters on which - /// this method is called + /// this method is called /// - targets: a slice of `x.targets()` /// /// ## Panics @@ -623,14 +623,14 @@ mod tests { let distance = gauss_1.distance(p1.view(), p2.view()); let expected = 1.; - assert!(((distance - expected) as f64).abs() <= f64::EPSILON); + assert!(f64::abs(distance - expected) <= f64::EPSILON); let p1 = Array1::from_shape_vec(2, vec![1., 1.]).unwrap(); let p2 = Array1::from_shape_vec(2, vec![5., 5.]).unwrap(); let distance = gauss_1.distance(p1.view(), p2.view()); let expected = (consts::E).powf(-32.); // this fails with e^-31 or e^-33 so f64::EPSILON still holds - assert!(((distance - expected) as f64).abs() <= f64::EPSILON); + assert!(f64::abs(distance - expected) <= f64::EPSILON); let gauss_01 = KernelMethod::Gaussian(0.1); @@ -639,14 +639,14 @@ mod tests { let distance = gauss_01.distance(p1.view(), p2.view()); let expected = 1.; - assert!(((distance - expected) as f64).abs() <= f64::EPSILON); + assert!(f64::abs(distance - expected) <= f64::EPSILON); let p1 = Array1::from_shape_vec(2, vec![1., 1.]).unwrap(); let p2 = Array1::from_shape_vec(2, vec![2., 2.]).unwrap(); let distance = gauss_01.distance(p1.view(), p2.view()); let expected = (consts::E).powf(-20.); - assert!(((distance - expected) as f64).abs() <= f64::EPSILON); + assert!(f64::abs(distance - expected) <= f64::EPSILON); } #[test] @@ -658,13 +658,13 @@ mod tests { let distance = pol_0.distance(p1.view(), p2.view()); let expected = 0.; - assert!(((distance - expected) as f64).abs() <= f64::EPSILON); + assert!(f64::abs(distance - expected) <= f64::EPSILON); let p1 = Array1::from_shape_vec(2, vec![1., 1.]).unwrap(); let p2 = Array1::from_shape_vec(2, vec![5., 
5.]).unwrap(); let distance = pol_0.distance(p1.view(), p2.view()); let expected = 100.; - assert!(((distance - expected) as f64).abs() <= f64::EPSILON); + assert!(f64::abs(distance - expected) <= f64::EPSILON); let pol_2 = KernelMethod::Polynomial(2., 2.); @@ -673,14 +673,14 @@ mod tests { let distance = pol_2.distance(p1.view(), p2.view()); let expected = 4.; - assert!(((distance - expected) as f64).abs() <= f64::EPSILON); + assert!(f64::abs(distance - expected) <= f64::EPSILON); let p1 = Array1::from_shape_vec(2, vec![1., 1.]).unwrap(); let p2 = Array1::from_shape_vec(2, vec![2., 2.]).unwrap(); let distance = pol_2.distance(p1.view(), p2.view()); let expected = 36.; - assert!(((distance - expected) as f64).abs() <= f64::EPSILON); + assert!(f64::abs(distance - expected) <= f64::EPSILON); } #[test] @@ -720,7 +720,7 @@ mod tests { let input_arr = ndarray::concatenate(Axis(0), &[input_arr_1.view(), input_arr_2.view()]).unwrap(); - for kind in vec![KernelType::Dense, KernelType::Sparse(1)] { + for kind in [KernelType::Dense, KernelType::Sparse(1)] { let kernel = KernelView::params() .kind(kind) // Such a value for eps brings to zero the inner product diff --git a/algorithms/linfa-linear/benches/ols_bench.rs b/algorithms/linfa-linear/benches/ols_bench.rs index e25a08fe8..e8845db6e 100644 --- a/algorithms/linfa-linear/benches/ols_bench.rs +++ b/algorithms/linfa-linear/benches/ols_bench.rs @@ -43,7 +43,7 @@ fn bench(c: &mut Criterion) { BenchmarkId::new(&func_name, size), &dataset, |b, dataset| { - b.iter(|| perform_ols(&dataset)); + b.iter(|| perform_ols(dataset)); }, ); @@ -53,7 +53,7 @@ fn bench(c: &mut Criterion) { BenchmarkId::new(&func_name, size), &dataset, |b, dataset| { - b.iter(|| perform_glm(&dataset)); + b.iter(|| perform_glm(dataset)); }, ); } diff --git a/algorithms/linfa-linear/src/glm/link.rs b/algorithms/linfa-linear/src/glm/link.rs index 9557cbd40..71567a2e5 100644 --- a/algorithms/linfa-linear/src/glm/link.rs +++ b/algorithms/linfa-linear/src/glm/link.rs @@ -177,45 +177,45 @@ mod tests { test_links! [ test_identity_link: { - input: vec![array![1., 1., 1., 1.], array![1.348, 2.879, 4.545, 3.232]], - expected: vec![array![1., 1., 1., 1.], array![1.348, 2.879, 4.545, 3.232]], + input: &[array![1., 1., 1., 1.], array![1.348, 2.879, 4.545, 3.232]], + expected: &[array![1., 1., 1., 1.], array![1.348, 2.879, 4.545, 3.232]], link: IdentityLink::link }, test_identity_link_derivative: { - input: vec![array![1., 1., 1., 1.], array![1.348, 2.879, 4.545, 3.232]], - expected: vec![array![1., 1., 1., 1.], array![1., 1., 1., 1.]], + input: &[array![1., 1., 1., 1.], array![1.348, 2.879, 4.545, 3.232]], + expected: &[array![1., 1., 1., 1.], array![1., 1., 1., 1.]], link: IdentityLink::link_derivative }, test_identity_inverse: { - input: vec![array![1., 1., 1., 1.], array![1.348, 2.879, 4.545, 3.232]], - expected: vec![array![1., 1., 1., 1.], array![1.348, 2.879, 4.545, 3.232]], + input: &[array![1., 1., 1., 1.], array![1.348, 2.879, 4.545, 3.232]], + expected: &[array![1., 1., 1., 1.], array![1.348, 2.879, 4.545, 3.232]], link: IdentityLink::inverse }, test_identity_inverse_derivative: { - input: vec![array![1., 1., 1., 1.], array![1.348, 2.879, 4.545, 3.232]], - expected: vec![array![1., 1., 1., 1.], array![1., 1., 1., 1.]], + input: &[array![1., 1., 1., 1.], array![1.348, 2.879, 4.545, 3.232]], + expected: &[array![1., 1., 1., 1.], array![1., 1., 1., 1.]], link: IdentityLink::inverse_derivative } ]; test_links! 
[ test_log_link: { - input: vec![ + input: &[ array![1.382, 1.329, 1.32, 1.322], array![4.56432e+01, 4.30000e+01, 2.00000e-07, 3.42000e-01], ], - expected: vec![ + expected: &[ array![0.32353173, 0.28442678, 0.27763174, 0.27914574], array![3.82085464, 3.76120012, -15.42494847, -1.07294454], ], link: LogLink::link }, test_log_link_derivative: { - input: vec![ + input: &[ array![1.382, 1.329, 1.32, 1.322], array![4.56432e+01, 4.30000e+01, 2.00000e-07, 3.42000e-01], ], - expected:vec![ + expected: &[ array![0.723589, 0.75244545, 0.75757576, 0.75642965], array![ 2.19090686e-02, @@ -227,22 +227,22 @@ mod tests { link: LogLink::link_derivative }, test_log_inverse: { - input: vec![ + input: &[ array![1.382f32, 1.329f32, 1.32f32, 1.322f32], array![4.56432e+01, 4.30000e+01, 2.00000e-07, 3.42000e-01], ], - expected: vec![ + expected: &[ array![3.982_859_4, 3.777_264, 3.743_421_3, 3.750_915_8], array![6.646_452e19, 4.727_839_5e18, 1.000_000_2e0, 1.407_760_3e0], ], link: LogLink::inverse }, test_log_inverse_derivative: { - input: vec![ + input: &[ array![1.382f32, 1.329f32, 1.32f32, 1.322f32], array![4.56432e+01, 4.30000e+01, 2.00000e-07, 3.42000e-01], ], - expected: vec![ + expected: &[ array![3.982_859_4, 3.777_264, 3.743_421_3, 3.750_915_8], array![6.646_452e19, 4.727_839_5e18, 1.000_000_2e0, 1.407_760_3e0], ], @@ -252,34 +252,34 @@ mod tests { test_links! [ test_logit_link: { - input: vec![ + input: &[ array![0.934, 0.323, 0.989, 0.412], array![0.044, 0.023, 0.999, 0.124] ], - expected: vec![ + expected: &[ array![2.6498217, -0.74001895, 4.49879906, -0.3557036 ], array![-3.07856828, -3.74899244, 6.90675478, -1.95508453], ], link: LogitLink::link }, test_logit_link_derivative: { - input: vec![array![0.934, 0.323, 0.989, 0.412], array![0.044, 0.023, 0.999, 0.124]], - expected: vec![ + input: &[array![0.934, 0.323, 0.989, 0.412], array![0.044, 0.023, 0.999, 0.124]], + expected: &[ array![16.22217896, 4.57308011, 91.92021325, 4.12786474], array![23.77329783, 44.50180232, 1001.001001, 9.20606864], ], link: LogitLink::link_derivative }, test_logit_inverse: { - input: vec![array![0.934, 0.323, 0.989, 0.412], array![0.044, 0.023, 0.999, 0.124]], - expected: vec![ + input: &[array![0.934, 0.323, 0.989, 0.412], array![0.044, 0.023, 0.999, 0.124]], + expected: &[ array![0.71788609, 0.5800552, 0.72889036, 0.60156734], array![0.51099823, 0.50574975, 0.73086192, 0.53096034], ], link: LogitLink::inverse }, test_logit_inverse_derivative: { - input: vec![array![0.934, 0.323, 0.989, 0.412], array![0.044, 0.023, 0.999, 0.124]], - expected: vec![ + input: &[array![0.934, 0.323, 0.989, 0.412], array![0.044, 0.023, 0.999, 0.124]], + expected: &[ array![0.20252565, 0.24359116, 0.1976092, 0.23968407], array![0.24987904, 0.24996694, 0.19670277, 0.24904146], ], diff --git a/algorithms/linfa-linear/src/glm/mod.rs b/algorithms/linfa-linear/src/glm/mod.rs index f38e011f7..4060ea206 100644 --- a/algorithms/linfa-linear/src/glm/mod.rs +++ b/algorithms/linfa-linear/src/glm/mod.rs @@ -115,7 +115,7 @@ struct TweedieProblem<'a, F: Float> { alpha: F, } -impl<'a, A: Float> TweedieProblem<'a, A> { +impl TweedieProblem<'_, A> { fn ypred(&self, p: &Array1) -> (Array1, Array1, usize) { let mut offset = 0; let mut intercept = A::from(0.).unwrap(); @@ -134,7 +134,7 @@ impl<'a, A: Float> TweedieProblem<'a, A> { } } -impl<'a, A: Float> CostFunction for TweedieProblem<'a, A> { +impl CostFunction for TweedieProblem<'_, A> { type Param = Array1; type Output = A; @@ -160,7 +160,7 @@ impl<'a, A: Float> CostFunction for TweedieProblem<'a, A> { } } 
-impl<'a, A: Float> Gradient for TweedieProblem<'a, A> { +impl Gradient for TweedieProblem<'_, A> { type Param = Array1; type Gradient = Array1; diff --git a/algorithms/linfa-linear/src/isotonic.rs b/algorithms/linfa-linear/src/isotonic.rs index b33b0ce69..921d9153f 100644 --- a/algorithms/linfa-linear/src/isotonic.rs +++ b/algorithms/linfa-linear/src/isotonic.rs @@ -19,7 +19,7 @@ impl Float for f64 {} fn pva( ys: &ArrayBase, weights: Option<&[f32]>, - index: &Vec, + index: &[usize], ) -> (Vec, Vec) where F: Float, @@ -30,7 +30,7 @@ where let mut W = Vec::::new(); let mut J_index: Vec = (0..n).collect(); let mut i = 0; - let (mut AvB_zero, mut W_B_zero) = waverage(&ys, weights, i, i, &index); + let (mut AvB_zero, mut W_B_zero) = waverage(ys, weights, i, i, index); while i < n { // Step 1 let j = J_index[i]; @@ -39,7 +39,7 @@ where break; } let l = J_index[k]; - let (AvB_plus, W_B_plus) = waverage(&ys, weights, k, l, &index); + let (AvB_plus, W_B_plus) = waverage(ys, weights, k, l, index); if AvB_zero <= AvB_plus { V.push(AvB_zero); W.push(W_B_zero); @@ -56,7 +56,7 @@ where // Step 2.1 let mut AvB_minus = *V.last().unwrap_or(&F::neg_infinity()); - while V.len() > 0 && AvB_zero < AvB_minus { + while !V.is_empty() && AvB_zero < AvB_minus { AvB_minus = V.pop().unwrap(); let W_B_minus = W.pop().unwrap(); i = J_index[J_index[l] - 1]; @@ -70,7 +70,7 @@ where } // Last block average - let (AvB_minus, _) = waverage(&ys, weights, i, J_index[i], &index); + let (AvB_minus, _) = waverage(ys, weights, i, J_index[i], index); V.push(AvB_minus); (V, J_index) @@ -197,7 +197,7 @@ fn waverage( ws: Option<&[f32]>, start: usize, end: usize, - index: &Vec, + index: &[usize], ) -> (F, F) where F: Float, @@ -205,15 +205,14 @@ where { let mut wsum = F::zero(); let mut avg = F::zero(); - for k in start..=end { - let kk = index[k]; - let w = if ws.is_none() { - F::one() + for kk in &index[start..=end] { + let w = if let Some(ws) = ws { + F::cast(ws[*kk]) } else { - F::cast(ws.unwrap()[kk]) + F::one() }; wsum += w; - avg += vs[kk] * w; + avg += vs[*kk] * w; } avg /= wsum; (avg, wsum) @@ -305,7 +304,6 @@ mod tests { let reg = IsotonicRegression::new(); let dataset = Dataset::new(array![[3.3f64, 0.], [3.3, 0.]], array![4., 5.]); let _res = reg.fit(&dataset); - () } #[test] @@ -314,7 +312,6 @@ mod tests { let reg = IsotonicRegression::default(); let dataset = Dataset::new(array![[3.3f64, 0.], [3.3, 0.]], array![4., 5., 6.]); let _res = reg.fit(&dataset); - () } #[test] diff --git a/algorithms/linfa-linear/src/ols.rs b/algorithms/linfa-linear/src/ols.rs index 5e89acc6d..25968719d 100644 --- a/algorithms/linfa-linear/src/ols.rs +++ b/algorithms/linfa-linear/src/ols.rs @@ -243,9 +243,9 @@ mod tests { /// We can't fit a line through three points in general /// - in this case we should find the solution that minimizes - /// the squares. Fitting a line with intercept through the - /// points (0, 0), (1, 0), (2, 2) has the least-squares solution - /// f(x) = -1./3. + x + /// the squares. Fitting a line with intercept through the + /// points (0, 0), (1, 0), (2, 2) has the least-squares solution + /// f(x) = -1./3. + x #[test] fn fits_least_squares_line_through_three_dots() { let lin_reg = LinearRegression::new(); diff --git a/algorithms/linfa-logistic/README.md b/algorithms/linfa-logistic/README.md index 8e9b25a26..a5479cf48 100644 --- a/algorithms/linfa-logistic/README.md +++ b/algorithms/linfa-logistic/README.md @@ -12,12 +12,12 @@ There are usage examples in the `examples/` directory. 
To run the two-class example, use: ```bash -$ cargo run --example winequality +$ cargo run --example winequality_logistic ``` To run the multinomial example, use: ```bash -$ cargo run --example winequality_multi +$ cargo run --example winequality_multi_logistic ``` ## License diff --git a/algorithms/linfa-logistic/examples/logistic_cv.rs b/algorithms/linfa-logistic/examples/logistic_cv.rs index 1a8115451..5b35da7f6 100644 --- a/algorithms/linfa-logistic/examples/logistic_cv.rs +++ b/algorithms/linfa-logistic/examples/logistic_cv.rs @@ -9,7 +9,7 @@ fn main() -> Result<()> { // define a sequence of models to compare. In this case the // models will differ by the amount of l2 regularization - let alphas = vec![0.1, 1., 10.]; + let alphas = &[0.1, 1., 10.]; let models: Vec<_> = alphas .iter() .map(|alpha| { diff --git a/algorithms/linfa-logistic/src/lib.rs b/algorithms/linfa-logistic/src/lib.rs index 99addeff8..833dbab0b 100644 --- a/algorithms/linfa-logistic/src/lib.rs +++ b/algorithms/linfa-logistic/src/lib.rs @@ -207,7 +207,7 @@ impl< } } -impl<'a, C: 'a + Ord + Clone, F: Float, D: Data, T: AsSingleTargets> +impl, T: AsSingleTargets> Fit, T, Error> for ValidLogisticRegression { type Object = FittedLogisticRegression; @@ -250,7 +250,7 @@ impl<'a, C: 'a + Ord + Clone, F: Float, D: Data, T: AsSingleTargets, T: AsSingleTargets> +impl, T: AsSingleTargets> Fit, T, Error> for ValidMultiLogisticRegression { type Object = MultiFittedLogisticRegression; @@ -778,7 +778,7 @@ struct LogisticRegressionProblem<'a, F: Float, A: Data, D: Dimension> type LogisticRegressionProblem1<'a, F, A> = LogisticRegressionProblem<'a, F, A, Ix1>; type LogisticRegressionProblem2<'a, F, A> = LogisticRegressionProblem<'a, F, A, Ix2>; -impl<'a, F: Float, A: Data> CostFunction for LogisticRegressionProblem1<'a, F, A> { +impl> CostFunction for LogisticRegressionProblem1<'_, F, A> { type Param = ArgminParam; type Output = F; @@ -790,7 +790,7 @@ impl<'a, F: Float, A: Data> CostFunction for LogisticRegressionProblem } } -impl<'a, F: Float, A: Data> Gradient for LogisticRegressionProblem1<'a, F, A> { +impl> Gradient for LogisticRegressionProblem1<'_, F, A> { type Param = ArgminParam; type Gradient = ArgminParam; @@ -802,7 +802,7 @@ impl<'a, F: Float, A: Data> Gradient for LogisticRegressionProblem1<'a } } -impl<'a, F: Float, A: Data> CostFunction for LogisticRegressionProblem2<'a, F, A> { +impl> CostFunction for LogisticRegressionProblem2<'_, F, A> { type Param = ArgminParam; type Output = F; @@ -814,7 +814,7 @@ impl<'a, F: Float, A: Data> CostFunction for LogisticRegressionProblem } } -impl<'a, F: Float, A: Data> Gradient for LogisticRegressionProblem2<'a, F, A> { +impl> Gradient for LogisticRegressionProblem2<'_, F, A> { type Param = ArgminParam; type Gradient = ArgminParam; @@ -830,15 +830,11 @@ trait SolvableProblem: Gradient + Sized { type Solver: Solver>; } -impl<'a, F: Float, A: Data> SolvableProblem - for LogisticRegressionProblem1<'a, F, A> -{ +impl> SolvableProblem for LogisticRegressionProblem1<'_, F, A> { type Solver = LBFGSType1; } -impl<'a, F: Float, A: Data> SolvableProblem - for LogisticRegressionProblem2<'a, F, A> -{ +impl> SolvableProblem for LogisticRegressionProblem2<'_, F, A> { type Solver = LBFGSType2; } @@ -887,7 +883,7 @@ mod test { array![-1.0, 0.0], array![-1.0, -1.0], ]; - let alphas = vec![0.0, 1.0, 10.0]; + let alphas = &[0.0, 1.0, 10.0]; let expecteds = vec![ 6.931471805599453, 6.931471805599453, @@ -948,7 +944,7 @@ mod test { array![-1.0, 0.0], array![-1.0, -1.0], ]; - let alphas = vec![0.0, 1.0, 
10.0]; + let alphas = &[0.0, 1.0, 10.0]; let expecteds = vec![ array![-19.5, -3.], array![-19.5, -3.], @@ -1064,7 +1060,7 @@ mod test { #[test] fn rejects_inf_values() { - let infs = vec![std::f64::INFINITY, std::f64::NEG_INFINITY, std::f64::NAN]; + let infs = &[f64::INFINITY, f64::NEG_INFINITY, f64::NAN]; let inf_xs: Vec<_> = infs.iter().map(|&inf| array![[1.0], [inf]]).collect(); let log_reg = LogisticRegression::default(); let normal_x = array![[-1.0], [1.0]]; @@ -1073,12 +1069,12 @@ mod test { let res = log_reg.fit(&DatasetBase::new(inf_x.view(), &y)); assert!(matches!(res.unwrap_err(), Error::InvalidValues)); } - for inf in &infs { + for inf in infs { let log_reg = LogisticRegression::default().alpha(*inf); let res = log_reg.fit(&DatasetBase::new(normal_x.view(), &y)); assert!(matches!(res.unwrap_err(), Error::InvalidAlpha)); } - let mut non_positives = infs; + let mut non_positives = infs.to_vec(); non_positives.push(-1.0); non_positives.push(0.0); for inf in &non_positives { @@ -1090,11 +1086,11 @@ mod test { #[test] fn validates_initial_params() { - let infs = vec![std::f64::INFINITY, std::f64::NEG_INFINITY, std::f64::NAN]; + let infs = &[f64::INFINITY, f64::NEG_INFINITY, f64::NAN]; let normal_x = array![[-1.0], [1.0]]; let normal_y = array![0, 1]; let dataset = Dataset::new(normal_x, normal_y); - for inf in &infs { + for inf in infs { let log_reg = LogisticRegression::default().initial_params(array![*inf, 0.0]); let res = log_reg.fit(&dataset); assert!(matches!(res.unwrap_err(), Error::InvalidInitialParameters)); diff --git a/algorithms/linfa-nn/src/balltree.rs b/algorithms/linfa-nn/src/balltree.rs index 8954b6991..9174bbd1d 100644 --- a/algorithms/linfa-nn/src/balltree.rs +++ b/algorithms/linfa-nn/src/balltree.rs @@ -196,9 +196,9 @@ impl<'a, F: Float, D: Distance> BallTreeIndex<'a, F, D> { } } - fn nn_helper<'b>( + fn nn_helper( &self, - point: Point<'b, F>, + point: Point<'_, F>, k: usize, max_radius: F, ) -> Result, usize)>, NnError> { @@ -260,18 +260,14 @@ impl<'a, F: Float, D: Distance> BallTreeIndex<'a, F, D> { } } -impl<'a, F: Float, D: Distance> NearestNeighbourIndex for BallTreeIndex<'a, F, D> { - fn k_nearest<'b>( - &self, - point: Point<'b, F>, - k: usize, - ) -> Result, usize)>, NnError> { +impl> NearestNeighbourIndex for BallTreeIndex<'_, F, D> { + fn k_nearest(&self, point: Point<'_, F>, k: usize) -> Result, usize)>, NnError> { self.nn_helper(point, k, F::infinity()) } - fn within_range<'b>( + fn within_range( &self, - point: Point<'b, F>, + point: Point<'_, F>, range: F, ) -> Result, usize)>, NnError> { let range = self.dist_fn.dist_to_rdist(range); diff --git a/algorithms/linfa-nn/src/heap_elem.rs b/algorithms/linfa-nn/src/heap_elem.rs index 7fd061030..87b34b387 100644 --- a/algorithms/linfa-nn/src/heap_elem.rs +++ b/algorithms/linfa-nn/src/heap_elem.rs @@ -16,6 +16,7 @@ impl PartialEq for HeapElem { } impl Eq for HeapElem {} +#[allow(clippy::non_canonical_partial_ord_impl)] impl PartialOrd for HeapElem { fn partial_cmp(&self, other: &Self) -> Option { self.dist.partial_cmp(&other.dist) diff --git a/algorithms/linfa-nn/src/kdtree.rs b/algorithms/linfa-nn/src/kdtree.rs index b04135488..9cc328902 100644 --- a/algorithms/linfa-nn/src/kdtree.rs +++ b/algorithms/linfa-nn/src/kdtree.rs @@ -50,12 +50,8 @@ impl From for NnError { } } -impl<'a, F: Float, D: Distance> NearestNeighbourIndex for KdTreeIndex<'a, F, D> { - fn k_nearest<'b>( - &self, - point: Point<'b, F>, - k: usize, - ) -> Result, usize)>, NnError> { +impl> NearestNeighbourIndex for KdTreeIndex<'_, F, D> { + fn 
k_nearest(&self, point: Point<'_, F>, k: usize) -> Result, usize)>, NnError> { Ok(self .0 .nearest( @@ -68,9 +64,9 @@ impl<'a, F: Float, D: Distance> NearestNeighbourIndex for KdTreeIndex<'a, .collect()) } - fn within_range<'b>( + fn within_range( &self, - point: Point<'b, F>, + point: Point<'_, F>, range: F, ) -> Result, usize)>, NnError> { let range = self.1.dist_to_rdist(range); diff --git a/algorithms/linfa-nn/src/lib.rs b/algorithms/linfa-nn/src/lib.rs index 87a2d0c1a..32cdd254b 100644 --- a/algorithms/linfa-nn/src/lib.rs +++ b/algorithms/linfa-nn/src/lib.rs @@ -67,6 +67,7 @@ pub trait NearestNeighbour: std::fmt::Debug + Send + Sync + Unpin { /// /// Returns an error if the points have dimensionality of 0 or if the leaf size is 0. If any /// value in the batch is NaN or infinite, the behaviour is unspecified. + #[allow(clippy::wrong_self_convention)] fn from_batch_with_leaf_size<'a, F: Float, DT: Data, D: 'a + Distance>( &self, batch: &'a ArrayBase, @@ -76,6 +77,7 @@ pub trait NearestNeighbour: std::fmt::Debug + Send + Sync + Unpin { /// Builds a spatial index using a default leaf size. See `from_batch_with_leaf_size` for more /// information. + #[allow(clippy::wrong_self_convention)] fn from_batch<'a, F: Float, DT: Data, D: 'a + Distance>( &self, batch: &'a ArrayBase, @@ -96,11 +98,7 @@ pub trait NearestNeighbourIndex: Send + Sync + Unpin { /// /// Returns an error if the provided point has different dimensionality than the index's /// points. - fn k_nearest<'b>( - &self, - point: Point<'b, F>, - k: usize, - ) -> Result, usize)>, NnError>; + fn k_nearest(&self, point: Point<'_, F>, k: usize) -> Result, usize)>, NnError>; /// Returns all the points in the index that are within the specified distance to the provided /// point, along with their positions in the original dataset. The points are not guaranteed to @@ -108,9 +106,9 @@ pub trait NearestNeighbourIndex: Send + Sync + Unpin { /// /// Returns an error if the provided point has different dimensionality than the index's /// points. 
- fn within_range<'b>( + fn within_range( &self, - point: Point<'b, F>, + point: Point<'_, F>, range: F, ) -> Result, usize)>, NnError>; } diff --git a/algorithms/linfa-nn/src/linear.rs b/algorithms/linfa-nn/src/linear.rs index a9948a1a0..f59fdcced 100644 --- a/algorithms/linfa-nn/src/linear.rs +++ b/algorithms/linfa-nn/src/linear.rs @@ -29,12 +29,8 @@ impl<'a, F: Float, D: Distance> LinearSearchIndex<'a, F, D> { } } -impl<'a, F: Float, D: Distance> NearestNeighbourIndex for LinearSearchIndex<'a, F, D> { - fn k_nearest<'b>( - &self, - point: Point<'b, F>, - k: usize, - ) -> Result, usize)>, NnError> { +impl> NearestNeighbourIndex for LinearSearchIndex<'_, F, D> { + fn k_nearest(&self, point: Point<'_, F>, k: usize) -> Result, usize)>, NnError> { if self.0.ncols() != point.len() { Err(NnError::WrongDimension) } else { @@ -53,9 +49,9 @@ impl<'a, F: Float, D: Distance> NearestNeighbourIndex for LinearSearchInde } } - fn within_range<'b>( + fn within_range( &self, - point: Point<'b, F>, + point: Point<'_, F>, range: F, ) -> Result, usize)>, NnError> { if self.0.ncols() != point.len() { diff --git a/algorithms/linfa-pls/benches/pls.rs b/algorithms/linfa-pls/benches/pls.rs index fe4b98959..951eeb4e6 100644 --- a/algorithms/linfa-pls/benches/pls.rs +++ b/algorithms/linfa-pls/benches/pls.rs @@ -13,7 +13,7 @@ fn pls_regression(dataset: &Dataset, alg: Algorithm) { .scale(true) .max_iterations(200) .algorithm(alg); - model.fit(&dataset); + model.fit(dataset); } #[allow(unused_must_use)] @@ -22,7 +22,7 @@ fn pls_canonical(dataset: &Dataset, alg: Algorithm) { .scale(true) .max_iterations(200) .algorithm(alg); - model.fit(&dataset); + model.fit(dataset); } #[allow(unused_must_use)] fn pls_cca(dataset: &Dataset, alg: Algorithm) { @@ -30,7 +30,7 @@ fn pls_cca(dataset: &Dataset, alg: Algorithm) { .scale(true) .max_iterations(200) .algorithm(alg); - model.fit(&dataset); + model.fit(dataset); } fn bench(c: &mut Criterion) { diff --git a/algorithms/linfa-pls/src/lib.rs b/algorithms/linfa-pls/src/lib.rs index 2e5efb0b4..cebe3dd5e 100644 --- a/algorithms/linfa-pls/src/lib.rs +++ b/algorithms/linfa-pls/src/lib.rs @@ -123,7 +123,7 @@ macro_rules! pls_algo { ($name:ident) => { /// Given an input matrix `X`, with shape `(n_samples, n_features)`, /// `predict` returns the target variable according to [] method /// learned from the training data distribution. 
- fn predict_inplace<'a>(&'a self, x: &ArrayBase<D, Ix2>, y: &mut Array2<F>) { + fn predict_inplace(&'_ self, x: &ArrayBase<D, Ix2>, y: &mut Array2<F>) { self.0.predict_inplace(x, y); } diff --git a/algorithms/linfa-preprocessing/benches/vectorizer_bench.rs b/algorithms/linfa-preprocessing/benches/vectorizer_bench.rs index 0debbbafd..b64bcbe2d 100644 --- a/algorithms/linfa-preprocessing/benches/vectorizer_bench.rs +++ b/algorithms/linfa-preprocessing/benches/vectorizer_bench.rs @@ -80,7 +80,7 @@ fn load_test_set(desired_targets: &[&str]) -> Result, st load_set("./20news/20news-bydate-test", desired_targets) } -fn fit_vectorizer(file_names: &Vec<std::path::PathBuf>) { +fn fit_vectorizer(file_names: &[std::path::PathBuf]) { CountVectorizer::params() .document_frequency(0.05, 0.75) .n_gram_range(1, 2) @@ -92,7 +92,7 @@ fn fit_vectorizer(file_names: &Vec<std::path::PathBuf>) { .unwrap(); } -fn fit_tf_idf(file_names: &Vec<std::path::PathBuf>) { +fn fit_tf_idf(file_names: &[std::path::PathBuf]) { TfIdfVectorizer::default() .document_frequency(0.05, 0.75) .n_gram_range(1, 2) @@ -104,7 +104,7 @@ fn fit_tf_idf(file_names: &Vec<std::path::PathBuf>) { .unwrap(); } -fn fit_transform_vectorizer(file_names: &Vec<std::path::PathBuf>) { +fn fit_transform_vectorizer(file_names: &[std::path::PathBuf]) { CountVectorizer::params() .document_frequency(0.05, 0.75) .n_gram_range(1, 2) @@ -120,7 +120,7 @@ fn fit_transform_vectorizer(file_names: &Vec<std::path::PathBuf>) { encoding::DecoderTrap::Strict, ); } -fn fit_transform_tf_idf(file_names: &Vec<std::path::PathBuf>) { +fn fit_transform_tf_idf(file_names: &[std::path::PathBuf]) { TfIdfVectorizer::default() .document_frequency(0.05, 0.75) .n_gram_range(1, 2) diff --git a/algorithms/linfa-preprocessing/src/countgrams/hyperparams.rs b/algorithms/linfa-preprocessing/src/countgrams/hyperparams.rs index 963124f90..1382dc56e 100644 --- a/algorithms/linfa-preprocessing/src/countgrams/hyperparams.rs +++ b/algorithms/linfa-preprocessing/src/countgrams/hyperparams.rs @@ -49,7 +49,7 @@ impl SerdeRegex { /// they will still be used by the [CountVectorizer](crate::CountVectorizer) to transform any text to be examined. /// /// * `split_regex`: the regular expression used to split documents into tokens. Defaults to r"\\b\\w\\w+\\b", which selects "words", using whitespaces and -/// punctuation symbols as separators. +/// punctuation symbols as separators. /// * `convert_to_lowercase`: if true, all documents used for fitting will be converted to lowercase. Defaults to `true`. /// * `n_gram_range`: if set to `(1,1)` single tokens will be candidate vocabulary entries, if `(2,2)` then adjacent token pairs will be considered, /// if `(1,2)` then both single tokens and adjacent token pairs will be considered, and so on.
The definition of token depends on the diff --git a/algorithms/linfa-preprocessing/src/countgrams/mod.rs b/algorithms/linfa-preprocessing/src/countgrams/mod.rs index 7f1a799e3..8eb7a247d 100644 --- a/algorithms/linfa-preprocessing/src/countgrams/mod.rs +++ b/algorithms/linfa-preprocessing/src/countgrams/mod.rs @@ -294,7 +294,7 @@ impl CountVectorizer { let mut sprs_vectorized = CsMat::empty(sprs::CompressedStorage::CSR, self.vocabulary.len()); sprs_vectorized.reserve_outer_dim_exact(x.len()); let regex = self.properties.split_regex(); - for (_string_index, string) in x.into_iter().map(|s| s.to_string()).enumerate() { + for string in x.into_iter().map(|s| s.to_string()) { let row = self.analyze_document(string, ®ex, document_frequencies.view_mut()); sprs_vectorized = sprs_vectorized.append_outer_csvec(row.view()); } @@ -313,7 +313,7 @@ impl CountVectorizer { let mut sprs_vectorized = CsMat::empty(sprs::CompressedStorage::CSR, self.vocabulary.len()); sprs_vectorized.reserve_outer_dim_exact(input.len()); let regex = self.properties.split_regex(); - for (_file_index, file_path) in input.iter().enumerate() { + for file_path in input.iter() { let mut file = std::fs::File::open(file_path).unwrap(); let mut document_bytes = Vec::new(); file.read_to_end(&mut document_bytes).unwrap(); @@ -755,7 +755,7 @@ mod tests { "./count_vectorization_test_file_3", "./count_vectorization_test_file_4", ]; - let contents = vec!["oNe two three four", "TWO three four", "three;four", "four"]; + let contents = &["oNe two three four", "TWO three four", "three;four", "four"]; //create files and write contents for (f_name, f_content) in file_names.iter().zip(contents.iter()) { let mut file = File::create(f_name).unwrap(); diff --git a/algorithms/linfa-preprocessing/src/helpers.rs b/algorithms/linfa-preprocessing/src/helpers.rs index 87e7ea07b..2a98b01f5 100644 --- a/algorithms/linfa-preprocessing/src/helpers.rs +++ b/algorithms/linfa-preprocessing/src/helpers.rs @@ -13,7 +13,7 @@ pub struct NGramListIntoIterator<'a> { index: usize, } -impl<'a> Iterator for NGramListIntoIterator<'a> { +impl Iterator for NGramListIntoIterator<'_> { type Item = Vec; fn next(&mut self) -> Option { if self.index >= self.list.len() { diff --git a/algorithms/linfa-preprocessing/src/linear_scaling.rs b/algorithms/linfa-preprocessing/src/linear_scaling.rs index 03f094748..d0806045b 100644 --- a/algorithms/linfa-preprocessing/src/linear_scaling.rs +++ b/algorithms/linfa-preprocessing/src/linear_scaling.rs @@ -22,8 +22,7 @@ use serde_crate::{Deserialize, Serialize}; /// Possible scaling methods for [LinearScaler] /// /// * Standard (with mean, with std): subtracts the mean to each feature and scales it by the inverse of its standard deviation -/// * MinMax (min, max): scales each feature to fit in the range `min..=max`, default values are -/// `0..=1` +/// * MinMax (min, max): scales each feature to fit in the range `min..=max`, default values are `0..=1` /// * MaxAbs: scales each feature by the inverse of its maximum absolute value, so that it fits the range `-1..=1` pub enum ScalingMethod { Standard(bool, bool), diff --git a/algorithms/linfa-preprocessing/src/tf_idf_vectorization.rs b/algorithms/linfa-preprocessing/src/tf_idf_vectorization.rs index 3e3bf464f..72fd04b79 100644 --- a/algorithms/linfa-preprocessing/src/tf_idf_vectorization.rs +++ b/algorithms/linfa-preprocessing/src/tf_idf_vectorization.rs @@ -334,7 +334,7 @@ mod tests { "./tf_idf_vectorization_test_file_4", "./tf_idf_vectorization_test_file_5", ]; - let contents = vec![ + let 
contents = &[ "one and two and three", "three and four and five", "seven and eight", diff --git a/algorithms/linfa-reduction/src/pca.rs b/algorithms/linfa-reduction/src/pca.rs index d24eab1d2..dfc4cab47 100644 --- a/algorithms/linfa-reduction/src/pca.rs +++ b/algorithms/linfa-reduction/src/pca.rs @@ -191,12 +191,12 @@ impl Pca { &self.sigma } - /// Transform data back to its original space + /// Transform data back to its original space pub fn inverse_transform( - &self, - prediction: ArrayBase, ndarray::Dim<[usize; 2]>>, - ) -> ArrayBase, ndarray::Dim<[usize; 2]>> { - prediction.dot(&self.embedding) + &self.mean + &self, + prediction: ArrayBase, ndarray::Dim<[usize; 2]>>, + ) -> ArrayBase, ndarray::Dim<[usize; 2]>> { + prediction.dot(&self.embedding) + &self.mean } } diff --git a/algorithms/linfa-reduction/src/random_projection/methods.rs b/algorithms/linfa-reduction/src/random_projection/methods.rs index cba33aaae..9d2642017 100644 --- a/algorithms/linfa-reduction/src/random_projection/methods.rs +++ b/algorithms/linfa-reduction/src/random_projection/methods.rs @@ -31,7 +31,10 @@ pub struct Gaussian; impl ProjectionMethod for Gaussian { type RandomDistribution = StandardNormal; - type ProjectionMatrix = Array2 where StandardNormal: Distribution; + type ProjectionMatrix + = Array2 + where + StandardNormal: Distribution; fn generate_matrix( n_features: usize, @@ -52,7 +55,10 @@ pub struct Sparse; impl ProjectionMethod for Sparse { type RandomDistribution = Standard; - type ProjectionMatrix = CsMat where Standard: Distribution; + type ProjectionMatrix + = CsMat + where + Standard: Distribution; fn generate_matrix( n_features: usize, diff --git a/algorithms/linfa-svm/src/classification.rs b/algorithms/linfa-svm/src/classification.rs index cd91dad07..13205a52b 100644 --- a/algorithms/linfa-svm/src/classification.rs +++ b/algorithms/linfa-svm/src/classification.rs @@ -327,7 +327,7 @@ impl> Predict, Pr> for Svm } /// Predict a probability with a feature vector -impl<'a, F: Float, D: Data> Predict, bool> for Svm { +impl> Predict, bool> for Svm { fn predict(&self, data: ArrayBase) -> bool { let val = self.weighted_sum(&data) - self.rho; @@ -335,24 +335,24 @@ impl<'a, F: Float, D: Data> Predict, bool> for Svm Predict, Pr> for Svm { - fn predict(&self, data: ArrayView1<'a, F>) -> Pr { - let val = self.weighted_sum(&data) - self.rho; - let (a, b) = self.probability_coeffs.clone().unwrap(); +// /// Predict a probability with a feature vector +// impl<'a, F: Float> Predict, Pr> for Svm { +// fn predict(&self, data: ArrayView1<'a, F>) -> Pr { +// let val = self.weighted_sum(&data) - self.rho; +// let (a, b) = self.probability_coeffs.clone().unwrap(); - platt_predict(val, a, b) - } -} +// platt_predict(val, a, b) +// } +// } -/// Predict a probability with a feature vector -impl Predict, bool> for Svm { - fn predict(&self, data: Array1) -> bool { - let val = self.weighted_sum(&data) - self.rho; +// /// Predict a probability with a feature vector +// impl Predict, bool> for Svm { +// fn predict(&self, data: Array1) -> bool { +// let val = self.weighted_sum(&data) - self.rho; - val >= F::zero() - } -}*/ +// val >= F::zero() +// } +// } /// Classify observations /// diff --git a/algorithms/linfa-svm/src/lib.rs b/algorithms/linfa-svm/src/lib.rs index 65c5fee75..75726ff54 100644 --- a/algorithms/linfa-svm/src/lib.rs +++ b/algorithms/linfa-svm/src/lib.rs @@ -199,7 +199,7 @@ impl Svm { /// /// In order to understand the solution of the SMO solver the objective, number of iterations and /// required support 
vectors are printed here. -impl<'a, F: Float, T> fmt::Display for Svm { +impl fmt::Display for Svm { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.exit_reason { ExitReason::ReachedThreshold => write!( diff --git a/algorithms/linfa-svm/src/permutable_kernel.rs b/algorithms/linfa-svm/src/permutable_kernel.rs index 89fefb285..ef3e2d7f1 100644 --- a/algorithms/linfa-svm/src/permutable_kernel.rs +++ b/algorithms/linfa-svm/src/permutable_kernel.rs @@ -146,7 +146,7 @@ pub struct PermutableKernelRegression { signs: Vec, } -impl<'a, F: Float> PermutableKernelRegression { +impl PermutableKernelRegression { pub fn new(kernel: Kernel) -> PermutableKernelRegression { let kernel_diag = kernel.diagonal(); let kernel_indices = (0..2 * kernel.size()) @@ -171,7 +171,7 @@ impl<'a, F: Float> PermutableKernelRegression { } } -impl<'a, F: Float> Permutable for PermutableKernelRegression { +impl Permutable for PermutableKernelRegression { /// Swap two indices fn swap_indices(&mut self, i: usize, j: usize) { self.kernel_indices.swap(i, j); diff --git a/algorithms/linfa-svm/src/regression.rs b/algorithms/linfa-svm/src/regression.rs index 0f703cbee..5d9757b66 100644 --- a/algorithms/linfa-svm/src/regression.rs +++ b/algorithms/linfa-svm/src/regression.rs @@ -179,7 +179,7 @@ macro_rules! impl_predict { /// This function takes a number of features and predicts target probabilities that they belong to /// the positive class. impl> PredictInplace, Array1<$t>> for Svm<$t, $t> { - fn predict_inplace<'a>(&'a self, data: &ArrayBase, targets: &mut Array1<$t>) { + fn predict_inplace(&'_ self, data: &ArrayBase, targets: &mut Array1<$t>) { assert_eq!(data.nrows(), targets.len(), "The number of data points must match the number of output targets."); for (data, target) in data.outer_iter().zip(targets.iter_mut()) { diff --git a/algorithms/linfa-svm/src/solver_smo.rs b/algorithms/linfa-svm/src/solver_smo.rs index 69ba1bc2a..51007a97e 100644 --- a/algorithms/linfa-svm/src/solver_smo.rs +++ b/algorithms/linfa-svm/src/solver_smo.rs @@ -780,8 +780,8 @@ impl<'a, F: Float, K: 'a + Permutable> SolverState<'a, F, K> { pub fn solve(mut self) -> Svm { let mut iter = 0; - let max_iter = if self.targets.len() > std::usize::MAX / 100 { - std::usize::MAX + let max_iter = if self.targets.len() > usize::MAX / 100 { + usize::MAX } else { 100 * self.targets.len() }; diff --git a/algorithms/linfa-trees/benches/decision_tree.rs b/algorithms/linfa-trees/benches/decision_tree.rs index f16eae3ab..fdaeeb0c9 100644 --- a/algorithms/linfa-trees/benches/decision_tree.rs +++ b/algorithms/linfa-trees/benches/decision_tree.rs @@ -22,7 +22,7 @@ fn decision_tree_bench(c: &mut Criterion) { let mut rng = SmallRng::seed_from_u64(42); // Controls how many samples for each class are generated - let training_set_sizes = vec![100, 1000, 10000, 100000]; + let training_set_sizes = &[100, 1000, 10000, 100000]; let n_classes = 4; let n_features = 4; @@ -40,8 +40,7 @@ fn decision_tree_bench(c: &mut Criterion) { let train_x = generate_blobs(¢roids, *n, &mut rng); let train_y: Array1 = (0..n_classes) - .map(|x| std::iter::repeat(x).take(*n).collect::>()) - .flatten() + .flat_map(|x| std::iter::repeat(x).take(*n).collect::>()) .collect::>(); let dataset = DatasetBase::new(train_x, train_y); diff --git a/algorithms/linfa-trees/src/decision_trees/algorithm.rs b/algorithms/linfa-trees/src/decision_trees/algorithm.rs index c460a9299..245f09f05 100644 --- a/algorithms/linfa-trees/src/decision_trees/algorithm.rs +++ 
b/algorithms/linfa-trees/src/decision_trees/algorithm.rs @@ -41,7 +41,7 @@ impl RowMask { /// fn all(nsamples: usize) -> Self { RowMask { - mask: vec![true; nsamples as usize], + mask: vec![true; nsamples], nsamples, } } @@ -53,7 +53,7 @@ impl RowMask { /// * `nsamples`: the total number of observations fn none(nsamples: usize) -> Self { RowMask { - mask: vec![false; nsamples as usize], + mask: vec![false; nsamples], nsamples: 0, } } @@ -431,7 +431,7 @@ impl TreeNode { /// ### Structure /// A decision tree structure is a binary tree where: /// * Each internal node specifies a decision, represented by a choice of a feature and a "split value" such that all observations for which -/// `feature <= split_value` is true fall in the left subtree, while the others fall in the right subtree. +/// `feature <= split_value` is true fall in the left subtree, while the others fall in the right subtree. /// /// * leaf nodes make predictions, and their prediction is the most popular label in the node /// @@ -511,7 +511,7 @@ impl> PredictInplace Fit, T, Error> +impl Fit, T, Error> for DecisionTreeValidParams where D: Data, @@ -578,7 +578,7 @@ impl DecisionTree { impurity_decrease .into_iter() - .zip(num_nodes.into_iter()) + .zip(num_nodes) .map(|(val, n)| if n == 0 { F::zero() } else { val / F::cast(n) }) .collect() } diff --git a/algorithms/linfa-trees/src/decision_trees/iter.rs b/algorithms/linfa-trees/src/decision_trees/iter.rs index 0fe523cd8..522c57d58 100644 --- a/algorithms/linfa-trees/src/decision_trees/iter.rs +++ b/algorithms/linfa-trees/src/decision_trees/iter.rs @@ -20,12 +20,12 @@ impl<'a, F: Float, L: Debug + Label> Iterator for NodeIter<'a, F, L> { type Item = &'a TreeNode<F, L>; fn next(&mut self) -> Option<Self::Item> { + #[allow(clippy::manual_inspect)] self.queue.pop().map(|node| { node.children() .into_iter() .filter_map(|x| x.as_ref()) .for_each(|child| self.queue.push(child)); - node }) } diff --git a/algorithms/linfa-trees/src/decision_trees/tikz.rs b/algorithms/linfa-trees/src/decision_trees/tikz.rs index 786f8a9fb..e0f67536c 100644 --- a/algorithms/linfa-trees/src/decision_trees/tikz.rs +++ b/algorithms/linfa-trees/src/decision_trees/tikz.rs @@ -8,8 +8,7 @@ use std::fmt::Debug; /// There are two settable parameters: /// /// * `legend`: if true, a box with the names of the split features will appear in the top right of the tree -/// * `complete`: if true, a complete and standalone Tex document will be generated; otherwise the result will an embeddable -/// Tex tree. +/// * `complete`: if true, a complete and standalone Tex document will be generated; otherwise the result will be an embeddable Tex tree.
/// /// ### Usage /// @@ -49,7 +48,7 @@ impl<'a, F: Float, L: Debug + Label> Tikz<'a, F, L> { } } - fn format_node(&self, node: &'a TreeNode<F, L>) -> String { + fn format_node(node: &'a TreeNode<F, L>) -> String { let depth = vec![""; node.depth() + 1].join("\t"); if let Some(prediction) = node.prediction() { format!("{}[Label: {:?}]", depth, prediction) } else { @@ -61,7 +60,7 @@ impl<'a, F: Float, L: Debug + Label> Tikz<'a, F, L> { ); for child in node.children().into_iter().filter_map(|x| x.as_ref()) { out.push('\n'); - out.push_str(&self.format_node(child)); + out.push_str(&Self::format_node(child)); } out.push(']'); @@ -113,7 +112,7 @@ impl<'a, F: Float, L: Debug + Label> Tikz<'a, F, L> { use std::fmt; -impl<'a, F: Float, L: Debug + Label> fmt::Display for Tikz<'a, F, L> { +impl<F: Float, L: Debug + Label> fmt::Display for Tikz<'_, F, L> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut out = if self.complete { String::from( @@ -161,7 +160,7 @@ for tree={ } out.push_str(r#"\begin{forest}"#); - out.push_str(&self.format_node(self.tree.root_node())); + out.push_str(&Self::format_node(self.tree.root_node())); out.push_str(&self.legend()); out.push_str("\n\t\\end{forest}\n"); if self.complete { diff --git a/algorithms/linfa-tsne/examples/tsne.rs b/algorithms/linfa-tsne/examples/tsne.rs index 20017b4fd..dc7d8206b 100644 --- a/algorithms/linfa-tsne/examples/tsne.rs +++ b/algorithms/linfa-tsne/examples/tsne.rs @@ -25,7 +25,9 @@ fn main() -> Result<()> { .spawn() .expect( "Failed to launch gnuplot. Please ensure that gnuplot is installed and on the $PATH.", - ); + ) + .wait() + .expect("Failed to wait on gnuplot sub-process"); Ok(()) } diff --git a/algorithms/linfa-tsne/src/hyperparams.rs b/algorithms/linfa-tsne/src/hyperparams.rs index 2a9894fe8..ac2f8da7c 100644 --- a/algorithms/linfa-tsne/src/hyperparams.rs +++ b/algorithms/linfa-tsne/src/hyperparams.rs @@ -29,7 +29,7 @@ use crate::TSneError; /// .approx_threshold(0.6) /// .transform(ds); /// ``` - +/// /// A verified hyper-parameter set ready for prediction #[derive(Debug, Clone, PartialEq)] pub struct TSneValidParams { diff --git a/algorithms/linfa-tsne/src/lib.rs b/algorithms/linfa-tsne/src/lib.rs index ea0617734..889ec0b20 100644 --- a/algorithms/linfa-tsne/src/lib.rs +++ b/algorithms/linfa-tsne/src/lib.rs @@ -31,18 +31,18 @@ impl Transformer, Result>> for TSn None => usize::min(self.max_iter() / 2, 250), }; - let mut data = data.as_slice_mut().unwrap(); + let data = data.as_slice_mut().unwrap(); let mut rng = self.rng().clone(); let normal = Normal::new(0.0, 1e-4 * 10e-4).unwrap(); let mut embedding: Vec<F> = (0..nsamples * self.embedding_size()) - .map(|_| rng.sample(&normal)) + .map(|_| rng.sample(normal)) .map(F::cast) .collect(); bhtsne::run( - &mut data, + data, nsamples, nfeatures, &mut embedding, diff --git a/src/correlation.rs b/src/correlation.rs index e77905813..9220091e5 100644 --- a/src/correlation.rs +++ b/src/correlation.rs @@ -128,7 +128,7 @@ impl PearsonCorrelation { /// /// * `dataset`: Data for the correlation analysis /// * `num_iter`: optionally the number of iterations of the p-value test, if none then no p-value - /// are calculate + /// is calculated /// /// # Example /// @@ -153,7 +153,6 @@ impl PearsonCorrelation { /// lamotrigine +0.47 (0.14) /// blood sugar level /// ``` - pub fn from_dataset<F: Float, D: Data<Elem = F>, T>( dataset: &DatasetBase<ArrayBase<D, Ix2>, T>, num_iter: Option<usize>, diff --git a/src/dataset/impl_dataset.rs b/src/dataset/impl_dataset.rs index b8148793f..239a144e8 100644 --- a/src/dataset/impl_dataset.rs +++ b/src/dataset/impl_dataset.rs @@ -208,7 +208,7 @@ where ///
println!("{} => {}", x, y); /// } /// ``` - pub fn sample_iter(&'a self) -> Iter<'a, '_, F, T::Elem, T::Ix> { + pub fn sample_iter(&'a self) -> Iter<'a, 'a, F, T::Elem, T::Ix> { Iter::new(self.records.view(), self.targets.as_targets()) } } @@ -232,7 +232,7 @@ where /// /// This iterator produces dataset views with only a single feature, while the set of targets remains /// complete. It can be useful to compare each feature individually to all targets. - pub fn feature_iter(&'a self) -> DatasetIter<'a, '_, ArrayBase<D, Ix2>, T> { + pub fn feature_iter(&'a self) -> DatasetIter<'a, 'a, ArrayBase<D, Ix2>, T> { DatasetIter::new(self, true) } @@ -241,7 +241,7 @@ /// This function creates an iterator which produces dataset views with complete records, but only /// a single target each. Useful to train multiple single target models for a multi-target /// dataset. - pub fn target_iter(&'a self) -> DatasetIter<'a, '_, ArrayBase<D, Ix2>, T> { + pub fn target_iter(&'a self) -> DatasetIter<'a, 'a, ArrayBase<D, Ix2>, T> { DatasetIter::new(self, false) } } @@ -318,7 +318,7 @@ impl, R: Records> Labels for DatasetBase { } #[allow(clippy::type_complexity)] -impl<'a, 'b: 'a, F, L: Label, T, D> DatasetBase<ArrayBase<D, Ix2>, T> +impl<F, L: Label, T, D> DatasetBase<ArrayBase<D, Ix2>, T> where D: Data<Elem = F>, T: AsSingleTargets<Elem = L> + Labels<Elem = L>, @@ -680,8 +680,8 @@ where /// - `k`: the number of folds to apply to the dataset /// - `params`: the desired parameters for the fittable algorithm at hand /// - `fit_closure`: a closure of the type `(params, training_data) -> fitted_model` - /// that will be used to produce the trained model for each fold. The training data given in input - /// won't outlive the closure. + /// that will be used to produce the trained model for each fold. The training data given in input + /// won't outlive the closure. /// /// ## Returns /// @@ -732,7 +732,7 @@ where &'a mut self, k: usize, fit_closure: C, - ) -> impl Iterator, ArrayView>)> { + ) -> impl Iterator, ArrayView<'a, E, I>>)> { assert!(k > 0); assert!(k <= self.nsamples()); let samples_count = self.nsamples(); @@ -794,9 +794,9 @@ where /// - `k`: the number of folds to apply /// - `parameters`: a list of models to compare /// - `eval`: closure used to evaluate the performance of each trained model. This closure is - /// called on the model output and validation targets of each fold and outputs the performance - /// score for each target. For single-target dataset the signature is `(Array1, Array1) -> - /// Array0`. For multi-target dataset the signature is `(Array2, Array2) -> Array1`. + /// called on the model output and validation targets of each fold and outputs the performance + /// score for each target. For single-target dataset the signature is `(Array1, Array1) -> + /// Array0`. For multi-target dataset the signature is `(Array2, Array2) -> Array1`.
/// /// ### Returns /// diff --git a/src/dataset/mod.rs b/src/dataset/mod.rs index 251bf0f8f..24c8ac2a4 100644 --- a/src/dataset/mod.rs +++ b/src/dataset/mod.rs @@ -161,7 +161,7 @@ impl Deref for Pr { /// # Fields /// /// * `records`: a two-dimensional matrix with dimensionality (nsamples, nfeatures), in case of -/// kernel methods a quadratic matrix with dimensionality (nsamples, nsamples), which may be sparse +/// kernel methods a quadratic matrix with dimensionality (nsamples, nsamples), which may be sparse /// * `targets`: a two-/one-dimension matrix with dimensionality (nsamples, ntargets) /// * `weights`: optional weights for each sample with dimensionality (nsamples) /// * `feature_names`: optional descriptive feature names with dimensionality (nfeatures) @@ -170,7 +170,7 @@ impl Deref for Pr { /// /// * `R: Records`: generic over feature matrices or kernel matrices /// * `T`: generic over any `ndarray` matrix which can be used as targets. The `AsTargets` trait -/// bound is omitted here to avoid some repetition in implementation `src/dataset/impl_dataset.rs` +/// bound is omitted here to avoid some repetition in implementation `src/dataset/impl_dataset.rs` #[derive(Debug, Clone, PartialEq)] pub struct DatasetBase where diff --git a/src/metrics_clustering.rs b/src/metrics_clustering.rs index b49f4c5be..0ba14264d 100644 --- a/src/metrics_clustering.rs +++ b/src/metrics_clustering.rs @@ -62,13 +62,8 @@ impl DistanceCount { } } -impl< - 'a, - F: Float, - L: 'a + Label, - D: Data, - T: AsSingleTargets + Labels, - > SilhouetteScore for DatasetBase, T> +impl, T: AsSingleTargets + Labels> + SilhouetteScore for DatasetBase, T> { fn silhouette_score(&self) -> Result { let mut labels: HashMap> = self