diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 1bca099aa..35f72acf4 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -26,6 +26,22 @@ jobs:
           components: clippy
       - uses: Swatinem/rust-cache@v2
       - run: cargo clippy --features docs
+
+  format:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        rust:
+          - nightly
+    name: format/${{ matrix.rust }}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: ${{ matrix.rust }}
+          components: rustfmt
+      - run: cargo fmt --all --check
+
   tests:
     runs-on: ubuntu-latest
     strategy:
@@ -103,6 +119,7 @@ jobs:
   conclusion:
     needs:
       - clippy
+      # - format # should format be required?
       - tests
       - cross_test
       - cargo-careful
diff --git a/benches/append.rs b/benches/append.rs
index 1a911a278..a37df256f 100644
--- a/benches/append.rs
+++ b/benches/append.rs
@@ -6,30 +6,27 @@ use test::Bencher;
 
 use ndarray::prelude::*;
 
 #[bench]
-fn select_axis0(bench: &mut Bencher) {
+fn select_axis0(bench: &mut Bencher)
+{
     let a = Array::::zeros((256, 256));
     let selectable = vec![0, 1, 2, 0, 1, 3, 0, 4, 16, 32, 128, 147, 149, 220, 221, 255, 221, 0, 1];
-    bench.iter(|| {
-        a.select(Axis(0), &selectable)
-    });
+    bench.iter(|| a.select(Axis(0), &selectable));
 }
 
 #[bench]
-fn select_axis1(bench: &mut Bencher) {
+fn select_axis1(bench: &mut Bencher)
+{
     let a = Array::::zeros((256, 256));
     let selectable = vec![0, 1, 2, 0, 1, 3, 0, 4, 16, 32, 128, 147, 149, 220, 221, 255, 221, 0, 1];
-    bench.iter(|| {
-        a.select(Axis(1), &selectable)
-    });
+    bench.iter(|| a.select(Axis(1), &selectable));
 }
 
 #[bench]
-fn select_1d(bench: &mut Bencher) {
+fn select_1d(bench: &mut Bencher)
+{
     let a = Array::::zeros(1024);
     let mut selectable = (0..a.len()).step_by(17).collect::>();
     selectable.extend(selectable.clone().iter().rev());
-    bench.iter(|| {
-        a.select(Axis(0), &selectable)
-    });
+    bench.iter(|| a.select(Axis(0), &selectable));
 }
diff --git a/benches/bench1.rs b/benches/bench1.rs
index 6b6864194..33185844a 100644
--- a/benches/bench1.rs
+++ b/benches/bench1.rs
@@ -1,25 +1,23 @@
 #![feature(test)]
 #![allow(unused_imports)]
 #![allow(
-    clippy::many_single_char_names,
-    clippy::deref_addrof,
-    clippy::unreadable_literal,
-    clippy::many_single_char_names
+    clippy::many_single_char_names, clippy::deref_addrof, clippy::unreadable_literal, clippy::many_single_char_names
 )]
 extern crate test;
 
 use std::mem::MaybeUninit;
 
-use ndarray::{ShapeBuilder, Array3, Array4};
 use ndarray::{arr0, arr1, arr2, azip, s};
 use ndarray::{Array, Array1, Array2, Axis, Ix, Zip};
+use ndarray::{Array3, Array4, ShapeBuilder};
 use ndarray::{Ix1, Ix2, Ix3, Ix5, IxDyn};
 use test::black_box;
 
 #[bench]
-fn iter_sum_1d_regular(bench: &mut test::Bencher) {
+fn iter_sum_1d_regular(bench: &mut test::Bencher)
+{
     let a = Array::::zeros(64 * 64);
     let a = black_box(a);
     bench.iter(|| {
@@ -32,7 +30,8 @@ fn iter_sum_1d_regular(bench: &mut test::Bencher) {
 }
 
 #[bench]
-fn iter_sum_1d_raw(bench: &mut test::Bencher) {
+fn iter_sum_1d_raw(bench: &mut test::Bencher)
+{
     // this is autovectorized to death (= great performance)
     let a = Array::::zeros(64 * 64);
     let a = black_box(a);
@@ -46,7 +45,8 @@ fn iter_sum_1d_raw(bench: &mut test::Bencher) {
 }
 
 #[bench]
-fn iter_sum_2d_regular(bench: &mut test::Bencher) {
+fn iter_sum_2d_regular(bench: &mut test::Bencher)
+{
     let a = Array::::zeros((64, 64));
     let a = black_box(a);
     bench.iter(|| {
@@ -59,7 +59,8 @@ fn iter_sum_2d_regular(bench: &mut test::Bencher) {
 }
 
 #[bench]
-fn iter_sum_2d_by_row(bench: &mut test::Bencher) {
+fn 
iter_sum_2d_by_row(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let a = black_box(a); bench.iter(|| { @@ -74,7 +75,8 @@ fn iter_sum_2d_by_row(bench: &mut test::Bencher) { } #[bench] -fn iter_sum_2d_raw(bench: &mut test::Bencher) { +fn iter_sum_2d_raw(bench: &mut test::Bencher) +{ // this is autovectorized to death (= great performance) let a = Array::::zeros((64, 64)); let a = black_box(a); @@ -88,7 +90,8 @@ fn iter_sum_2d_raw(bench: &mut test::Bencher) { } #[bench] -fn iter_sum_2d_cutout(bench: &mut test::Bencher) { +fn iter_sum_2d_cutout(bench: &mut test::Bencher) +{ let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = black_box(av); @@ -102,7 +105,8 @@ fn iter_sum_2d_cutout(bench: &mut test::Bencher) { } #[bench] -fn iter_sum_2d_cutout_by_row(bench: &mut test::Bencher) { +fn iter_sum_2d_cutout_by_row(bench: &mut test::Bencher) +{ let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = black_box(av); @@ -118,7 +122,8 @@ fn iter_sum_2d_cutout_by_row(bench: &mut test::Bencher) { } #[bench] -fn iter_sum_2d_cutout_outer_iter(bench: &mut test::Bencher) { +fn iter_sum_2d_cutout_outer_iter(bench: &mut test::Bencher) +{ let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = black_box(av); @@ -134,7 +139,8 @@ fn iter_sum_2d_cutout_outer_iter(bench: &mut test::Bencher) { } #[bench] -fn iter_sum_2d_transpose_regular(bench: &mut test::Bencher) { +fn iter_sum_2d_transpose_regular(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((64, 64)); a.swap_axes(0, 1); let a = black_box(a); @@ -148,7 +154,8 @@ fn iter_sum_2d_transpose_regular(bench: &mut test::Bencher) { } #[bench] -fn iter_sum_2d_transpose_by_row(bench: &mut test::Bencher) { +fn iter_sum_2d_transpose_by_row(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((64, 64)); a.swap_axes(0, 1); let a = black_box(a); @@ -164,14 +171,16 @@ fn iter_sum_2d_transpose_by_row(bench: &mut test::Bencher) { } #[bench] -fn sum_2d_regular(bench: &mut test::Bencher) { +fn sum_2d_regular(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let a = black_box(a); bench.iter(|| a.sum()); } #[bench] -fn sum_2d_cutout(bench: &mut test::Bencher) { +fn sum_2d_cutout(bench: &mut test::Bencher) +{ let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = black_box(av); @@ -179,14 +188,16 @@ fn sum_2d_cutout(bench: &mut test::Bencher) { } #[bench] -fn sum_2d_float(bench: &mut test::Bencher) { +fn sum_2d_float(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let a = black_box(a.view()); bench.iter(|| a.sum()); } #[bench] -fn sum_2d_float_cutout(bench: &mut test::Bencher) { +fn sum_2d_float_cutout(bench: &mut test::Bencher) +{ let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = black_box(av); @@ -194,7 +205,8 @@ fn sum_2d_float_cutout(bench: &mut test::Bencher) { } #[bench] -fn sum_2d_float_t_cutout(bench: &mut test::Bencher) { +fn sum_2d_float_t_cutout(bench: &mut test::Bencher) +{ let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]).reversed_axes(); let a = black_box(av); @@ -202,13 +214,15 @@ fn sum_2d_float_t_cutout(bench: &mut test::Bencher) { } #[bench] -fn fold_sum_i32_2d_regular(bench: &mut test::Bencher) { +fn fold_sum_i32_2d_regular(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); bench.iter(|| a.fold(0, |acc, &x| acc + x)); } #[bench] -fn fold_sum_i32_2d_cutout(bench: &mut test::Bencher) { +fn fold_sum_i32_2d_cutout(bench: &mut test::Bencher) +{ let a = 
Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = black_box(av); @@ -216,7 +230,8 @@ fn fold_sum_i32_2d_cutout(bench: &mut test::Bencher) { } #[bench] -fn fold_sum_i32_2d_stride(bench: &mut test::Bencher) { +fn fold_sum_i32_2d_stride(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 128)); let av = a.slice(s![.., ..;2]); let a = black_box(av); @@ -224,14 +239,16 @@ fn fold_sum_i32_2d_stride(bench: &mut test::Bencher) { } #[bench] -fn fold_sum_i32_2d_transpose(bench: &mut test::Bencher) { +fn fold_sum_i32_2d_transpose(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let a = a.t(); bench.iter(|| a.fold(0, |acc, &x| acc + x)); } #[bench] -fn fold_sum_i32_2d_cutout_transpose(bench: &mut test::Bencher) { +fn fold_sum_i32_2d_cutout_transpose(bench: &mut test::Bencher) +{ let a = Array::::zeros((66, 66)); let mut av = a.slice(s![1..-1, 1..-1]); av.swap_axes(0, 1); @@ -242,7 +259,8 @@ fn fold_sum_i32_2d_cutout_transpose(bench: &mut test::Bencher) { const ADD2DSZ: usize = 64; #[bench] -fn add_2d_regular(bench: &mut test::Bencher) { +fn add_2d_regular(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); let bv = b.view(); @@ -252,7 +270,8 @@ fn add_2d_regular(bench: &mut test::Bencher) { } #[bench] -fn add_2d_zip(bench: &mut test::Bencher) { +fn add_2d_zip(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); bench.iter(|| { @@ -261,14 +280,16 @@ fn add_2d_zip(bench: &mut test::Bencher) { } #[bench] -fn add_2d_alloc_plus(bench: &mut test::Bencher) { +fn add_2d_alloc_plus(bench: &mut test::Bencher) +{ let a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); bench.iter(|| &a + &b); } #[bench] -fn add_2d_alloc_zip_uninit(bench: &mut test::Bencher) { +fn add_2d_alloc_zip_uninit(bench: &mut test::Bencher) +{ let a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); bench.iter(|| unsafe { @@ -281,49 +302,44 @@ fn add_2d_alloc_zip_uninit(bench: &mut test::Bencher) { } #[bench] -fn add_2d_alloc_zip_collect(bench: &mut test::Bencher) { +fn add_2d_alloc_zip_collect(bench: &mut test::Bencher) +{ let a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); - bench.iter(|| { - Zip::from(&a).and(&b).map_collect(|&x, &y| x + y) - }); + bench.iter(|| Zip::from(&a).and(&b).map_collect(|&x, &y| x + y)); } #[bench] -fn vec_string_collect(bench: &mut test::Bencher) { +fn vec_string_collect(bench: &mut test::Bencher) +{ let v = vec![""; 10240]; - bench.iter(|| { - v.iter().map(|s| s.to_owned()).collect::>() - }); + bench.iter(|| v.iter().map(|s| s.to_owned()).collect::>()); } #[bench] -fn array_string_collect(bench: &mut test::Bencher) { +fn array_string_collect(bench: &mut test::Bencher) +{ let v = Array::from(vec![""; 10240]); - bench.iter(|| { - Zip::from(&v).map_collect(|s| s.to_owned()) - }); + bench.iter(|| Zip::from(&v).map_collect(|s| s.to_owned())); } #[bench] -fn vec_f64_collect(bench: &mut test::Bencher) { +fn vec_f64_collect(bench: &mut test::Bencher) +{ let v = vec![1.; 10240]; - bench.iter(|| { - v.iter().map(|s| s + 1.).collect::>() - }); + bench.iter(|| v.iter().map(|s| s + 1.).collect::>()); } #[bench] -fn array_f64_collect(bench: &mut test::Bencher) { +fn array_f64_collect(bench: &mut test::Bencher) +{ let v = Array::from(vec![1.; 10240]); - bench.iter(|| { - Zip::from(&v).map_collect(|s| s + 1.) 
- }); + bench.iter(|| Zip::from(&v).map_collect(|s| s + 1.)); } - #[bench] -fn add_2d_assign_ops(bench: &mut test::Bencher) { +fn add_2d_assign_ops(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); let bv = b.view(); @@ -335,7 +351,8 @@ fn add_2d_assign_ops(bench: &mut test::Bencher) { } #[bench] -fn add_2d_cutout(bench: &mut test::Bencher) { +fn add_2d_cutout(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ + 2, ADD2DSZ + 2)); let mut acut = a.slice_mut(s![1..-1, 1..-1]); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -346,7 +363,8 @@ fn add_2d_cutout(bench: &mut test::Bencher) { } #[bench] -fn add_2d_zip_cutout(bench: &mut test::Bencher) { +fn add_2d_zip_cutout(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ + 2, ADD2DSZ + 2)); let mut acut = a.slice_mut(s![1..-1, 1..-1]); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -357,7 +375,8 @@ fn add_2d_zip_cutout(bench: &mut test::Bencher) { #[bench] #[allow(clippy::identity_op)] -fn add_2d_cutouts_by_4(bench: &mut test::Bencher) { +fn add_2d_cutouts_by_4(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((64 * 1, 64 * 1)); let b = Array::::zeros((64 * 1, 64 * 1)); let chunksz = (4, 4); @@ -370,7 +389,8 @@ fn add_2d_cutouts_by_4(bench: &mut test::Bencher) { #[bench] #[allow(clippy::identity_op)] -fn add_2d_cutouts_by_16(bench: &mut test::Bencher) { +fn add_2d_cutouts_by_16(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((64 * 1, 64 * 1)); let b = Array::::zeros((64 * 1, 64 * 1)); let chunksz = (16, 16); @@ -383,7 +403,8 @@ fn add_2d_cutouts_by_16(bench: &mut test::Bencher) { #[bench] #[allow(clippy::identity_op)] -fn add_2d_cutouts_by_32(bench: &mut test::Bencher) { +fn add_2d_cutouts_by_32(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((64 * 1, 64 * 1)); let b = Array::::zeros((64 * 1, 64 * 1)); let chunksz = (32, 32); @@ -395,7 +416,8 @@ fn add_2d_cutouts_by_32(bench: &mut test::Bencher) { } #[bench] -fn add_2d_broadcast_1_to_2(bench: &mut test::Bencher) { +fn add_2d_broadcast_1_to_2(bench: &mut test::Bencher) +{ let mut a = Array2::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array1::::zeros(ADD2DSZ); let bv = b.view(); @@ -405,7 +427,8 @@ fn add_2d_broadcast_1_to_2(bench: &mut test::Bencher) { } #[bench] -fn add_2d_broadcast_0_to_2(bench: &mut test::Bencher) { +fn add_2d_broadcast_0_to_2(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros(()); let bv = b.view(); @@ -415,50 +438,55 @@ fn add_2d_broadcast_0_to_2(bench: &mut test::Bencher) { } #[bench] -fn scalar_toowned(bench: &mut test::Bencher) { +fn scalar_toowned(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); bench.iter(|| a.to_owned()); } #[bench] -fn scalar_add_1(bench: &mut test::Bencher) { +fn scalar_add_1(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let n = 1.; bench.iter(|| &a + n); } #[bench] -fn scalar_add_2(bench: &mut test::Bencher) { +fn scalar_add_2(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let n = 1.; bench.iter(|| n + &a); } #[bench] -fn scalar_add_strided_1(bench: &mut test::Bencher) { - let a = - Array::from_shape_fn((64, 64 * 2), |(i, j)| (i * 64 + j) as f32).slice_move(s![.., ..;2]); +fn scalar_add_strided_1(bench: &mut test::Bencher) +{ + let a = Array::from_shape_fn((64, 64 * 2), |(i, j)| (i * 64 + j) as f32).slice_move(s![.., ..;2]); let n = 1.; bench.iter(|| &a + n); } #[bench] -fn scalar_add_strided_2(bench: &mut test::Bencher) { - let a = 
- Array::from_shape_fn((64, 64 * 2), |(i, j)| (i * 64 + j) as f32).slice_move(s![.., ..;2]); +fn scalar_add_strided_2(bench: &mut test::Bencher) +{ + let a = Array::from_shape_fn((64, 64 * 2), |(i, j)| (i * 64 + j) as f32).slice_move(s![.., ..;2]); let n = 1.; bench.iter(|| n + &a); } #[bench] -fn scalar_sub_1(bench: &mut test::Bencher) { +fn scalar_sub_1(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let n = 1.; bench.iter(|| &a - n); } #[bench] -fn scalar_sub_2(bench: &mut test::Bencher) { +fn scalar_sub_2(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let n = 1.; bench.iter(|| n - &a); @@ -466,7 +494,8 @@ fn scalar_sub_2(bench: &mut test::Bencher) { // This is for comparison with add_2d_broadcast_0_to_2 #[bench] -fn add_2d_0_to_2_iadd_scalar(bench: &mut test::Bencher) { +fn add_2d_0_to_2_iadd_scalar(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let n = black_box(0); bench.iter(|| { @@ -475,7 +504,8 @@ fn add_2d_0_to_2_iadd_scalar(bench: &mut test::Bencher) { } #[bench] -fn add_2d_strided(bench: &mut test::Bencher) { +fn add_2d_strided(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ * 2)); let mut a = a.slice_mut(s![.., ..;2]); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -486,7 +516,8 @@ fn add_2d_strided(bench: &mut test::Bencher) { } #[bench] -fn add_2d_regular_dyn(bench: &mut test::Bencher) { +fn add_2d_regular_dyn(bench: &mut test::Bencher) +{ let mut a = Array::::zeros(&[ADD2DSZ, ADD2DSZ][..]); let b = Array::::zeros(&[ADD2DSZ, ADD2DSZ][..]); let bv = b.view(); @@ -496,7 +527,8 @@ fn add_2d_regular_dyn(bench: &mut test::Bencher) { } #[bench] -fn add_2d_strided_dyn(bench: &mut test::Bencher) { +fn add_2d_strided_dyn(bench: &mut test::Bencher) +{ let mut a = Array::::zeros(&[ADD2DSZ, ADD2DSZ * 2][..]); let mut a = a.slice_mut(s![.., ..;2]); let b = Array::::zeros(&[ADD2DSZ, ADD2DSZ][..]); @@ -507,7 +539,8 @@ fn add_2d_strided_dyn(bench: &mut test::Bencher) { } #[bench] -fn add_2d_zip_strided(bench: &mut test::Bencher) { +fn add_2d_zip_strided(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ * 2)); let mut a = a.slice_mut(s![.., ..;2]); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -517,7 +550,8 @@ fn add_2d_zip_strided(bench: &mut test::Bencher) { } #[bench] -fn add_2d_one_transposed(bench: &mut test::Bencher) { +fn add_2d_one_transposed(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -527,7 +561,8 @@ fn add_2d_one_transposed(bench: &mut test::Bencher) { } #[bench] -fn add_2d_zip_one_transposed(bench: &mut test::Bencher) { +fn add_2d_zip_one_transposed(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -537,7 +572,8 @@ fn add_2d_zip_one_transposed(bench: &mut test::Bencher) { } #[bench] -fn add_2d_both_transposed(bench: &mut test::Bencher) { +fn add_2d_both_transposed(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let mut b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -548,7 +584,8 @@ fn add_2d_both_transposed(bench: &mut test::Bencher) { } #[bench] -fn add_2d_zip_both_transposed(bench: &mut test::Bencher) { +fn add_2d_zip_both_transposed(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let mut b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -559,7 +596,8 @@ fn 
add_2d_zip_both_transposed(bench: &mut test::Bencher) { } #[bench] -fn add_2d_f32_regular(bench: &mut test::Bencher) { +fn add_2d_f32_regular(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); let bv = b.view(); @@ -571,7 +609,8 @@ fn add_2d_f32_regular(bench: &mut test::Bencher) { const ADD3DSZ: usize = 16; #[bench] -fn add_3d_strided(bench: &mut test::Bencher) { +fn add_3d_strided(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD3DSZ, ADD3DSZ, ADD3DSZ * 2)); let mut a = a.slice_mut(s![.., .., ..;2]); let b = Array::::zeros(a.dim()); @@ -582,7 +621,8 @@ fn add_3d_strided(bench: &mut test::Bencher) { } #[bench] -fn add_3d_strided_dyn(bench: &mut test::Bencher) { +fn add_3d_strided_dyn(bench: &mut test::Bencher) +{ let mut a = Array::::zeros(&[ADD3DSZ, ADD3DSZ, ADD3DSZ * 2][..]); let mut a = a.slice_mut(s![.., .., ..;2]); let b = Array::::zeros(a.dim()); @@ -595,7 +635,8 @@ fn add_3d_strided_dyn(bench: &mut test::Bencher) { const ADD1D_SIZE: usize = 64 * 64; #[bench] -fn add_1d_regular(bench: &mut test::Bencher) { +fn add_1d_regular(bench: &mut test::Bencher) +{ let mut a = Array::::zeros(ADD1D_SIZE); let b = Array::::zeros(a.dim()); bench.iter(|| { @@ -604,7 +645,8 @@ fn add_1d_regular(bench: &mut test::Bencher) { } #[bench] -fn add_1d_strided(bench: &mut test::Bencher) { +fn add_1d_strided(bench: &mut test::Bencher) +{ let mut a = Array::::zeros(ADD1D_SIZE * 2); let mut av = a.slice_mut(s![..;2]); let b = Array::::zeros(av.dim()); @@ -614,7 +656,8 @@ fn add_1d_strided(bench: &mut test::Bencher) { } #[bench] -fn iadd_scalar_2d_regular(bench: &mut test::Bencher) { +fn iadd_scalar_2d_regular(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); bench.iter(|| { a += 1.; @@ -622,7 +665,8 @@ fn iadd_scalar_2d_regular(bench: &mut test::Bencher) { } #[bench] -fn iadd_scalar_2d_strided(bench: &mut test::Bencher) { +fn iadd_scalar_2d_strided(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ * 2)); let mut a = a.slice_mut(s![.., ..;2]); bench.iter(|| { @@ -631,7 +675,8 @@ fn iadd_scalar_2d_strided(bench: &mut test::Bencher) { } #[bench] -fn iadd_scalar_2d_regular_dyn(bench: &mut test::Bencher) { +fn iadd_scalar_2d_regular_dyn(bench: &mut test::Bencher) +{ let mut a = Array::::zeros(vec![ADD2DSZ, ADD2DSZ]); bench.iter(|| { a += 1.; @@ -639,7 +684,8 @@ fn iadd_scalar_2d_regular_dyn(bench: &mut test::Bencher) { } #[bench] -fn iadd_scalar_2d_strided_dyn(bench: &mut test::Bencher) { +fn iadd_scalar_2d_strided_dyn(bench: &mut test::Bencher) +{ let mut a = Array::::zeros(vec![ADD2DSZ, ADD2DSZ * 2]); let mut a = a.slice_mut(s![.., ..;2]); bench.iter(|| { @@ -648,7 +694,8 @@ fn iadd_scalar_2d_strided_dyn(bench: &mut test::Bencher) { } #[bench] -fn scaled_add_2d_f32_regular(bench: &mut test::Bencher) { +fn scaled_add_2d_f32_regular(bench: &mut test::Bencher) +{ let mut av = Array::::zeros((ADD2DSZ, ADD2DSZ)); let bv = Array::::zeros((ADD2DSZ, ADD2DSZ)); let scalar = std::f32::consts::PI; @@ -658,7 +705,8 @@ fn scaled_add_2d_f32_regular(bench: &mut test::Bencher) { } #[bench] -fn assign_scalar_2d_corder(bench: &mut test::Bencher) { +fn assign_scalar_2d_corder(bench: &mut test::Bencher) +{ let a = Array::zeros((ADD2DSZ, ADD2DSZ)); let mut a = black_box(a); let s = 3.; @@ -666,7 +714,8 @@ fn assign_scalar_2d_corder(bench: &mut test::Bencher) { } #[bench] -fn assign_scalar_2d_cutout(bench: &mut test::Bencher) { +fn assign_scalar_2d_cutout(bench: &mut test::Bencher) +{ let mut a = 
Array::zeros((66, 66)); let a = a.slice_mut(s![1..-1, 1..-1]); let mut a = black_box(a); @@ -675,7 +724,8 @@ fn assign_scalar_2d_cutout(bench: &mut test::Bencher) { } #[bench] -fn assign_scalar_2d_forder(bench: &mut test::Bencher) { +fn assign_scalar_2d_forder(bench: &mut test::Bencher) +{ let mut a = Array::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let mut a = black_box(a); @@ -684,14 +734,16 @@ fn assign_scalar_2d_forder(bench: &mut test::Bencher) { } #[bench] -fn assign_zero_2d_corder(bench: &mut test::Bencher) { +fn assign_zero_2d_corder(bench: &mut test::Bencher) +{ let a = Array::zeros((ADD2DSZ, ADD2DSZ)); let mut a = black_box(a); bench.iter(|| a.fill(0.)) } #[bench] -fn assign_zero_2d_cutout(bench: &mut test::Bencher) { +fn assign_zero_2d_cutout(bench: &mut test::Bencher) +{ let mut a = Array::zeros((66, 66)); let a = a.slice_mut(s![1..-1, 1..-1]); let mut a = black_box(a); @@ -699,7 +751,8 @@ fn assign_zero_2d_cutout(bench: &mut test::Bencher) { } #[bench] -fn assign_zero_2d_forder(bench: &mut test::Bencher) { +fn assign_zero_2d_forder(bench: &mut test::Bencher) +{ let mut a = Array::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let mut a = black_box(a); @@ -707,7 +760,8 @@ fn assign_zero_2d_forder(bench: &mut test::Bencher) { } #[bench] -fn bench_iter_diag(bench: &mut test::Bencher) { +fn bench_iter_diag(bench: &mut test::Bencher) +{ let a = Array::::zeros((1024, 1024)); bench.iter(|| { for elt in a.diag() { @@ -717,7 +771,8 @@ fn bench_iter_diag(bench: &mut test::Bencher) { } #[bench] -fn bench_row_iter(bench: &mut test::Bencher) { +fn bench_row_iter(bench: &mut test::Bencher) +{ let a = Array::::zeros((1024, 1024)); let it = a.row(17); bench.iter(|| { @@ -728,7 +783,8 @@ fn bench_row_iter(bench: &mut test::Bencher) { } #[bench] -fn bench_col_iter(bench: &mut test::Bencher) { +fn bench_col_iter(bench: &mut test::Bencher) +{ let a = Array::::zeros((1024, 1024)); let it = a.column(17); bench.iter(|| { @@ -755,7 +811,7 @@ macro_rules! mat_mul { } )+ } - } + }; } mat_mul! {mat_mul_f32, f32, @@ -798,7 +854,8 @@ mat_mul! 
{mat_mul_i32, i32, } #[bench] -fn create_iter_4d(bench: &mut test::Bencher) { +fn create_iter_4d(bench: &mut test::Bencher) +{ let mut a = Array::from_elem((4, 5, 3, 2), 1.0); a.swap_axes(0, 1); a.swap_axes(2, 1); @@ -808,82 +865,94 @@ fn create_iter_4d(bench: &mut test::Bencher) { } #[bench] -fn bench_to_owned_n(bench: &mut test::Bencher) { +fn bench_to_owned_n(bench: &mut test::Bencher) +{ let a = Array::::zeros((32, 32)); bench.iter(|| a.to_owned()); } #[bench] -fn bench_to_owned_t(bench: &mut test::Bencher) { +fn bench_to_owned_t(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((32, 32)); a.swap_axes(0, 1); bench.iter(|| a.to_owned()); } #[bench] -fn bench_to_owned_strided(bench: &mut test::Bencher) { +fn bench_to_owned_strided(bench: &mut test::Bencher) +{ let a = Array::::zeros((32, 64)); let a = a.slice(s![.., ..;2]); bench.iter(|| a.to_owned()); } #[bench] -fn equality_i32(bench: &mut test::Bencher) { +fn equality_i32(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let b = Array::::zeros((64, 64)); bench.iter(|| a == b); } #[bench] -fn equality_f32(bench: &mut test::Bencher) { +fn equality_f32(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let b = Array::::zeros((64, 64)); bench.iter(|| a == b); } #[bench] -fn equality_f32_mixorder(bench: &mut test::Bencher) { +fn equality_f32_mixorder(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let b = Array::::zeros((64, 64).f()); bench.iter(|| a == b); } #[bench] -fn dot_f32_16(bench: &mut test::Bencher) { +fn dot_f32_16(bench: &mut test::Bencher) +{ let a = Array::::zeros(16); let b = Array::::zeros(16); bench.iter(|| a.dot(&b)); } #[bench] -fn dot_f32_20(bench: &mut test::Bencher) { +fn dot_f32_20(bench: &mut test::Bencher) +{ let a = Array::::zeros(20); let b = Array::::zeros(20); bench.iter(|| a.dot(&b)); } #[bench] -fn dot_f32_32(bench: &mut test::Bencher) { +fn dot_f32_32(bench: &mut test::Bencher) +{ let a = Array::::zeros(32); let b = Array::::zeros(32); bench.iter(|| a.dot(&b)); } #[bench] -fn dot_f32_256(bench: &mut test::Bencher) { +fn dot_f32_256(bench: &mut test::Bencher) +{ let a = Array::::zeros(256); let b = Array::::zeros(256); bench.iter(|| a.dot(&b)); } #[bench] -fn dot_f32_1024(bench: &mut test::Bencher) { +fn dot_f32_1024(bench: &mut test::Bencher) +{ let av = Array::::zeros(1024); let bv = Array::::zeros(1024); bench.iter(|| av.dot(&bv)); } #[bench] -fn dot_f32_10e6(bench: &mut test::Bencher) { +fn dot_f32_10e6(bench: &mut test::Bencher) +{ let n = 1_000_000; let av = Array::::zeros(n); let bv = Array::::zeros(n); @@ -891,7 +960,8 @@ fn dot_f32_10e6(bench: &mut test::Bencher) { } #[bench] -fn dot_extended(bench: &mut test::Bencher) { +fn dot_extended(bench: &mut test::Bencher) +{ let m = 10; let n = 33; let k = 10; @@ -912,7 +982,8 @@ fn dot_extended(bench: &mut test::Bencher) { const MEAN_SUM_N: usize = 127; -fn range_mat(m: Ix, n: Ix) -> Array2 { +fn range_mat(m: Ix, n: Ix) -> Array2 +{ assert!(m * n != 0); Array::linspace(0., (m * n - 1) as f32, m * n) .into_shape_with_order((m, n)) @@ -920,87 +991,100 @@ fn range_mat(m: Ix, n: Ix) -> Array2 { } #[bench] -fn mean_axis0(bench: &mut test::Bencher) { +fn mean_axis0(bench: &mut test::Bencher) +{ let a = range_mat(MEAN_SUM_N, MEAN_SUM_N); bench.iter(|| a.mean_axis(Axis(0))); } #[bench] -fn mean_axis1(bench: &mut test::Bencher) { +fn mean_axis1(bench: &mut test::Bencher) +{ let a = range_mat(MEAN_SUM_N, MEAN_SUM_N); bench.iter(|| a.mean_axis(Axis(1))); } #[bench] -fn sum_axis0(bench: &mut test::Bencher) { +fn 
sum_axis0(bench: &mut test::Bencher) +{ let a = range_mat(MEAN_SUM_N, MEAN_SUM_N); bench.iter(|| a.sum_axis(Axis(0))); } #[bench] -fn sum_axis1(bench: &mut test::Bencher) { +fn sum_axis1(bench: &mut test::Bencher) +{ let a = range_mat(MEAN_SUM_N, MEAN_SUM_N); bench.iter(|| a.sum_axis(Axis(1))); } #[bench] -fn into_dimensionality_ix1_ok(bench: &mut test::Bencher) { +fn into_dimensionality_ix1_ok(bench: &mut test::Bencher) +{ let a = Array::::zeros(Ix1(10)); let a = a.view(); bench.iter(|| a.into_dimensionality::()); } #[bench] -fn into_dimensionality_ix3_ok(bench: &mut test::Bencher) { +fn into_dimensionality_ix3_ok(bench: &mut test::Bencher) +{ let a = Array::::zeros(Ix3(10, 10, 10)); let a = a.view(); bench.iter(|| a.into_dimensionality::()); } #[bench] -fn into_dimensionality_ix3_err(bench: &mut test::Bencher) { +fn into_dimensionality_ix3_err(bench: &mut test::Bencher) +{ let a = Array::::zeros(Ix3(10, 10, 10)); let a = a.view(); bench.iter(|| a.into_dimensionality::()); } #[bench] -fn into_dimensionality_dyn_to_ix3(bench: &mut test::Bencher) { +fn into_dimensionality_dyn_to_ix3(bench: &mut test::Bencher) +{ let a = Array::::zeros(IxDyn(&[10, 10, 10])); let a = a.view(); bench.iter(|| a.clone().into_dimensionality::()); } #[bench] -fn into_dimensionality_dyn_to_dyn(bench: &mut test::Bencher) { +fn into_dimensionality_dyn_to_dyn(bench: &mut test::Bencher) +{ let a = Array::::zeros(IxDyn(&[10, 10, 10])); let a = a.view(); bench.iter(|| a.clone().into_dimensionality::()); } #[bench] -fn into_dyn_ix3(bench: &mut test::Bencher) { +fn into_dyn_ix3(bench: &mut test::Bencher) +{ let a = Array::::zeros(Ix3(10, 10, 10)); let a = a.view(); bench.iter(|| a.into_dyn()); } #[bench] -fn into_dyn_ix5(bench: &mut test::Bencher) { +fn into_dyn_ix5(bench: &mut test::Bencher) +{ let a = Array::::zeros(Ix5(2, 2, 2, 2, 2)); let a = a.view(); bench.iter(|| a.into_dyn()); } #[bench] -fn into_dyn_dyn(bench: &mut test::Bencher) { +fn into_dyn_dyn(bench: &mut test::Bencher) +{ let a = Array::::zeros(IxDyn(&[10, 10, 10])); let a = a.view(); bench.iter(|| a.clone().into_dyn()); } #[bench] -fn broadcast_same_dim(bench: &mut test::Bencher) { +fn broadcast_same_dim(bench: &mut test::Bencher) +{ let s = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; let s = Array4::from_shape_vec((2, 2, 3, 2), s.to_vec()).unwrap(); let a = s.slice(s![.., ..;-1, ..;2, ..]); @@ -1009,10 +1093,11 @@ fn broadcast_same_dim(bench: &mut test::Bencher) { } #[bench] -fn broadcast_one_side(bench: &mut test::Bencher) { +fn broadcast_one_side(bench: &mut test::Bencher) +{ let s = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; - let s2 = [1 ,2 ,3 ,4 ,5 ,6]; + let s2 = [1, 2, 3, 4, 5, 6]; let a = Array4::from_shape_vec((4, 1, 3, 2), s.to_vec()).unwrap(); let b = Array3::from_shape_vec((1, 3, 2), s2.to_vec()).unwrap(); bench.iter(|| &a + &b); -} \ No newline at end of file +} diff --git a/benches/chunks.rs b/benches/chunks.rs index 5ea9ba466..46780492f 100644 --- a/benches/chunks.rs +++ b/benches/chunks.rs @@ -7,7 +7,8 @@ use ndarray::prelude::*; use ndarray::NdProducer; #[bench] -fn chunk2x2_iter_sum(bench: &mut Bencher) { +fn chunk2x2_iter_sum(bench: &mut Bencher) +{ let a = Array::::zeros((256, 256)); let chunksz = (2, 2); let mut sum = Array::zeros(a.exact_chunks(chunksz).raw_dim()); @@ -19,7 +20,8 @@ fn chunk2x2_iter_sum(bench: &mut Bencher) { } #[bench] -fn chunk2x2_sum(bench: &mut Bencher) { +fn chunk2x2_sum(bench: &mut Bencher) +{ let a = Array::::zeros((256, 256)); let chunksz 
= (2, 2); let mut sum = Array::zeros(a.exact_chunks(chunksz).raw_dim()); @@ -31,7 +33,8 @@ fn chunk2x2_sum(bench: &mut Bencher) { } #[bench] -fn chunk2x2_sum_get1(bench: &mut Bencher) { +fn chunk2x2_sum_get1(bench: &mut Bencher) +{ let a = Array::::zeros((256, 256)); let chunksz = (2, 2); let mut sum = Array::::zeros(a.exact_chunks(chunksz).raw_dim()); @@ -46,7 +49,8 @@ fn chunk2x2_sum_get1(bench: &mut Bencher) { } #[bench] -fn chunk2x2_sum_uget1(bench: &mut Bencher) { +fn chunk2x2_sum_uget1(bench: &mut Bencher) +{ let a = Array::::zeros((256, 256)); let chunksz = (2, 2); let mut sum = Array::::zeros(a.exact_chunks(chunksz).raw_dim()); @@ -64,7 +68,8 @@ fn chunk2x2_sum_uget1(bench: &mut Bencher) { #[bench] #[allow(clippy::identity_op)] -fn chunk2x2_sum_get2(bench: &mut Bencher) { +fn chunk2x2_sum_get2(bench: &mut Bencher) +{ let a = Array::::zeros((256, 256)); let chunksz = (2, 2); let mut sum = Array::::zeros(a.exact_chunks(chunksz).raw_dim()); diff --git a/benches/construct.rs b/benches/construct.rs index c3603ce7c..278174388 100644 --- a/benches/construct.rs +++ b/benches/construct.rs @@ -1,9 +1,6 @@ #![feature(test)] #![allow( - clippy::many_single_char_names, - clippy::deref_addrof, - clippy::unreadable_literal, - clippy::many_single_char_names + clippy::many_single_char_names, clippy::deref_addrof, clippy::unreadable_literal, clippy::many_single_char_names )] extern crate test; use test::Bencher; @@ -11,24 +8,32 @@ use test::Bencher; use ndarray::prelude::*; #[bench] -fn default_f64(bench: &mut Bencher) { +fn default_f64(bench: &mut Bencher) +{ bench.iter(|| Array::::default((128, 128))) } #[bench] -fn zeros_f64(bench: &mut Bencher) { +fn zeros_f64(bench: &mut Bencher) +{ bench.iter(|| Array::::zeros((128, 128))) } #[bench] -fn map_regular(bench: &mut test::Bencher) { - let a = Array::linspace(0., 127., 128).into_shape_with_order((8, 16)).unwrap(); +fn map_regular(bench: &mut test::Bencher) +{ + let a = Array::linspace(0., 127., 128) + .into_shape_with_order((8, 16)) + .unwrap(); bench.iter(|| a.map(|&x| 2. * x)); } #[bench] -fn map_stride(bench: &mut test::Bencher) { - let a = Array::linspace(0., 127., 256).into_shape_with_order((8, 32)).unwrap(); +fn map_stride(bench: &mut test::Bencher) +{ + let a = Array::linspace(0., 127., 256) + .into_shape_with_order((8, 32)) + .unwrap(); let av = a.slice(s![.., ..;2]); bench.iter(|| av.map(|&x| 2. 
* x)); } diff --git a/benches/gemv_gemm.rs b/benches/gemv_gemm.rs index cfa14beac..2d1642623 100644 --- a/benches/gemv_gemm.rs +++ b/benches/gemv_gemm.rs @@ -1,9 +1,6 @@ #![feature(test)] #![allow( - clippy::many_single_char_names, - clippy::deref_addrof, - clippy::unreadable_literal, - clippy::many_single_char_names + clippy::many_single_char_names, clippy::deref_addrof, clippy::unreadable_literal, clippy::many_single_char_names )] extern crate test; @@ -14,12 +11,13 @@ use num_traits::{Float, One, Zero}; use ndarray::prelude::*; -use ndarray::LinalgScalar; use ndarray::linalg::general_mat_mul; use ndarray::linalg::general_mat_vec_mul; +use ndarray::LinalgScalar; #[bench] -fn gemv_64_64c(bench: &mut Bencher) { +fn gemv_64_64c(bench: &mut Bencher) +{ let a = Array::zeros((64, 64)); let (m, n) = a.dim(); let x = Array::zeros(n); @@ -30,7 +28,8 @@ fn gemv_64_64c(bench: &mut Bencher) { } #[bench] -fn gemv_64_64f(bench: &mut Bencher) { +fn gemv_64_64f(bench: &mut Bencher) +{ let a = Array::zeros((64, 64).f()); let (m, n) = a.dim(); let x = Array::zeros(n); @@ -41,7 +40,8 @@ fn gemv_64_64f(bench: &mut Bencher) { } #[bench] -fn gemv_64_32(bench: &mut Bencher) { +fn gemv_64_32(bench: &mut Bencher) +{ let a = Array::zeros((64, 32)); let (m, n) = a.dim(); let x = Array::zeros(n); @@ -52,18 +52,19 @@ fn gemv_64_32(bench: &mut Bencher) { } #[bench] -fn cgemm_100(bench: &mut Bencher) { +fn cgemm_100(bench: &mut Bencher) +{ cgemm_bench::(100, bench); } #[bench] -fn zgemm_100(bench: &mut Bencher) { +fn zgemm_100(bench: &mut Bencher) +{ cgemm_bench::(100, bench); } fn cgemm_bench(size: usize, bench: &mut Bencher) -where - A: LinalgScalar + Float, +where A: LinalgScalar + Float { let (m, k, n) = (size, size, size); let a = Array::, _>::zeros((m, k)); diff --git a/benches/higher-order.rs b/benches/higher-order.rs index f593cd026..9cc3bd961 100644 --- a/benches/higher-order.rs +++ b/benches/higher-order.rs @@ -1,9 +1,6 @@ #![feature(test)] #![allow( - clippy::many_single_char_names, - clippy::deref_addrof, - clippy::unreadable_literal, - clippy::many_single_char_names + clippy::many_single_char_names, clippy::deref_addrof, clippy::unreadable_literal, clippy::many_single_char_names )] extern crate test; use test::black_box; @@ -16,17 +13,22 @@ const X: usize = 64; const Y: usize = 16; #[bench] -fn map_regular(bench: &mut Bencher) { - let a = Array::linspace(0., 127., N).into_shape_with_order((X, Y)).unwrap(); +fn map_regular(bench: &mut Bencher) +{ + let a = Array::linspace(0., 127., N) + .into_shape_with_order((X, Y)) + .unwrap(); bench.iter(|| a.map(|&x| 2. 
* x)); } -pub fn double_array(mut a: ArrayViewMut2<'_, f64>) { +pub fn double_array(mut a: ArrayViewMut2<'_, f64>) +{ a *= 2.0; } #[bench] -fn map_stride_double_f64(bench: &mut Bencher) { +fn map_stride_double_f64(bench: &mut Bencher) +{ let mut a = Array::linspace(0., 127., N * 2) .into_shape_with_order([X, Y * 2]) .unwrap(); @@ -37,7 +39,8 @@ fn map_stride_double_f64(bench: &mut Bencher) { } #[bench] -fn map_stride_f64(bench: &mut Bencher) { +fn map_stride_f64(bench: &mut Bencher) +{ let a = Array::linspace(0., 127., N * 2) .into_shape_with_order([X, Y * 2]) .unwrap(); @@ -46,7 +49,8 @@ fn map_stride_f64(bench: &mut Bencher) { } #[bench] -fn map_stride_u32(bench: &mut Bencher) { +fn map_stride_u32(bench: &mut Bencher) +{ let a = Array::linspace(0., 127., N * 2) .into_shape_with_order([X, Y * 2]) .unwrap(); @@ -56,7 +60,8 @@ fn map_stride_u32(bench: &mut Bencher) { } #[bench] -fn fold_axis(bench: &mut Bencher) { +fn fold_axis(bench: &mut Bencher) +{ let a = Array::linspace(0., 127., N * 2) .into_shape_with_order([X, Y * 2]) .unwrap(); @@ -67,7 +72,8 @@ const MA: usize = 64; const MASZ: usize = MA * MA; #[bench] -fn map_axis_0(bench: &mut Bencher) { +fn map_axis_0(bench: &mut Bencher) +{ let a = Array::from_iter(0..MASZ as i32) .into_shape_with_order([MA, MA]) .unwrap(); @@ -75,7 +81,8 @@ fn map_axis_0(bench: &mut Bencher) { } #[bench] -fn map_axis_1(bench: &mut Bencher) { +fn map_axis_1(bench: &mut Bencher) +{ let a = Array::from_iter(0..MASZ as i32) .into_shape_with_order([MA, MA]) .unwrap(); diff --git a/benches/iter.rs b/benches/iter.rs index 22c8b8d17..77f511745 100644 --- a/benches/iter.rs +++ b/benches/iter.rs @@ -1,9 +1,6 @@ #![feature(test)] #![allow( - clippy::many_single_char_names, - clippy::deref_addrof, - clippy::unreadable_literal, - clippy::many_single_char_names + clippy::many_single_char_names, clippy::deref_addrof, clippy::unreadable_literal, clippy::many_single_char_names )] extern crate test; @@ -16,13 +13,15 @@ use ndarray::Slice; use ndarray::{FoldWhile, Zip}; #[bench] -fn iter_sum_2d_regular(bench: &mut Bencher) { +fn iter_sum_2d_regular(bench: &mut Bencher) +{ let a = Array::::zeros((64, 64)); bench.iter(|| a.iter().sum::()); } #[bench] -fn iter_sum_2d_cutout(bench: &mut Bencher) { +fn iter_sum_2d_cutout(bench: &mut Bencher) +{ let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = av; @@ -30,7 +29,8 @@ fn iter_sum_2d_cutout(bench: &mut Bencher) { } #[bench] -fn iter_all_2d_cutout(bench: &mut Bencher) { +fn iter_all_2d_cutout(bench: &mut Bencher) +{ let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = av; @@ -38,44 +38,58 @@ fn iter_all_2d_cutout(bench: &mut Bencher) { } #[bench] -fn iter_sum_2d_transpose(bench: &mut Bencher) { +fn iter_sum_2d_transpose(bench: &mut Bencher) +{ let a = Array::::zeros((66, 66)); let a = a.t(); bench.iter(|| a.iter().sum::()); } #[bench] -fn iter_filter_sum_2d_u32(bench: &mut Bencher) { - let a = Array::linspace(0., 1., 256).into_shape_with_order((16, 16)).unwrap(); +fn iter_filter_sum_2d_u32(bench: &mut Bencher) +{ + let a = Array::linspace(0., 1., 256) + .into_shape_with_order((16, 16)) + .unwrap(); let b = a.mapv(|x| (x * 100.) 
as u32); bench.iter(|| b.iter().filter(|&&x| x < 75).sum::()); } #[bench] -fn iter_filter_sum_2d_f32(bench: &mut Bencher) { - let a = Array::linspace(0., 1., 256).into_shape_with_order((16, 16)).unwrap(); +fn iter_filter_sum_2d_f32(bench: &mut Bencher) +{ + let a = Array::linspace(0., 1., 256) + .into_shape_with_order((16, 16)) + .unwrap(); let b = a * 100.; bench.iter(|| b.iter().filter(|&&x| x < 75.).sum::()); } #[bench] -fn iter_filter_sum_2d_stride_u32(bench: &mut Bencher) { - let a = Array::linspace(0., 1., 256).into_shape_with_order((16, 16)).unwrap(); +fn iter_filter_sum_2d_stride_u32(bench: &mut Bencher) +{ + let a = Array::linspace(0., 1., 256) + .into_shape_with_order((16, 16)) + .unwrap(); let b = a.mapv(|x| (x * 100.) as u32); let b = b.slice(s![.., ..;2]); bench.iter(|| b.iter().filter(|&&x| x < 75).sum::()); } #[bench] -fn iter_filter_sum_2d_stride_f32(bench: &mut Bencher) { - let a = Array::linspace(0., 1., 256).into_shape_with_order((16, 16)).unwrap(); +fn iter_filter_sum_2d_stride_f32(bench: &mut Bencher) +{ + let a = Array::linspace(0., 1., 256) + .into_shape_with_order((16, 16)) + .unwrap(); let b = a * 100.; let b = b.slice(s![.., ..;2]); bench.iter(|| b.iter().filter(|&&x| x < 75.).sum::()); } #[bench] -fn iter_rev_step_by_contiguous(bench: &mut Bencher) { +fn iter_rev_step_by_contiguous(bench: &mut Bencher) +{ let a = Array::linspace(0., 1., 512); bench.iter(|| { a.iter().rev().step_by(2).for_each(|x| { @@ -85,7 +99,8 @@ fn iter_rev_step_by_contiguous(bench: &mut Bencher) { } #[bench] -fn iter_rev_step_by_discontiguous(bench: &mut Bencher) { +fn iter_rev_step_by_discontiguous(bench: &mut Bencher) +{ let mut a = Array::linspace(0., 1., 1024); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); bench.iter(|| { @@ -98,7 +113,8 @@ fn iter_rev_step_by_discontiguous(bench: &mut Bencher) { const ZIPSZ: usize = 10_000; #[bench] -fn sum_3_std_zip1(bench: &mut Bencher) { +fn sum_3_std_zip1(bench: &mut Bencher) +{ let a = vec![1; ZIPSZ]; let b = vec![1; ZIPSZ]; let c = vec![1; ZIPSZ]; @@ -110,7 +126,8 @@ fn sum_3_std_zip1(bench: &mut Bencher) { } #[bench] -fn sum_3_std_zip2(bench: &mut Bencher) { +fn sum_3_std_zip2(bench: &mut Bencher) +{ let a = vec![1; ZIPSZ]; let b = vec![1; ZIPSZ]; let c = vec![1; ZIPSZ]; @@ -123,7 +140,8 @@ fn sum_3_std_zip2(bench: &mut Bencher) { } #[bench] -fn sum_3_std_zip3(bench: &mut Bencher) { +fn sum_3_std_zip3(bench: &mut Bencher) +{ let a = vec![1; ZIPSZ]; let b = vec![1; ZIPSZ]; let c = vec![1; ZIPSZ]; @@ -137,7 +155,8 @@ fn sum_3_std_zip3(bench: &mut Bencher) { } #[bench] -fn vector_sum_3_std_zip(bench: &mut Bencher) { +fn vector_sum_3_std_zip(bench: &mut Bencher) +{ let a = vec![1.; ZIPSZ]; let b = vec![1.; ZIPSZ]; let mut c = vec![1.; ZIPSZ]; @@ -149,7 +168,8 @@ fn vector_sum_3_std_zip(bench: &mut Bencher) { } #[bench] -fn sum_3_azip(bench: &mut Bencher) { +fn sum_3_azip(bench: &mut Bencher) +{ let a = vec![1; ZIPSZ]; let b = vec![1; ZIPSZ]; let c = vec![1; ZIPSZ]; @@ -163,7 +183,8 @@ fn sum_3_azip(bench: &mut Bencher) { } #[bench] -fn sum_3_azip_fold(bench: &mut Bencher) { +fn sum_3_azip_fold(bench: &mut Bencher) +{ let a = vec![1; ZIPSZ]; let b = vec![1; ZIPSZ]; let c = vec![1; ZIPSZ]; @@ -177,7 +198,8 @@ fn sum_3_azip_fold(bench: &mut Bencher) { } #[bench] -fn vector_sum_3_azip(bench: &mut Bencher) { +fn vector_sum_3_azip(bench: &mut Bencher) +{ let a = vec![1.; ZIPSZ]; let b = vec![1.; ZIPSZ]; let mut c = vec![1.; ZIPSZ]; @@ -188,7 +210,8 @@ fn vector_sum_3_azip(bench: &mut Bencher) { }); } -fn vector_sum3_unchecked(a: &[f64], b: &[f64], 
c: &mut [f64]) { +fn vector_sum3_unchecked(a: &[f64], b: &[f64], c: &mut [f64]) +{ for i in 0..c.len() { unsafe { *c.get_unchecked_mut(i) += *a.get_unchecked(i) + *b.get_unchecked(i); @@ -197,7 +220,8 @@ fn vector_sum3_unchecked(a: &[f64], b: &[f64], c: &mut [f64]) { } #[bench] -fn vector_sum_3_zip_unchecked(bench: &mut Bencher) { +fn vector_sum_3_zip_unchecked(bench: &mut Bencher) +{ let a = vec![1.; ZIPSZ]; let b = vec![1.; ZIPSZ]; let mut c = vec![1.; ZIPSZ]; @@ -207,7 +231,8 @@ fn vector_sum_3_zip_unchecked(bench: &mut Bencher) { } #[bench] -fn vector_sum_3_zip_unchecked_manual(bench: &mut Bencher) { +fn vector_sum_3_zip_unchecked_manual(bench: &mut Bencher) +{ let a = vec![1.; ZIPSZ]; let b = vec![1.; ZIPSZ]; let mut c = vec![1.; ZIPSZ]; @@ -227,7 +252,8 @@ const ISZ: usize = 16; const I2DSZ: usize = 64; #[bench] -fn indexed_iter_1d_ix1(bench: &mut Bencher) { +fn indexed_iter_1d_ix1(bench: &mut Bencher) +{ let mut a = Array::::zeros(I2DSZ * I2DSZ); for (i, elt) in a.indexed_iter_mut() { *elt = i as _; @@ -242,7 +268,8 @@ fn indexed_iter_1d_ix1(bench: &mut Bencher) { } #[bench] -fn indexed_zip_1d_ix1(bench: &mut Bencher) { +fn indexed_zip_1d_ix1(bench: &mut Bencher) +{ let mut a = Array::::zeros(I2DSZ * I2DSZ); for (i, elt) in a.indexed_iter_mut() { *elt = i as _; @@ -257,7 +284,8 @@ fn indexed_zip_1d_ix1(bench: &mut Bencher) { } #[bench] -fn indexed_iter_2d_ix2(bench: &mut Bencher) { +fn indexed_iter_2d_ix2(bench: &mut Bencher) +{ let mut a = Array::::zeros((I2DSZ, I2DSZ)); for ((i, j), elt) in a.indexed_iter_mut() { *elt = (i + 100 * j) as _; @@ -271,7 +299,8 @@ fn indexed_iter_2d_ix2(bench: &mut Bencher) { }) } #[bench] -fn indexed_zip_2d_ix2(bench: &mut Bencher) { +fn indexed_zip_2d_ix2(bench: &mut Bencher) +{ let mut a = Array::::zeros((I2DSZ, I2DSZ)); for ((i, j), elt) in a.indexed_iter_mut() { *elt = (i + 100 * j) as _; @@ -286,7 +315,8 @@ fn indexed_zip_2d_ix2(bench: &mut Bencher) { } #[bench] -fn indexed_iter_3d_ix3(bench: &mut Bencher) { +fn indexed_iter_3d_ix3(bench: &mut Bencher) +{ let mut a = Array::::zeros((ISZ, ISZ, ISZ)); for ((i, j, k), elt) in a.indexed_iter_mut() { *elt = (i + 100 * j + 10000 * k) as _; @@ -301,7 +331,8 @@ fn indexed_iter_3d_ix3(bench: &mut Bencher) { } #[bench] -fn indexed_zip_3d_ix3(bench: &mut Bencher) { +fn indexed_zip_3d_ix3(bench: &mut Bencher) +{ let mut a = Array::::zeros((ISZ, ISZ, ISZ)); for ((i, j, k), elt) in a.indexed_iter_mut() { *elt = (i + 100 * j + 10000 * k) as _; @@ -316,7 +347,8 @@ fn indexed_zip_3d_ix3(bench: &mut Bencher) { } #[bench] -fn indexed_iter_3d_dyn(bench: &mut Bencher) { +fn indexed_iter_3d_dyn(bench: &mut Bencher) +{ let mut a = Array::::zeros((ISZ, ISZ, ISZ)); for ((i, j, k), elt) in a.indexed_iter_mut() { *elt = (i + 100 * j + 10000 * k) as _; @@ -332,27 +364,31 @@ fn indexed_iter_3d_dyn(bench: &mut Bencher) { } #[bench] -fn iter_sum_1d_strided_fold(bench: &mut Bencher) { +fn iter_sum_1d_strided_fold(bench: &mut Bencher) +{ let mut a = Array::::ones(10240); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); bench.iter(|| a.iter().sum::()); } #[bench] -fn iter_sum_1d_strided_rfold(bench: &mut Bencher) { +fn iter_sum_1d_strided_rfold(bench: &mut Bencher) +{ let mut a = Array::::ones(10240); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); bench.iter(|| a.iter().rfold(0, |acc, &x| acc + x)); } #[bench] -fn iter_axis_iter_sum(bench: &mut Bencher) { +fn iter_axis_iter_sum(bench: &mut Bencher) +{ let a = Array::::zeros((64, 64)); bench.iter(|| a.axis_iter(Axis(0)).map(|plane| plane.sum()).sum::()); } #[bench] 
-fn iter_axis_chunks_1_iter_sum(bench: &mut Bencher) { +fn iter_axis_chunks_1_iter_sum(bench: &mut Bencher) +{ let a = Array::::zeros((64, 64)); bench.iter(|| { a.axis_chunks_iter(Axis(0), 1) @@ -362,7 +398,8 @@ fn iter_axis_chunks_1_iter_sum(bench: &mut Bencher) { } #[bench] -fn iter_axis_chunks_5_iter_sum(bench: &mut Bencher) { +fn iter_axis_chunks_5_iter_sum(bench: &mut Bencher) +{ let a = Array::::zeros((64, 64)); bench.iter(|| { a.axis_chunks_iter(Axis(0), 5) @@ -371,21 +408,24 @@ fn iter_axis_chunks_5_iter_sum(bench: &mut Bencher) { }); } -pub fn zip_mut_with(data: &Array3, out: &mut Array3) { +pub fn zip_mut_with(data: &Array3, out: &mut Array3) +{ out.zip_mut_with(&data, |o, &i| { *o = i; }); } #[bench] -fn zip_mut_with_cc(b: &mut Bencher) { +fn zip_mut_with_cc(b: &mut Bencher) +{ let data: Array3 = Array3::zeros((ISZ, ISZ, ISZ)); let mut out = Array3::zeros(data.dim()); b.iter(|| zip_mut_with(&data, &mut out)); } #[bench] -fn zip_mut_with_ff(b: &mut Bencher) { +fn zip_mut_with_ff(b: &mut Bencher) +{ let data: Array3 = Array3::zeros((ISZ, ISZ, ISZ).f()); let mut out = Array3::zeros(data.dim().f()); b.iter(|| zip_mut_with(&data, &mut out)); diff --git a/benches/numeric.rs b/benches/numeric.rs index d9b9187ff..e2ffa1b84 100644 --- a/benches/numeric.rs +++ b/benches/numeric.rs @@ -10,7 +10,8 @@ const X: usize = 64; const Y: usize = 16; #[bench] -fn clip(bench: &mut Bencher) { +fn clip(bench: &mut Bencher) +{ let mut a = Array::linspace(0., 127., N * 2) .into_shape_with_order([X, Y * 2]) .unwrap(); diff --git a/benches/par_rayon.rs b/benches/par_rayon.rs index 18176d846..1301ae75a 100644 --- a/benches/par_rayon.rs +++ b/benches/par_rayon.rs @@ -12,7 +12,8 @@ use ndarray::Zip; const EXP_N: usize = 256; const ADDN: usize = 512; -fn set_threads() { +fn set_threads() +{ // Consider setting a fixed number of threads here, for example to avoid // oversubscribing on hyperthreaded cores. // let n = 4; @@ -20,7 +21,8 @@ fn set_threads() { } #[bench] -fn map_exp_regular(bench: &mut Bencher) { +fn map_exp_regular(bench: &mut Bencher) +{ let mut a = Array2::::zeros((EXP_N, EXP_N)); a.swap_axes(0, 1); bench.iter(|| { @@ -29,7 +31,8 @@ fn map_exp_regular(bench: &mut Bencher) { } #[bench] -fn rayon_exp_regular(bench: &mut Bencher) { +fn rayon_exp_regular(bench: &mut Bencher) +{ set_threads(); let mut a = Array2::::zeros((EXP_N, EXP_N)); a.swap_axes(0, 1); @@ -41,19 +44,22 @@ fn rayon_exp_regular(bench: &mut Bencher) { const FASTEXP: usize = EXP_N; #[inline] -fn fastexp(x: f64) -> f64 { +fn fastexp(x: f64) -> f64 +{ let x = 1. 
+ x / 1024.; x.powi(1024) } #[bench] -fn map_fastexp_regular(bench: &mut Bencher) { +fn map_fastexp_regular(bench: &mut Bencher) +{ let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| a.mapv_inplace(|x| fastexp(x))); } #[bench] -fn rayon_fastexp_regular(bench: &mut Bencher) { +fn rayon_fastexp_regular(bench: &mut Bencher) +{ set_threads(); let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| { @@ -62,14 +68,16 @@ fn rayon_fastexp_regular(bench: &mut Bencher) { } #[bench] -fn map_fastexp_cut(bench: &mut Bencher) { +fn map_fastexp_cut(bench: &mut Bencher) +{ let mut a = Array2::::zeros((FASTEXP, FASTEXP)); let mut a = a.slice_mut(s![.., ..-1]); bench.iter(|| a.mapv_inplace(|x| fastexp(x))); } #[bench] -fn rayon_fastexp_cut(bench: &mut Bencher) { +fn rayon_fastexp_cut(bench: &mut Bencher) +{ set_threads(); let mut a = Array2::::zeros((FASTEXP, FASTEXP)); let mut a = a.slice_mut(s![.., ..-1]); @@ -79,7 +87,8 @@ fn rayon_fastexp_cut(bench: &mut Bencher) { } #[bench] -fn map_fastexp_by_axis(bench: &mut Bencher) { +fn map_fastexp_by_axis(bench: &mut Bencher) +{ let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| { for mut sheet in a.axis_iter_mut(Axis(0)) { @@ -89,7 +98,8 @@ fn map_fastexp_by_axis(bench: &mut Bencher) { } #[bench] -fn rayon_fastexp_by_axis(bench: &mut Bencher) { +fn rayon_fastexp_by_axis(bench: &mut Bencher) +{ set_threads(); let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| { @@ -100,7 +110,8 @@ fn rayon_fastexp_by_axis(bench: &mut Bencher) { } #[bench] -fn rayon_fastexp_zip(bench: &mut Bencher) { +fn rayon_fastexp_zip(bench: &mut Bencher) +{ set_threads(); let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| { @@ -111,7 +122,8 @@ fn rayon_fastexp_zip(bench: &mut Bencher) { } #[bench] -fn add(bench: &mut Bencher) { +fn add(bench: &mut Bencher) +{ let mut a = Array2::::zeros((ADDN, ADDN)); let b = Array2::::zeros((ADDN, ADDN)); let c = Array2::::zeros((ADDN, ADDN)); @@ -124,7 +136,8 @@ fn add(bench: &mut Bencher) { } #[bench] -fn rayon_add(bench: &mut Bencher) { +fn rayon_add(bench: &mut Bencher) +{ set_threads(); let mut a = Array2::::zeros((ADDN, ADDN)); let b = Array2::::zeros((ADDN, ADDN)); @@ -141,34 +154,29 @@ const COLL_STRING_N: usize = 64; const COLL_F64_N: usize = 128; #[bench] -fn vec_string_collect(bench: &mut test::Bencher) { +fn vec_string_collect(bench: &mut test::Bencher) +{ let v = vec![""; COLL_STRING_N * COLL_STRING_N]; - bench.iter(|| { - v.iter().map(|s| s.to_owned()).collect::>() - }); + bench.iter(|| v.iter().map(|s| s.to_owned()).collect::>()); } #[bench] -fn array_string_collect(bench: &mut test::Bencher) { +fn array_string_collect(bench: &mut test::Bencher) +{ let v = Array::from_elem((COLL_STRING_N, COLL_STRING_N), ""); - bench.iter(|| { - Zip::from(&v).par_map_collect(|s| s.to_owned()) - }); + bench.iter(|| Zip::from(&v).par_map_collect(|s| s.to_owned())); } #[bench] -fn vec_f64_collect(bench: &mut test::Bencher) { +fn vec_f64_collect(bench: &mut test::Bencher) +{ let v = vec![1.; COLL_F64_N * COLL_F64_N]; - bench.iter(|| { - v.iter().map(|s| s + 1.).collect::>() - }); + bench.iter(|| v.iter().map(|s| s + 1.).collect::>()); } #[bench] -fn array_f64_collect(bench: &mut test::Bencher) { +fn array_f64_collect(bench: &mut test::Bencher) +{ let v = Array::from_elem((COLL_F64_N, COLL_F64_N), 1.); - bench.iter(|| { - Zip::from(&v).par_map_collect(|s| s + 1.) 
- }); + bench.iter(|| Zip::from(&v).par_map_collect(|s| s + 1.)); } - diff --git a/benches/to_shape.rs b/benches/to_shape.rs index a048eb774..f056a9852 100644 --- a/benches/to_shape.rs +++ b/benches/to_shape.rs @@ -7,100 +7,89 @@ use ndarray::prelude::*; use ndarray::Order; #[bench] -fn to_shape2_1(bench: &mut Bencher) { +fn to_shape2_1(bench: &mut Bencher) +{ let a = Array::::zeros((4, 5)); let view = a.view(); - bench.iter(|| { - view.to_shape(4 * 5).unwrap() - }); + bench.iter(|| view.to_shape(4 * 5).unwrap()); } #[bench] -fn to_shape2_2_same(bench: &mut Bencher) { +fn to_shape2_2_same(bench: &mut Bencher) +{ let a = Array::::zeros((4, 5)); let view = a.view(); - bench.iter(|| { - view.to_shape((4, 5)).unwrap() - }); + bench.iter(|| view.to_shape((4, 5)).unwrap()); } #[bench] -fn to_shape2_2_flip(bench: &mut Bencher) { +fn to_shape2_2_flip(bench: &mut Bencher) +{ let a = Array::::zeros((4, 5)); let view = a.view(); - bench.iter(|| { - view.to_shape((5, 4)).unwrap() - }); + bench.iter(|| view.to_shape((5, 4)).unwrap()); } #[bench] -fn to_shape2_3(bench: &mut Bencher) { +fn to_shape2_3(bench: &mut Bencher) +{ let a = Array::::zeros((4, 5)); let view = a.view(); - bench.iter(|| { - view.to_shape((2, 5, 2)).unwrap() - }); + bench.iter(|| view.to_shape((2, 5, 2)).unwrap()); } #[bench] -fn to_shape3_1(bench: &mut Bencher) { +fn to_shape3_1(bench: &mut Bencher) +{ let a = Array::::zeros((3, 4, 5)); let view = a.view(); - bench.iter(|| { - view.to_shape(3 * 4 * 5).unwrap() - }); + bench.iter(|| view.to_shape(3 * 4 * 5).unwrap()); } #[bench] -fn to_shape3_2_order(bench: &mut Bencher) { +fn to_shape3_2_order(bench: &mut Bencher) +{ let a = Array::::zeros((3, 4, 5)); let view = a.view(); - bench.iter(|| { - view.to_shape((12, 5)).unwrap() - }); + bench.iter(|| view.to_shape((12, 5)).unwrap()); } #[bench] -fn to_shape3_2_outoforder(bench: &mut Bencher) { +fn to_shape3_2_outoforder(bench: &mut Bencher) +{ let a = Array::::zeros((3, 4, 5)); let view = a.view(); - bench.iter(|| { - view.to_shape((4, 15)).unwrap() - }); + bench.iter(|| view.to_shape((4, 15)).unwrap()); } #[bench] -fn to_shape3_3c(bench: &mut Bencher) { +fn to_shape3_3c(bench: &mut Bencher) +{ let a = Array::::zeros((3, 4, 5)); let view = a.view(); - bench.iter(|| { - view.to_shape((3, 4, 5)).unwrap() - }); + bench.iter(|| view.to_shape((3, 4, 5)).unwrap()); } #[bench] -fn to_shape3_3f(bench: &mut Bencher) { +fn to_shape3_3f(bench: &mut Bencher) +{ let a = Array::::zeros((3, 4, 5).f()); let view = a.view(); - bench.iter(|| { - view.to_shape(((3, 4, 5), Order::F)).unwrap() - }); + bench.iter(|| view.to_shape(((3, 4, 5), Order::F)).unwrap()); } #[bench] -fn to_shape3_4c(bench: &mut Bencher) { +fn to_shape3_4c(bench: &mut Bencher) +{ let a = Array::::zeros((3, 4, 5)); let view = a.view(); - bench.iter(|| { - view.to_shape(((2, 3, 2, 5), Order::C)).unwrap() - }); + bench.iter(|| view.to_shape(((2, 3, 2, 5), Order::C)).unwrap()); } #[bench] -fn to_shape3_4f(bench: &mut Bencher) { +fn to_shape3_4f(bench: &mut Bencher) +{ let a = Array::::zeros((3, 4, 5).f()); let view = a.view(); - bench.iter(|| { - view.to_shape(((2, 3, 2, 5), Order::F)).unwrap() - }); + bench.iter(|| view.to_shape(((2, 3, 2, 5), Order::F)).unwrap()); } diff --git a/benches/zip.rs b/benches/zip.rs index 10e1dee3c..461497310 100644 --- a/benches/zip.rs +++ b/benches/zip.rs @@ -1,14 +1,15 @@ #![feature(test)] extern crate test; -use test::{black_box, Bencher}; -use ndarray::{Array3, ShapeBuilder, Zip}; use ndarray::s; use ndarray::IntoNdProducer; +use ndarray::{Array3, 
ShapeBuilder, Zip}; +use test::{black_box, Bencher}; pub fn zip_copy<'a, A, P, Q>(data: P, out: Q) - where P: IntoNdProducer, - Q: IntoNdProducer, - A: Copy + 'a +where + P: IntoNdProducer, + Q: IntoNdProducer, + A: Copy + 'a, { Zip::from(data).and(out).for_each(|&i, o| { *o = i; @@ -16,9 +17,10 @@ pub fn zip_copy<'a, A, P, Q>(data: P, out: Q) } pub fn zip_copy_split<'a, A, P, Q>(data: P, out: Q) - where P: IntoNdProducer, - Q: IntoNdProducer, - A: Copy + 'a +where + P: IntoNdProducer, + Q: IntoNdProducer, + A: Copy + 'a, { let z = Zip::from(data).and(out); let (z1, z2) = z.split(); @@ -31,7 +33,8 @@ pub fn zip_copy_split<'a, A, P, Q>(data: P, out: Q) z22.for_each(f); } -pub fn zip_indexed(data: &Array3, out: &mut Array3) { +pub fn zip_indexed(data: &Array3, out: &mut Array3) +{ Zip::indexed(data).and(out).for_each(|idx, &i, o| { let _ = black_box(idx); *o = i; @@ -42,49 +45,56 @@ pub fn zip_indexed(data: &Array3, out: &mut Array3) { const SZ3: (usize, usize, usize) = (100, 110, 100); #[bench] -fn zip_cc(b: &mut Bencher) { +fn zip_cc(b: &mut Bencher) +{ let data: Array3 = Array3::zeros(SZ3); let mut out = Array3::zeros(data.dim()); b.iter(|| zip_copy(&data, &mut out)); } #[bench] -fn zip_cf(b: &mut Bencher) { +fn zip_cf(b: &mut Bencher) +{ let data: Array3 = Array3::zeros(SZ3); let mut out = Array3::zeros(data.dim().f()); b.iter(|| zip_copy(&data, &mut out)); } #[bench] -fn zip_fc(b: &mut Bencher) { +fn zip_fc(b: &mut Bencher) +{ let data: Array3 = Array3::zeros(SZ3.f()); let mut out = Array3::zeros(data.dim()); b.iter(|| zip_copy(&data, &mut out)); } #[bench] -fn zip_ff(b: &mut Bencher) { +fn zip_ff(b: &mut Bencher) +{ let data: Array3 = Array3::zeros(SZ3.f()); let mut out = Array3::zeros(data.dim().f()); b.iter(|| zip_copy(&data, &mut out)); } #[bench] -fn zip_indexed_cc(b: &mut Bencher) { +fn zip_indexed_cc(b: &mut Bencher) +{ let data: Array3 = Array3::zeros(SZ3); let mut out = Array3::zeros(data.dim()); b.iter(|| zip_indexed(&data, &mut out)); } #[bench] -fn zip_indexed_ff(b: &mut Bencher) { +fn zip_indexed_ff(b: &mut Bencher) +{ let data: Array3 = Array3::zeros(SZ3.f()); let mut out = Array3::zeros(data.dim().f()); b.iter(|| zip_indexed(&data, &mut out)); } #[bench] -fn slice_zip_cc(b: &mut Bencher) { +fn slice_zip_cc(b: &mut Bencher) +{ let data: Array3 = Array3::zeros(SZ3); let mut out = Array3::zeros(data.dim()); let data = data.slice(s![1.., 1.., 1..]); @@ -93,7 +103,8 @@ fn slice_zip_cc(b: &mut Bencher) { } #[bench] -fn slice_zip_ff(b: &mut Bencher) { +fn slice_zip_ff(b: &mut Bencher) +{ let data: Array3 = Array3::zeros(SZ3.f()); let mut out = Array3::zeros(data.dim().f()); let data = data.slice(s![1.., 1.., 1..]); @@ -102,7 +113,8 @@ fn slice_zip_ff(b: &mut Bencher) { } #[bench] -fn slice_split_zip_cc(b: &mut Bencher) { +fn slice_split_zip_cc(b: &mut Bencher) +{ let data: Array3 = Array3::zeros(SZ3); let mut out = Array3::zeros(data.dim()); let data = data.slice(s![1.., 1.., 1..]); @@ -111,7 +123,8 @@ fn slice_split_zip_cc(b: &mut Bencher) { } #[bench] -fn slice_split_zip_ff(b: &mut Bencher) { +fn slice_split_zip_ff(b: &mut Bencher) +{ let data: Array3 = Array3::zeros(SZ3.f()); let mut out = Array3::zeros(data.dim().f()); let data = data.slice(s![1.., 1.., 1..]); diff --git a/examples/axis_ops.rs b/examples/axis_ops.rs index e84d112c2..3a54a52fb 100644 --- a/examples/axis_ops.rs +++ b/examples/axis_ops.rs @@ -1,8 +1,5 @@ #![allow( - clippy::many_single_char_names, - clippy::deref_addrof, - clippy::unreadable_literal, - clippy::many_single_char_names + 
clippy::many_single_char_names, clippy::deref_addrof, clippy::unreadable_literal, clippy::many_single_char_names )] use ndarray::prelude::*; @@ -58,7 +55,8 @@ where Ok(()) } -fn main() { +fn main() +{ let mut a = Array::::zeros((2, 3, 4)); for (i, elt) in (0..).zip(&mut a) { *elt = i; diff --git a/examples/bounds_check_elim.rs b/examples/bounds_check_elim.rs index d1c247433..e6b57c719 100644 --- a/examples/bounds_check_elim.rs +++ b/examples/bounds_check_elim.rs @@ -1,9 +1,6 @@ #![crate_type = "lib"] #![allow( - clippy::many_single_char_names, - clippy::deref_addrof, - clippy::unreadable_literal, - clippy::many_single_char_names + clippy::many_single_char_names, clippy::deref_addrof, clippy::unreadable_literal, clippy::many_single_char_names )] // Test cases for bounds check elimination @@ -38,7 +35,8 @@ pub fn testvec_as_slice(a: &Vec) -> f64 { */ #[no_mangle] -pub fn test1d_single(a: &Array1, i: usize) -> f64 { +pub fn test1d_single(a: &Array1, i: usize) -> f64 +{ if i < a.len() { a[i] } else { @@ -47,7 +45,8 @@ pub fn test1d_single(a: &Array1, i: usize) -> f64 { } #[no_mangle] -pub fn test1d_single_mut(a: &mut Array1, i: usize) -> f64 { +pub fn test1d_single_mut(a: &mut Array1, i: usize) -> f64 +{ if i < a.len() { *&mut a[i] } else { @@ -56,7 +55,8 @@ pub fn test1d_single_mut(a: &mut Array1, i: usize) -> f64 { } #[no_mangle] -pub fn test1d_len_of(a: &Array1) -> f64 { +pub fn test1d_len_of(a: &Array1) -> f64 +{ let a = &*a; let mut sum = 0.; for i in 0..a.len_of(Axis(0)) { @@ -66,7 +66,8 @@ pub fn test1d_len_of(a: &Array1) -> f64 { } #[no_mangle] -pub fn test1d_range(a: &Array1) -> f64 { +pub fn test1d_range(a: &Array1) -> f64 +{ let mut sum = 0.; for i in 0..a.len() { sum += a[i]; @@ -75,7 +76,8 @@ pub fn test1d_range(a: &Array1) -> f64 { } #[no_mangle] -pub fn test1d_while(a: &Array1) -> f64 { +pub fn test1d_while(a: &Array1) -> f64 +{ let mut sum = 0.; let mut i = 0; while i < a.len() { @@ -86,7 +88,8 @@ pub fn test1d_while(a: &Array1) -> f64 { } #[no_mangle] -pub fn test2d_ranges(a: &Array2) -> f64 { +pub fn test2d_ranges(a: &Array2) -> f64 +{ let mut sum = 0.; for i in 0..a.nrows() { for j in 0..a.ncols() { @@ -97,7 +100,8 @@ pub fn test2d_ranges(a: &Array2) -> f64 { } #[no_mangle] -pub fn test2d_whiles(a: &Array2) -> f64 { +pub fn test2d_whiles(a: &Array2) -> f64 +{ let mut sum = 0.; let mut i = 0; while i < a.nrows() { diff --git a/examples/column_standardize.rs b/examples/column_standardize.rs index 6a1840f03..329ad2ccb 100644 --- a/examples/column_standardize.rs +++ b/examples/column_standardize.rs @@ -2,7 +2,8 @@ use ndarray::prelude::*; #[cfg(feature = "std")] -fn main() { +fn main() +{ // This example recreates the following from python/numpy // counts -= np.mean(counts, axis=0) // counts /= np.std(counts, axis=0) diff --git a/examples/convo.rs b/examples/convo.rs index b50ab5247..a59795e12 100644 --- a/examples/convo.rs +++ b/examples/convo.rs @@ -15,8 +15,7 @@ type Kernel3x3 = [[A; 3]; 3]; #[inline(never)] #[cfg(feature = "std")] fn conv_3x3(a: &ArrayView2<'_, F>, out: &mut ArrayViewMut2<'_, F>, kernel: &Kernel3x3) -where - F: Float, +where F: Float { let (n, m) = a.dim(); let (np, mp) = out.dim(); @@ -43,7 +42,8 @@ where } #[cfg(feature = "std")] -fn main() { +fn main() +{ let n = 16; let mut a = Array::zeros((n, n)); // make a circle diff --git a/examples/life.rs b/examples/life.rs index e0675ae17..7db384678 100644 --- a/examples/life.rs +++ b/examples/life.rs @@ -1,8 +1,5 @@ #![allow( - clippy::many_single_char_names, - clippy::deref_addrof, - clippy::unreadable_literal, 
- clippy::many_single_char_names + clippy::many_single_char_names, clippy::deref_addrof, clippy::unreadable_literal, clippy::many_single_char_names )] use ndarray::prelude::*; @@ -13,7 +10,8 @@ const N: usize = 100; type Board = Array2; -fn parse(x: &[u8]) -> Board { +fn parse(x: &[u8]) -> Board +{ // make a border of 0 cells let mut map = Board::from_elem(((N + 2), (N + 2)), 0); let a = Array::from_iter(x.iter().filter_map(|&b| match b { @@ -33,7 +31,8 @@ fn parse(x: &[u8]) -> Board { // 3 neighbors: birth // otherwise: death -fn iterate(z: &mut Board, scratch: &mut Board) { +fn iterate(z: &mut Board, scratch: &mut Board) +{ // compute number of neighbors let mut neigh = scratch.view_mut(); neigh.fill(0); @@ -56,7 +55,8 @@ fn iterate(z: &mut Board, scratch: &mut Board) { zv.zip_mut_with(&neigh, |y, &n| *y = ((n == 3) || (n == 2 && *y > 0)) as u8); } -fn turn_on_corners(z: &mut Board) { +fn turn_on_corners(z: &mut Board) +{ let n = z.nrows(); let m = z.ncols(); z[[1, 1]] = 1; @@ -65,7 +65,8 @@ fn turn_on_corners(z: &mut Board) { z[[n - 2, m - 2]] = 1; } -fn render(a: &Board) { +fn render(a: &Board) +{ for row in a.rows() { for &x in row { if x > 0 { @@ -78,7 +79,8 @@ fn render(a: &Board) { } } -fn main() { +fn main() +{ let mut a = parse(INPUT); let mut scratch = Board::zeros((N, N)); let steps = 100; diff --git a/examples/rollaxis.rs b/examples/rollaxis.rs index 8efdd0ce0..82c381297 100644 --- a/examples/rollaxis.rs +++ b/examples/rollaxis.rs @@ -22,7 +22,8 @@ where a } -fn main() { +fn main() +{ let mut data = array![ [[-1., 0., -2.], [1., 7., -3.]], [[1., 0., -3.], [1., 7., 5.]], diff --git a/examples/sort-axis.rs b/examples/sort-axis.rs index 2ff6ceb32..17ce52e3a 100644 --- a/examples/sort-axis.rs +++ b/examples/sort-axis.rs @@ -12,13 +12,16 @@ use std::ptr::copy_nonoverlapping; // Type invariant: Each index appears exactly once #[derive(Clone, Debug)] -pub struct Permutation { +pub struct Permutation +{ indices: Vec, } -impl Permutation { +impl Permutation +{ /// Checks if the permutation is correct - pub fn from_indices(v: Vec) -> Result { + pub fn from_indices(v: Vec) -> Result + { let perm = Permutation { indices: v }; if perm.correct() { Ok(perm) @@ -27,34 +30,35 @@ impl Permutation { } } - fn correct(&self) -> bool { + fn correct(&self) -> bool + { let axis_len = self.indices.len(); let mut seen = vec![false; axis_len]; for &i in &self.indices { match seen.get_mut(i) { None => return false, - Some(s) => { + Some(s) => if *s { return false; } else { *s = true; - } - } + }, } } true } } -pub trait SortArray { +pub trait SortArray +{ /// ***Panics*** if `axis` is out of bounds. 
fn identity(&self, axis: Axis) -> Permutation; fn sort_axis_by(&self, axis: Axis, less_than: F) -> Permutation - where - F: FnMut(usize, usize) -> bool; + where F: FnMut(usize, usize) -> bool; } -pub trait PermuteArray { +pub trait PermuteArray +{ type Elem; type Dim; fn permute_axis(self, axis: Axis, perm: &Permutation) -> Array @@ -68,15 +72,15 @@ where S: Data, D: Dimension, { - fn identity(&self, axis: Axis) -> Permutation { + fn identity(&self, axis: Axis) -> Permutation + { Permutation { indices: (0..self.len_of(axis)).collect(), } } fn sort_axis_by(&self, axis: Axis, mut less_than: F) -> Permutation - where - F: FnMut(usize, usize) -> bool, + where F: FnMut(usize, usize) -> bool { let mut perm = self.identity(axis); perm.indices.sort_by(move |&a, &b| { @@ -93,15 +97,13 @@ where } impl PermuteArray for Array -where - D: Dimension, +where D: Dimension { type Elem = A; type Dim = D; fn permute_axis(self, axis: Axis, perm: &Permutation) -> Array - where - D: RemoveAxis, + where D: RemoveAxis { let axis_len = self.len_of(axis); let axis_stride = self.stride_of(axis); @@ -165,8 +167,11 @@ where } #[cfg(feature = "std")] -fn main() { - let a = Array::linspace(0., 63., 64).into_shape_with_order((8, 8)).unwrap(); +fn main() +{ + let a = Array::linspace(0., 63., 64) + .into_shape_with_order((8, 8)) + .unwrap(); let strings = a.map(|x| x.to_string()); let perm = a.sort_axis_by(Axis(1), |i, j| a[[i, 0]] > a[[j, 0]]); @@ -183,10 +188,12 @@ fn main() { fn main() {} #[cfg(test)] -mod tests { +mod tests +{ use super::*; #[test] - fn test_permute_axis() { + fn test_permute_axis() + { let a = array![ [107998.96, 1.], [107999.08, 2.], diff --git a/examples/type_conversion.rs b/examples/type_conversion.rs index 7bec2542f..a419af740 100644 --- a/examples/type_conversion.rs +++ b/examples/type_conversion.rs @@ -7,7 +7,8 @@ use approx::assert_abs_diff_eq; use ndarray::prelude::*; #[cfg(feature = "approx")] -fn main() { +fn main() +{ // Converting an array from one datatype to another is implemented with the // `ArrayBase::mapv()` function. We pass a closure that is applied to each // element independently. 
This allows for more control and flexiblity in diff --git a/examples/zip_many.rs b/examples/zip_many.rs index e2c5169f2..57d66a956 100644 --- a/examples/zip_many.rs +++ b/examples/zip_many.rs @@ -1,14 +1,12 @@ #![allow( - clippy::many_single_char_names, - clippy::deref_addrof, - clippy::unreadable_literal, - clippy::many_single_char_names + clippy::many_single_char_names, clippy::deref_addrof, clippy::unreadable_literal, clippy::many_single_char_names )] use ndarray::prelude::*; use ndarray::Zip; -fn main() { +fn main() +{ let n = 6; let mut a = Array::::zeros((n, n)); @@ -20,7 +18,9 @@ fn main() { let c = c.slice(s![.., ..-1]); // Using Zip for arithmetic ops across a, b, c - Zip::from(&mut a).and(&b).and(&c) + Zip::from(&mut a) + .and(&b) + .and(&c) .for_each(|a, &b, &c| *a = b + c); assert_eq!(a, &b + &c); @@ -31,7 +31,8 @@ fn main() { // sum of each row let mut sums = Array::zeros(a.nrows()); - Zip::from(a.rows()).and(&mut sums) + Zip::from(a.rows()) + .and(&mut sums) .for_each(|row, sum| *sum = row.sum()); // show sums as a column matrix println!("{:8.4}", sums.insert_axis(Axis(1))); diff --git a/ndarray-rand/benches/bench.rs b/ndarray-rand/benches/bench.rs index b58d80a88..0e5eb2ff7 100644 --- a/ndarray-rand/benches/bench.rs +++ b/ndarray-rand/benches/bench.rs @@ -10,19 +10,22 @@ use rand_distr::Uniform; use test::Bencher; #[bench] -fn uniform_f32(b: &mut Bencher) { +fn uniform_f32(b: &mut Bencher) +{ let m = 100; b.iter(|| Array::random((m, m), Uniform::new(-1f32, 1.))); } #[bench] -fn norm_f32(b: &mut Bencher) { +fn norm_f32(b: &mut Bencher) +{ let m = 100; b.iter(|| Array::random((m, m), Normal::new(0f32, 1.).unwrap())); } #[bench] -fn norm_f64(b: &mut Bencher) { +fn norm_f64(b: &mut Bencher) +{ let m = 100; b.iter(|| Array::random((m, m), Normal::new(0f64, 1.).unwrap())); } diff --git a/ndarray-rand/src/lib.rs b/ndarray-rand/src/lib.rs index 448eb10d9..027198538 100644 --- a/ndarray-rand/src/lib.rs +++ b/ndarray-rand/src/lib.rs @@ -35,17 +35,19 @@ use crate::rand::seq::index; use crate::rand::{thread_rng, Rng, SeedableRng}; use ndarray::{Array, Axis, RemoveAxis, ShapeBuilder}; -use ndarray::{ArrayBase, DataOwned, RawData, Data, Dimension}; +use ndarray::{ArrayBase, Data, DataOwned, Dimension, RawData}; #[cfg(feature = "quickcheck")] use quickcheck::{Arbitrary, Gen}; /// `rand`, re-exported for convenience and version-compatibility. -pub mod rand { +pub mod rand +{ pub use rand::*; } /// `rand-distr`, re-exported for convenience and version-compatibility. 
-pub mod rand_distr { +pub mod rand_distr +{ pub use rand_distr::*; } @@ -217,11 +219,7 @@ where /// # } /// ``` fn sample_axis_using( - &self, - axis: Axis, - n_samples: usize, - strategy: SamplingStrategy, - rng: &mut R, + &self, axis: Axis, n_samples: usize, strategy: SamplingStrategy, rng: &mut R, ) -> Array where R: Rng + ?Sized, @@ -263,13 +261,7 @@ where self.sample_axis_using(axis, n_samples, strategy, &mut get_rng()) } - fn sample_axis_using( - &self, - axis: Axis, - n_samples: usize, - strategy: SamplingStrategy, - rng: &mut R, - ) -> Array + fn sample_axis_using(&self, axis: Axis, n_samples: usize, strategy: SamplingStrategy, rng: &mut R) -> Array where R: Rng + ?Sized, A: Copy, @@ -281,9 +273,7 @@ where let distribution = Uniform::from(0..self.len_of(axis)); (0..n_samples).map(|_| distribution.sample(rng)).collect() } - SamplingStrategy::WithoutReplacement => { - index::sample(rng, self.len_of(axis), n_samples).into_vec() - } + SamplingStrategy::WithoutReplacement => index::sample(rng, self.len_of(axis), n_samples).into_vec(), }; self.select(axis, &indices) } @@ -296,15 +286,18 @@ where /// [`sample_axis`]: RandomExt::sample_axis /// [`sample_axis_using`]: RandomExt::sample_axis_using #[derive(Debug, Clone)] -pub enum SamplingStrategy { +pub enum SamplingStrategy +{ WithReplacement, WithoutReplacement, } // `Arbitrary` enables `quickcheck` to generate random `SamplingStrategy` values for testing. #[cfg(feature = "quickcheck")] -impl Arbitrary for SamplingStrategy { - fn arbitrary(g: &mut Gen) -> Self { +impl Arbitrary for SamplingStrategy +{ + fn arbitrary(g: &mut Gen) -> Self + { if bool::arbitrary(g) { SamplingStrategy::WithReplacement } else { @@ -313,7 +306,8 @@ impl Arbitrary for SamplingStrategy { } } -fn get_rng() -> SmallRng { +fn get_rng() -> SmallRng +{ SmallRng::from_rng(thread_rng()).expect("create SmallRng from thread_rng failed") } @@ -333,15 +327,15 @@ fn get_rng() -> SmallRng { /// // [ -0.6810, 0.1678, -0.9487, 0.3150, 1.2981]] /// # } #[derive(Copy, Clone, Debug)] -#[deprecated(since="0.14.0", note="Redundant with rand 0.8")] +#[deprecated(since = "0.14.0", note = "Redundant with rand 0.8")] pub struct F32(pub S); #[allow(deprecated)] impl Distribution for F32 -where - S: Distribution, +where S: Distribution { - fn sample(&self, rng: &mut R) -> f32 { + fn sample(&self, rng: &mut R) -> f32 + { self.0.sample(rng) as f32 } } diff --git a/ndarray-rand/tests/tests.rs b/ndarray-rand/tests/tests.rs index 5a0cd6c1b..2db040310 100644 --- a/ndarray-rand/tests/tests.rs +++ b/ndarray-rand/tests/tests.rs @@ -8,7 +8,8 @@ use ndarray_rand::{RandomExt, SamplingStrategy}; use quickcheck::{quickcheck, TestResult}; #[test] -fn test_dim() { +fn test_dim() +{ let (mm, nn) = (5, 5); for m in 0..mm { for n in 0..nn { @@ -22,7 +23,8 @@ fn test_dim() { } #[test] -fn test_dim_f() { +fn test_dim_f() +{ let (mm, nn) = (5, 5); for m in 0..mm { for n in 0..nn { @@ -36,15 +38,19 @@ fn test_dim_f() { } #[test] -fn sample_axis_on_view() { +fn sample_axis_on_view() +{ let m = 5; let a = Array::random((m, 4), Uniform::new(0., 2.)); - let _samples = a.view().sample_axis(Axis(0), m, SamplingStrategy::WithoutReplacement); + let _samples = a + .view() + .sample_axis(Axis(0), m, SamplingStrategy::WithoutReplacement); } #[test] #[should_panic] -fn oversampling_without_replacement_should_panic() { +fn oversampling_without_replacement_should_panic() +{ let m = 5; let a = Array::random((m, 4), Uniform::new(0., 2.)); let _samples = a.sample_axis(Axis(0), m + 1, SamplingStrategy::WithoutReplacement); @@ 
-109,12 +115,8 @@ quickcheck! { } } -fn sampling_works( - a: &Array2, - strategy: SamplingStrategy, - axis: Axis, - n_samples: usize, -) -> bool { +fn sampling_works(a: &Array2, strategy: SamplingStrategy, axis: Axis, n_samples: usize) -> bool +{ let samples = a.sample_axis(axis, n_samples, strategy); samples .axis_iter(axis) @@ -122,13 +124,15 @@ fn sampling_works( } // Check if, when sliced along `axis`, there is at least one lane in `a` equal to `b` -fn is_subset(a: &Array2, b: &ArrayView1, axis: Axis) -> bool { +fn is_subset(a: &Array2, b: &ArrayView1, axis: Axis) -> bool +{ a.axis_iter(axis).any(|lane| &lane == b) } #[test] #[should_panic] -fn sampling_without_replacement_from_a_zero_length_axis_should_panic() { +fn sampling_without_replacement_from_a_zero_length_axis_should_panic() +{ let n = 5; let a = Array::random((0, n), Uniform::new(0., 2.)); let _samples = a.sample_axis(Axis(0), 1, SamplingStrategy::WithoutReplacement); @@ -136,7 +140,8 @@ fn sampling_without_replacement_from_a_zero_length_axis_should_panic() { #[test] #[should_panic] -fn sampling_with_replacement_from_a_zero_length_axis_should_panic() { +fn sampling_with_replacement_from_a_zero_length_axis_should_panic() +{ let n = 5; let a = Array::random((0, n), Uniform::new(0., 2.)); let _samples = a.sample_axis(Axis(0), 1, SamplingStrategy::WithReplacement); diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 000000000..f3e376ccc --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,26 @@ +edition = "2018" +array_width = 100 +chain_width = 60 +fn_call_width = 100 +max_width = 120 +brace_style = "AlwaysNextLine" +control_brace_style = "AlwaysSameLine" +fn_params_layout = "Compressed" # ? +format_macro_bodies = false +imports_granularity = "Preserve" +imports_indent = "Block" +imports_layout = "HorizontalVertical" +inline_attribute_width = 0 +indent_style = "Block" +match_arm_blocks = false +match_arm_leading_pipes = "Preserve" +merge_derives = false +overflow_delimited_expr = true +reorder_modules = false # impacts rustdoc order +short_array_element_width_threshold = 32 +skip_macro_invocations = ["*"] +unstable_features = true +where_single_line = true + +# ignored files +ignore = [] diff --git a/src/aliases.rs b/src/aliases.rs index 9a8ea8f2c..5df0c95ec 100644 --- a/src/aliases.rs +++ b/src/aliases.rs @@ -7,50 +7,58 @@ use crate::{ArcArray, Array, ArrayView, ArrayViewMut, Ix, IxDynImpl}; /// Create a zero-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub const fn Ix0() -> Ix0 { +pub const fn Ix0() -> Ix0 +{ Dim::new([]) } /// Create a one-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub const fn Ix1(i0: Ix) -> Ix1 { +pub const fn Ix1(i0: Ix) -> Ix1 +{ Dim::new([i0]) } /// Create a two-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub const fn Ix2(i0: Ix, i1: Ix) -> Ix2 { +pub const fn Ix2(i0: Ix, i1: Ix) -> Ix2 +{ Dim::new([i0, i1]) } /// Create a three-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub const fn Ix3(i0: Ix, i1: Ix, i2: Ix) -> Ix3 { +pub const fn Ix3(i0: Ix, i1: Ix, i2: Ix) -> Ix3 +{ Dim::new([i0, i1, i2]) } /// Create a four-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub const fn Ix4(i0: Ix, i1: Ix, i2: Ix, i3: Ix) -> Ix4 { +pub const fn Ix4(i0: Ix, i1: Ix, i2: Ix, i3: Ix) -> Ix4 +{ Dim::new([i0, i1, i2, i3]) } /// Create a five-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub const fn Ix5(i0: Ix, i1: Ix, i2: Ix, i3: Ix, i4: Ix) -> Ix5 { +pub const fn Ix5(i0: Ix, i1: Ix, i2: Ix, i3: Ix, i4: Ix) -> Ix5 
+{ Dim::new([i0, i1, i2, i3, i4]) } /// Create a six-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub const fn Ix6(i0: Ix, i1: Ix, i2: Ix, i3: Ix, i4: Ix, i5: Ix) -> Ix6 { +pub const fn Ix6(i0: Ix, i1: Ix, i2: Ix, i3: Ix, i4: Ix, i5: Ix) -> Ix6 +{ Dim::new([i0, i1, i2, i3, i4, i5]) } /// Create a dynamic-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub fn IxDyn(ix: &[Ix]) -> IxDyn { +pub fn IxDyn(ix: &[Ix]) -> IxDyn +{ Dim(ix) } diff --git a/src/argument_traits.rs b/src/argument_traits.rs index 82d4869a9..de8ac7f99 100644 --- a/src/argument_traits.rs +++ b/src/argument_traits.rs @@ -4,36 +4,45 @@ use std::mem::MaybeUninit; use crate::math_cell::MathCell; /// A producer element that can be assigned to once -pub trait AssignElem { +pub trait AssignElem +{ /// Assign the value `input` to the element that self represents. fn assign_elem(self, input: T); } /// Assignable element, simply `*self = input`. -impl<'a, T> AssignElem for &'a mut T { - fn assign_elem(self, input: T) { +impl<'a, T> AssignElem for &'a mut T +{ + fn assign_elem(self, input: T) + { *self = input; } } /// Assignable element, simply `self.set(input)`. -impl<'a, T> AssignElem for &'a Cell { - fn assign_elem(self, input: T) { +impl<'a, T> AssignElem for &'a Cell +{ + fn assign_elem(self, input: T) + { self.set(input); } } /// Assignable element, simply `self.set(input)`. -impl<'a, T> AssignElem for &'a MathCell { - fn assign_elem(self, input: T) { +impl<'a, T> AssignElem for &'a MathCell +{ + fn assign_elem(self, input: T) + { self.set(input); } } /// Assignable element, the item in the MaybeUninit is overwritten (prior value, if any, is not /// read or dropped). -impl<'a, T> AssignElem for &'a mut MaybeUninit { - fn assign_elem(self, input: T) { +impl<'a, T> AssignElem for &'a mut MaybeUninit +{ + fn assign_elem(self, input: T) + { *self = MaybeUninit::new(input); } } diff --git a/src/array_approx.rs b/src/array_approx.rs index a40982a56..286a1146c 100644 --- a/src/array_approx.rs +++ b/src/array_approx.rs @@ -1,5 +1,6 @@ #[cfg(feature = "approx")] -mod approx_methods { +mod approx_methods +{ use crate::imp_prelude::*; impl ArrayBase @@ -24,12 +25,7 @@ mod approx_methods { /// apart; and the absolute difference otherwise. /// /// **Requires crate feature `"approx"`** - pub fn relative_eq( - &self, - other: &ArrayBase, - epsilon: A::Epsilon, - max_relative: A::Epsilon, - ) -> bool + pub fn relative_eq(&self, other: &ArrayBase, epsilon: A::Epsilon, max_relative: A::Epsilon) -> bool where A: ::approx::RelativeEq, A::Epsilon: Clone, diff --git a/src/array_serde.rs b/src/array_serde.rs index a6f3c617c..31b613d4c 100644 --- a/src/array_serde.rs +++ b/src/array_serde.rs @@ -9,11 +9,11 @@ use serde::de::{self, MapAccess, SeqAccess, Visitor}; use serde::ser::{SerializeSeq, SerializeStruct}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use std::fmt; -use std::marker::PhantomData; use alloc::format; #[cfg(not(feature = "std"))] use alloc::vec::Vec; +use std::fmt; +use std::marker::PhantomData; use crate::imp_prelude::*; @@ -24,8 +24,7 @@ use crate::IntoDimension; /// Verifies that the version of the deserialized array matches the current /// `ARRAY_FORMAT_VERSION`. 
pub fn verify_version(v: u8) -> Result<(), E> -where - E: de::Error, +where E: de::Error { if v != ARRAY_FORMAT_VERSION { let err_msg = format!("unknown array version: {}", v); @@ -37,12 +36,10 @@ where /// **Requires crate feature `"serde"`** impl Serialize for Dim -where - I: Serialize, +where I: Serialize { fn serialize(&self, serializer: Se) -> Result - where - Se: Serializer, + where Se: Serializer { self.ix().serialize(serializer) } @@ -50,32 +47,30 @@ where /// **Requires crate feature `"serde"`** impl<'de, I> Deserialize<'de> for Dim -where - I: Deserialize<'de>, +where I: Deserialize<'de> { fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, + where D: Deserializer<'de> { I::deserialize(deserializer).map(Dim::new) } } /// **Requires crate feature `"serde"`** -impl Serialize for IxDyn { +impl Serialize for IxDyn +{ fn serialize(&self, serializer: Se) -> Result - where - Se: Serializer, + where Se: Serializer { self.ix().serialize(serializer) } } /// **Requires crate feature `"serde"`** -impl<'de> Deserialize<'de> for IxDyn { +impl<'de> Deserialize<'de> for IxDyn +{ fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, + where D: Deserializer<'de> { let v = Vec::::deserialize(deserializer)?; Ok(v.into_dimension()) @@ -90,8 +85,7 @@ where S: Data, { fn serialize(&self, serializer: Se) -> Result - where - Se: Serializer, + where Se: Serializer { let mut state = serializer.serialize_struct("Array", 3)?; state.serialize_field("v", &ARRAY_FORMAT_VERSION)?; @@ -110,8 +104,7 @@ where D: Dimension + Serialize, { fn serialize(&self, serializer: S) -> Result - where - S: Serializer, + where S: Serializer { let iter = &self.0; let mut seq = serializer.serialize_seq(Some(iter.len()))?; @@ -122,19 +115,23 @@ where } } -struct ArrayVisitor { +struct ArrayVisitor +{ _marker_a: PhantomData, _marker_b: PhantomData, } -enum ArrayField { +enum ArrayField +{ Version, Dim, Data, } -impl ArrayVisitor { - pub fn new() -> Self { +impl ArrayVisitor +{ + pub fn new() -> Self + { ArrayVisitor { _marker_a: PhantomData, _marker_b: PhantomData, @@ -152,30 +149,30 @@ where S: DataOwned, { fn deserialize(deserializer: D) -> Result, D::Error> - where - D: Deserializer<'de>, + where D: Deserializer<'de> { deserializer.deserialize_struct("Array", ARRAY_FIELDS, ArrayVisitor::new()) } } -impl<'de> Deserialize<'de> for ArrayField { +impl<'de> Deserialize<'de> for ArrayField +{ fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, + where D: Deserializer<'de> { struct ArrayFieldVisitor; - impl<'de> Visitor<'de> for ArrayFieldVisitor { + impl<'de> Visitor<'de> for ArrayFieldVisitor + { type Value = ArrayField; - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result + { formatter.write_str(r#""v", "dim", or "data""#) } fn visit_str(self, value: &str) -> Result - where - E: de::Error, + where E: de::Error { match value { "v" => Ok(ArrayField::Version), @@ -186,17 +183,13 @@ impl<'de> Deserialize<'de> for ArrayField { } fn visit_bytes(self, value: &[u8]) -> Result - where - E: de::Error, + where E: de::Error { match value { b"v" => Ok(ArrayField::Version), b"dim" => Ok(ArrayField::Dim), b"data" => Ok(ArrayField::Data), - other => Err(de::Error::unknown_field( - &format!("{:?}", other), - ARRAY_FIELDS, - )), + other => Err(de::Error::unknown_field(&format!("{:?}", other), ARRAY_FIELDS)), } } } @@ -213,13 +206,13 @@ where { type Value = ArrayBase; - fn expecting(&self, 
formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result + { formatter.write_str("ndarray representation") } fn visit_seq(self, mut visitor: V) -> Result, V::Error> - where - V: SeqAccess<'de>, + where V: SeqAccess<'de> { let v: u8 = match visitor.next_element()? { Some(value) => value, @@ -252,8 +245,7 @@ where } fn visit_map(self, mut visitor: V) -> Result, V::Error> - where - V: MapAccess<'de>, + where V: MapAccess<'de> { let mut v: Option = None; let mut data: Option> = None; diff --git a/src/arrayformat.rs b/src/arrayformat.rs index ec5b041d9..202805604 100644 --- a/src/arrayformat.rs +++ b/src/arrayformat.rs @@ -7,8 +7,8 @@ // except according to those terms. use super::{ArrayBase, ArrayView, Axis, Data, Dimension, NdProducer}; use crate::aliases::{Ix1, IxDyn}; -use std::fmt; use alloc::format; +use std::fmt; /// Default threshold, below this element count, we don't ellipsize const ARRAY_MANY_ELEMENT_LIMIT: usize = 500; @@ -29,14 +29,17 @@ const AXIS_2D_OVERFLOW_LIMIT: usize = 22; const ELLIPSIS: &str = "..."; #[derive(Clone, Debug)] -struct FormatOptions { +struct FormatOptions +{ axis_collapse_limit: usize, axis_collapse_limit_next_last: usize, axis_collapse_limit_last: usize, } -impl FormatOptions { - pub(crate) fn default_for_array(nelem: usize, no_limit: bool) -> Self { +impl FormatOptions +{ + pub(crate) fn default_for_array(nelem: usize, no_limit: bool) -> Self + { let default = Self { axis_collapse_limit: AXIS_LIMIT_STACKED, axis_collapse_limit_next_last: AXIS_LIMIT_COL, @@ -45,7 +48,8 @@ impl FormatOptions { default.set_no_limit(no_limit || nelem < ARRAY_MANY_ELEMENT_LIMIT) } - fn set_no_limit(mut self, no_limit: bool) -> Self { + fn set_no_limit(mut self, no_limit: bool) -> Self + { if no_limit { self.axis_collapse_limit = std::usize::MAX; self.axis_collapse_limit_next_last = std::usize::MAX; @@ -56,7 +60,8 @@ impl FormatOptions { /// Axis length collapse limit before ellipsizing, where `axis_rindex` is /// the index of the axis from the back. - pub(crate) fn collapse_limit(&self, axis_rindex: usize) -> usize { + pub(crate) fn collapse_limit(&self, axis_rindex: usize) -> usize + { match axis_rindex { 0 => self.axis_collapse_limit_last, 1 => self.axis_collapse_limit_next_last, @@ -78,13 +83,10 @@ impl FormatOptions { /// * `fmt_elem`: A function that formats an element in the list, given the /// formatter and the index of the item in the list. 
fn format_with_overflow( - f: &mut fmt::Formatter<'_>, - length: usize, - limit: usize, - separator: &str, - ellipsis: &str, + f: &mut fmt::Formatter<'_>, length: usize, limit: usize, separator: &str, ellipsis: &str, fmt_elem: &mut dyn FnMut(&mut fmt::Formatter, usize) -> fmt::Result, -) -> fmt::Result { +) -> fmt::Result +{ if length == 0 { // no-op } else if length <= limit { @@ -111,10 +113,7 @@ fn format_with_overflow( } fn format_array( - array: &ArrayBase, - f: &mut fmt::Formatter<'_>, - format: F, - fmt_opt: &FormatOptions, + array: &ArrayBase, f: &mut fmt::Formatter<'_>, format: F, fmt_opt: &FormatOptions, ) -> fmt::Result where F: FnMut(&A, &mut fmt::Formatter<'_>) -> fmt::Result + Clone, @@ -127,11 +126,7 @@ where } fn format_array_inner( - view: ArrayView, - f: &mut fmt::Formatter<'_>, - mut format: F, - fmt_opt: &FormatOptions, - depth: usize, + view: ArrayView, f: &mut fmt::Formatter<'_>, mut format: F, fmt_opt: &FormatOptions, depth: usize, full_ndim: usize, ) -> fmt::Result where @@ -150,14 +145,9 @@ where &[len] => { let view = view.view().into_dimensionality::().unwrap(); f.write_str("[")?; - format_with_overflow( - f, - len, - fmt_opt.collapse_limit(0), - ", ", - ELLIPSIS, - &mut |f, index| format(&view[index], f), - )?; + format_with_overflow(f, len, fmt_opt.collapse_limit(0), ", ", ELLIPSIS, &mut |f, index| { + format(&view[index], f) + })?; f.write_str("]")?; } // For n-dimensional arrays, we proceed recursively @@ -169,14 +159,7 @@ where f.write_str("[")?; let limit = fmt_opt.collapse_limit(full_ndim - depth - 1); format_with_overflow(f, shape[0], limit, &separator, ELLIPSIS, &mut |f, index| { - format_array_inner( - view.index_axis(Axis(0), index), - f, - format.clone(), - fmt_opt, - depth + 1, - full_ndim, - ) + format_array_inner(view.index_axis(Axis(0), index), f, format.clone(), fmt_opt, depth + 1, full_ndim) })?; f.write_str("]")?; } @@ -190,10 +173,10 @@ where /// /// The array is shown in multiline style. impl fmt::Display for ArrayBase -where - S: Data, +where S: Data { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { let fmt_opt = FormatOptions::default_for_array(self.len(), f.alternate()); format_array(self, f, <_>::fmt, &fmt_opt) } @@ -204,10 +187,10 @@ where /// /// The array is shown in multiline style. impl fmt::Debug for ArrayBase -where - S: Data, +where S: Data { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { let fmt_opt = FormatOptions::default_for_array(self.len(), f.alternate()); format_array(self, f, <_>::fmt, &fmt_opt)?; @@ -232,10 +215,10 @@ where /// /// The array is shown in multiline style. impl fmt::LowerExp for ArrayBase -where - S: Data, +where S: Data { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { let fmt_opt = FormatOptions::default_for_array(self.len(), f.alternate()); format_array(self, f, <_>::fmt, &fmt_opt) } @@ -246,10 +229,10 @@ where /// /// The array is shown in multiline style. impl fmt::UpperExp for ArrayBase -where - S: Data, +where S: Data { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { let fmt_opt = FormatOptions::default_for_array(self.len(), f.alternate()); format_array(self, f, <_>::fmt, &fmt_opt) } @@ -259,10 +242,10 @@ where /// /// The array is shown in multiline style. 
impl fmt::LowerHex for ArrayBase -where - S: Data, +where S: Data { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { let fmt_opt = FormatOptions::default_for_array(self.len(), f.alternate()); format_array(self, f, <_>::fmt, &fmt_opt) } @@ -273,27 +256,29 @@ where /// /// The array is shown in multiline style. impl fmt::Binary for ArrayBase -where - S: Data, +where S: Data { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { let fmt_opt = FormatOptions::default_for_array(self.len(), f.alternate()); format_array(self, f, <_>::fmt, &fmt_opt) } } #[cfg(test)] -mod formatting_with_omit { - use itertools::Itertools; +mod formatting_with_omit +{ #[cfg(not(feature = "std"))] use alloc::string::String; #[cfg(not(feature = "std"))] use alloc::vec::Vec; + use itertools::Itertools; use super::*; use crate::prelude::*; - fn assert_str_eq(expected: &str, actual: &str) { + fn assert_str_eq(expected: &str, actual: &str) + { // use assert to avoid printing the strings twice on failure assert!( expected == actual, @@ -303,11 +288,8 @@ mod formatting_with_omit { ); } - fn ellipsize( - limit: usize, - sep: &str, - elements: impl IntoIterator, - ) -> String { + fn ellipsize(limit: usize, sep: &str, elements: impl IntoIterator) -> String + { let elements = elements.into_iter().collect::>(); let edge = limit / 2; if elements.len() <= limit { @@ -325,7 +307,8 @@ mod formatting_with_omit { } #[test] - fn empty_arrays() { + fn empty_arrays() + { let a: Array2 = arr2(&[[], []]); let actual = format!("{}", a); let expected = "[[]]"; @@ -333,7 +316,8 @@ mod formatting_with_omit { } #[test] - fn zero_length_axes() { + fn zero_length_axes() + { let a = Array3::::zeros((3, 0, 4)); let actual = format!("{}", a); let expected = "[[[]]]"; @@ -341,7 +325,8 @@ mod formatting_with_omit { } #[test] - fn dim_0() { + fn dim_0() + { let element = 12; let a = arr0(element); let actual = format!("{}", a); @@ -350,7 +335,8 @@ mod formatting_with_omit { } #[test] - fn dim_1() { + fn dim_1() + { let overflow: usize = 2; let a = Array1::from_elem(ARRAY_MANY_ELEMENT_LIMIT + overflow, 1); let actual = format!("{}", a); @@ -359,7 +345,8 @@ mod formatting_with_omit { } #[test] - fn dim_1_alternate() { + fn dim_1_alternate() + { let overflow: usize = 2; let a = Array1::from_elem(ARRAY_MANY_ELEMENT_LIMIT + overflow, 1); let actual = format!("{:#}", a); @@ -368,12 +355,10 @@ mod formatting_with_omit { } #[test] - fn dim_2_last_axis_overflow() { + fn dim_2_last_axis_overflow() + { let overflow: usize = 2; - let a = Array2::from_elem( - (AXIS_2D_OVERFLOW_LIMIT, AXIS_2D_OVERFLOW_LIMIT + overflow), - 1, - ); + let a = Array2::from_elem((AXIS_2D_OVERFLOW_LIMIT, AXIS_2D_OVERFLOW_LIMIT + overflow), 1); let actual = format!("{}", a); let expected = "\ [[1, 1, 1, 1, 1, ..., 1, 1, 1, 1, 1], @@ -391,7 +376,8 @@ mod formatting_with_omit { } #[test] - fn dim_2_non_last_axis_overflow() { + fn dim_2_non_last_axis_overflow() + { let a = Array2::from_elem((ARRAY_MANY_ELEMENT_LIMIT / 10, 10), 1); let actual = format!("{}", a); let row = format!("{}", a.row(0)); @@ -403,7 +389,8 @@ mod formatting_with_omit { } #[test] - fn dim_2_non_last_axis_overflow_alternate() { + fn dim_2_non_last_axis_overflow_alternate() + { let a = Array2::from_elem((AXIS_LIMIT_COL * 4, 6), 1); let actual = format!("{:#}", a); let row = format!("{}", a.row(0)); @@ -412,15 +399,10 @@ mod formatting_with_omit { } #[test] - fn 
dim_2_multi_directional_overflow() { + fn dim_2_multi_directional_overflow() + { let overflow: usize = 2; - let a = Array2::from_elem( - ( - AXIS_2D_OVERFLOW_LIMIT + overflow, - AXIS_2D_OVERFLOW_LIMIT + overflow, - ), - 1, - ); + let a = Array2::from_elem((AXIS_2D_OVERFLOW_LIMIT + overflow, AXIS_2D_OVERFLOW_LIMIT + overflow), 1); let actual = format!("{}", a); let row = format!("[{}]", ellipsize(AXIS_LIMIT_ROW, ", ", a.row(0))); let expected = format!( @@ -431,15 +413,10 @@ mod formatting_with_omit { } #[test] - fn dim_2_multi_directional_overflow_alternate() { + fn dim_2_multi_directional_overflow_alternate() + { let overflow: usize = 2; - let a = Array2::from_elem( - ( - AXIS_2D_OVERFLOW_LIMIT + overflow, - AXIS_2D_OVERFLOW_LIMIT + overflow, - ), - 1, - ); + let a = Array2::from_elem((AXIS_2D_OVERFLOW_LIMIT + overflow, AXIS_2D_OVERFLOW_LIMIT + overflow), 1); let actual = format!("{:#}", a); let row = format!("{}", a.row(0)); let expected = format!("[{}]", (0..a.nrows()).map(|_| &row).format(",\n ")); @@ -447,13 +424,11 @@ mod formatting_with_omit { } #[test] - fn dim_3_overflow_most() { - let a = Array3::from_shape_fn( - (AXIS_LIMIT_STACKED + 1, AXIS_LIMIT_COL, AXIS_LIMIT_ROW + 1), - |(i, j, k)| { - 1000. + (100. * ((i as f64).sqrt() + (j as f64).sin() + k as f64)).round() / 100. - }, - ); + fn dim_3_overflow_most() + { + let a = Array3::from_shape_fn((AXIS_LIMIT_STACKED + 1, AXIS_LIMIT_COL, AXIS_LIMIT_ROW + 1), |(i, j, k)| { + 1000. + (100. * ((i as f64).sqrt() + (j as f64).sin() + k as f64)).round() / 100. + }); let actual = format!("{:6.1}", a); let expected = "\ [[[1000.0, 1001.0, 1002.0, 1003.0, 1004.0, ..., 1007.0, 1008.0, 1009.0, 1010.0, 1011.0], @@ -533,7 +508,8 @@ mod formatting_with_omit { } #[test] - fn dim_4_overflow_outer() { + fn dim_4_overflow_outer() + { let a = Array4::from_shape_fn((10, 10, 3, 3), |(i, j, k, l)| i + j + k + l); let actual = format!("{:2}", a); // Generated using NumPy with: diff --git a/src/arraytraits.rs b/src/arraytraits.rs index 8d44c1e72..5c376cb0a 100644 --- a/src/arraytraits.rs +++ b/src/arraytraits.rs @@ -19,12 +19,16 @@ use crate::imp_prelude::*; use crate::{ dimension, iter::{Iter, IterMut}, - numeric_util, FoldWhile, NdIndex, Zip, + numeric_util, + FoldWhile, + NdIndex, + Zip, }; #[cold] #[inline(never)] -pub(crate) fn array_out_of_bounds() -> ! { +pub(crate) fn array_out_of_bounds() -> ! +{ panic!("ndarray: index out of bounds"); } @@ -49,7 +53,8 @@ where { type Output = S::Elem; #[inline] - fn index(&self, index: I) -> &S::Elem { + fn index(&self, index: I) -> &S::Elem + { debug_bounds_check!(self, index); unsafe { &*self.ptr.as_ptr().offset( @@ -71,7 +76,8 @@ where S: DataMut, { #[inline] - fn index_mut(&mut self, index: I) -> &mut S::Elem { + fn index_mut(&mut self, index: I) -> &mut S::Elem + { debug_bounds_check!(self, index); unsafe { &mut *self.as_mut_ptr().offset( @@ -92,7 +98,8 @@ where S2: Data, D: Dimension, { - fn eq(&self, rhs: &ArrayBase) -> bool { + fn eq(&self, rhs: &ArrayBase) -> bool + { if self.shape() != rhs.shape() { return false; } @@ -124,7 +131,8 @@ where S2: Data, D: Dimension, { - fn eq(&self, rhs: &&ArrayBase) -> bool { + fn eq(&self, rhs: &&ArrayBase) -> bool + { *self == **rhs } } @@ -139,7 +147,8 @@ where S2: Data, D: Dimension, { - fn eq(&self, rhs: &ArrayBase) -> bool { + fn eq(&self, rhs: &ArrayBase) -> bool + { **self == *rhs } } @@ -153,20 +162,19 @@ where } impl From> for ArrayBase -where - S: DataOwned, +where S: DataOwned { /// Create a one-dimensional array from a boxed slice (no copying needed). 
/// /// **Panics** if the length is greater than `isize::MAX`. - fn from(b: Box<[A]>) -> Self { + fn from(b: Box<[A]>) -> Self + { Self::from_vec(b.into_vec()) } } impl From> for ArrayBase -where - S: DataOwned, +where S: DataOwned { /// Create a one-dimensional array from a vector (no copying needed). /// @@ -177,14 +185,14 @@ where /// /// let array = Array::from(vec![1., 2., 3., 4.]); /// ``` - fn from(v: Vec) -> Self { + fn from(v: Vec) -> Self + { Self::from_vec(v) } } impl FromIterator for ArrayBase -where - S: DataOwned, +where S: DataOwned { /// Create a one-dimensional array from an iterable. /// @@ -198,8 +206,7 @@ where /// assert!(array == arr1(&[0, 1, 4, 9, 16])) /// ``` fn from_iter(iterable: I) -> ArrayBase - where - I: IntoIterator, + where I: IntoIterator { Self::from_iter(iterable) } @@ -213,7 +220,8 @@ where type Item = &'a S::Elem; type IntoIter = Iter<'a, S::Elem, D>; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { self.iter() } } @@ -226,31 +234,32 @@ where type Item = &'a mut S::Elem; type IntoIter = IterMut<'a, S::Elem, D>; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { self.iter_mut() } } impl<'a, A, D> IntoIterator for ArrayView<'a, A, D> -where - D: Dimension, +where D: Dimension { type Item = &'a A; type IntoIter = Iter<'a, A, D>; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { self.into_iter_() } } impl<'a, A, D> IntoIterator for ArrayViewMut<'a, A, D> -where - D: Dimension, +where D: Dimension { type Item = &'a mut A; type IntoIter = IterMut<'a, A, D>; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { self.into_iter_() } } @@ -262,7 +271,8 @@ where S::Elem: hash::Hash, { // Note: elements are hashed in the logical order - fn hash(&self, state: &mut H) { + fn hash(&self, state: &mut H) + { self.shape().hash(state); if let Some(self_s) = self.as_slice() { hash::Hash::hash_slice(self_s, state); @@ -312,13 +322,13 @@ pub const ARRAY_FORMAT_VERSION: u8 = 1u8; /// occur if `A` is zero-sized, because slices cannot contain more than /// `isize::MAX` number of bytes.) impl<'a, A, Slice: ?Sized> From<&'a Slice> for ArrayView<'a, A, Ix1> -where - Slice: AsRef<[A]>, +where Slice: AsRef<[A]> { /// Create a one-dimensional read-only array view of the data in `slice`. /// /// **Panics** if the slice length is greater than `isize::MAX`. - fn from(slice: &'a Slice) -> Self { + fn from(slice: &'a Slice) -> Self + { aview1(slice.as_ref()) } } @@ -328,9 +338,11 @@ where /// **Panics** if the product of non-zero axis lengths overflows `isize`. (This /// can only occur if A is zero-sized or if `N` is zero, because slices cannot /// contain more than `isize::MAX` number of bytes.) -impl<'a, A, const N: usize> From<&'a [[A; N]]> for ArrayView<'a, A, Ix2> { +impl<'a, A, const N: usize> From<&'a [[A; N]]> for ArrayView<'a, A, Ix2> +{ /// Create a two-dimensional read-only array view of the data in `slice` - fn from(xs: &'a [[A; N]]) -> Self { + fn from(xs: &'a [[A; N]]) -> Self + { aview2(xs) } } @@ -342,20 +354,21 @@ where D: Dimension, { /// Create a read-only array view of the array. - fn from(array: &'a ArrayBase) -> Self { + fn from(array: &'a ArrayBase) -> Self + { array.view() } } /// Implementation of `ArrayViewMut::from(&mut S)` where `S` is a slice or sliceable. 
impl<'a, A, Slice: ?Sized> From<&'a mut Slice> for ArrayViewMut<'a, A, Ix1> -where - Slice: AsMut<[A]>, +where Slice: AsMut<[A]> { /// Create a one-dimensional read-write array view of the data in `slice`. /// /// **Panics** if the slice length is greater than `isize::MAX`. - fn from(slice: &'a mut Slice) -> Self { + fn from(slice: &'a mut Slice) -> Self + { let xs = slice.as_mut(); if mem::size_of::() == 0 { assert!( @@ -372,15 +385,16 @@ where /// **Panics** if the product of non-zero axis lengths overflows `isize`. (This /// can only occur if `A` is zero-sized or if `N` is zero, because slices /// cannot contain more than `isize::MAX` number of bytes.) -impl<'a, A, const N: usize> From<&'a mut [[A; N]]> for ArrayViewMut<'a, A, Ix2> { +impl<'a, A, const N: usize> From<&'a mut [[A; N]]> for ArrayViewMut<'a, A, Ix2> +{ /// Create a two-dimensional read-write array view of the data in `slice` - fn from(xs: &'a mut [[A; N]]) -> Self { + fn from(xs: &'a mut [[A; N]]) -> Self + { let cols = N; let rows = xs.len(); let dim = Ix2(rows, cols); if size_of::() == 0 { - dimension::size_of_shape_checked(&dim) - .expect("Product of non-zero axis lengths must not overflow isize."); + dimension::size_of_shape_checked(&dim).expect("Product of non-zero axis lengths must not overflow isize."); } else if N == 0 { assert!( xs.len() <= isize::MAX as usize, @@ -404,16 +418,17 @@ where D: Dimension, { /// Create a read-write array view of the array. - fn from(array: &'a mut ArrayBase) -> Self { + fn from(array: &'a mut ArrayBase) -> Self + { array.view_mut() } } impl From> for ArcArray -where - D: Dimension, +where D: Dimension { - fn from(arr: Array) -> ArcArray { + fn from(arr: Array) -> ArcArray + { arr.into_shared() } } @@ -440,8 +455,7 @@ where /// /// ``` pub trait AsArray<'a, A: 'a, D = Ix1>: Into> -where - D: Dimension, +where D: Dimension { } impl<'a, A: 'a, D, T> AsArray<'a, A, D> for T @@ -471,7 +485,8 @@ where { // NOTE: We can implement Default for non-zero dimensional array views by // using an empty slice, however we need a trait for nonzero Dimension. - fn default() -> Self { + fn default() -> Self + { ArrayBase::default(D::default()) } } diff --git a/src/data_repr.rs b/src/data_repr.rs index f740988f4..c64cbcfcf 100644 --- a/src/data_repr.rs +++ b/src/data_repr.rs @@ -1,12 +1,12 @@ -use std::mem; -use std::mem::ManuallyDrop; -use std::ptr::NonNull; -use alloc::slice; +use crate::extension::nonnull; #[cfg(not(feature = "std"))] use alloc::borrow::ToOwned; +use alloc::slice; #[cfg(not(feature = "std"))] use alloc::vec::Vec; -use crate::extension::nonnull; +use std::mem; +use std::mem::ManuallyDrop; +use std::ptr::NonNull; use rawpointer::PointerExt; @@ -20,63 +20,68 @@ use rawpointer::PointerExt; // transmutable A -> B. 
#[derive(Debug)] #[repr(C)] -pub struct OwnedRepr { +pub struct OwnedRepr +{ ptr: NonNull, len: usize, capacity: usize, } -impl OwnedRepr { - pub(crate) fn from(v: Vec) -> Self { +impl OwnedRepr +{ + pub(crate) fn from(v: Vec) -> Self + { let mut v = ManuallyDrop::new(v); let len = v.len(); let capacity = v.capacity(); let ptr = nonnull::nonnull_from_vec_data(&mut v); - Self { - ptr, - len, - capacity, - } + Self { ptr, len, capacity } } - pub(crate) fn into_vec(self) -> Vec { + pub(crate) fn into_vec(self) -> Vec + { ManuallyDrop::new(self).take_as_vec() } - pub(crate) fn as_slice(&self) -> &[A] { - unsafe { - slice::from_raw_parts(self.ptr.as_ptr(), self.len) - } + pub(crate) fn as_slice(&self) -> &[A] + { + unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) } } - pub(crate) fn len(&self) -> usize { self.len } + pub(crate) fn len(&self) -> usize + { + self.len + } - pub(crate) fn as_ptr(&self) -> *const A { + pub(crate) fn as_ptr(&self) -> *const A + { self.ptr.as_ptr() } - pub(crate) fn as_ptr_mut(&self) -> *mut A { + pub(crate) fn as_ptr_mut(&self) -> *mut A + { self.ptr.as_ptr() } - pub(crate) fn as_nonnull_mut(&mut self) -> NonNull { + pub(crate) fn as_nonnull_mut(&mut self) -> NonNull + { self.ptr } /// Return end pointer - pub(crate) fn as_end_nonnull(&self) -> NonNull { - unsafe { - self.ptr.add(self.len) - } + pub(crate) fn as_end_nonnull(&self) -> NonNull + { + unsafe { self.ptr.add(self.len) } } /// Reserve `additional` elements; return the new pointer - /// + /// /// ## Safety /// /// Note that existing pointers into the data are invalidated #[must_use = "must use new pointer to update existing pointers"] - pub(crate) fn reserve(&mut self, additional: usize) -> NonNull { + pub(crate) fn reserve(&mut self, additional: usize) -> NonNull + { self.modify_as_vec(|mut v| { v.reserve(additional); v @@ -89,13 +94,15 @@ impl OwnedRepr { /// ## Safety /// /// The first `new_len` elements of the data should be valid. - pub(crate) unsafe fn set_len(&mut self, new_len: usize) { + pub(crate) unsafe fn set_len(&mut self, new_len: usize) + { debug_assert!(new_len <= self.capacity); self.len = new_len; } /// Return the length (number of elements in total) - pub(crate) fn release_all_elements(&mut self) -> usize { + pub(crate) fn release_all_elements(&mut self) -> usize + { let ret = self.len; self.len = 0; ret @@ -107,7 +114,8 @@ impl OwnedRepr { /// /// Caller must ensure the two types have the same representation. /// **Panics** if sizes don't match (which is not a sufficient check). 
- pub(crate) unsafe fn data_subst(self) -> OwnedRepr { + pub(crate) unsafe fn data_subst(self) -> OwnedRepr + { // necessary but not sufficient check assert_eq!(mem::size_of::(), mem::size_of::()); let self_ = ManuallyDrop::new(self); @@ -118,30 +126,32 @@ impl OwnedRepr { } } - fn modify_as_vec(&mut self, f: impl FnOnce(Vec) -> Vec) { + fn modify_as_vec(&mut self, f: impl FnOnce(Vec) -> Vec) + { let v = self.take_as_vec(); *self = Self::from(f(v)); } - fn take_as_vec(&mut self) -> Vec { + fn take_as_vec(&mut self) -> Vec + { let capacity = self.capacity; let len = self.len; self.len = 0; self.capacity = 0; - unsafe { - Vec::from_raw_parts(self.ptr.as_ptr(), len, capacity) - } + unsafe { Vec::from_raw_parts(self.ptr.as_ptr(), len, capacity) } } } impl Clone for OwnedRepr - where A: Clone +where A: Clone { - fn clone(&self) -> Self { + fn clone(&self) -> Self + { Self::from(self.as_slice().to_owned()) } - fn clone_from(&mut self, other: &Self) { + fn clone_from(&mut self, other: &Self) + { let mut v = self.take_as_vec(); let other = other.as_slice(); @@ -155,8 +165,10 @@ impl Clone for OwnedRepr } } -impl Drop for OwnedRepr { - fn drop(&mut self) { +impl Drop for OwnedRepr +{ + fn drop(&mut self) + { if self.capacity > 0 { // correct because: If the elements don't need dropping, an // empty Vec is ok. Only the Vec's allocation needs dropping. @@ -176,6 +188,5 @@ impl Drop for OwnedRepr { } } -unsafe impl Sync for OwnedRepr where A: Sync { } -unsafe impl Send for OwnedRepr where A: Send { } - +unsafe impl Sync for OwnedRepr where A: Sync {} +unsafe impl Send for OwnedRepr where A: Send {} diff --git a/src/data_traits.rs b/src/data_traits.rs index 50e5a3e6e..a2784b8d3 100644 --- a/src/data_traits.rs +++ b/src/data_traits.rs @@ -10,16 +10,14 @@ use rawpointer::PointerExt; -use std::mem::{self, size_of}; -use std::mem::MaybeUninit; -use std::ptr::NonNull; use alloc::sync::Arc; #[cfg(not(feature = "std"))] use alloc::vec::Vec; +use std::mem::MaybeUninit; +use std::mem::{self, size_of}; +use std::ptr::NonNull; -use crate::{ - ArcArray, Array, ArrayBase, CowRepr, Dimension, OwnedArcRepr, OwnedRepr, RawViewRepr, ViewRepr, -}; +use crate::{ArcArray, Array, ArrayBase, CowRepr, Dimension, OwnedArcRepr, OwnedRepr, RawViewRepr, ViewRepr}; /// Array representation trait. /// @@ -31,13 +29,14 @@ use crate::{ /// Traits in Rust can serve many different roles. This trait is public because /// it is used as a bound on public methods. #[allow(clippy::missing_safety_doc)] // not implementable downstream -pub unsafe trait RawData: Sized { +pub unsafe trait RawData: Sized +{ /// The array element type. type Elem; #[doc(hidden)] // This method is only used for debugging - #[deprecated(note="Unused", since="0.15.2")] + #[deprecated(note = "Unused", since = "0.15.2")] fn _data_slice(&self) -> Option<&[Self::Elem]>; #[doc(hidden)] @@ -52,7 +51,8 @@ pub unsafe trait RawData: Sized { /// /// ***Internal trait, see `RawData`.*** #[allow(clippy::missing_safety_doc)] // not implementable downstream -pub unsafe trait RawDataMut: RawData { +pub unsafe trait RawDataMut: RawData +{ /// If possible, ensures that the array has unique access to its data. 
/// /// The implementer must ensure that if the input is contiguous, then the @@ -80,17 +80,15 @@ pub unsafe trait RawDataMut: RawData { /// /// ***Internal trait, see `RawData`.*** #[allow(clippy::missing_safety_doc)] // not implementable downstream -pub unsafe trait RawDataClone: RawData { +pub unsafe trait RawDataClone: RawData +{ #[doc(hidden)] /// Unsafe because, `ptr` must point inside the current storage. unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull); #[doc(hidden)] - unsafe fn clone_from_with_ptr( - &mut self, - other: &Self, - ptr: NonNull, - ) -> NonNull { + unsafe fn clone_from_with_ptr(&mut self, other: &Self, ptr: NonNull) -> NonNull + { let (data, ptr) = other.clone_with_ptr(ptr); *self = data; ptr @@ -103,7 +101,8 @@ pub unsafe trait RawDataClone: RawData { /// /// ***Internal trait, see `RawData`.*** #[allow(clippy::missing_safety_doc)] // not implementable downstream -pub unsafe trait Data: RawData { +pub unsafe trait Data: RawData +{ /// Converts the array to a uniquely owned array, cloning elements if necessary. #[doc(hidden)] #[allow(clippy::wrong_self_convention)] @@ -115,11 +114,8 @@ pub unsafe trait Data: RawData { /// Converts the array into `Array` if this is possible without /// cloning the array elements. Otherwise, returns `self_` unchanged. #[doc(hidden)] - fn try_into_owned_nocopy( - self_: ArrayBase, - ) -> Result, ArrayBase> - where - D: Dimension; + fn try_into_owned_nocopy(self_: ArrayBase) -> Result, ArrayBase> + where D: Dimension; /// Return a shared ownership (copy on write) array based on the existing one, /// cloning elements if necessary. @@ -148,7 +144,8 @@ pub unsafe trait Data: RawData { // the data is unique. You are also guaranteeing that `try_is_unique` always // returns `Some(_)`. #[allow(clippy::missing_safety_doc)] // not implementable downstream -pub unsafe trait DataMut: Data + RawDataMut { +pub unsafe trait DataMut: Data + RawDataMut +{ /// Ensures that the array has unique access to its data. #[doc(hidden)] #[inline] @@ -163,47 +160,61 @@ pub unsafe trait DataMut: Data + RawDataMut { /// Returns whether the array has unique access to its data. #[doc(hidden)] #[inline] - #[allow(clippy::wrong_self_convention)] // mut needed for Arc types - fn is_unique(&mut self) -> bool { + #[allow(clippy::wrong_self_convention)] // mut needed for Arc types + fn is_unique(&mut self) -> bool + { self.try_is_unique().unwrap() } } -unsafe impl RawData for RawViewRepr<*const A> { +unsafe impl RawData for RawViewRepr<*const A> +{ type Elem = A; #[inline] - fn _data_slice(&self) -> Option<&[A]> { + fn _data_slice(&self) -> Option<&[A]> + { None } #[inline(always)] - fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool { true } + fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool + { + true + } private_impl! {} } -unsafe impl RawDataClone for RawViewRepr<*const A> { - unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) { +unsafe impl RawDataClone for RawViewRepr<*const A> +{ + unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) + { (*self, ptr) } } -unsafe impl RawData for RawViewRepr<*mut A> { +unsafe impl RawData for RawViewRepr<*mut A> +{ type Elem = A; #[inline] - fn _data_slice(&self) -> Option<&[A]> { + fn _data_slice(&self) -> Option<&[A]> + { None } #[inline(always)] - fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool { true } + fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool + { + true + } private_impl! 
{} } -unsafe impl RawDataMut for RawViewRepr<*mut A> { +unsafe impl RawDataMut for RawViewRepr<*mut A> +{ #[inline] fn try_ensure_unique(_: &mut ArrayBase) where @@ -213,24 +224,30 @@ unsafe impl RawDataMut for RawViewRepr<*mut A> { } #[inline] - fn try_is_unique(&mut self) -> Option { + fn try_is_unique(&mut self) -> Option + { None } } -unsafe impl RawDataClone for RawViewRepr<*mut A> { - unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) { +unsafe impl RawDataClone for RawViewRepr<*mut A> +{ + unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) + { (*self, ptr) } } -unsafe impl RawData for OwnedArcRepr { +unsafe impl RawData for OwnedArcRepr +{ type Elem = A; - fn _data_slice(&self) -> Option<&[A]> { + fn _data_slice(&self) -> Option<&[A]> + { Some(self.0.as_slice()) } - fn _is_pointer_inbounds(&self, self_ptr: *const Self::Elem) -> bool { + fn _is_pointer_inbounds(&self, self_ptr: *const Self::Elem) -> bool + { self.0._is_pointer_inbounds(self_ptr) } @@ -239,8 +256,7 @@ unsafe impl RawData for OwnedArcRepr { // NOTE: Copy on write unsafe impl RawDataMut for OwnedArcRepr -where - A: Clone, +where A: Clone { fn try_ensure_unique(self_: &mut ArrayBase) where @@ -269,12 +285,14 @@ where } } - fn try_is_unique(&mut self) -> Option { + fn try_is_unique(&mut self) -> Option + { Some(Arc::get_mut(&mut self.0).is_some()) } } -unsafe impl Data for OwnedArcRepr { +unsafe impl Data for OwnedArcRepr +{ fn into_owned(mut self_: ArrayBase) -> Array where A: Clone, @@ -283,23 +301,16 @@ unsafe impl Data for OwnedArcRepr { Self::ensure_unique(&mut self_); let data = Arc::try_unwrap(self_.data.0).ok().unwrap(); // safe because data is equivalent - unsafe { - ArrayBase::from_data_ptr(data, self_.ptr) - .with_strides_dim(self_.strides, self_.dim) - } + unsafe { ArrayBase::from_data_ptr(data, self_.ptr).with_strides_dim(self_.strides, self_.dim) } } - fn try_into_owned_nocopy( - self_: ArrayBase, - ) -> Result, ArrayBase> - where - D: Dimension, + fn try_into_owned_nocopy(self_: ArrayBase) -> Result, ArrayBase> + where D: Dimension { match Arc::try_unwrap(self_.data.0) { Ok(owned_data) => unsafe { // Safe because the data is equivalent. - Ok(ArrayBase::from_data_ptr(owned_data, self_.ptr) - .with_strides_dim(self_.strides, self_.dim)) + Ok(ArrayBase::from_data_ptr(owned_data, self_.ptr).with_strides_dim(self_.strides, self_.dim)) }, Err(arc_data) => unsafe { // Safe because the data is equivalent; we're just @@ -323,21 +334,26 @@ unsafe impl Data for OwnedArcRepr { unsafe impl DataMut for OwnedArcRepr where A: Clone {} -unsafe impl RawDataClone for OwnedArcRepr { - unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) { +unsafe impl RawDataClone for OwnedArcRepr +{ + unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) + { // pointer is preserved (self.clone(), ptr) } } -unsafe impl RawData for OwnedRepr { +unsafe impl RawData for OwnedRepr +{ type Elem = A; - fn _data_slice(&self) -> Option<&[A]> { + fn _data_slice(&self) -> Option<&[A]> + { Some(self.as_slice()) } - fn _is_pointer_inbounds(&self, self_ptr: *const Self::Elem) -> bool { + fn _is_pointer_inbounds(&self, self_ptr: *const Self::Elem) -> bool + { let slc = self.as_slice(); let ptr = slc.as_ptr() as *mut A; let end = unsafe { ptr.add(slc.len()) }; @@ -347,7 +363,8 @@ unsafe impl RawData for OwnedRepr { private_impl! 
{} } -unsafe impl RawDataMut for OwnedRepr { +unsafe impl RawDataMut for OwnedRepr +{ #[inline] fn try_ensure_unique(_: &mut ArrayBase) where @@ -357,12 +374,14 @@ unsafe impl RawDataMut for OwnedRepr { } #[inline] - fn try_is_unique(&mut self) -> Option { + fn try_is_unique(&mut self) -> Option + { Some(true) } } -unsafe impl Data for OwnedRepr { +unsafe impl Data for OwnedRepr +{ #[inline] fn into_owned(self_: ArrayBase) -> Array where @@ -373,11 +392,8 @@ unsafe impl Data for OwnedRepr { } #[inline] - fn try_into_owned_nocopy( - self_: ArrayBase, - ) -> Result, ArrayBase> - where - D: Dimension, + fn try_into_owned_nocopy(self_: ArrayBase) -> Result, ArrayBase> + where D: Dimension { Ok(self_) } @@ -386,25 +402,21 @@ unsafe impl Data for OwnedRepr { unsafe impl DataMut for OwnedRepr {} unsafe impl RawDataClone for OwnedRepr -where - A: Clone, +where A: Clone { - unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) { + unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) + { let mut u = self.clone(); let mut new_ptr = u.as_nonnull_mut(); if size_of::() != 0 { - let our_off = - (ptr.as_ptr() as isize - self.as_ptr() as isize) / mem::size_of::() as isize; + let our_off = (ptr.as_ptr() as isize - self.as_ptr() as isize) / mem::size_of::() as isize; new_ptr = new_ptr.offset(our_off); } (u, new_ptr) } - unsafe fn clone_from_with_ptr( - &mut self, - other: &Self, - ptr: NonNull, - ) -> NonNull { + unsafe fn clone_from_with_ptr(&mut self, other: &Self, ptr: NonNull) -> NonNull + { let our_off = if size_of::() != 0 { (ptr.as_ptr() as isize - other.as_ptr() as isize) / mem::size_of::() as isize } else { @@ -415,21 +427,27 @@ where } } -unsafe impl<'a, A> RawData for ViewRepr<&'a A> { +unsafe impl<'a, A> RawData for ViewRepr<&'a A> +{ type Elem = A; #[inline] - fn _data_slice(&self) -> Option<&[A]> { + fn _data_slice(&self) -> Option<&[A]> + { None } #[inline(always)] - fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool { true } + fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool + { + true + } private_impl! {} } -unsafe impl<'a, A> Data for ViewRepr<&'a A> { +unsafe impl<'a, A> Data for ViewRepr<&'a A> +{ fn into_owned(self_: ArrayBase) -> Array where Self::Elem: Clone, @@ -438,37 +456,42 @@ unsafe impl<'a, A> Data for ViewRepr<&'a A> { self_.to_owned() } - fn try_into_owned_nocopy( - self_: ArrayBase, - ) -> Result, ArrayBase> - where - D: Dimension, + fn try_into_owned_nocopy(self_: ArrayBase) -> Result, ArrayBase> + where D: Dimension { Err(self_) } } -unsafe impl<'a, A> RawDataClone for ViewRepr<&'a A> { - unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) { +unsafe impl<'a, A> RawDataClone for ViewRepr<&'a A> +{ + unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) + { (*self, ptr) } } -unsafe impl<'a, A> RawData for ViewRepr<&'a mut A> { +unsafe impl<'a, A> RawData for ViewRepr<&'a mut A> +{ type Elem = A; #[inline] - fn _data_slice(&self) -> Option<&[A]> { + fn _data_slice(&self) -> Option<&[A]> + { None } #[inline(always)] - fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool { true } + fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool + { + true + } private_impl! 
{} } -unsafe impl<'a, A> RawDataMut for ViewRepr<&'a mut A> { +unsafe impl<'a, A> RawDataMut for ViewRepr<&'a mut A> +{ #[inline] fn try_ensure_unique(_: &mut ArrayBase) where @@ -478,12 +501,14 @@ unsafe impl<'a, A> RawDataMut for ViewRepr<&'a mut A> { } #[inline] - fn try_is_unique(&mut self) -> Option { + fn try_is_unique(&mut self) -> Option + { Some(true) } } -unsafe impl<'a, A> Data for ViewRepr<&'a mut A> { +unsafe impl<'a, A> Data for ViewRepr<&'a mut A> +{ fn into_owned(self_: ArrayBase) -> Array where Self::Elem: Clone, @@ -492,11 +517,8 @@ unsafe impl<'a, A> Data for ViewRepr<&'a mut A> { self_.to_owned() } - fn try_into_owned_nocopy( - self_: ArrayBase, - ) -> Result, ArrayBase> - where - D: Dimension, + fn try_into_owned_nocopy(self_: ArrayBase) -> Result, ArrayBase> + where D: Dimension { Err(self_) } @@ -517,10 +539,10 @@ unsafe impl<'a, A> DataMut for ViewRepr<&'a mut A> {} // unsharing storage before mutating it. The initially allocated storage must be mutable so // that it can be mutated directly - through .raw_view_mut_unchecked() - for initialization. #[allow(clippy::missing_safety_doc)] // not implementable downstream -pub unsafe trait DataOwned: Data { +pub unsafe trait DataOwned: Data +{ /// Corresponding owned data with MaybeUninit elements - type MaybeUninit: DataOwned> - + RawDataSubst; + type MaybeUninit: DataOwned> + RawDataSubst; #[doc(hidden)] fn new(elements: Vec) -> Self; @@ -541,34 +563,42 @@ pub unsafe trait DataShared: Clone + Data + RawDataClone {} unsafe impl DataShared for OwnedArcRepr {} unsafe impl<'a, A> DataShared for ViewRepr<&'a A> {} -unsafe impl DataOwned for OwnedRepr { +unsafe impl DataOwned for OwnedRepr +{ type MaybeUninit = OwnedRepr>; - fn new(elements: Vec) -> Self { + fn new(elements: Vec) -> Self + { OwnedRepr::from(elements) } - fn into_shared(self) -> OwnedArcRepr { + fn into_shared(self) -> OwnedArcRepr + { OwnedArcRepr(Arc::new(self)) } } -unsafe impl DataOwned for OwnedArcRepr { +unsafe impl DataOwned for OwnedArcRepr +{ type MaybeUninit = OwnedArcRepr>; - fn new(elements: Vec) -> Self { + fn new(elements: Vec) -> Self + { OwnedArcRepr(Arc::new(OwnedRepr::from(elements))) } - fn into_shared(self) -> OwnedArcRepr { + fn into_shared(self) -> OwnedArcRepr + { self } } -unsafe impl<'a, A> RawData for CowRepr<'a, A> { +unsafe impl<'a, A> RawData for CowRepr<'a, A> +{ type Elem = A; - fn _data_slice(&self) -> Option<&[A]> { + fn _data_slice(&self) -> Option<&[A]> + { #[allow(deprecated)] match self { CowRepr::View(view) => view._data_slice(), @@ -577,7 +607,8 @@ unsafe impl<'a, A> RawData for CowRepr<'a, A> { } #[inline] - fn _is_pointer_inbounds(&self, ptr: *const Self::Elem) -> bool { + fn _is_pointer_inbounds(&self, ptr: *const Self::Elem) -> bool + { match self { CowRepr::View(view) => view._is_pointer_inbounds(ptr), CowRepr::Owned(data) => data._is_pointer_inbounds(ptr), @@ -588,8 +619,7 @@ unsafe impl<'a, A> RawData for CowRepr<'a, A> { } unsafe impl<'a, A> RawDataMut for CowRepr<'a, A> -where - A: Clone, +where A: Clone { #[inline] fn try_ensure_unique(array: &mut ArrayBase) @@ -610,16 +640,17 @@ where } #[inline] - fn try_is_unique(&mut self) -> Option { + fn try_is_unique(&mut self) -> Option + { Some(self.is_owned()) } } unsafe impl<'a, A> RawDataClone for CowRepr<'a, A> -where - A: Clone, +where A: Clone { - unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) { + unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) + { match self { CowRepr::View(view) => { let (new_view, ptr) = 
view.clone_with_ptr(ptr); @@ -632,11 +663,8 @@ where } } - unsafe fn clone_from_with_ptr( - &mut self, - other: &Self, - ptr: NonNull, - ) -> NonNull { + unsafe fn clone_from_with_ptr(&mut self, other: &Self, ptr: NonNull) -> NonNull + { match (&mut *self, other) { (CowRepr::View(self_), CowRepr::View(other)) => self_.clone_from_with_ptr(other, ptr), (CowRepr::Owned(self_), CowRepr::Owned(other)) => self_.clone_from_with_ptr(other, ptr), @@ -654,7 +682,8 @@ where } } -unsafe impl<'a, A> Data for CowRepr<'a, A> { +unsafe impl<'a, A> Data for CowRepr<'a, A> +{ #[inline] fn into_owned(self_: ArrayBase, D>) -> Array where @@ -665,24 +694,19 @@ unsafe impl<'a, A> Data for CowRepr<'a, A> { CowRepr::View(_) => self_.to_owned(), CowRepr::Owned(data) => unsafe { // safe because the data is equivalent so ptr, dims remain valid - ArrayBase::from_data_ptr(data, self_.ptr) - .with_strides_dim(self_.strides, self_.dim) + ArrayBase::from_data_ptr(data, self_.ptr).with_strides_dim(self_.strides, self_.dim) }, } } - fn try_into_owned_nocopy( - self_: ArrayBase, - ) -> Result, ArrayBase> - where - D: Dimension, + fn try_into_owned_nocopy(self_: ArrayBase) -> Result, ArrayBase> + where D: Dimension { match self_.data { CowRepr::View(_) => Err(self_), CowRepr::Owned(data) => unsafe { // safe because the data is equivalent so ptr, dims remain valid - Ok(ArrayBase::from_data_ptr(data, self_.ptr) - .with_strides_dim(self_.strides, self_.dim)) + Ok(ArrayBase::from_data_ptr(data, self_.ptr).with_strides_dim(self_.strides, self_.dim)) }, } } @@ -696,7 +720,8 @@ unsafe impl<'a, A> DataMut for CowRepr<'a, A> where A: Clone {} /// keeping the same kind of storage. /// /// For example, `RawDataSubst` can map the type `OwnedRepr` to `OwnedRepr`. -pub trait RawDataSubst: RawData { +pub trait RawDataSubst: RawData +{ /// The resulting array storage of the same kind but substituted element type type Output: RawData; @@ -709,58 +734,72 @@ pub trait RawDataSubst: RawData { unsafe fn data_subst(self) -> Self::Output; } -impl RawDataSubst for OwnedRepr { +impl RawDataSubst for OwnedRepr +{ type Output = OwnedRepr; - unsafe fn data_subst(self) -> Self::Output { + unsafe fn data_subst(self) -> Self::Output + { self.data_subst() } } -impl RawDataSubst for OwnedArcRepr { +impl RawDataSubst for OwnedArcRepr +{ type Output = OwnedArcRepr; - unsafe fn data_subst(self) -> Self::Output { + unsafe fn data_subst(self) -> Self::Output + { OwnedArcRepr(Arc::from_raw(Arc::into_raw(self.0) as *const OwnedRepr)) } } -impl RawDataSubst for RawViewRepr<*const A> { +impl RawDataSubst for RawViewRepr<*const A> +{ type Output = RawViewRepr<*const B>; - unsafe fn data_subst(self) -> Self::Output { + unsafe fn data_subst(self) -> Self::Output + { RawViewRepr::new() } } -impl RawDataSubst for RawViewRepr<*mut A> { +impl RawDataSubst for RawViewRepr<*mut A> +{ type Output = RawViewRepr<*mut B>; - unsafe fn data_subst(self) -> Self::Output { + unsafe fn data_subst(self) -> Self::Output + { RawViewRepr::new() } } -impl<'a, A: 'a, B: 'a> RawDataSubst for ViewRepr<&'a A> { +impl<'a, A: 'a, B: 'a> RawDataSubst for ViewRepr<&'a A> +{ type Output = ViewRepr<&'a B>; - unsafe fn data_subst(self) -> Self::Output { + unsafe fn data_subst(self) -> Self::Output + { ViewRepr::new() } } -impl<'a, A: 'a, B: 'a> RawDataSubst for ViewRepr<&'a mut A> { +impl<'a, A: 'a, B: 'a> RawDataSubst for ViewRepr<&'a mut A> +{ type Output = ViewRepr<&'a mut B>; - unsafe fn data_subst(self) -> Self::Output { + unsafe fn data_subst(self) -> Self::Output + { ViewRepr::new() } } 
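The storage traits reformatted above (RawDataMut::try_ensure_unique and friends for OwnedArcRepr) are what make shared arrays cheap to clone and safe to mutate. A minimal usage sketch, assuming ndarray's public ArcArray API; this is an editorial illustration, not part of the patch:

use ndarray::Array2;

fn main() {
    // Cloning a shared array only bumps the Arc refcount; the first write
    // through a clone unshares (deep-copies) the storage.
    let a = Array2::<f64>::zeros((2, 2)).into_shared();
    let mut b = a.clone();
    b[[0, 0]] = 1.0; // copy-on-write happens here
    assert_eq!(a[[0, 0]], 0.0);
    assert_eq!(b[[0, 0]], 1.0);
}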
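DataOwned::MaybeUninit and RawDataSubst together back the uninitialized-array workflow. A short sketch of how that surfaces in the public API, assuming the uninit/assume_init methods available since ndarray 0.15:

use ndarray::Array2;
use std::mem::MaybeUninit;

fn main() {
    // Storage of MaybeUninit<f64> is created first (the MaybeUninit associated
    // type); assume_init then substitutes f64 back in via RawDataSubst.
    let mut a = Array2::<f64>::uninit((2, 3));
    a.fill(MaybeUninit::new(1.0));
    // Safety: every element was initialized just above.
    let a = unsafe { a.assume_init() };
    assert_eq!(a.sum(), 6.0);
}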
-impl<'a, A: 'a, B: 'a> RawDataSubst for CowRepr<'a, A> { +impl<'a, A: 'a, B: 'a> RawDataSubst for CowRepr<'a, A> +{ type Output = CowRepr<'a, B>; - unsafe fn data_subst(self) -> Self::Output { + unsafe fn data_subst(self) -> Self::Output + { match self { CowRepr::View(view) => CowRepr::View(view.data_subst()), CowRepr::Owned(owned) => CowRepr::Owned(owned.data_subst()), diff --git a/src/dimension/axes.rs b/src/dimension/axes.rs index 5660675b5..925b257a7 100644 --- a/src/dimension/axes.rs +++ b/src/dimension/axes.rs @@ -2,8 +2,7 @@ use crate::{Axis, Dimension, Ix, Ixs}; /// Create a new Axes iterator pub(crate) fn axes_of<'a, D>(d: &'a D, strides: &'a D) -> Axes<'a, D> -where - D: Dimension, +where D: Dimension { Axes { dim: d, @@ -38,7 +37,8 @@ where /// assert_eq!(largest_axis.len, 5); /// ``` #[derive(Debug)] -pub struct Axes<'a, D> { +pub struct Axes<'a, D> +{ dim: &'a D, strides: &'a D, start: usize, @@ -47,7 +47,8 @@ pub struct Axes<'a, D> { /// Description of the axis, its length and its stride. #[derive(Debug)] -pub struct AxisDescription { +pub struct AxisDescription +{ /// Axis identifier (index) pub axis: Axis, /// Length in count of elements of the current axis @@ -61,23 +62,27 @@ copy_and_clone!(AxisDescription); // AxisDescription can't really be empty // https://github.com/rust-ndarray/ndarray/pull/642#discussion_r296051702 #[allow(clippy::len_without_is_empty)] -impl AxisDescription { +impl AxisDescription +{ /// Return axis #[deprecated(note = "Use .axis field instead", since = "0.15.0")] #[inline(always)] - pub fn axis(self) -> Axis { + pub fn axis(self) -> Axis + { self.axis } /// Return length #[deprecated(note = "Use .len field instead", since = "0.15.0")] #[inline(always)] - pub fn len(self) -> Ix { + pub fn len(self) -> Ix + { self.len } /// Return stride #[deprecated(note = "Use .stride field instead", since = "0.15.0")] #[inline(always)] - pub fn stride(self) -> Ixs { + pub fn stride(self) -> Ixs + { self.stride } } @@ -85,13 +90,13 @@ impl AxisDescription { copy_and_clone!(['a, D] Axes<'a, D>); impl<'a, D> Iterator for Axes<'a, D> -where - D: Dimension, +where D: Dimension { /// Description of the axis, its length and its stride. 
type Item = AxisDescription; - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { if self.start < self.end { let i = self.start.post_inc(); Some(AxisDescription { @@ -105,8 +110,7 @@ where } fn fold(self, init: B, f: F) -> B - where - F: FnMut(B, AxisDescription) -> B, + where F: FnMut(B, AxisDescription) -> B { (self.start..self.end) .map(move |i| AxisDescription { @@ -117,17 +121,18 @@ where .fold(init, f) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { let len = self.end - self.start; (len, Some(len)) } } impl<'a, D> DoubleEndedIterator for Axes<'a, D> -where - D: Dimension, +where D: Dimension { - fn next_back(&mut self) -> Option { + fn next_back(&mut self) -> Option + { if self.start < self.end { let i = self.end.pre_dec(); Some(AxisDescription { @@ -141,20 +146,24 @@ where } } -trait IncOps: Copy { +trait IncOps: Copy +{ fn post_inc(&mut self) -> Self; fn pre_dec(&mut self) -> Self; } -impl IncOps for usize { +impl IncOps for usize +{ #[inline(always)] - fn post_inc(&mut self) -> Self { + fn post_inc(&mut self) -> Self + { let x = *self; *self += 1; x } #[inline(always)] - fn pre_dec(&mut self) -> Self { + fn pre_dec(&mut self) -> Self + { *self -= 1; *self } diff --git a/src/dimension/axis.rs b/src/dimension/axis.rs index f4625d2da..8c896f6b7 100644 --- a/src/dimension/axis.rs +++ b/src/dimension/axis.rs @@ -13,7 +13,7 @@ /// /// All array axis arguments use this type to make the code easier to write /// correctly and easier to understand. -/// +/// /// For example: in a method like `index_axis(axis, index)` the code becomes /// self-explanatory when it's called like `.index_axis(Axis(1), i)`; it's /// evident which integer is the axis number and which is the index. @@ -26,10 +26,12 @@ #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Axis(pub usize); -impl Axis { +impl Axis +{ /// Return the index of the axis. #[inline(always)] - pub fn index(self) -> usize { + pub fn index(self) -> usize + { self.0 } } diff --git a/src/dimension/broadcast.rs b/src/dimension/broadcast.rs index dc1513f04..d277cfea2 100644 --- a/src/dimension/broadcast.rs +++ b/src/dimension/broadcast.rs @@ -34,7 +34,8 @@ where Ok(out) } -pub trait DimMax { +pub trait DimMax +{ /// The resulting dimension type after broadcasting. type Output: Dimension; } @@ -42,7 +43,8 @@ pub trait DimMax { /// Dimensions of the same type remain unchanged when co_broadcast. /// So you can directly use D as the resulting type. 
/// (Instead of >::BroadcastOutput) -impl DimMax for D { +impl DimMax for D +{ type Output = D; } @@ -87,20 +89,18 @@ impl_broadcast_distinct_fixed!(Ix4, IxDyn); impl_broadcast_distinct_fixed!(Ix5, IxDyn); impl_broadcast_distinct_fixed!(Ix6, IxDyn); - #[cfg(test)] #[cfg(feature = "std")] -mod tests { +mod tests +{ use super::co_broadcast; - use crate::{Dimension, Dim, DimMax, ShapeError, Ix0, IxDynImpl, ErrorKind}; + use crate::{Dim, DimMax, Dimension, ErrorKind, Ix0, IxDynImpl, ShapeError}; #[test] - fn test_broadcast_shape() { - fn test_co( - d1: &D1, - d2: &D2, - r: Result<>::Output, ShapeError>, - ) where + fn test_broadcast_shape() + { + fn test_co(d1: &D1, d2: &D2, r: Result<>::Output, ShapeError>) + where D1: Dimension + DimMax, D2: Dimension, { @@ -108,36 +108,16 @@ mod tests { assert_eq!(d, r); } test_co(&Dim([2, 3]), &Dim([4, 1, 3]), Ok(Dim([4, 2, 3]))); - test_co( - &Dim([1, 2, 2]), - &Dim([1, 3, 4]), - Err(ShapeError::from_kind(ErrorKind::IncompatibleShape)), - ); + test_co(&Dim([1, 2, 2]), &Dim([1, 3, 4]), Err(ShapeError::from_kind(ErrorKind::IncompatibleShape))); test_co(&Dim([3, 4, 5]), &Ix0(), Ok(Dim([3, 4, 5]))); let v = vec![1, 2, 3, 4, 5, 6, 7]; - test_co( - &Dim(vec![1, 1, 3, 1, 5, 1, 7]), - &Dim([2, 1, 4, 1, 6, 1]), - Ok(Dim(IxDynImpl::from(v.as_slice()))), - ); + test_co(&Dim(vec![1, 1, 3, 1, 5, 1, 7]), &Dim([2, 1, 4, 1, 6, 1]), Ok(Dim(IxDynImpl::from(v.as_slice())))); let d = Dim([1, 2, 1, 3]); test_co(&d, &d, Ok(d)); - test_co( - &Dim([2, 1, 2]).into_dyn(), - &Dim(0), - Err(ShapeError::from_kind(ErrorKind::IncompatibleShape)), - ); - test_co( - &Dim([2, 1, 1]), - &Dim([0, 0, 1, 3, 4]), - Ok(Dim([0, 0, 2, 3, 4])), - ); + test_co(&Dim([2, 1, 2]).into_dyn(), &Dim(0), Err(ShapeError::from_kind(ErrorKind::IncompatibleShape))); + test_co(&Dim([2, 1, 1]), &Dim([0, 0, 1, 3, 4]), Ok(Dim([0, 0, 2, 3, 4]))); test_co(&Dim([0]), &Dim([0, 0, 0]), Ok(Dim([0, 0, 0]))); test_co(&Dim(1), &Dim([1, 0, 0]), Ok(Dim([1, 0, 0]))); - test_co( - &Dim([1, 3, 0, 1, 1]), - &Dim([1, 2, 3, 1]), - Err(ShapeError::from_kind(ErrorKind::IncompatibleShape)), - ); + test_co(&Dim([1, 3, 0, 1, 1]), &Dim([1, 2, 3, 1]), Err(ShapeError::from_kind(ErrorKind::IncompatibleShape))); } } diff --git a/src/dimension/conversion.rs b/src/dimension/conversion.rs index f6c408a75..0cf2e1296 100644 --- a/src/dimension/conversion.rs +++ b/src/dimension/conversion.rs @@ -8,10 +8,10 @@ //! Tuple to array conversion, IntoDimension, and related things -use num_traits::Zero; -use std::ops::{Index, IndexMut}; #[cfg(not(feature = "std"))] use alloc::vec::Vec; +use num_traits::Zero; +use std::ops::{Index, IndexMut}; use crate::{Dim, Dimension, Ix, Ix1, IxDyn, IxDynImpl}; @@ -40,47 +40,55 @@ macro_rules! index_item { } /// Argument conversion a dimension. 
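The DimMax trait and the co_broadcast tests above determine the output dimension of mixed-shape arithmetic. A small sketch of the user-facing effect, assuming ndarray's co-broadcasting operators (available since 0.15); the shapes mirror the first test_co case:

use ndarray::{Array2, Array3};

fn main() {
    // (2, 3) broadcast against (4, 1, 3) gives (4, 2, 3); the dimension type
    // of the result is <Ix2 as DimMax<Ix3>>::Output, i.e. Ix3.
    let a = Array2::<f64>::ones((2, 3));
    let b = Array3::<f64>::ones((4, 1, 3));
    let c = &a + &b;
    assert_eq!(c.shape(), &[4, 2, 3]);
    assert_eq!(c[[3, 1, 2]], 2.0);
}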
-pub trait IntoDimension { +pub trait IntoDimension +{ type Dim: Dimension; fn into_dimension(self) -> Self::Dim; } -impl IntoDimension for Ix { +impl IntoDimension for Ix +{ type Dim = Ix1; #[inline(always)] - fn into_dimension(self) -> Ix1 { + fn into_dimension(self) -> Ix1 + { Ix1(self) } } impl IntoDimension for D -where - D: Dimension, +where D: Dimension { type Dim = D; #[inline(always)] - fn into_dimension(self) -> Self { + fn into_dimension(self) -> Self + { self } } -impl IntoDimension for IxDynImpl { +impl IntoDimension for IxDynImpl +{ type Dim = IxDyn; #[inline(always)] - fn into_dimension(self) -> Self::Dim { + fn into_dimension(self) -> Self::Dim + { Dim::new(self) } } -impl IntoDimension for Vec { +impl IntoDimension for Vec +{ type Dim = IxDyn; #[inline(always)] - fn into_dimension(self) -> Self::Dim { + fn into_dimension(self) -> Self::Dim + { Dim::new(IxDynImpl::from(self)) } } -pub trait Convert { +pub trait Convert +{ type To; fn convert(self) -> Self::To; } @@ -94,25 +102,25 @@ macro_rules! sub { macro_rules! tuple_type { ([$T:ident] $($index:tt)*) => ( ( $(sub!($index $T), )* ) - ) + ); } macro_rules! tuple_expr { ([$self_:expr] $($index:tt)*) => ( ( $($self_[$index], )* ) - ) + ); } macro_rules! array_expr { ([$self_:expr] $($index:tt)*) => ( [$($self_ . $index, )*] - ) + ); } macro_rules! array_zero { ([] $($index:tt)*) => ( [$(sub!($index 0), )*] - ) + ); } macro_rules! tuple_to_array { @@ -168,7 +176,7 @@ macro_rules! tuple_to_array { } )* - } + }; } index_item!(tuple_to_array [] 7); diff --git a/src/dimension/dim.rs b/src/dimension/dim.rs index 3f8ff23e3..96e433bb3 100644 --- a/src/dimension/dim.rs +++ b/src/dimension/dim.rs @@ -6,12 +6,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. - -use std::fmt; use super::Dimension; use super::IntoDimension; use crate::itertools::zip; use crate::Ix; +use std::fmt; /// Dimension description. /// @@ -36,21 +35,26 @@ use crate::Ix; /// assert_eq!(array.raw_dim(), Dim([3, 2])); /// ``` #[derive(Copy, Clone, PartialEq, Eq, Hash, Default)] -pub struct Dim { +pub struct Dim +{ index: I, } -impl Dim { +impl Dim +{ /// Private constructor and accessors for Dim - pub(crate) const fn new(index: I) -> Dim { + pub(crate) const fn new(index: I) -> Dim + { Dim { index } } #[inline(always)] - pub(crate) fn ix(&self) -> &I { + pub(crate) fn ix(&self) -> &I + { &self.index } #[inline(always)] - pub(crate) fn ixm(&mut self) -> &mut I { + pub(crate) fn ixm(&mut self) -> &mut I + { &mut self.index } } @@ -58,26 +62,25 @@ impl Dim { /// Create a new dimension value. #[allow(non_snake_case)] pub fn Dim(index: T) -> T::Dim -where - T: IntoDimension, +where T: IntoDimension { index.into_dimension() } impl PartialEq for Dim -where - I: PartialEq, +where I: PartialEq { - fn eq(&self, rhs: &I) -> bool { + fn eq(&self, rhs: &I) -> bool + { self.index == *rhs } } impl fmt::Debug for Dim -where - I: fmt::Debug, +where I: fmt::Debug { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { write!(f, "{:?}", self.index) } } diff --git a/src/dimension/dimension_trait.rs b/src/dimension/dimension_trait.rs index c594859e9..3544a7f3c 100644 --- a/src/dimension/dimension_trait.rs +++ b/src/dimension/dimension_trait.rs @@ -6,21 +6,21 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
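The conversions above (IntoDimension and the Dim wrapper) are what let shapes be written as plain tuples or arrays. A short usage sketch based on the same doc example that appears in dim.rs:

use ndarray::{Array2, Dim};

fn main() {
    // The tuple (3, 2) goes through IntoDimension and ends up as Dim([3, 2]).
    let array = Array2::<f64>::zeros((3, 2));
    assert_eq!(array.raw_dim(), Dim([3, 2]));
    assert_eq!(array.shape(), &[3, 2]);
}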
+#[cfg(not(feature = "std"))] +use alloc::vec::Vec; use std::fmt::Debug; use std::ops::{Add, AddAssign, Mul, MulAssign, Sub, SubAssign}; use std::ops::{Index, IndexMut}; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; use super::axes_of; use super::conversion::Convert; use super::ops::DimAdd; use super::{stride_offset, stride_offset_checked}; use crate::itertools::{enumerate, zip}; -use crate::{Axis, DimMax}; use crate::IntoDimension; use crate::RemoveAxis; use crate::{ArrayView1, ArrayViewMut1}; +use crate::{Axis, DimMax}; use crate::{Dim, Ix, Ix0, Ix1, Ix2, Ix3, Ix4, Ix5, Ix6, IxDyn, IxDynImpl, Ixs}; /// Array shape and index trait. @@ -48,11 +48,11 @@ pub trait Dimension: + MulAssign + for<'x> MulAssign<&'x Self> + MulAssign - + DimMax - + DimMax - + DimMax - + DimMax<::Smaller, Output=Self> - + DimMax<::Larger, Output=::Larger> + + DimMax + + DimMax + + DimMax + + DimMax<::Smaller, Output = Self> + + DimMax<::Larger, Output = ::Larger> + DimAdd + DimAdd<::Smaller> + DimAdd<::Larger> @@ -83,12 +83,14 @@ pub trait Dimension: fn into_pattern(self) -> Self::Pattern; /// Compute the size of the dimension (number of elements) - fn size(&self) -> usize { + fn size(&self) -> usize + { self.slice().iter().product() } /// Compute the size while checking for overflow. - fn size_checked(&self) -> Option { + fn size_checked(&self) -> Option + { self.slice() .iter() .try_fold(1_usize, |s, &a| s.checked_mul(a)) @@ -101,17 +103,20 @@ pub trait Dimension: fn slice_mut(&mut self) -> &mut [Ix]; /// Borrow as a read-only array view. - fn as_array_view(&self) -> ArrayView1<'_, Ix> { + fn as_array_view(&self) -> ArrayView1<'_, Ix> + { ArrayView1::from(self.slice()) } /// Borrow as a read-write array view. - fn as_array_view_mut(&mut self) -> ArrayViewMut1<'_, Ix> { + fn as_array_view_mut(&mut self) -> ArrayViewMut1<'_, Ix> + { ArrayViewMut1::from(self.slice_mut()) } #[doc(hidden)] - fn equal(&self, rhs: &Self) -> bool { + fn equal(&self, rhs: &Self) -> bool + { self.slice() == rhs.slice() } @@ -120,7 +125,8 @@ pub trait Dimension: /// If the array is non-empty, the strides result in contiguous layout; if /// the array is empty, the strides are all zeros. #[doc(hidden)] - fn default_strides(&self) -> Self { + fn default_strides(&self) -> Self + { // Compute default array strides // Shape (a, b, c) => Give strides (b * c, c, 1) let mut strides = Self::zeros(self.ndim()); @@ -145,7 +151,8 @@ pub trait Dimension: /// If the array is non-empty, the strides result in contiguous layout; if /// the array is empty, the strides are all zeros. #[doc(hidden)] - fn fortran_strides(&self) -> Self { + fn fortran_strides(&self) -> Self + { // Compute fortran array strides // Shape (a, b, c) => Give strides (1, a, a * b) let mut strides = Self::zeros(self.ndim()); @@ -175,7 +182,8 @@ pub trait Dimension: #[doc(hidden)] #[inline] - fn first_index(&self) -> Option { + fn first_index(&self) -> Option + { for ax in self.slice().iter() { if *ax == 0 { return None; @@ -189,7 +197,8 @@ pub trait Dimension: /// or None if there are no more. // FIXME: use &Self for index or even &mut? 
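The default_strides and fortran_strides docs above give the formulas for shape (a, b, c): C order yields (b*c, c, 1) and F order yields (1, a, a*b). A standalone sketch of that computation, simplified in that it skips the all-zero strides the real code uses for empty arrays:

fn c_order_strides(shape: &[usize]) -> Vec<usize> {
    // Walk the axes from last to first, accumulating the running product.
    let mut strides = vec![0; shape.len()];
    let mut s = 1;
    for (st, &len) in strides.iter_mut().zip(shape).rev() {
        *st = s;
        s *= len;
    }
    strides
}

fn f_order_strides(shape: &[usize]) -> Vec<usize> {
    // Same idea, but the first axis varies fastest.
    let mut strides = vec![0; shape.len()];
    let mut s = 1;
    for (st, &len) in strides.iter_mut().zip(shape) {
        *st = s;
        s *= len;
    }
    strides
}

fn main() {
    assert_eq!(c_order_strides(&[2, 3, 4]), [12, 4, 1]);
    assert_eq!(f_order_strides(&[2, 3, 4]), [1, 2, 6]);
}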
#[inline] - fn next_for(&self, index: Self) -> Option { + fn next_for(&self, index: Self) -> Option + { let mut index = index; let mut done = false; for (&dim, ix) in zip(self.slice(), index.slice_mut()).rev() { @@ -214,7 +223,8 @@ pub trait Dimension: /// /// Next in f-order #[inline] - fn next_for_f(&self, index: &mut Self) -> bool { + fn next_for_f(&self, index: &mut Self) -> bool + { let mut end_iteration = true; for (&dim, ix) in zip(self.slice(), index.slice_mut()) { *ix += 1; @@ -237,8 +247,7 @@ pub trait Dimension: /// Note: Returns `false` if any of the ndims don't match. #[doc(hidden)] fn strides_equivalent(&self, strides1: &Self, strides2: &D) -> bool - where - D: Dimension, + where D: Dimension { let shape_ndim = self.ndim(); shape_ndim == strides1.ndim() @@ -249,7 +258,8 @@ pub trait Dimension: #[doc(hidden)] /// Return stride offset for index. - fn stride_offset(index: &Self, strides: &Self) -> isize { + fn stride_offset(index: &Self, strides: &Self) -> isize + { let mut offset = 0; for (&i, &s) in izip!(index.slice(), strides.slice()) { offset += stride_offset(i, s); @@ -259,12 +269,14 @@ pub trait Dimension: #[doc(hidden)] /// Return stride offset for this dimension and index. - fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option { + fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option + { stride_offset_checked(self.slice(), strides.slice(), index.slice()) } #[doc(hidden)] - fn last_elem(&self) -> usize { + fn last_elem(&self) -> usize + { if self.ndim() == 0 { 0 } else { @@ -273,13 +285,15 @@ pub trait Dimension: } #[doc(hidden)] - fn set_last_elem(&mut self, i: usize) { + fn set_last_elem(&mut self, i: usize) + { let nd = self.ndim(); self.slice_mut()[nd - 1] = i; } #[doc(hidden)] - fn is_contiguous(dim: &Self, strides: &Self) -> bool { + fn is_contiguous(dim: &Self, strides: &Self) -> bool + { let defaults = dim.default_strides(); if strides.equal(&defaults) { return true; @@ -311,7 +325,8 @@ pub trait Dimension: /// /// Assumes that no stride value appears twice. #[doc(hidden)] - fn _fastest_varying_stride_order(&self) -> Self { + fn _fastest_varying_stride_order(&self) -> Self + { let mut indices = self.clone(); for (i, elt) in enumerate(indices.slice_mut()) { *elt = i; @@ -326,7 +341,8 @@ pub trait Dimension: /// Compute the minimum stride axis (absolute value), under the constraint /// that the length of the axis is > 1; #[doc(hidden)] - fn min_stride_axis(&self, strides: &Self) -> Axis { + fn min_stride_axis(&self, strides: &Self) -> Axis + { let n = match self.ndim() { 0 => panic!("min_stride_axis: Array must have ndim > 0"), 1 => return Axis(0), @@ -341,7 +357,8 @@ pub trait Dimension: /// Compute the maximum stride axis (absolute value), under the constraint /// that the length of the axis is > 1; #[doc(hidden)] - fn max_stride_axis(&self, strides: &Self) -> Axis { + fn max_stride_axis(&self, strides: &Self) -> Axis + { match self.ndim() { 0 => panic!("max_stride_axis: Array must have ndim > 0"), 1 => return Axis(0), @@ -354,12 +371,14 @@ pub trait Dimension: } /// Convert the dimensional into a dynamic dimensional (IxDyn). - fn into_dyn(self) -> IxDyn { + fn into_dyn(self) -> IxDyn + { IxDyn(self.slice()) } #[doc(hidden)] - fn from_dimension(d: &D2) -> Option { + fn from_dimension(d: &D2) -> Option + { let mut s = Self::default(); if s.ndim() == d.ndim() { for i in 0..d.ndim() { @@ -395,76 +414,91 @@ macro_rules! 
impl_insert_axis_array( ); ); -impl Dimension for Dim<[Ix; 0]> { +impl Dimension for Dim<[Ix; 0]> +{ const NDIM: Option = Some(0); type Pattern = (); type Smaller = Self; type Larger = Ix1; // empty product is 1 -> size is 1 #[inline] - fn ndim(&self) -> usize { + fn ndim(&self) -> usize + { 0 } #[inline] - fn slice(&self) -> &[Ix] { + fn slice(&self) -> &[Ix] + { &[] } #[inline] - fn slice_mut(&mut self) -> &mut [Ix] { + fn slice_mut(&mut self) -> &mut [Ix] + { &mut [] } #[inline] - fn _fastest_varying_stride_order(&self) -> Self { + fn _fastest_varying_stride_order(&self) -> Self + { Ix0() } #[inline] fn into_pattern(self) -> Self::Pattern {} #[inline] - fn zeros(ndim: usize) -> Self { + fn zeros(ndim: usize) -> Self + { assert_eq!(ndim, 0); Self::default() } #[inline] - fn next_for(&self, _index: Self) -> Option { + fn next_for(&self, _index: Self) -> Option + { None } impl_insert_axis_array!(0); #[inline] - fn try_remove_axis(&self, _ignore: Axis) -> Self::Smaller { + fn try_remove_axis(&self, _ignore: Axis) -> Self::Smaller + { *self } private_impl! {} } -impl Dimension for Dim<[Ix; 1]> { +impl Dimension for Dim<[Ix; 1]> +{ const NDIM: Option = Some(1); type Pattern = Ix; type Smaller = Ix0; type Larger = Ix2; #[inline] - fn ndim(&self) -> usize { + fn ndim(&self) -> usize + { 1 } #[inline] - fn slice(&self) -> &[Ix] { + fn slice(&self) -> &[Ix] + { self.ix() } #[inline] - fn slice_mut(&mut self) -> &mut [Ix] { + fn slice_mut(&mut self) -> &mut [Ix] + { self.ixm() } #[inline] - fn into_pattern(self) -> Self::Pattern { + fn into_pattern(self) -> Self::Pattern + { get!(&self, 0) } #[inline] - fn zeros(ndim: usize) -> Self { + fn zeros(ndim: usize) -> Self + { assert_eq!(ndim, 1); Self::default() } #[inline] - fn next_for(&self, mut index: Self) -> Option { + fn next_for(&self, mut index: Self) -> Option + { getm!(index, 0) += 1; if get!(&index, 0) < get!(self, 0) { Some(index) @@ -474,21 +508,25 @@ impl Dimension for Dim<[Ix; 1]> { } #[inline] - fn equal(&self, rhs: &Self) -> bool { + fn equal(&self, rhs: &Self) -> bool + { get!(self, 0) == get!(rhs, 0) } #[inline] - fn size(&self) -> usize { + fn size(&self) -> usize + { get!(self, 0) } #[inline] - fn size_checked(&self) -> Option { + fn size_checked(&self) -> Option + { Some(get!(self, 0)) } #[inline] - fn default_strides(&self) -> Self { + fn default_strides(&self) -> Self + { if get!(self, 0) == 0 { Ix1(0) } else { @@ -497,22 +535,26 @@ impl Dimension for Dim<[Ix; 1]> { } #[inline] - fn _fastest_varying_stride_order(&self) -> Self { + fn _fastest_varying_stride_order(&self) -> Self + { Ix1(0) } #[inline(always)] - fn min_stride_axis(&self, _: &Self) -> Axis { + fn min_stride_axis(&self, _: &Self) -> Axis + { Axis(0) } #[inline(always)] - fn max_stride_axis(&self, _: &Self) -> Axis { + fn max_stride_axis(&self, _: &Self) -> Axis + { Axis(0) } #[inline] - fn first_index(&self) -> Option { + fn first_index(&self) -> Option + { if get!(self, 0) != 0 { Some(Ix1(0)) } else { @@ -522,13 +564,15 @@ impl Dimension for Dim<[Ix; 1]> { /// Self is an index, return the stride offset #[inline(always)] - fn stride_offset(index: &Self, stride: &Self) -> isize { + fn stride_offset(index: &Self, stride: &Self) -> isize + { stride_offset(get!(index, 0), get!(stride, 0)) } /// Return stride offset for this dimension and index. 
#[inline] - fn stride_offset_checked(&self, stride: &Self, index: &Self) -> Option { + fn stride_offset_checked(&self, stride: &Self, index: &Self) -> Option + { if get!(index, 0) < get!(self, 0) { Some(stride_offset(get!(index, 0), get!(stride, 0))) } else { @@ -537,11 +581,13 @@ impl Dimension for Dim<[Ix; 1]> { } impl_insert_axis_array!(1); #[inline] - fn try_remove_axis(&self, axis: Axis) -> Self::Smaller { + fn try_remove_axis(&self, axis: Axis) -> Self::Smaller + { self.remove_axis(axis) } - fn from_dimension(d: &D2) -> Option { + fn from_dimension(d: &D2) -> Option + { if 1 == d.ndim() { Some(Ix1(d[0])) } else { @@ -551,34 +597,41 @@ impl Dimension for Dim<[Ix; 1]> { private_impl! {} } -impl Dimension for Dim<[Ix; 2]> { +impl Dimension for Dim<[Ix; 2]> +{ const NDIM: Option = Some(2); type Pattern = (Ix, Ix); type Smaller = Ix1; type Larger = Ix3; #[inline] - fn ndim(&self) -> usize { + fn ndim(&self) -> usize + { 2 } #[inline] - fn into_pattern(self) -> Self::Pattern { + fn into_pattern(self) -> Self::Pattern + { self.ix().convert() } #[inline] - fn slice(&self) -> &[Ix] { + fn slice(&self) -> &[Ix] + { self.ix() } #[inline] - fn slice_mut(&mut self) -> &mut [Ix] { + fn slice_mut(&mut self) -> &mut [Ix] + { self.ixm() } #[inline] - fn zeros(ndim: usize) -> Self { + fn zeros(ndim: usize) -> Self + { assert_eq!(ndim, 2); Self::default() } #[inline] - fn next_for(&self, index: Self) -> Option { + fn next_for(&self, index: Self) -> Option + { let mut i = get!(&index, 0); let mut j = get!(&index, 1); let imax = get!(self, 0); @@ -595,34 +648,40 @@ impl Dimension for Dim<[Ix; 2]> { } #[inline] - fn equal(&self, rhs: &Self) -> bool { + fn equal(&self, rhs: &Self) -> bool + { get!(self, 0) == get!(rhs, 0) && get!(self, 1) == get!(rhs, 1) } #[inline] - fn size(&self) -> usize { + fn size(&self) -> usize + { get!(self, 0) * get!(self, 1) } #[inline] - fn size_checked(&self) -> Option { + fn size_checked(&self) -> Option + { let m = get!(self, 0); let n = get!(self, 1); m.checked_mul(n) } #[inline] - fn last_elem(&self) -> usize { + fn last_elem(&self) -> usize + { get!(self, 1) } #[inline] - fn set_last_elem(&mut self, i: usize) { + fn set_last_elem(&mut self, i: usize) + { getm!(self, 1) = i; } #[inline] - fn default_strides(&self) -> Self { + fn default_strides(&self) -> Self + { let m = get!(self, 0); let n = get!(self, 1); if m == 0 || n == 0 { @@ -632,7 +691,8 @@ impl Dimension for Dim<[Ix; 2]> { } } #[inline] - fn fortran_strides(&self) -> Self { + fn fortran_strides(&self) -> Self + { let m = get!(self, 0); let n = get!(self, 1); if m == 0 || n == 0 { @@ -643,7 +703,8 @@ impl Dimension for Dim<[Ix; 2]> { } #[inline] - fn _fastest_varying_stride_order(&self) -> Self { + fn _fastest_varying_stride_order(&self) -> Self + { if (get!(self, 0) as Ixs).abs() <= (get!(self, 1) as Ixs).abs() { Ix2(0, 1) } else { @@ -652,7 +713,8 @@ impl Dimension for Dim<[Ix; 2]> { } #[inline] - fn min_stride_axis(&self, strides: &Self) -> Axis { + fn min_stride_axis(&self, strides: &Self) -> Axis + { let s = get!(strides, 0) as Ixs; let t = get!(strides, 1) as Ixs; if s.abs() < t.abs() { @@ -663,7 +725,8 @@ impl Dimension for Dim<[Ix; 2]> { } #[inline] - fn first_index(&self) -> Option { + fn first_index(&self) -> Option + { let m = get!(self, 0); let n = get!(self, 1); if m != 0 && n != 0 { @@ -675,7 +738,8 @@ impl Dimension for Dim<[Ix; 2]> { /// Self is an index, return the stride offset #[inline(always)] - fn stride_offset(index: &Self, strides: &Self) -> isize { + fn stride_offset(index: &Self, strides: 
&Self) -> isize + { let i = get!(index, 0); let j = get!(index, 1); let s = get!(strides, 0); @@ -685,7 +749,8 @@ impl Dimension for Dim<[Ix; 2]> { /// Return stride offset for this dimension and index. #[inline] - fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option { + fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option + { let m = get!(self, 0); let n = get!(self, 1); let i = get!(index, 0); @@ -700,36 +765,43 @@ impl Dimension for Dim<[Ix; 2]> { } impl_insert_axis_array!(2); #[inline] - fn try_remove_axis(&self, axis: Axis) -> Self::Smaller { + fn try_remove_axis(&self, axis: Axis) -> Self::Smaller + { self.remove_axis(axis) } private_impl! {} } -impl Dimension for Dim<[Ix; 3]> { +impl Dimension for Dim<[Ix; 3]> +{ const NDIM: Option = Some(3); type Pattern = (Ix, Ix, Ix); type Smaller = Ix2; type Larger = Ix4; #[inline] - fn ndim(&self) -> usize { + fn ndim(&self) -> usize + { 3 } #[inline] - fn into_pattern(self) -> Self::Pattern { + fn into_pattern(self) -> Self::Pattern + { self.ix().convert() } #[inline] - fn slice(&self) -> &[Ix] { + fn slice(&self) -> &[Ix] + { self.ix() } #[inline] - fn slice_mut(&mut self) -> &mut [Ix] { + fn slice_mut(&mut self) -> &mut [Ix] + { self.ixm() } #[inline] - fn size(&self) -> usize { + fn size(&self) -> usize + { let m = get!(self, 0); let n = get!(self, 1); let o = get!(self, 2); @@ -737,13 +809,15 @@ impl Dimension for Dim<[Ix; 3]> { } #[inline] - fn zeros(ndim: usize) -> Self { + fn zeros(ndim: usize) -> Self + { assert_eq!(ndim, 3); Self::default() } #[inline] - fn next_for(&self, index: Self) -> Option { + fn next_for(&self, index: Self) -> Option + { let mut i = get!(&index, 0); let mut j = get!(&index, 1); let mut k = get!(&index, 2); @@ -767,7 +841,8 @@ impl Dimension for Dim<[Ix; 3]> { /// Self is an index, return the stride offset #[inline] - fn stride_offset(index: &Self, strides: &Self) -> isize { + fn stride_offset(index: &Self, strides: &Self) -> isize + { let i = get!(index, 0); let j = get!(index, 1); let k = get!(index, 2); @@ -779,7 +854,8 @@ impl Dimension for Dim<[Ix; 3]> { /// Return stride offset for this dimension and index. #[inline] - fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option { + fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option + { let m = get!(self, 0); let n = get!(self, 1); let l = get!(self, 2); @@ -797,7 +873,8 @@ impl Dimension for Dim<[Ix; 3]> { } #[inline] - fn _fastest_varying_stride_order(&self) -> Self { + fn _fastest_varying_stride_order(&self) -> Self + { let mut stride = *self; let mut order = Ix3(0, 1, 2); macro_rules! swap { @@ -819,7 +896,8 @@ impl Dimension for Dim<[Ix; 3]> { } impl_insert_axis_array!(3); #[inline] - fn try_remove_axis(&self, axis: Axis) -> Self::Smaller { + fn try_remove_axis(&self, axis: Axis) -> Self::Smaller + { self.remove_axis(axis) } private_impl! {} @@ -854,7 +932,7 @@ macro_rules! large_dim { } private_impl!{} } - ) + ); } large_dim!(4, Ix4, (Ix, Ix, Ix, Ix), Ix5, { @@ -876,41 +954,49 @@ large_dim!(6, Ix6, (Ix, Ix, Ix, Ix, Ix, Ix), IxDyn, { /// IxDyn is a "dynamic" index, pretty hard to use when indexing, /// and memory wasteful, but it allows an arbitrary and dynamic number of axes. 
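IxDyn, described above, trades some indexing convenience for a runtime-chosen number of axes. A small usage sketch with the public ArrayD/IxDyn API; indexing with an IxDyn value works because every Dimension type is itself an NdIndex:

use ndarray::{ArrayD, IxDyn};

fn main() {
    // The number of axes is a runtime value here, not a type parameter.
    let mut a = ArrayD::<f64>::zeros(IxDyn(&[2, 3, 4]));
    a[IxDyn(&[1, 2, 3])] = 7.0;
    assert_eq!(a[IxDyn(&[1, 2, 3])], 7.0);
    assert_eq!(a.ndim(), 3);
}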
-impl Dimension for IxDyn { +impl Dimension for IxDyn +{ const NDIM: Option = None; type Pattern = Self; type Smaller = Self; type Larger = Self; #[inline] - fn ndim(&self) -> usize { + fn ndim(&self) -> usize + { self.ix().len() } #[inline] - fn slice(&self) -> &[Ix] { + fn slice(&self) -> &[Ix] + { self.ix() } #[inline] - fn slice_mut(&mut self) -> &mut [Ix] { + fn slice_mut(&mut self) -> &mut [Ix] + { self.ixm() } #[inline] - fn into_pattern(self) -> Self::Pattern { + fn into_pattern(self) -> Self::Pattern + { self } #[inline] - fn zeros(ndim: usize) -> Self { + fn zeros(ndim: usize) -> Self + { IxDyn::zeros(ndim) } #[inline] - fn insert_axis(&self, axis: Axis) -> Self::Larger { + fn insert_axis(&self, axis: Axis) -> Self::Larger + { debug_assert!(axis.index() <= self.ndim()); Dim::new(self.ix().insert(axis.index())) } #[inline] - fn try_remove_axis(&self, axis: Axis) -> Self::Smaller { + fn try_remove_axis(&self, axis: Axis) -> Self::Smaller + { if self.ndim() > 0 { self.remove_axis(axis) } else { @@ -918,26 +1004,32 @@ impl Dimension for IxDyn { } } - fn from_dimension(d: &D2) -> Option { + fn from_dimension(d: &D2) -> Option + { Some(IxDyn(d.slice())) } - fn into_dyn(self) -> IxDyn { + fn into_dyn(self) -> IxDyn + { self } private_impl! {} } -impl Index for Dim { +impl Index for Dim +{ type Output = >::Output; - fn index(&self, index: usize) -> &Self::Output { + fn index(&self, index: usize) -> &Self::Output + { &self.ix()[index] } } -impl IndexMut for Dim { - fn index_mut(&mut self, index: usize) -> &mut Self::Output { +impl IndexMut for Dim +{ + fn index_mut(&mut self, index: usize) -> &mut Self::Output + { &mut self.ixm()[index] } } diff --git a/src/dimension/dynindeximpl.rs b/src/dimension/dynindeximpl.rs index c2aea032e..60aeacd80 100644 --- a/src/dimension/dynindeximpl.rs +++ b/src/dimension/dynindeximpl.rs @@ -1,23 +1,26 @@ use crate::imp_prelude::*; -use std::hash::{Hash, Hasher}; -use std::ops::{Deref, DerefMut, Index, IndexMut}; -use alloc::vec; #[cfg(not(feature = "std"))] use alloc::boxed::Box; +use alloc::vec; #[cfg(not(feature = "std"))] use alloc::vec::Vec; +use std::hash::{Hash, Hasher}; +use std::ops::{Deref, DerefMut, Index, IndexMut}; const CAP: usize = 4; /// T is usize or isize #[derive(Debug)] -enum IxDynRepr { +enum IxDynRepr +{ Inline(u32, [T; CAP]), Alloc(Box<[T]>), } -impl Deref for IxDynRepr { +impl Deref for IxDynRepr +{ type Target = [T]; - fn deref(&self) -> &[T] { + fn deref(&self) -> &[T] + { match *self { IxDynRepr::Inline(len, ref ar) => { debug_assert!(len as usize <= ar.len()); @@ -28,8 +31,10 @@ impl Deref for IxDynRepr { } } -impl DerefMut for IxDynRepr { - fn deref_mut(&mut self) -> &mut [T] { +impl DerefMut for IxDynRepr +{ + fn deref_mut(&mut self) -> &mut [T] + { match *self { IxDynRepr::Inline(len, ref mut ar) => { debug_assert!(len as usize <= ar.len()); @@ -41,16 +46,20 @@ impl DerefMut for IxDynRepr { } /// The default is equivalent to `Self::from(&[0])`. 
-impl Default for IxDynRepr { - fn default() -> Self { +impl Default for IxDynRepr +{ + fn default() -> Self + { Self::copy_from(&[0]) } } use num_traits::Zero; -impl IxDynRepr { - pub fn copy_from(x: &[T]) -> Self { +impl IxDynRepr +{ + pub fn copy_from(x: &[T]) -> Self + { if x.len() <= CAP { let mut arr = [T::zero(); CAP]; arr[..x.len()].copy_from_slice(x); @@ -61,9 +70,11 @@ impl IxDynRepr { } } -impl IxDynRepr { +impl IxDynRepr +{ // make an Inline or Alloc version as appropriate - fn from_vec_auto(v: Vec) -> Self { + fn from_vec_auto(v: Vec) -> Self + { if v.len() <= CAP { Self::copy_from(&v) } else { @@ -72,18 +83,23 @@ impl IxDynRepr { } } -impl IxDynRepr { - fn from_vec(v: Vec) -> Self { +impl IxDynRepr +{ + fn from_vec(v: Vec) -> Self + { IxDynRepr::Alloc(v.into_boxed_slice()) } - fn from(x: &[T]) -> Self { + fn from(x: &[T]) -> Self + { Self::from_vec(x.to_vec()) } } -impl Clone for IxDynRepr { - fn clone(&self) -> Self { +impl Clone for IxDynRepr +{ + fn clone(&self) -> Self + { match *self { IxDynRepr::Inline(len, arr) => IxDynRepr::Inline(len, arr), _ => Self::from(&self[..]), @@ -93,22 +109,25 @@ impl Clone for IxDynRepr { impl Eq for IxDynRepr {} -impl PartialEq for IxDynRepr { - fn eq(&self, rhs: &Self) -> bool { +impl PartialEq for IxDynRepr +{ + fn eq(&self, rhs: &Self) -> bool + { match (self, rhs) { - (&IxDynRepr::Inline(slen, ref sarr), &IxDynRepr::Inline(rlen, ref rarr)) => { + (&IxDynRepr::Inline(slen, ref sarr), &IxDynRepr::Inline(rlen, ref rarr)) => slen == rlen && (0..CAP) .filter(|&i| i < slen as usize) - .all(|i| sarr[i] == rarr[i]) - } + .all(|i| sarr[i] == rarr[i]), _ => self[..] == rhs[..], } } } -impl Hash for IxDynRepr { - fn hash(&self, state: &mut H) { +impl Hash for IxDynRepr +{ + fn hash(&self, state: &mut H) + { Hash::hash(&self[..], state) } } @@ -121,8 +140,10 @@ impl Hash for IxDynRepr { #[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] pub struct IxDynImpl(IxDynRepr); -impl IxDynImpl { - pub(crate) fn insert(&self, i: usize) -> Self { +impl IxDynImpl +{ + pub(crate) fn insert(&self, i: usize) -> Self + { let len = self.len(); debug_assert!(i <= len); IxDynImpl(if len < CAP { @@ -139,7 +160,8 @@ impl IxDynImpl { }) } - fn remove(&self, i: usize) -> Self { + fn remove(&self, i: usize) -> Self + { IxDynImpl(match self.0 { IxDynRepr::Inline(0, _) => IxDynRepr::Inline(0, [0; CAP]), IxDynRepr::Inline(1, _) => IxDynRepr::Inline(0, [0; CAP]), @@ -160,74 +182,88 @@ impl IxDynImpl { } } -impl<'a> From<&'a [Ix]> for IxDynImpl { +impl<'a> From<&'a [Ix]> for IxDynImpl +{ #[inline] - fn from(ix: &'a [Ix]) -> Self { + fn from(ix: &'a [Ix]) -> Self + { IxDynImpl(IxDynRepr::copy_from(ix)) } } -impl From> for IxDynImpl { +impl From> for IxDynImpl +{ #[inline] - fn from(ix: Vec) -> Self { + fn from(ix: Vec) -> Self + { IxDynImpl(IxDynRepr::from_vec_auto(ix)) } } impl Index for IxDynImpl -where - [Ix]: Index, +where [Ix]: Index { type Output = <[Ix] as Index>::Output; - fn index(&self, index: J) -> &Self::Output { + fn index(&self, index: J) -> &Self::Output + { &self.0[index] } } impl IndexMut for IxDynImpl -where - [Ix]: IndexMut, +where [Ix]: IndexMut { - fn index_mut(&mut self, index: J) -> &mut Self::Output { + fn index_mut(&mut self, index: J) -> &mut Self::Output + { &mut self.0[index] } } -impl Deref for IxDynImpl { +impl Deref for IxDynImpl +{ type Target = [Ix]; #[inline] - fn deref(&self) -> &[Ix] { + fn deref(&self) -> &[Ix] + { &self.0 } } -impl DerefMut for IxDynImpl { +impl DerefMut for IxDynImpl +{ #[inline] - fn deref_mut(&mut self) -> &mut [Ix] 
{ + fn deref_mut(&mut self) -> &mut [Ix] + { &mut self.0 } } -impl<'a> IntoIterator for &'a IxDynImpl { +impl<'a> IntoIterator for &'a IxDynImpl +{ type Item = &'a Ix; type IntoIter = <&'a [Ix] as IntoIterator>::IntoIter; #[inline] - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { self[..].iter() } } -impl RemoveAxis for Dim { - fn remove_axis(&self, axis: Axis) -> Self { +impl RemoveAxis for Dim +{ + fn remove_axis(&self, axis: Axis) -> Self + { debug_assert!(axis.index() < self.ndim()); Dim::new(self.ix().remove(axis.index())) } } -impl IxDyn { +impl IxDyn +{ /// Create a new dimension value with `n` axes, all zeros #[inline] - pub fn zeros(n: usize) -> IxDyn { + pub fn zeros(n: usize) -> IxDyn + { const ZEROS: &[usize] = &[0; 4]; if n <= ZEROS.len() { Dim(&ZEROS[..n]) diff --git a/src/dimension/mod.rs b/src/dimension/mod.rs index 76a3984fe..e1563613e 100644 --- a/src/dimension/mod.rs +++ b/src/dimension/mod.rs @@ -7,9 +7,9 @@ // except according to those terms. use crate::error::{from_kind, ErrorKind, ShapeError}; +use crate::shape_builder::Strides; use crate::slice::SliceArg; use crate::{Ix, Ixs, Slice, SliceInfoElem}; -use crate::shape_builder::Strides; use num_integer::div_floor; pub use self::axes::{Axes, AxisDescription}; @@ -46,7 +46,8 @@ mod sequence; /// Calculate offset from `Ix` stride converting sign properly #[inline(always)] -pub fn stride_offset(n: Ix, stride: Ix) -> isize { +pub fn stride_offset(n: Ix, stride: Ix) -> isize +{ (n as isize) * (stride as Ixs) } @@ -55,7 +56,8 @@ pub fn stride_offset(n: Ix, stride: Ix) -> isize { /// There is overlap if, when iterating through the dimensions in order of /// increasing stride, the current stride is less than or equal to the maximum /// possible offset along the preceding axes. (Axes of length ≤1 are ignored.) -pub fn dim_stride_overlap(dim: &D, strides: &D) -> bool { +pub fn dim_stride_overlap(dim: &D, strides: &D) -> bool +{ let order = strides._fastest_varying_stride_order(); let mut sum_prev_offsets = 0; for &index in order.slice() { @@ -84,7 +86,8 @@ pub fn dim_stride_overlap(dim: &D, strides: &D) -> bool { /// are met to construct an array from the data buffer, `dim`, and `strides`. /// (The data buffer being a slice or `Vec` guarantees that it contains no more /// than `isize::MAX` bytes.) -pub fn size_of_shape_checked(dim: &D) -> Result { +pub fn size_of_shape_checked(dim: &D) -> Result +{ let size_nonzero = dim .slice() .iter() @@ -122,9 +125,9 @@ pub fn size_of_shape_checked(dim: &D) -> Result /// conditions 1 and 2 are sufficient to guarantee that the offset in units of /// `A` and in units of bytes between the least address and greatest address /// accessible by moving along all axes does not exceed `isize::MAX`. -pub(crate) fn can_index_slice_with_strides(data: &[A], dim: &D, - strides: &Strides) - -> Result<(), ShapeError> +pub(crate) fn can_index_slice_with_strides( + data: &[A], dim: &D, strides: &Strides, +) -> Result<(), ShapeError> { if let Strides::Custom(strides) = strides { can_index_slice(data, dim, strides) @@ -133,8 +136,7 @@ pub(crate) fn can_index_slice_with_strides(data: &[A], dim: &D, } } -pub(crate) fn can_index_slice_not_custom(data_len: usize, dim: &D) - -> Result<(), ShapeError> +pub(crate) fn can_index_slice_not_custom(data_len: usize, dim: &D) -> Result<(), ShapeError> { // Condition 1. 
let len = size_of_shape_checked(dim)?; @@ -160,16 +162,13 @@ pub(crate) fn can_index_slice_not_custom(data_len: usize, dim: &D) /// also implies that the length of any individual axis does not exceed /// `isize::MAX`.) pub fn max_abs_offset_check_overflow(dim: &D, strides: &D) -> Result -where - D: Dimension, +where D: Dimension { max_abs_offset_check_overflow_impl(mem::size_of::(), dim, strides) } -fn max_abs_offset_check_overflow_impl(elem_size: usize, dim: &D, strides: &D) - -> Result -where - D: Dimension, +fn max_abs_offset_check_overflow_impl(elem_size: usize, dim: &D, strides: &D) -> Result +where D: Dimension { // Condition 1. if dim.ndim() != strides.ndim() { @@ -241,22 +240,17 @@ where /// allocation. (In other words, the pointer to the first element of the array /// must be computed using `offset_from_low_addr_ptr_to_logical_ptr` so that /// negative strides are correctly handled.) -pub(crate) fn can_index_slice( - data: &[A], - dim: &D, - strides: &D, -) -> Result<(), ShapeError> { +pub(crate) fn can_index_slice(data: &[A], dim: &D, strides: &D) -> Result<(), ShapeError> +{ // Check conditions 1 and 2 and calculate `max_offset`. let max_offset = max_abs_offset_check_overflow::(dim, strides)?; can_index_slice_impl(max_offset, data.len(), dim, strides) } fn can_index_slice_impl( - max_offset: usize, - data_len: usize, - dim: &D, - strides: &D, -) -> Result<(), ShapeError> { + max_offset: usize, data_len: usize, dim: &D, strides: &D, +) -> Result<(), ShapeError> +{ // Check condition 3. let is_empty = dim.slice().iter().any(|&d| d == 0); if is_empty && max_offset > data_len { @@ -276,7 +270,8 @@ fn can_index_slice_impl( /// Stride offset checked general version (slices) #[inline] -pub fn stride_offset_checked(dim: &[Ix], strides: &[Ix], index: &[Ix]) -> Option { +pub fn stride_offset_checked(dim: &[Ix], strides: &[Ix], index: &[Ix]) -> Option +{ if index.len() != dim.len() { return None; } @@ -292,8 +287,7 @@ pub fn stride_offset_checked(dim: &[Ix], strides: &[Ix], index: &[Ix]) -> Option /// Checks if strides are non-negative. pub fn strides_non_negative(strides: &D) -> Result<(), ShapeError> -where - D: Dimension, +where D: Dimension { for &stride in strides.slice() { if (stride as isize) < 0 { @@ -304,7 +298,8 @@ where } /// Implementation-specific extensions to `Dimension` -pub trait DimensionExt { +pub trait DimensionExt +{ // note: many extensions go in the main trait if they need to be special- // cased per dimension /// Get the dimension at `axis`. 
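stride_offset_checked above is the bounds-checked counterpart of the raw offset computation: every index component must be smaller than its axis length, otherwise no offset is produced. A standalone re-statement of that logic (not the crate's code, which works in Ix/Ixs terms):

fn stride_offset_checked(dim: &[usize], strides: &[usize], index: &[usize]) -> Option<isize> {
    if index.len() != dim.len() {
        return None;
    }
    let mut offset = 0isize;
    for ((&d, &s), &i) in dim.iter().zip(strides).zip(index) {
        if i >= d {
            return None;
        }
        offset += i as isize * s as isize;
    }
    Some(offset)
}

fn main() {
    // Shape [2, 3] with row-major strides [3, 1]: index [1, 2] -> offset 5.
    assert_eq!(stride_offset_checked(&[2, 3], &[3, 1], &[1, 2]), Some(5));
    // An out-of-bounds index on axis 0 yields None.
    assert_eq!(stride_offset_checked(&[2, 3], &[3, 1], &[2, 0]), None);
}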
@@ -321,28 +316,32 @@ pub trait DimensionExt { } impl DimensionExt for D -where - D: Dimension, +where D: Dimension { #[inline] - fn axis(&self, axis: Axis) -> Ix { + fn axis(&self, axis: Axis) -> Ix + { self[axis.index()] } #[inline] - fn set_axis(&mut self, axis: Axis, value: Ix) { + fn set_axis(&mut self, axis: Axis, value: Ix) + { self[axis.index()] = value; } } -impl DimensionExt for [Ix] { +impl DimensionExt for [Ix] +{ #[inline] - fn axis(&self, axis: Axis) -> Ix { + fn axis(&self, axis: Axis) -> Ix + { self[axis.index()] } #[inline] - fn set_axis(&mut self, axis: Axis, value: Ix) { + fn set_axis(&mut self, axis: Axis, value: Ix) + { self[axis.index()] = value; } } @@ -353,12 +352,8 @@ impl DimensionExt for [Ix] { /// **Panics** if `index` is larger than the size of the axis #[track_caller] // FIXME: Move to Dimension trait -pub fn do_collapse_axis( - dims: &mut D, - strides: &D, - axis: usize, - index: usize, -) -> isize { +pub fn do_collapse_axis(dims: &mut D, strides: &D, axis: usize, index: usize) -> isize +{ let dim = dims.slice()[axis]; let stride = strides.slice()[axis]; ndassert!( @@ -375,7 +370,8 @@ pub fn do_collapse_axis( /// Compute the equivalent unsigned index given the axis length and signed index. #[inline] -pub fn abs_index(len: Ix, index: Ixs) -> Ix { +pub fn abs_index(len: Ix, index: Ixs) -> Ix +{ if index < 0 { len - (-index as Ix) } else { @@ -389,7 +385,8 @@ pub fn abs_index(len: Ix, index: Ixs) -> Ix { /// /// **Panics** if stride is 0 or if any index is out of bounds. #[track_caller] -fn to_abs_slice(axis_len: usize, slice: Slice) -> (usize, usize, isize) { +fn to_abs_slice(axis_len: usize, slice: Slice) -> (usize, usize, isize) +{ let Slice { start, end, step } = slice; let start = abs_index(axis_len, start); let mut end = abs_index(axis_len, end.unwrap_or(axis_len as isize)); @@ -414,7 +411,8 @@ fn to_abs_slice(axis_len: usize, slice: Slice) -> (usize, usize, isize) { /// Returns the offset from the lowest-address element to the logically first /// element. -pub fn offset_from_low_addr_ptr_to_logical_ptr(dim: &D, strides: &D) -> usize { +pub fn offset_from_low_addr_ptr_to_logical_ptr(dim: &D, strides: &D) -> usize +{ let offset = izip!(dim.slice(), strides.slice()).fold(0, |_offset, (&d, &s)| { let s = s as isize; if s < 0 && d > 1 { @@ -431,7 +429,8 @@ pub fn offset_from_low_addr_ptr_to_logical_ptr(dim: &D, strides: & /// /// **Panics** if stride is 0 or if any index is out of bounds. #[track_caller] -pub fn do_slice(dim: &mut usize, stride: &mut usize, slice: Slice) -> isize { +pub fn do_slice(dim: &mut usize, stride: &mut usize, slice: Slice) -> isize +{ let (start, end, step) = to_abs_slice(*dim, slice); let m = end - start; @@ -484,7 +483,8 @@ pub fn do_slice(dim: &mut usize, stride: &mut usize, slice: Slice) -> isize { /// nonnegative. 
/// /// See https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm -fn extended_gcd(a: isize, b: isize) -> (isize, (isize, isize)) { +fn extended_gcd(a: isize, b: isize) -> (isize, (isize, isize)) +{ if a == 0 { (b.abs(), (0, b.signum())) } else if b == 0 { @@ -520,7 +520,8 @@ fn extended_gcd(a: isize, b: isize) -> (isize, (isize, isize)) { /// /// See https://en.wikipedia.org/wiki/Diophantine_equation#One_equation /// and https://math.stackexchange.com/questions/1656120#1656138 -fn solve_linear_diophantine_eq(a: isize, b: isize, c: isize) -> Option<(isize, isize)> { +fn solve_linear_diophantine_eq(a: isize, b: isize, c: isize) -> Option<(isize, isize)> +{ debug_assert_ne!(a, 0); debug_assert_ne!(b, 0); let (g, (u, _)) = extended_gcd(a, b); @@ -538,10 +539,8 @@ fn solve_linear_diophantine_eq(a: isize, b: isize, c: isize) -> Option<(isize, i /// consecutive elements (the sign is irrelevant). /// /// **Note** `step1` and `step2` must be nonzero. -fn arith_seq_intersect( - (min1, max1, step1): (isize, isize, isize), - (min2, max2, step2): (isize, isize, isize), -) -> bool { +fn arith_seq_intersect((min1, max1, step1): (isize, isize, isize), (min2, max2, step2): (isize, isize, isize)) -> bool +{ debug_assert!(max1 >= min1); debug_assert!(max2 >= min2); debug_assert_eq!((max1 - min1) % step1, 0); @@ -597,7 +596,8 @@ fn arith_seq_intersect( /// Returns the minimum and maximum values of the indices (inclusive). /// /// If the slice is empty, then returns `None`, otherwise returns `Some((min, max))`. -fn slice_min_max(axis_len: usize, slice: Slice) -> Option<(usize, usize)> { +fn slice_min_max(axis_len: usize, slice: Slice) -> Option<(usize, usize)> +{ let (start, end, step) = to_abs_slice(axis_len, slice); if start == end { None @@ -609,11 +609,8 @@ fn slice_min_max(axis_len: usize, slice: Slice) -> Option<(usize, usize)> { } /// Returns `true` iff the slices intersect. -pub fn slices_intersect( - dim: &D, - indices1: impl SliceArg, - indices2: impl SliceArg, -) -> bool { +pub fn slices_intersect(dim: &D, indices1: impl SliceArg, indices2: impl SliceArg) -> bool +{ debug_assert_eq!(indices1.in_ndim(), indices2.in_ndim()); for (&axis_len, &si1, &si2) in izip!( dim.slice(), @@ -642,10 +639,7 @@ pub fn slices_intersect( Some(m) => m, None => return false, }; - if !arith_seq_intersect( - (min1 as isize, max1 as isize, step1), - (min2 as isize, max2 as isize, step2), - ) { + if !arith_seq_intersect((min1 as isize, max1 as isize, step1), (min2 as isize, max2 as isize, step2)) { return false; } } @@ -673,7 +667,8 @@ pub fn slices_intersect( true } -pub(crate) fn is_layout_c(dim: &D, strides: &D) -> bool { +pub(crate) fn is_layout_c(dim: &D, strides: &D) -> bool +{ if let Some(1) = D::NDIM { return strides[0] == 1 || dim[0] <= 1; } @@ -698,7 +693,8 @@ pub(crate) fn is_layout_c(dim: &D, strides: &D) -> bool { true } -pub(crate) fn is_layout_f(dim: &D, strides: &D) -> bool { +pub(crate) fn is_layout_f(dim: &D, strides: &D) -> bool +{ if let Some(1) = D::NDIM { return strides[0] == 1 || dim[0] <= 1; } @@ -724,8 +720,7 @@ pub(crate) fn is_layout_f(dim: &D, strides: &D) -> bool { } pub fn merge_axes(dim: &mut D, strides: &mut D, take: Axis, into: Axis) -> bool -where - D: Dimension, +where D: Dimension { let into_len = dim.axis(into); let into_stride = strides.axis(into) as isize; @@ -753,20 +748,16 @@ where /// Move the axis which has the smallest absolute stride and a length /// greater than one to be the last axis. 
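The slice-overlap checks above reduce to number theory: extended_gcd and solve_linear_diophantine_eq decide whether two strided index sequences can collide. A compact recursive sketch of the extended Euclidean algorithm (the crate's version is iterative, but the contract a*x + b*y = g with g nonnegative is the same):

fn extended_gcd(a: isize, b: isize) -> (isize, (isize, isize)) {
    if b == 0 {
        // gcd(a, 0) = |a|, witnessed by a * signum(a) + 0 * 0.
        (a.abs(), (a.signum(), 0))
    } else {
        let (g, (x, y)) = extended_gcd(b, a % b);
        // b*x + (a % b)*y = g  rearranges to  a*y + b*(x - (a/b)*y) = g.
        (g, (y, x - (a / b) * y))
    }
}

fn main() {
    let (g, (x, y)) = extended_gcd(240, 46);
    assert_eq!(g, 2);
    assert_eq!(240 * x + 46 * y, g);
}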
pub fn move_min_stride_axis_to_last(dim: &mut D, strides: &mut D) -where - D: Dimension, +where D: Dimension { debug_assert_eq!(dim.ndim(), strides.ndim()); match dim.ndim() { 0 | 1 => {} - 2 => { - if dim[1] <= 1 - || dim[0] > 1 && (strides[0] as isize).abs() < (strides[1] as isize).abs() - { + 2 => + if dim[1] <= 1 || dim[0] > 1 && (strides[0] as isize).abs() < (strides[1] as isize).abs() { dim.slice_mut().swap(0, 1); strides.slice_mut().swap(0, 1); - } - } + }, n => { if let Some(min_stride_axis) = (0..n) .filter(|&ax| dim[ax] > 1) @@ -781,11 +772,18 @@ where } #[cfg(test)] -mod test { +mod test +{ use super::{ - arith_seq_intersect, can_index_slice, can_index_slice_not_custom, extended_gcd, - max_abs_offset_check_overflow, slice_min_max, slices_intersect, - solve_linear_diophantine_eq, IntoDimension, + arith_seq_intersect, + can_index_slice, + can_index_slice_not_custom, + extended_gcd, + max_abs_offset_check_overflow, + slice_min_max, + slices_intersect, + solve_linear_diophantine_eq, + IntoDimension, }; use crate::error::{from_kind, ErrorKind}; use crate::slice::Slice; @@ -794,7 +792,8 @@ mod test { use quickcheck::{quickcheck, TestResult}; #[test] - fn slice_indexing_uncommon_strides() { + fn slice_indexing_uncommon_strides() + { let v: alloc::vec::Vec<_> = (0..12).collect(); let dim = (2, 3, 2).into_dimension(); let strides = (1, 2, 6).into_dimension(); @@ -808,7 +807,8 @@ mod test { } #[test] - fn overlapping_strides_dim() { + fn overlapping_strides_dim() + { let dim = (2, 3, 2).into_dimension(); let strides = (5, 2, 1).into_dimension(); assert!(super::dim_stride_overlap(&dim, &strides)); @@ -830,7 +830,8 @@ mod test { } #[test] - fn max_abs_offset_check_overflow_examples() { + fn max_abs_offset_check_overflow_examples() + { let dim = (1, ::std::isize::MAX as usize, 1).into_dimension(); let strides = (1, 1, 1).into_dimension(); max_abs_offset_check_overflow::(&dim, &strides).unwrap(); @@ -846,13 +847,15 @@ mod test { } #[test] - fn can_index_slice_ix0() { + fn can_index_slice_ix0() + { can_index_slice::(&[1], &Ix0(), &Ix0()).unwrap(); can_index_slice::(&[], &Ix0(), &Ix0()).unwrap_err(); } #[test] - fn can_index_slice_ix1() { + fn can_index_slice_ix1() + { can_index_slice::(&[], &Ix1(0), &Ix1(0)).unwrap(); can_index_slice::(&[], &Ix1(0), &Ix1(1)).unwrap(); can_index_slice::(&[], &Ix1(1), &Ix1(0)).unwrap_err(); @@ -867,7 +870,8 @@ mod test { } #[test] - fn can_index_slice_ix2() { + fn can_index_slice_ix2() + { can_index_slice::(&[], &Ix2(0, 0), &Ix2(0, 0)).unwrap(); can_index_slice::(&[], &Ix2(0, 0), &Ix2(2, 1)).unwrap(); can_index_slice::(&[], &Ix2(0, 1), &Ix2(0, 0)).unwrap(); @@ -882,7 +886,8 @@ mod test { } #[test] - fn can_index_slice_ix3() { + fn can_index_slice_ix3() + { can_index_slice::(&[], &Ix3(0, 0, 1), &Ix3(2, 1, 3)).unwrap(); can_index_slice::(&[], &Ix3(1, 1, 1), &Ix3(2, 1, 3)).unwrap_err(); can_index_slice::(&[1], &Ix3(1, 1, 1), &Ix3(2, 1, 3)).unwrap(); @@ -891,7 +896,8 @@ mod test { } #[test] - fn can_index_slice_zero_size_elem() { + fn can_index_slice_zero_size_elem() + { can_index_slice::<(), _>(&[], &Ix1(0), &Ix1(1)).unwrap(); can_index_slice::<(), _>(&[()], &Ix1(1), &Ix1(1)).unwrap(); can_index_slice::<(), _>(&[(), ()], &Ix1(2), &Ix1(1)).unwrap(); @@ -941,7 +947,8 @@ mod test { } #[test] - fn extended_gcd_zero() { + fn extended_gcd_zero() + { assert_eq!(extended_gcd(0, 0), (0, (0, 0))); assert_eq!(extended_gcd(0, 5), (5, (0, 1))); assert_eq!(extended_gcd(5, 0), (5, (1, 0))); @@ -1031,7 +1038,8 @@ mod test { } #[test] - fn slice_min_max_empty() { + fn 
slice_min_max_empty() + { assert_eq!(slice_min_max(0, Slice::new(0, None, 3)), None); assert_eq!(slice_min_max(10, Slice::new(1, Some(1), 3)), None); assert_eq!(slice_min_max(10, Slice::new(-1, Some(-1), 3)), None); @@ -1040,7 +1048,8 @@ mod test { } #[test] - fn slice_min_max_pos_step() { + fn slice_min_max_pos_step() + { assert_eq!(slice_min_max(10, Slice::new(1, Some(8), 3)), Some((1, 7))); assert_eq!(slice_min_max(10, Slice::new(1, Some(9), 3)), Some((1, 7))); assert_eq!(slice_min_max(10, Slice::new(-9, Some(8), 3)), Some((1, 7))); @@ -1056,7 +1065,8 @@ mod test { } #[test] - fn slice_min_max_neg_step() { + fn slice_min_max_neg_step() + { assert_eq!(slice_min_max(10, Slice::new(1, Some(8), -3)), Some((1, 7))); assert_eq!(slice_min_max(10, Slice::new(2, Some(8), -3)), Some((4, 7))); assert_eq!(slice_min_max(10, Slice::new(-9, Some(8), -3)), Some((1, 7))); @@ -1078,7 +1088,8 @@ mod test { } #[test] - fn slices_intersect_true() { + fn slices_intersect_true() + { assert!(slices_intersect( &Dim([4, 5]), s![NewAxis, .., NewAxis, ..], @@ -1103,7 +1114,8 @@ mod test { } #[test] - fn slices_intersect_false() { + fn slices_intersect_false() + { assert!(!slices_intersect( &Dim([4, 5]), s![..;2, ..], diff --git a/src/dimension/ndindex.rs b/src/dimension/ndindex.rs index 4d4046bc8..e27e68c99 100644 --- a/src/dimension/ndindex.rs +++ b/src/dimension/ndindex.rs @@ -2,9 +2,7 @@ use std::fmt::Debug; use super::{stride_offset, stride_offset_checked}; use crate::itertools::zip; -use crate::{ - Dim, Dimension, IntoDimension, Ix, Ix0, Ix1, Ix2, Ix3, Ix4, Ix5, Ix6, IxDyn, IxDynImpl, -}; +use crate::{Dim, Dimension, IntoDimension, Ix, Ix0, Ix1, Ix2, Ix3, Ix4, Ix5, Ix6, IxDyn, IxDynImpl}; /// Tuple or fixed size arrays that can be used to index an array. /// @@ -19,7 +17,8 @@ use crate::{ /// assert_eq!(a[(1, 1)], 4); /// ``` #[allow(clippy::missing_safety_doc)] // TODO: Add doc -pub unsafe trait NdIndex: Debug { +pub unsafe trait NdIndex: Debug +{ #[doc(hidden)] fn index_checked(&self, dim: &E, strides: &E) -> Option; #[doc(hidden)] @@ -27,96 +26,118 @@ pub unsafe trait NdIndex: Debug { } unsafe impl NdIndex for D -where - D: Dimension, +where D: Dimension { - fn index_checked(&self, dim: &D, strides: &D) -> Option { + fn index_checked(&self, dim: &D, strides: &D) -> Option + { dim.stride_offset_checked(strides, self) } - fn index_unchecked(&self, strides: &D) -> isize { + fn index_unchecked(&self, strides: &D) -> isize + { D::stride_offset(self, strides) } } -unsafe impl NdIndex for () { +unsafe impl NdIndex for () +{ #[inline] - fn index_checked(&self, dim: &Ix0, strides: &Ix0) -> Option { + fn index_checked(&self, dim: &Ix0, strides: &Ix0) -> Option + { dim.stride_offset_checked(strides, &Ix0()) } #[inline(always)] - fn index_unchecked(&self, _strides: &Ix0) -> isize { + fn index_unchecked(&self, _strides: &Ix0) -> isize + { 0 } } -unsafe impl NdIndex for (Ix, Ix) { +unsafe impl NdIndex for (Ix, Ix) +{ #[inline] - fn index_checked(&self, dim: &Ix2, strides: &Ix2) -> Option { + fn index_checked(&self, dim: &Ix2, strides: &Ix2) -> Option + { dim.stride_offset_checked(strides, &Ix2(self.0, self.1)) } #[inline] - fn index_unchecked(&self, strides: &Ix2) -> isize { + fn index_unchecked(&self, strides: &Ix2) -> isize + { stride_offset(self.0, get!(strides, 0)) + stride_offset(self.1, get!(strides, 1)) } } -unsafe impl NdIndex for (Ix, Ix, Ix) { +unsafe impl NdIndex for (Ix, Ix, Ix) +{ #[inline] - fn index_checked(&self, dim: &Ix3, strides: &Ix3) -> Option { + fn index_checked(&self, dim: &Ix3, strides: &Ix3) -> 
Option + { dim.stride_offset_checked(strides, &self.into_dimension()) } #[inline] - fn index_unchecked(&self, strides: &Ix3) -> isize { + fn index_unchecked(&self, strides: &Ix3) -> isize + { stride_offset(self.0, get!(strides, 0)) + stride_offset(self.1, get!(strides, 1)) + stride_offset(self.2, get!(strides, 2)) } } -unsafe impl NdIndex for (Ix, Ix, Ix, Ix) { +unsafe impl NdIndex for (Ix, Ix, Ix, Ix) +{ #[inline] - fn index_checked(&self, dim: &Ix4, strides: &Ix4) -> Option { + fn index_checked(&self, dim: &Ix4, strides: &Ix4) -> Option + { dim.stride_offset_checked(strides, &self.into_dimension()) } #[inline] - fn index_unchecked(&self, strides: &Ix4) -> isize { + fn index_unchecked(&self, strides: &Ix4) -> isize + { zip(strides.ix(), self.into_dimension().ix()) .map(|(&s, &i)| stride_offset(i, s)) .sum() } } -unsafe impl NdIndex for (Ix, Ix, Ix, Ix, Ix) { +unsafe impl NdIndex for (Ix, Ix, Ix, Ix, Ix) +{ #[inline] - fn index_checked(&self, dim: &Ix5, strides: &Ix5) -> Option { + fn index_checked(&self, dim: &Ix5, strides: &Ix5) -> Option + { dim.stride_offset_checked(strides, &self.into_dimension()) } #[inline] - fn index_unchecked(&self, strides: &Ix5) -> isize { + fn index_unchecked(&self, strides: &Ix5) -> isize + { zip(strides.ix(), self.into_dimension().ix()) .map(|(&s, &i)| stride_offset(i, s)) .sum() } } -unsafe impl NdIndex for Ix { +unsafe impl NdIndex for Ix +{ #[inline] - fn index_checked(&self, dim: &Ix1, strides: &Ix1) -> Option { + fn index_checked(&self, dim: &Ix1, strides: &Ix1) -> Option + { dim.stride_offset_checked(strides, &Ix1(*self)) } #[inline(always)] - fn index_unchecked(&self, strides: &Ix1) -> isize { + fn index_unchecked(&self, strides: &Ix1) -> isize + { stride_offset(*self, get!(strides, 0)) } } -unsafe impl NdIndex for Ix { +unsafe impl NdIndex for Ix +{ #[inline] - fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option { + fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option + { debug_assert_eq!(dim.ndim(), 1); stride_offset_checked(dim.ix(), strides.ix(), &[*self]) } #[inline(always)] - fn index_unchecked(&self, strides: &IxDyn) -> isize { + fn index_unchecked(&self, strides: &IxDyn) -> isize + { debug_assert_eq!(strides.ndim(), 1); stride_offset(*self, get!(strides, 0)) } @@ -155,9 +176,11 @@ ndindex_with_array! 
{ } // implement NdIndex for Dim<[Ix; 2]> and so on -unsafe impl NdIndex for Dim<[Ix; N]> { +unsafe impl NdIndex for Dim<[Ix; N]> +{ #[inline] - fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option { + fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option + { debug_assert_eq!( strides.ndim(), N, @@ -169,7 +192,8 @@ unsafe impl NdIndex for Dim<[Ix; N]> { } #[inline] - fn index_unchecked(&self, strides: &IxDyn) -> isize { + fn index_unchecked(&self, strides: &IxDyn) -> isize + { debug_assert_eq!( strides.ndim(), N, @@ -184,9 +208,11 @@ unsafe impl NdIndex for Dim<[Ix; N]> { } // implement NdIndex for [Ix; 2] and so on -unsafe impl NdIndex for [Ix; N] { +unsafe impl NdIndex for [Ix; N] +{ #[inline] - fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option { + fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option + { debug_assert_eq!( strides.ndim(), N, @@ -198,7 +224,8 @@ unsafe impl NdIndex for [Ix; N] { } #[inline] - fn index_unchecked(&self, strides: &IxDyn) -> isize { + fn index_unchecked(&self, strides: &IxDyn) -> isize + { debug_assert_eq!( strides.ndim(), N, @@ -212,27 +239,35 @@ unsafe impl NdIndex for [Ix; N] { } } -impl<'a> IntoDimension for &'a [Ix] { +impl<'a> IntoDimension for &'a [Ix] +{ type Dim = IxDyn; - fn into_dimension(self) -> Self::Dim { + fn into_dimension(self) -> Self::Dim + { Dim(IxDynImpl::from(self)) } } -unsafe impl<'a> NdIndex for &'a IxDyn { - fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option { +unsafe impl<'a> NdIndex for &'a IxDyn +{ + fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option + { (**self).index_checked(dim, strides) } - fn index_unchecked(&self, strides: &IxDyn) -> isize { + fn index_unchecked(&self, strides: &IxDyn) -> isize + { (**self).index_unchecked(strides) } } -unsafe impl<'a> NdIndex for &'a [Ix] { - fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option { +unsafe impl<'a> NdIndex for &'a [Ix] +{ + fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option + { stride_offset_checked(dim.ix(), strides.ix(), self) } - fn index_unchecked(&self, strides: &IxDyn) -> isize { + fn index_unchecked(&self, strides: &IxDyn) -> isize + { zip(strides.ix(), *self) .map(|(&s, &i)| stride_offset(i, s)) .sum() diff --git a/src/dimension/ops.rs b/src/dimension/ops.rs index dd23216f6..1365ab488 100644 --- a/src/dimension/ops.rs +++ b/src/dimension/ops.rs @@ -1,7 +1,8 @@ use crate::imp_prelude::*; /// Adds the two dimensions at compile time. -pub trait DimAdd { +pub trait DimAdd +{ /// The sum of the two dimensions. type Output: Dimension; } @@ -27,7 +28,8 @@ macro_rules! impl_dimadd_const_out_dyn { }; } -impl DimAdd for Ix0 { +impl DimAdd for Ix0 +{ type Output = D; } @@ -85,6 +87,7 @@ impl_dimadd_const_out_dyn!(6, 5); impl_dimadd_const_out_dyn!(6, 6); impl_dimadd_const_out_dyn!(6, IxDyn); -impl DimAdd for IxDyn { +impl DimAdd for IxDyn +{ type Output = IxDyn; } diff --git a/src/dimension/remove_axis.rs b/src/dimension/remove_axis.rs index da366ae17..cbb039fc5 100644 --- a/src/dimension/remove_axis.rs +++ b/src/dimension/remove_axis.rs @@ -12,21 +12,26 @@ use crate::{Axis, Dim, Dimension, Ix, Ix0, Ix1}; /// /// `RemoveAxis` defines a larger-than relation for array shapes: /// removing one axis from *Self* gives smaller dimension *Smaller*. 
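// Illustrative usage sketch, not part of the patch, assuming the public `ndarray`
// crate as a dependency: `RemoveAxis` is the relation described above -- dropping
// one axis from a shape produces the next-smaller dimension type.
use ndarray::{Axis, Dim, RemoveAxis};

fn main()
{
    let shape = Dim([2, 3, 4]);
    let smaller = shape.remove_axis(Axis(1));
    assert_eq!(smaller, Dim([2, 4]));
}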
-pub trait RemoveAxis: Dimension { +pub trait RemoveAxis: Dimension +{ fn remove_axis(&self, axis: Axis) -> Self::Smaller; } -impl RemoveAxis for Dim<[Ix; 1]> { +impl RemoveAxis for Dim<[Ix; 1]> +{ #[inline] - fn remove_axis(&self, axis: Axis) -> Ix0 { + fn remove_axis(&self, axis: Axis) -> Ix0 + { debug_assert!(axis.index() < self.ndim()); Ix0() } } -impl RemoveAxis for Dim<[Ix; 2]> { +impl RemoveAxis for Dim<[Ix; 2]> +{ #[inline] - fn remove_axis(&self, axis: Axis) -> Ix1 { + fn remove_axis(&self, axis: Axis) -> Ix1 + { let axis = axis.index(); debug_assert!(axis < self.ndim()); if axis == 0 { diff --git a/src/dimension/reshape.rs b/src/dimension/reshape.rs index c6e08848d..52d9e719a 100644 --- a/src/dimension/reshape.rs +++ b/src/dimension/reshape.rs @@ -1,10 +1,8 @@ - -use crate::{Dimension, Order, ShapeError, ErrorKind}; -use crate::dimension::sequence::{Sequence, SequenceMut, Forward, Reverse}; +use crate::dimension::sequence::{Forward, Reverse, Sequence, SequenceMut}; +use crate::{Dimension, ErrorKind, Order, ShapeError}; #[inline] -pub(crate) fn reshape_dim(from: &D, strides: &D, to: &E, order: Order) - -> Result +pub(crate) fn reshape_dim(from: &D, strides: &D, to: &E, order: Order) -> Result where D: Dimension, E: Dimension, @@ -13,12 +11,10 @@ where let mut to_strides = E::zeros(to.ndim()); match order { Order::RowMajor => { - reshape_dim_c(&Forward(from), &Forward(strides), - &Forward(to), Forward(&mut to_strides))?; + reshape_dim_c(&Forward(from), &Forward(strides), &Forward(to), Forward(&mut to_strides))?; } Order::ColumnMajor => { - reshape_dim_c(&Reverse(from), &Reverse(strides), - &Reverse(to), Reverse(&mut to_strides))?; + reshape_dim_c(&Reverse(from), &Reverse(strides), &Reverse(to), Reverse(&mut to_strides))?; } } Ok(to_strides) @@ -31,7 +27,7 @@ where /// This function uses RowMajor index ordering if the inputs are read in the forward direction /// (index 0 is axis 0 etc) and ColumnMajor index ordering if the inputs are read in reversed /// direction (as made possible with the Sequence trait). -/// +/// /// Preconditions: /// /// 1. from_dim and to_dim are valid dimensions (product of all non-zero axes @@ -47,16 +43,15 @@ where /// - IncompatibleLayout if the input shape and stride can not be remapped to the output shape /// without moving the array data into a new memory layout. /// - Ok if the from dim could be mapped to the new to dim. -fn reshape_dim_c(from_dim: &D, from_strides: &D, to_dim: &E, mut to_strides: E2) - -> Result<(), ShapeError> +fn reshape_dim_c(from_dim: &D, from_strides: &D, to_dim: &E, mut to_strides: E2) -> Result<(), ShapeError> where - D: Sequence, - E: Sequence, - E2: SequenceMut, + D: Sequence, + E: Sequence, + E2: SequenceMut, { // cursor indexes into the from and to dimensions - let mut fi = 0; // index into `from_dim` - let mut ti = 0; // index into `to_dim`. + let mut fi = 0; // index into `from_dim` + let mut ti = 0; // index into `to_dim`. while fi < from_dim.len() && ti < to_dim.len() { let mut fd = from_dim[fi]; @@ -67,7 +62,7 @@ where to_strides[ti] = from_strides[fi]; fi += 1; ti += 1; - continue + continue; } if fd == 1 { @@ -88,8 +83,8 @@ where // stride times element count is to be distributed out over a combination of axes. 
let mut fstride_whole = fs * (fd as isize); - let mut fd_product = fd; // cumulative product of axis lengths in the combination (from) - let mut td_product = td; // cumulative product of axis lengths in the combination (to) + let mut fd_product = fd; // cumulative product of axis lengths in the combination (from) + let mut td_product = td; // cumulative product of axis lengths in the combination (to) // The two axis lengths are not a match, so try to combine multiple axes // to get it to match up. @@ -151,7 +146,8 @@ where #[cfg(feature = "std")] #[test] -fn test_reshape() { +fn test_reshape() +{ use crate::Dim; macro_rules! test_reshape { @@ -238,4 +234,3 @@ fn test_reshape() { test_reshape!(ok F from [1, 5, 1, 2, 1], [1, 1, 1, 5, 1], to [10], [1]); test_reshape!(fail C from [1, 5, 1, 2, 1], [1, 1, 1, 5, 1], to [10]); } - diff --git a/src/dimension/sequence.rs b/src/dimension/sequence.rs index 835e00d18..ed3605d57 100644 --- a/src/dimension/sequence.rs +++ b/src/dimension/sequence.rs @@ -7,103 +7,123 @@ pub(in crate::dimension) struct Forward(pub(crate) D); pub(in crate::dimension) struct Reverse(pub(crate) D); impl Index for Forward<&D> -where - D: Dimension, +where D: Dimension { type Output = usize; #[inline] - fn index(&self, index: usize) -> &usize { + fn index(&self, index: usize) -> &usize + { &self.0[index] } } impl Index for Forward<&mut D> -where - D: Dimension, +where D: Dimension { type Output = usize; #[inline] - fn index(&self, index: usize) -> &usize { + fn index(&self, index: usize) -> &usize + { &self.0[index] } } impl IndexMut for Forward<&mut D> -where - D: Dimension, +where D: Dimension { #[inline] - fn index_mut(&mut self, index: usize) -> &mut usize { + fn index_mut(&mut self, index: usize) -> &mut usize + { &mut self.0[index] } } impl Index for Reverse<&D> -where - D: Dimension, +where D: Dimension { type Output = usize; #[inline] - fn index(&self, index: usize) -> &usize { + fn index(&self, index: usize) -> &usize + { &self.0[self.len() - index - 1] } } impl Index for Reverse<&mut D> -where - D: Dimension, +where D: Dimension { type Output = usize; #[inline] - fn index(&self, index: usize) -> &usize { + fn index(&self, index: usize) -> &usize + { &self.0[self.len() - index - 1] } } impl IndexMut for Reverse<&mut D> -where - D: Dimension, +where D: Dimension { #[inline] - fn index_mut(&mut self, index: usize) -> &mut usize { + fn index_mut(&mut self, index: usize) -> &mut usize + { let len = self.len(); &mut self.0[len - index - 1] } } /// Indexable sequence with length -pub(in crate::dimension) trait Sequence: Index { +pub(in crate::dimension) trait Sequence: Index +{ fn len(&self) -> usize; } /// Indexable sequence with length (mut) -pub(in crate::dimension) trait SequenceMut: Sequence + IndexMut { } +pub(in crate::dimension) trait SequenceMut: Sequence + IndexMut {} -impl Sequence for Forward<&D> where D: Dimension { +impl Sequence for Forward<&D> +where D: Dimension +{ #[inline] - fn len(&self) -> usize { self.0.ndim() } + fn len(&self) -> usize + { + self.0.ndim() + } } -impl Sequence for Forward<&mut D> where D: Dimension { +impl Sequence for Forward<&mut D> +where D: Dimension +{ #[inline] - fn len(&self) -> usize { self.0.ndim() } + fn len(&self) -> usize + { + self.0.ndim() + } } -impl SequenceMut for Forward<&mut D> where D: Dimension { } +impl SequenceMut for Forward<&mut D> where D: Dimension {} -impl Sequence for Reverse<&D> where D: Dimension { +impl Sequence for Reverse<&D> +where D: Dimension +{ #[inline] - fn len(&self) -> usize { self.0.ndim() } + 
fn len(&self) -> usize + { + self.0.ndim() + } } -impl Sequence for Reverse<&mut D> where D: Dimension { +impl Sequence for Reverse<&mut D> +where D: Dimension +{ #[inline] - fn len(&self) -> usize { self.0.ndim() } + fn len(&self) -> usize + { + self.0.ndim() + } } -impl SequenceMut for Reverse<&mut D> where D: Dimension { } - +impl SequenceMut for Reverse<&mut D> where D: Dimension {} diff --git a/src/error.rs b/src/error.rs index c45496142..eb7395ad8 100644 --- a/src/error.rs +++ b/src/error.rs @@ -12,20 +12,24 @@ use std::fmt; /// An error related to array shape or layout. #[derive(Clone)] -pub struct ShapeError { +pub struct ShapeError +{ // we want to be able to change this representation later repr: ErrorKind, } -impl ShapeError { +impl ShapeError +{ /// Return the `ErrorKind` of this error. #[inline] - pub fn kind(&self) -> ErrorKind { + pub fn kind(&self) -> ErrorKind + { self.repr } /// Create a new `ShapeError` - pub fn from_kind(error: ErrorKind) -> Self { + pub fn from_kind(error: ErrorKind) -> Self + { from_kind(error) } } @@ -36,7 +40,8 @@ impl ShapeError { /// is not guaranteed. #[non_exhaustive] #[derive(Copy, Clone, Debug)] -pub enum ErrorKind { +pub enum ErrorKind +{ /// incompatible shape IncompatibleShape = 1, /// incompatible memory layout @@ -52,20 +57,25 @@ pub enum ErrorKind { } #[inline(always)] -pub fn from_kind(k: ErrorKind) -> ShapeError { +pub fn from_kind(k: ErrorKind) -> ShapeError +{ ShapeError { repr: k } } -impl PartialEq for ErrorKind { +impl PartialEq for ErrorKind +{ #[inline(always)] - fn eq(&self, rhs: &Self) -> bool { + fn eq(&self, rhs: &Self) -> bool + { *self as u8 == *rhs as u8 } } -impl PartialEq for ShapeError { +impl PartialEq for ShapeError +{ #[inline(always)] - fn eq(&self, rhs: &Self) -> bool { + fn eq(&self, rhs: &Self) -> bool + { self.repr == rhs.repr } } @@ -73,8 +83,10 @@ impl PartialEq for ShapeError { #[cfg(feature = "std")] impl Error for ShapeError {} -impl fmt::Display for ShapeError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { +impl fmt::Display for ShapeError +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { let description = match self.kind() { ErrorKind::IncompatibleShape => "incompatible shapes", ErrorKind::IncompatibleLayout => "incompatible memory layout", @@ -87,8 +99,10 @@ impl fmt::Display for ShapeError { } } -impl fmt::Debug for ShapeError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { +impl fmt::Debug for ShapeError +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { write!(f, "{}", self) } } diff --git a/src/extension/nonnull.rs b/src/extension/nonnull.rs index 4deee11ac..08f80927e 100644 --- a/src/extension/nonnull.rs +++ b/src/extension/nonnull.rs @@ -1,9 +1,10 @@ -use std::ptr::NonNull; #[cfg(not(feature = "std"))] use alloc::vec::Vec; +use std::ptr::NonNull; /// Return a NonNull pointer to the vector's data -pub(crate) fn nonnull_from_vec_data(v: &mut Vec) -> NonNull { +pub(crate) fn nonnull_from_vec_data(v: &mut Vec) -> NonNull +{ // this pointer is guaranteed to be non-null unsafe { NonNull::new_unchecked(v.as_mut_ptr()) } } @@ -14,7 +15,8 @@ pub(crate) fn nonnull_from_vec_data(v: &mut Vec) -> NonNull { /// This is checked with a debug assertion, and will panic if this is not true, /// but treat this as an unconditional conversion. 
#[inline] -pub(crate) unsafe fn nonnull_debug_checked_from_ptr(ptr: *mut T) -> NonNull { +pub(crate) unsafe fn nonnull_debug_checked_from_ptr(ptr: *mut T) -> NonNull +{ debug_assert!(!ptr.is_null()); NonNull::new_unchecked(ptr) } diff --git a/src/free_functions.rs b/src/free_functions.rs index 11a32a1f0..3adf2d8f3 100644 --- a/src/free_functions.rs +++ b/src/free_functions.rs @@ -51,22 +51,26 @@ macro_rules! array { } /// Create a zero-dimensional array with the element `x`. -pub fn arr0(x: A) -> Array0 { +pub fn arr0(x: A) -> Array0 +{ unsafe { ArrayBase::from_shape_vec_unchecked((), vec![x]) } } /// Create a one-dimensional array with elements from `xs`. -pub fn arr1(xs: &[A]) -> Array1 { +pub fn arr1(xs: &[A]) -> Array1 +{ ArrayBase::from(xs.to_vec()) } /// Create a one-dimensional array with elements from `xs`. -pub fn rcarr1(xs: &[A]) -> ArcArray1 { +pub fn rcarr1(xs: &[A]) -> ArcArray1 +{ arr1(xs).into_shared() } /// Create a zero-dimensional array view borrowing `x`. -pub const fn aview0(x: &A) -> ArrayView0<'_, A> { +pub const fn aview0(x: &A) -> ArrayView0<'_, A> +{ ArrayBase { data: ViewRepr::new(), // Safe because references are always non-null. @@ -97,7 +101,8 @@ pub const fn aview0(x: &A) -> ArrayView0<'_, A> { /// /// assert_eq!(C.sum(), 6.); /// ``` -pub const fn aview1(xs: &[A]) -> ArrayView1<'_, A> { +pub const fn aview1(xs: &[A]) -> ArrayView1<'_, A> +{ if size_of::() == 0 { assert!( xs.len() <= isize::MAX as usize, @@ -131,7 +136,8 @@ pub const fn aview1(xs: &[A]) -> ArrayView1<'_, A> { /// const C: ArrayView2<'static, f64> = aview2(&[[1., 2., 3.], [4., 5., 6.]]); /// assert_eq!(C.sum(), 21.); /// ``` -pub const fn aview2(xs: &[[A; N]]) -> ArrayView2<'_, A> { +pub const fn aview2(xs: &[[A; N]]) -> ArrayView2<'_, A> +{ let cols = N; let rows = xs.len(); if size_of::() == 0 { @@ -179,7 +185,8 @@ pub const fn aview2(xs: &[[A; N]]) -> ArrayView2<'_, A> { /// } /// assert_eq!(&data[..10], [5, 0, 0, 5, 0, 0, 5, 0, 0, 5]); /// ``` -pub fn aview_mut1(xs: &mut [A]) -> ArrayViewMut1<'_, A> { +pub fn aview_mut1(xs: &mut [A]) -> ArrayViewMut1<'_, A> +{ ArrayViewMut::from(xs) } @@ -205,7 +212,8 @@ pub fn aview_mut1(xs: &mut [A]) -> ArrayViewMut1<'_, A> { /// // look at the start of the result /// assert_eq!(&data[..3], [[1., -1.], [1., -1.], [1., -1.]]); /// ``` -pub fn aview_mut2(xs: &mut [[A; N]]) -> ArrayViewMut2<'_, A> { +pub fn aview_mut2(xs: &mut [[A; N]]) -> ArrayViewMut2<'_, A> +{ ArrayViewMut2::from(xs) } @@ -220,20 +228,23 @@ pub fn aview_mut2(xs: &mut [[A; N]]) -> ArrayViewMut2<'_, A> /// a.shape() == [2, 3] /// ); /// ``` -pub fn arr2(xs: &[[A; N]]) -> Array2 { +pub fn arr2(xs: &[[A; N]]) -> Array2 +{ Array2::from(xs.to_vec()) } -impl From> for Array2 { +impl From> for Array2 +{ /// Converts the `Vec` of arrays to an owned 2-D array. /// /// **Panics** if the product of non-zero axis lengths overflows `isize`. - fn from(mut xs: Vec<[A; N]>) -> Self { + fn from(mut xs: Vec<[A; N]>) -> Self + { let dim = Ix2(xs.len(), N); let ptr = xs.as_mut_ptr(); let cap = xs.capacity(); - let expand_len = dimension::size_of_shape_checked(&dim) - .expect("Product of non-zero axis lengths must not overflow isize."); + let expand_len = + dimension::size_of_shape_checked(&dim).expect("Product of non-zero axis lengths must not overflow isize."); forget(xs); unsafe { let v = if size_of::() == 0 { @@ -251,16 +262,18 @@ impl From> for Array2 { } } -impl From> for Array3 { +impl From> for Array3 +{ /// Converts the `Vec` of arrays to an owned 3-D array. 
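// Illustrative usage sketch, not part of the patch, assuming the public `ndarray`
// crate: the `From<Vec<...>>` impls above turn nested fixed-size arrays into owned
// 2-D and 3-D arrays, which is also what `arr2`/`arr3` build on.
use ndarray::{arr2, Array3};

fn main()
{
    let a = arr2(&[[1, 2, 3], [4, 5, 6]]);
    assert_eq!(a.shape(), &[2, 3]);

    let b = Array3::from(vec![[[1, 2], [3, 4]], [[5, 6], [7, 8]]]);
    assert_eq!(b.shape(), &[2, 2, 2]);
    assert_eq!(b[[1, 0, 1]], 6);
}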
/// /// **Panics** if the product of non-zero axis lengths overflows `isize`. - fn from(mut xs: Vec<[[A; M]; N]>) -> Self { + fn from(mut xs: Vec<[[A; M]; N]>) -> Self + { let dim = Ix3(xs.len(), N, M); let ptr = xs.as_mut_ptr(); let cap = xs.capacity(); - let expand_len = dimension::size_of_shape_checked(&dim) - .expect("Product of non-zero axis lengths must not overflow isize."); + let expand_len = + dimension::size_of_shape_checked(&dim).expect("Product of non-zero axis lengths must not overflow isize."); forget(xs); unsafe { let v = if size_of::() == 0 { @@ -280,7 +293,8 @@ impl From> for Array3 { /// Create a two-dimensional array with elements from `xs`. /// -pub fn rcarr2(xs: &[[A; N]]) -> ArcArray2 { +pub fn rcarr2(xs: &[[A; N]]) -> ArcArray2 +{ arr2(xs).into_shared() } @@ -301,11 +315,13 @@ pub fn rcarr2(xs: &[[A; N]]) -> ArcArray2 { /// a.shape() == [3, 2, 2] /// ); /// ``` -pub fn arr3(xs: &[[[A; M]; N]]) -> Array3 { +pub fn arr3(xs: &[[[A; M]; N]]) -> Array3 +{ Array3::from(xs.to_vec()) } /// Create a three-dimensional array with elements from `xs`. -pub fn rcarr3(xs: &[[[A; M]; N]]) -> ArcArray { +pub fn rcarr3(xs: &[[[A; M]; N]]) -> ArcArray +{ arr3(xs).into_shared() } diff --git a/src/geomspace.rs b/src/geomspace.rs index c1935c71e..0ac91f529 100644 --- a/src/geomspace.rs +++ b/src/geomspace.rs @@ -11,7 +11,8 @@ use num_traits::Float; /// An iterator of a sequence of geometrically spaced floats. /// /// Iterator element type is `F`. -pub struct Geomspace { +pub struct Geomspace +{ sign: F, start: F, step: F, @@ -20,13 +21,13 @@ pub struct Geomspace { } impl Iterator for Geomspace -where - F: Float, +where F: Float { type Item = F; #[inline] - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { if self.index >= self.len { None } else { @@ -39,18 +40,19 @@ where } #[inline] - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { let n = self.len - self.index; (n, Some(n)) } } impl DoubleEndedIterator for Geomspace -where - F: Float, +where F: Float { #[inline] - fn next_back(&mut self) -> Option { + fn next_back(&mut self) -> Option + { if self.index >= self.len { None } else { @@ -80,8 +82,7 @@ impl ExactSizeIterator for Geomspace where Geomspace: Iterator {} /// **Panics** if converting `n - 1` to type `F` fails. 
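// Illustrative usage sketch, not part of the patch, assuming the public
// `Array::geomspace` constructor (crate feature "std"), which is backed by the
// `Geomspace` iterator above: `n` values evenly spaced on a log scale, both
// endpoints included, and `None` when the endpoints differ in sign or are zero.
use ndarray::Array1;

fn main()
{
    let xs = Array1::geomspace(1.0_f64, 1000.0, 4).unwrap();
    assert_eq!(xs.len(), 4);
    assert!((xs[1] - 10.0).abs() < 1e-9);
    assert!((xs[3] - 1000.0).abs() < 1e-9);
    assert!(Array1::<f64>::geomspace(-1.0, 1.0, 4).is_none());
}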
#[inline] pub fn geomspace(a: F, b: F, n: usize) -> Option> -where - F: Float, +where F: Float { if a == F::zero() || b == F::zero() || a.is_sign_negative() != b.is_sign_negative() { return None; @@ -104,12 +105,14 @@ where } #[cfg(test)] -mod tests { +mod tests +{ use super::geomspace; #[test] #[cfg(feature = "approx")] - fn valid() { + fn valid() + { use crate::{arr1, Array1}; use approx::assert_abs_diff_eq; @@ -127,7 +130,8 @@ mod tests { } #[test] - fn iter_forward() { + fn iter_forward() + { let mut iter = geomspace(1.0f64, 1e3, 4).unwrap(); assert!(iter.size_hint() == (4, Some(4))); @@ -142,7 +146,8 @@ mod tests { } #[test] - fn iter_backward() { + fn iter_backward() + { let mut iter = geomspace(1.0f64, 1e3, 4).unwrap(); assert!(iter.size_hint() == (4, Some(4))); @@ -157,17 +162,20 @@ mod tests { } #[test] - fn zero_lower() { + fn zero_lower() + { assert!(geomspace(0.0, 1.0, 4).is_none()); } #[test] - fn zero_upper() { + fn zero_upper() + { assert!(geomspace(1.0, 0.0, 4).is_none()); } #[test] - fn zero_included() { + fn zero_included() + { assert!(geomspace(-1.0, 1.0, 4).is_none()); } } diff --git a/src/impl_1d.rs b/src/impl_1d.rs index a9fe84407..e49fdd731 100644 --- a/src/impl_1d.rs +++ b/src/impl_1d.rs @@ -16,8 +16,7 @@ use crate::low_level_util::AbortIfPanic; /// # Methods For 1-D Arrays impl ArrayBase -where - S: RawData, +where S: RawData { /// Return an vector with the elements of the one-dimensional array. pub fn to_vec(&self) -> Vec @@ -35,8 +34,7 @@ where /// Rotate the elements of the array by 1 element towards the front; /// the former first element becomes the last. pub(crate) fn rotate1_front(&mut self) - where - S: DataMut, + where S: DataMut { // use swapping to keep all elements initialized (as required by owned storage) let mut lane_iter = self.iter_mut(); diff --git a/src/impl_2d.rs b/src/impl_2d.rs index cd5cf7e5c..c2e9725ac 100644 --- a/src/impl_2d.rs +++ b/src/impl_2d.rs @@ -11,8 +11,7 @@ use crate::imp_prelude::*; /// # Methods For 2-D Arrays impl ArrayBase -where - S: RawData, +where S: RawData { /// Return an array view of row `index`. 
/// @@ -25,8 +24,7 @@ where /// ``` #[track_caller] pub fn row(&self, index: Ix) -> ArrayView1<'_, A> - where - S: Data, + where S: Data { self.index_axis(Axis(0), index) } @@ -43,8 +41,7 @@ where /// ``` #[track_caller] pub fn row_mut(&mut self, index: Ix) -> ArrayViewMut1<'_, A> - where - S: DataMut, + where S: DataMut { self.index_axis_mut(Axis(0), index) } @@ -66,7 +63,8 @@ where /// // get length of any particular axis with .len_of() /// assert_eq!(m, array.len_of(Axis(0))); /// ``` - pub fn nrows(&self) -> usize { + pub fn nrows(&self) -> usize + { self.len_of(Axis(0)) } @@ -81,8 +79,7 @@ where /// ``` #[track_caller] pub fn column(&self, index: Ix) -> ArrayView1<'_, A> - where - S: Data, + where S: Data { self.index_axis(Axis(1), index) } @@ -99,8 +96,7 @@ where /// ``` #[track_caller] pub fn column_mut(&mut self, index: Ix) -> ArrayViewMut1<'_, A> - where - S: DataMut, + where S: DataMut { self.index_axis_mut(Axis(1), index) } @@ -122,7 +118,8 @@ where /// // get length of any particular axis with .len_of() /// assert_eq!(n, array.len_of(Axis(1))); /// ``` - pub fn ncols(&self) -> usize { + pub fn ncols(&self) -> usize + { self.len_of(Axis(1)) } @@ -141,7 +138,8 @@ where /// let array = array![[1., 2., 5.], [3., 4., 6.]]; /// assert!(!array.is_square()); /// ``` - pub fn is_square(&self) -> bool { + pub fn is_square(&self) -> bool + { let (m, n) = self.dim(); m == n } diff --git a/src/impl_clone.rs b/src/impl_clone.rs index e2e111a12..d65f6c338 100644 --- a/src/impl_clone.rs +++ b/src/impl_clone.rs @@ -9,8 +9,10 @@ use crate::imp_prelude::*; use crate::RawDataClone; -impl Clone for ArrayBase { - fn clone(&self) -> ArrayBase { +impl Clone for ArrayBase +{ + fn clone(&self) -> ArrayBase + { // safe because `clone_with_ptr` promises to provide equivalent data and ptr unsafe { let (data, ptr) = self.data.clone_with_ptr(self.ptr); @@ -26,7 +28,8 @@ impl Clone for ArrayBase { /// `Array` implements `.clone_from()` to reuse an array's existing /// allocation. Semantically equivalent to `*self = other.clone()`, but /// potentially more efficient. - fn clone_from(&mut self, other: &Self) { + fn clone_from(&mut self, other: &Self) + { unsafe { self.ptr = self.data.clone_from_with_ptr(&other.data, other.ptr); self.dim.clone_from(&other.dim); diff --git a/src/impl_constructors.rs b/src/impl_constructors.rs index 94ddebcd6..e5f19a837 100644 --- a/src/impl_constructors.rs +++ b/src/impl_constructors.rs @@ -11,14 +11,14 @@ //! #![allow(clippy::match_wild_err_arm)] +use alloc::vec; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; #[cfg(feature = "std")] use num_traits::Float; use num_traits::{One, Zero}; use std::mem; use std::mem::MaybeUninit; -use alloc::vec; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; use crate::dimension; use crate::dimension::offset_from_low_addr_ptr_to_logical_ptr; @@ -36,7 +36,6 @@ use crate::StrideShape; use crate::{geomspace, linspace, logspace}; use rawpointer::PointerExt; - /// # Constructor Methods for Owned Arrays /// /// Note that the constructor methods apply to `Array` and `ArcArray`, @@ -44,8 +43,7 @@ use rawpointer::PointerExt; /// /// ## Constructor methods for one-dimensional arrays. impl ArrayBase -where - S: DataOwned, +where S: DataOwned { /// Create a one-dimensional array from a vector (no copying needed). 
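// Illustrative usage sketch, not part of the patch, assuming the public
// one-dimensional constructors documented here: `from_vec` takes ownership of an
// existing buffer without copying, and `linspace` (crate feature "std") produces
// `n` evenly spaced values including both endpoints.
use ndarray::Array;

fn main()
{
    let a = Array::from_vec(vec![1., 2., 3., 4.]);
    assert_eq!(a.len(), 4);

    let b = Array::linspace(0., 1., 5);
    assert_eq!(b.len(), 5);
    assert!((b[1] - 0.25).abs() < 1e-12);
    assert!((b[4] - 1.0).abs() < 1e-12);
}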
/// @@ -56,7 +54,8 @@ where /// /// let array = Array::from_vec(vec![1., 2., 3., 4.]); /// ``` - pub fn from_vec(v: Vec) -> Self { + pub fn from_vec(v: Vec) -> Self + { if mem::size_of::() == 0 { assert!( v.len() <= isize::MAX as usize, @@ -76,7 +75,8 @@ where /// let array = Array::from_iter(0..10); /// ``` #[allow(clippy::should_implement_trait)] - pub fn from_iter>(iterable: I) -> Self { + pub fn from_iter>(iterable: I) -> Self + { Self::from_vec(iterable.into_iter().collect()) } @@ -99,8 +99,7 @@ where /// ``` #[cfg(feature = "std")] pub fn linspace(start: A, end: A, n: usize) -> Self - where - A: Float, + where A: Float { Self::from(to_vec(linspace::linspace(start, end, n))) } @@ -118,8 +117,7 @@ where /// ``` #[cfg(feature = "std")] pub fn range(start: A, end: A, step: A) -> Self - where - A: Float, + where A: Float { Self::from(to_vec(linspace::range(start, end, step))) } @@ -147,8 +145,7 @@ where /// ``` #[cfg(feature = "std")] pub fn logspace(base: A, start: A, end: A, n: usize) -> Self - where - A: Float, + where A: Float { Self::from(to_vec(logspace::logspace(base, start, end, n))) } @@ -182,8 +179,7 @@ where /// ``` #[cfg(feature = "std")] pub fn geomspace(start: A, end: A, n: usize) -> Option - where - A: Float, + where A: Float { Some(Self::from(to_vec(geomspace::geomspace(start, end, n)?))) } @@ -191,8 +187,7 @@ where /// ## Constructor methods for two-dimensional arrays. impl ArrayBase -where - S: DataOwned, +where S: DataOwned { /// Create an identity matrix of size `n` (square 2D array). /// @@ -460,14 +455,14 @@ where /// ); /// ``` pub fn from_shape_vec(shape: Sh, v: Vec) -> Result - where - Sh: Into>, + where Sh: Into> { // eliminate the type parameter Sh as soon as possible Self::from_shape_vec_impl(shape.into(), v) } - fn from_shape_vec_impl(shape: StrideShape, v: Vec) -> Result { + fn from_shape_vec_impl(shape: StrideShape, v: Vec) -> Result + { let dim = shape.dim; let is_custom = shape.strides.is_custom(); dimension::can_index_slice_with_strides(&v, &dim, &shape.strides)?; @@ -503,8 +498,7 @@ where /// 5. The strides must not allow any element to be referenced by two different /// indices. pub unsafe fn from_shape_vec_unchecked(shape: Sh, v: Vec) -> Self - where - Sh: Into>, + where Sh: Into> { let shape = shape.into(); let dim = shape.dim; @@ -512,7 +506,8 @@ where Self::from_vec_dim_stride_unchecked(dim, strides, v) } - unsafe fn from_vec_dim_stride_unchecked(dim: D, strides: D, mut v: Vec) -> Self { + unsafe fn from_vec_dim_stride_unchecked(dim: D, strides: D, mut v: Vec) -> Self + { // debug check for issues that indicates wrong use of this constructor debug_assert!(dimension::can_index_slice(&v, &dim, &strides).is_ok()); @@ -526,8 +521,7 @@ where /// # Safety /// /// See from_shape_vec_unchecked - pub(crate) unsafe fn from_shape_trusted_iter_unchecked(shape: Sh, iter: I, map: F) - -> Self + pub(crate) unsafe fn from_shape_trusted_iter_unchecked(shape: Sh, iter: I, map: F) -> Self where Sh: Into>, I: TrustedIterator + ExactSizeIterator, @@ -540,7 +534,6 @@ where Self::from_vec_dim_stride_unchecked(dim, strides, v) } - /// Create an array with uninitialized elements, shape `shape`. 
/// /// The uninitialized elements of type `A` are represented by the type `MaybeUninit`, @@ -583,12 +576,11 @@ where /// b.assume_init() /// } /// } - /// + /// /// # let _ = shift_by_two; /// ``` pub fn uninit(shape: Sh) -> ArrayBase - where - Sh: ShapeBuilder, + where Sh: ShapeBuilder { unsafe { let shape = shape.into_shape_with_order(); @@ -633,9 +625,11 @@ where array } - #[deprecated(note = "This method is hard to use correctly. Use `uninit` instead.", - since = "0.15.0")] - #[allow(clippy::uninit_vec)] // this is explicitly intended to create uninitialized memory + #[deprecated( + note = "This method is hard to use correctly. Use `uninit` instead.", + since = "0.15.0" + )] + #[allow(clippy::uninit_vec)] // this is explicitly intended to create uninitialized memory /// Create an array with uninitialized elements, shape `shape`. /// /// Prefer to use [`uninit()`](ArrayBase::uninit) if possible, because it is @@ -670,7 +664,6 @@ where v.set_len(size); Self::from_shape_vec_unchecked(shape, v) } - } impl ArrayBase @@ -683,8 +676,7 @@ where /// This method has been renamed to `uninit` #[deprecated(note = "Renamed to `uninit`", since = "0.15.0")] pub fn maybe_uninit(shape: Sh) -> Self - where - Sh: ShapeBuilder, + where Sh: ShapeBuilder { unsafe { let shape = shape.into_shape_with_order(); diff --git a/src/impl_cow.rs b/src/impl_cow.rs index 22d5c78b2..f064ce7bd 100644 --- a/src/impl_cow.rs +++ b/src/impl_cow.rs @@ -12,49 +12,45 @@ use crate::imp_prelude::*; /// /// ***See also all methods for [`ArrayBase`]*** impl<'a, A, D> CowArray<'a, A, D> -where - D: Dimension, +where D: Dimension { /// Returns `true` iff the array is the view (borrowed) variant. - pub fn is_view(&self) -> bool { + pub fn is_view(&self) -> bool + { self.data.is_view() } /// Returns `true` iff the array is the owned variant. - pub fn is_owned(&self) -> bool { + pub fn is_owned(&self) -> bool + { self.data.is_owned() } } impl<'a, A, D> From> for CowArray<'a, A, D> -where - D: Dimension, +where D: Dimension { - fn from(view: ArrayView<'a, A, D>) -> CowArray<'a, A, D> { + fn from(view: ArrayView<'a, A, D>) -> CowArray<'a, A, D> + { // safe because equivalent data - unsafe { - ArrayBase::from_data_ptr(CowRepr::View(view.data), view.ptr) - .with_strides_dim(view.strides, view.dim) - } + unsafe { ArrayBase::from_data_ptr(CowRepr::View(view.data), view.ptr).with_strides_dim(view.strides, view.dim) } } } impl<'a, A, D> From> for CowArray<'a, A, D> -where - D: Dimension, +where D: Dimension { - fn from(array: Array) -> CowArray<'a, A, D> { + fn from(array: Array) -> CowArray<'a, A, D> + { // safe because equivalent data unsafe { - ArrayBase::from_data_ptr(CowRepr::Owned(array.data), array.ptr) - .with_strides_dim(array.strides, array.dim) + ArrayBase::from_data_ptr(CowRepr::Owned(array.data), array.ptr).with_strides_dim(array.strides, array.dim) } } } impl<'a, A, Slice: ?Sized> From<&'a Slice> for CowArray<'a, A, Ix1> -where - Slice: AsRef<[A]>, +where Slice: AsRef<[A]> { /// Create a one-dimensional clone-on-write view of the data in `slice`. /// @@ -67,7 +63,8 @@ where /// assert!(array.is_view()); /// assert_eq!(array, array![1., 2., 3., 4.]); /// ``` - fn from(slice: &'a Slice) -> Self { + fn from(slice: &'a Slice) -> Self + { Self::from(ArrayView1::from(slice)) } } @@ -78,7 +75,8 @@ where D: Dimension, { /// Create a read-only clone-on-write view of the array. 
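// Illustrative usage sketch, not part of the patch, assuming the public `CowArray`
// API shown above: a clone-on-write array starts out as the borrowed view variant
// and switches to the owned variant on the first write.
use ndarray::{array, CowArray};

fn main()
{
    let owner = array![1.0, 2.0, 3.0, 4.0];
    let mut cow = CowArray::from(owner.view());
    assert!(cow.is_view());
    cow[0] = 10.0; // writing forces the clone
    assert!(cow.is_owned());
    assert_eq!(owner[0], 1.0); // the original array is untouched
}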
- fn from(array: &'a ArrayBase) -> Self { + fn from(array: &'a ArrayBase) -> Self + { Self::from(array.view()) } } diff --git a/src/impl_dyn.rs b/src/impl_dyn.rs index d6e1c6957..836234cec 100644 --- a/src/impl_dyn.rs +++ b/src/impl_dyn.rs @@ -11,8 +11,7 @@ use crate::imp_prelude::*; /// # Methods for Dynamic-Dimensional Arrays impl ArrayBase -where - S: Data, +where S: Data { /// Insert new array axis of length 1 at `axis`, modifying the shape and /// strides in-place. @@ -30,7 +29,8 @@ where /// assert_eq!(a.shape(), &[2, 1, 3]); /// ``` #[track_caller] - pub fn insert_axis_inplace(&mut self, axis: Axis) { + pub fn insert_axis_inplace(&mut self, axis: Axis) + { assert!(axis.index() <= self.ndim()); self.dim = self.dim.insert_axis(axis); self.strides = self.strides.insert_axis(axis); @@ -52,7 +52,8 @@ where /// assert_eq!(a.shape(), &[2]); /// ``` #[track_caller] - pub fn index_axis_inplace(&mut self, axis: Axis, index: usize) { + pub fn index_axis_inplace(&mut self, axis: Axis, index: usize) + { self.collapse_axis(axis, index); self.dim = self.dim.remove_axis(axis); self.strides = self.strides.remove_axis(axis); diff --git a/src/impl_internal_constructors.rs b/src/impl_internal_constructors.rs index 5d47c9897..ebb2e26e0 100644 --- a/src/impl_internal_constructors.rs +++ b/src/impl_internal_constructors.rs @@ -12,8 +12,7 @@ use crate::imp_prelude::*; // internal "builder-like" methods impl ArrayBase -where - S: RawData, +where S: RawData { /// Create an (initially) empty one-dimensional array from the given data and array head /// pointer @@ -21,9 +20,10 @@ where /// ## Safety /// /// The caller must ensure that the data storage and pointer is valid. - /// + /// /// See ArrayView::from_shape_ptr for general pointer validity documentation. - pub(crate) unsafe fn from_data_ptr(data: S, ptr: NonNull) -> Self { + pub(crate) unsafe fn from_data_ptr(data: S, ptr: NonNull) -> Self + { let array = ArrayBase { data, ptr, @@ -41,7 +41,6 @@ where S: RawData, D: Dimension, { - /// Set strides and dimension of the array to the new values /// /// The argument order with strides before dimensions is used because strides are often @@ -52,8 +51,7 @@ where /// The caller needs to ensure that the new strides and dimensions are correct /// for the array data. pub(crate) unsafe fn with_strides_dim(self, strides: E, dim: E) -> ArrayBase - where - E: Dimension + where E: Dimension { debug_assert_eq!(strides.ndim(), dim.ndim()); ArrayBase { diff --git a/src/impl_methods.rs b/src/impl_methods.rs index feded588a..d1250ec28 100644 --- a/src/impl_methods.rs +++ b/src/impl_methods.rs @@ -6,36 +6,54 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::mem::{size_of, ManuallyDrop}; use alloc::slice; use alloc::vec; #[cfg(not(feature = "std"))] use alloc::vec::Vec; use rawpointer::PointerExt; +use std::mem::{size_of, ManuallyDrop}; use crate::imp_prelude::*; -use crate::{arraytraits, DimMax}; use crate::argument_traits::AssignElem; use crate::dimension; +use crate::dimension::broadcast::co_broadcast; +use crate::dimension::reshape_dim; use crate::dimension::IntoDimension; use crate::dimension::{ - abs_index, axes_of, do_slice, merge_axes, move_min_stride_axis_to_last, - offset_from_low_addr_ptr_to_logical_ptr, size_of_shape_checked, stride_offset, Axes, + abs_index, + axes_of, + do_slice, + merge_axes, + move_min_stride_axis_to_last, + offset_from_low_addr_ptr_to_logical_ptr, + size_of_shape_checked, + stride_offset, + Axes, }; -use crate::dimension::broadcast::co_broadcast; -use crate::dimension::reshape_dim; -use crate::error::{self, ErrorKind, ShapeError, from_kind}; -use crate::math_cell::MathCell; +use crate::error::{self, from_kind, ErrorKind, ShapeError}; use crate::itertools::zip; -use crate::AxisDescription; +use crate::math_cell::MathCell; use crate::order::Order; use crate::shape_builder::ShapeArg; use crate::zip::{IntoNdProducer, Zip}; +use crate::AxisDescription; +use crate::{arraytraits, DimMax}; use crate::iter::{ - AxisChunksIter, AxisChunksIterMut, AxisIter, AxisIterMut, ExactChunks, ExactChunksMut, - IndexedIter, IndexedIterMut, Iter, IterMut, Lanes, LanesMut, Windows, + AxisChunksIter, + AxisChunksIterMut, + AxisIter, + AxisIterMut, + ExactChunks, + ExactChunksMut, + IndexedIter, + IndexedIterMut, + Iter, + IterMut, + Lanes, + LanesMut, + Windows, }; use crate::slice::{MultiSliceArg, SliceArg}; use crate::stacking::concatenate; @@ -48,7 +66,8 @@ where D: Dimension, { /// Return the total number of elements in the array. - pub fn len(&self) -> usize { + pub fn len(&self) -> usize + { self.dim.size() } @@ -59,24 +78,28 @@ where /// /// ***Panics*** if the axis is out of bounds. #[track_caller] - pub fn len_of(&self, axis: Axis) -> usize { + pub fn len_of(&self, axis: Axis) -> usize + { self.dim[axis.index()] } /// Return whether the array has any elements - pub fn is_empty(&self) -> bool { + pub fn is_empty(&self) -> bool + { self.len() == 0 } /// Return the number of dimensions (axes) in the array - pub fn ndim(&self) -> usize { + pub fn ndim(&self) -> usize + { self.dim.ndim() } /// Return the shape of the array in its “pattern” form, /// an integer in the one-dimensional case, tuple in the n-dimensional cases /// and so on. - pub fn dim(&self) -> D::Pattern { + pub fn dim(&self) -> D::Pattern + { self.dim.clone().into_pattern() } @@ -94,7 +117,8 @@ where /// // Create an array of zeros that's the same shape and dimensionality as `a`. /// let b = Array::::zeros(a.raw_dim()); /// ``` - pub fn raw_dim(&self) -> D { + pub fn raw_dim(&self) -> D + { self.dim.clone() } @@ -122,12 +146,14 @@ where /// let c = Array::zeros(a.raw_dim()); /// assert_eq!(a, c); /// ``` - pub fn shape(&self) -> &[usize] { + pub fn shape(&self) -> &[usize] + { self.dim.slice() } /// Return the strides of the array as a slice. - pub fn strides(&self) -> &[isize] { + pub fn strides(&self) -> &[isize] + { let s = self.strides.slice(); // reinterpret unsigned integer as signed unsafe { slice::from_raw_parts(s.as_ptr() as *const _, s.len()) } @@ -140,15 +166,15 @@ where /// /// ***Panics*** if the axis is out of bounds. 
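// Illustrative usage sketch, not part of the patch, assuming the public `ndarray`
// crate: the basic shape queries above, for a row-major (C-order) 3 x 5 array.
use ndarray::{Array2, Axis};

fn main()
{
    let a = Array2::<f64>::zeros((3, 5));
    assert_eq!(a.len(), 15);
    assert_eq!(a.ndim(), 2);
    assert_eq!(a.len_of(Axis(1)), 5);
    assert_eq!(a.shape(), &[3, 5]);
    assert_eq!(a.strides(), &[5, 1]); // stepping along a row is contiguous; a column step skips 5
}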
#[track_caller] - pub fn stride_of(&self, axis: Axis) -> isize { + pub fn stride_of(&self, axis: Axis) -> isize + { // strides are reinterpreted as isize self.strides[axis.index()] as isize } /// Return a read-only view of the array pub fn view(&self) -> ArrayView<'_, A, D> - where - S: Data, + where S: Data { debug_assert!(self.pointer_is_inbounds()); unsafe { ArrayView::new(self.ptr, self.dim.clone(), self.strides.clone()) } @@ -156,8 +182,7 @@ where /// Return a read-write view of the array pub fn view_mut(&mut self) -> ArrayViewMut<'_, A, D> - where - S: DataMut, + where S: DataMut { self.ensure_unique(); unsafe { ArrayViewMut::new(self.ptr, self.dim.clone(), self.strides.clone()) } @@ -171,8 +196,7 @@ where /// The view acts "as if" the elements are temporarily in cells, and elements /// can be changed through shared references using the regular cell methods. pub fn cell_view(&mut self) -> ArrayView<'_, MathCell, D> - where - S: DataMut, + where S: DataMut { self.view_mut().into_cell_view() } @@ -213,12 +237,7 @@ where S: Data, { if let Some(slc) = self.as_slice_memory_order() { - unsafe { - Array::from_shape_vec_unchecked( - self.dim.clone().strides(self.strides.clone()), - slc.to_vec(), - ) - } + unsafe { Array::from_shape_vec_unchecked(self.dim.clone().strides(self.strides.clone()), slc.to_vec()) } } else { self.map(A::clone) } @@ -266,8 +285,7 @@ where /// assert_eq!(unique, array![[1., 2.], [3., 4.]]); /// ``` pub fn try_into_owned_nocopy(self) -> Result, Self> - where - S: Data, + where S: Data { S::try_into_owned_nocopy(self) } @@ -275,14 +293,11 @@ where /// Turn the array into a shared ownership (copy on write) array, /// without any copying. pub fn into_shared(self) -> ArcArray - where - S: DataOwned, + where S: DataOwned { let data = self.data.into_shared(); // safe because: equivalent unmoved data, ptr and dims remain valid - unsafe { - ArrayBase::from_data_ptr(data, self.ptr).with_strides_dim(self.strides, self.dim) - } + unsafe { ArrayBase::from_data_ptr(data, self.ptr).with_strides_dim(self.strides, self.dim) } } /// Returns a reference to the first element of the array, or `None` if it @@ -301,8 +316,7 @@ where /// assert_eq!(b.first(), None); /// ``` pub fn first(&self) -> Option<&A> - where - S: Data, + where S: Data { if self.is_empty() { None @@ -327,8 +341,7 @@ where /// assert_eq!(b.first_mut(), None); /// ``` pub fn first_mut(&mut self) -> Option<&mut A> - where - S: DataMut, + where S: DataMut { if self.is_empty() { None @@ -353,8 +366,7 @@ where /// assert_eq!(b.last(), None); /// ``` pub fn last(&self) -> Option<&A> - where - S: Data, + where S: Data { if self.is_empty() { None @@ -383,8 +395,7 @@ where /// assert_eq!(b.last_mut(), None); /// ``` pub fn last_mut(&mut self) -> Option<&mut A> - where - S: DataMut, + where S: DataMut { if self.is_empty() { None @@ -404,8 +415,7 @@ where /// /// Iterator element type is `&A`. pub fn iter(&self) -> Iter<'_, A, D> - where - S: Data, + where S: Data { debug_assert!(self.pointer_is_inbounds()); self.view().into_iter_() @@ -418,8 +428,7 @@ where /// /// Iterator element type is `&mut A`. pub fn iter_mut(&mut self) -> IterMut<'_, A, D> - where - S: DataMut, + where S: DataMut { self.view_mut().into_iter_() } @@ -433,8 +442,7 @@ where /// /// See also [`Zip::indexed`] pub fn indexed_iter(&self) -> IndexedIter<'_, A, D> - where - S: Data, + where S: Data { IndexedIter::new(self.view().into_elements_base()) } @@ -446,8 +454,7 @@ where /// /// Iterator element type is `(D::Pattern, &mut A)`. 
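// Illustrative usage sketch, not part of the patch, assuming the public `ndarray`
// crate: `indexed_iter` pairs every element with its index pattern, which for a
// 2-D array is `(usize, usize)`.
use ndarray::array;

fn main()
{
    let a = array![[10, 20], [30, 40]];
    let mut sum = 0;
    for ((i, j), &x) in a.indexed_iter() {
        assert_eq!(x, a[[i, j]]);
        sum += x;
    }
    assert_eq!(sum, 100);
}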
pub fn indexed_iter_mut(&mut self) -> IndexedIterMut<'_, A, D> - where - S: DataMut, + where S: DataMut { IndexedIterMut::new(self.view_mut().into_elements_base()) } @@ -525,8 +532,7 @@ where /// (**Panics** if `D` is `IxDyn` and `info` does not match the number of array axes.) #[track_caller] pub fn slice_move(mut self, info: I) -> ArrayBase - where - I: SliceArg, + where I: SliceArg { assert_eq!( info.in_ndim(), @@ -595,8 +601,7 @@ where /// - if `D` is `IxDyn` and `info` does not match the number of array axes #[track_caller] pub fn slice_collapse(&mut self, info: I) - where - I: SliceArg, + where I: SliceArg { assert_eq!( info.in_ndim(), @@ -605,17 +610,17 @@ where ); let mut axis = 0; info.as_ref().iter().for_each(|&ax_info| match ax_info { - SliceInfoElem::Slice { start, end, step } => { - self.slice_axis_inplace(Axis(axis), Slice { start, end, step }); - axis += 1; - } - SliceInfoElem::Index(index) => { - let i_usize = abs_index(self.len_of(Axis(axis)), index); - self.collapse_axis(Axis(axis), i_usize); - axis += 1; - } - SliceInfoElem::NewAxis => panic!("`slice_collapse` does not support `NewAxis`."), - }); + SliceInfoElem::Slice { start, end, step } => { + self.slice_axis_inplace(Axis(axis), Slice { start, end, step }); + axis += 1; + } + SliceInfoElem::Index(index) => { + let i_usize = abs_index(self.len_of(Axis(axis)), index); + self.collapse_axis(Axis(axis), i_usize); + axis += 1; + } + SliceInfoElem::NewAxis => panic!("`slice_collapse` does not support `NewAxis`."), + }); debug_assert_eq!(axis, self.ndim()); } @@ -626,8 +631,7 @@ where #[track_caller] #[must_use = "slice_axis returns an array view with the sliced result"] pub fn slice_axis(&self, axis: Axis, indices: Slice) -> ArrayView<'_, A, D> - where - S: Data, + where S: Data { let mut view = self.view(); view.slice_axis_inplace(axis, indices); @@ -641,8 +645,7 @@ where #[track_caller] #[must_use = "slice_axis_mut returns an array view with the sliced result"] pub fn slice_axis_mut(&mut self, axis: Axis, indices: Slice) -> ArrayViewMut<'_, A, D> - where - S: DataMut, + where S: DataMut { let mut view_mut = self.view_mut(); view_mut.slice_axis_inplace(axis, indices); @@ -654,12 +657,10 @@ where /// **Panics** if an index is out of bounds or step size is zero.
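// Illustrative usage sketch, not part of the patch, assuming the public `ndarray`
// crate: `slice_axis` restricts a single axis with a `Slice` (start, end, step)
// and leaves every other axis untouched.
use ndarray::{array, Axis, Slice};

fn main()
{
    let a = array![[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]];
    let v = a.slice_axis(Axis(1), Slice::new(1, Some(4), 2)); // columns 1 and 3
    assert_eq!(v, array![[1, 3], [5, 7], [9, 11]]);
}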
/// **Panics** if `axis` is out of bounds. #[track_caller] - pub fn slice_axis_inplace(&mut self, axis: Axis, indices: Slice) { - let offset = do_slice( - &mut self.dim.slice_mut()[axis.index()], - &mut self.strides.slice_mut()[axis.index()], - indices, - ); + pub fn slice_axis_inplace(&mut self, axis: Axis, indices: Slice) + { + let offset = + do_slice(&mut self.dim.slice_mut()[axis.index()], &mut self.strides.slice_mut()[axis.index()], indices); unsafe { self.ptr = self.ptr.offset(offset); } @@ -671,7 +672,8 @@ where /// **Panics** if an index is out of bounds or step size is zero.
/// **Panics** if `axis` is out of bounds. #[must_use = "slice_axis_move returns an array with the sliced result"] - pub fn slice_axis_move(mut self, axis: Axis, indices: Slice) -> Self { + pub fn slice_axis_move(mut self, axis: Axis, indices: Slice) -> Self + { self.slice_axis_inplace(axis, indices); self } @@ -721,8 +723,7 @@ where /// **Panics** if an index is out of bounds or step size is zero. #[track_caller] pub fn slice_each_axis_inplace(&mut self, mut f: F) - where - F: FnMut(AxisDescription) -> Slice, + where F: FnMut(AxisDescription) -> Slice { for ax in 0..self.ndim() { self.slice_axis_inplace( @@ -776,8 +777,7 @@ where /// assert_eq!(unsafe { *p }, 2.); /// ``` pub fn get_ptr(&self, index: I) -> Option<*const A> - where - I: NdIndex, + where I: NdIndex { let ptr = self.ptr; index @@ -920,17 +920,13 @@ where arraytraits::debug_bounds_check(self, &index2); let off1 = index1.index_unchecked(&self.strides); let off2 = index2.index_unchecked(&self.strides); - std::ptr::swap( - self.ptr.as_ptr().offset(off1), - self.ptr.as_ptr().offset(off2), - ); + std::ptr::swap(self.ptr.as_ptr().offset(off1), self.ptr.as_ptr().offset(off2)); } // `get` for zero-dimensional arrays // panics if dimension is not zero. otherwise an element is always present. fn get_0d(&self) -> &A - where - S: Data, + where S: Data { assert!(self.ndim() == 0); unsafe { &*self.as_ptr() } @@ -1006,23 +1002,21 @@ where /// **Panics** if `axis` or `index` is out of bounds. #[track_caller] pub fn index_axis_move(mut self, axis: Axis, index: usize) -> ArrayBase - where - D: RemoveAxis, + where D: RemoveAxis { self.collapse_axis(axis, index); let dim = self.dim.remove_axis(axis); let strides = self.strides.remove_axis(axis); // safe because new dimension, strides allow access to a subset of old data - unsafe { - self.with_strides_dim(strides, dim) - } + unsafe { self.with_strides_dim(strides, dim) } } /// Selects `index` along the axis, collapsing the axis into length one. /// /// **Panics** if `axis` or `index` is out of bounds. #[track_caller] - pub fn collapse_axis(&mut self, axis: Axis, index: usize) { + pub fn collapse_axis(&mut self, axis: Axis, index: usize) + { let offset = dimension::do_collapse_axis(&mut self.dim, &self.strides, axis.index(), index); self.ptr = unsafe { self.ptr.offset(offset) }; debug_assert!(self.pointer_is_inbounds()); @@ -1069,10 +1063,10 @@ where let view = self.view().into_dimensionality::().unwrap(); Array::from_iter(indices.iter().map(move |&index| { // Safety: bounds checked indexes - unsafe { - view.uget(index).clone() - } - })).into_dimensionality::().unwrap() + unsafe { view.uget(index).clone() } + })) + .into_dimensionality::() + .unwrap() } else { let mut subs = vec![self.view(); indices.len()]; for (&i, sub) in zip(indices, &mut subs[..]) { @@ -1115,8 +1109,7 @@ where /// } /// ``` pub fn rows(&self) -> Lanes<'_, A, D::Smaller> - where - S: Data, + where S: Data { let mut n = self.ndim(); if n == 0 { @@ -1125,10 +1118,9 @@ where Lanes::new(self.view(), Axis(n - 1)) } - #[deprecated(note="Renamed to .rows()", since="0.15.0")] + #[deprecated(note = "Renamed to .rows()", since = "0.15.0")] pub fn genrows(&self) -> Lanes<'_, A, D::Smaller> - where - S: Data, + where S: Data { self.rows() } @@ -1138,8 +1130,7 @@ where /// /// Iterator element is `ArrayView1
` (1D read-write array view). pub fn rows_mut(&mut self) -> LanesMut<'_, A, D::Smaller> - where - S: DataMut, + where S: DataMut { let mut n = self.ndim(); if n == 0 { @@ -1148,10 +1139,9 @@ where LanesMut::new(self.view_mut(), Axis(n - 1)) } - #[deprecated(note="Renamed to .rows_mut()", since="0.15.0")] + #[deprecated(note = "Renamed to .rows_mut()", since = "0.15.0")] pub fn genrows_mut(&mut self) -> LanesMut<'_, A, D::Smaller> - where - S: DataMut, + where S: DataMut { self.rows_mut() } @@ -1183,8 +1173,7 @@ where /// } /// ``` pub fn columns(&self) -> Lanes<'_, A, D::Smaller> - where - S: Data, + where S: Data { Lanes::new(self.view(), Axis(0)) } @@ -1193,10 +1182,9 @@ where /// columns of the array. For a 2D array these are the regular columns. /// /// Renamed to `.columns()` - #[deprecated(note="Renamed to .columns()", since="0.15.0")] + #[deprecated(note = "Renamed to .columns()", since = "0.15.0")] pub fn gencolumns(&self) -> Lanes<'_, A, D::Smaller> - where - S: Data, + where S: Data { self.columns() } @@ -1206,8 +1194,7 @@ where /// /// Iterator element is `ArrayView1` (1D read-write array view). pub fn columns_mut(&mut self) -> LanesMut<'_, A, D::Smaller> - where - S: DataMut, + where S: DataMut { LanesMut::new(self.view_mut(), Axis(0)) } @@ -1216,10 +1203,9 @@ where /// columns of the array and yields mutable array views. /// /// Renamed to `.columns_mut()` - #[deprecated(note="Renamed to .columns_mut()", since="0.15.0")] + #[deprecated(note = "Renamed to .columns_mut()", since = "0.15.0")] pub fn gencolumns_mut(&mut self) -> LanesMut<'_, A, D::Smaller> - where - S: DataMut, + where S: DataMut { self.columns_mut() } @@ -1253,8 +1239,7 @@ where /// assert_eq!(inner2.into_iter().next().unwrap(), aview1(&[0, 1, 2])); /// ``` pub fn lanes(&self, axis: Axis) -> Lanes<'_, A, D::Smaller> - where - S: Data, + where S: Data { Lanes::new(self.view(), axis) } @@ -1264,8 +1249,7 @@ where /// /// Iterator element is `ArrayViewMut1` (1D read-write array view). pub fn lanes_mut(&mut self, axis: Axis) -> LanesMut<'_, A, D::Smaller> - where - S: DataMut, + where S: DataMut { LanesMut::new(self.view_mut(), axis) } @@ -1368,8 +1352,7 @@ where /// ``` #[track_caller] pub fn axis_chunks_iter(&self, axis: Axis, size: usize) -> AxisChunksIter<'_, A, D> - where - S: Data, + where S: Data { AxisChunksIter::new(self.view(), axis, size) } @@ -1382,8 +1365,7 @@ where /// **Panics** if `axis` is out of bounds or if `size` is zero. #[track_caller] pub fn axis_chunks_iter_mut(&mut self, axis: Axis, size: usize) -> AxisChunksIterMut<'_, A, D> - where - S: DataMut, + where S: DataMut { AxisChunksIterMut::new(self.view_mut(), axis, size) } @@ -1539,8 +1521,7 @@ where /// } /// ``` pub fn axis_windows(&self, axis: Axis, window_size: usize) -> Windows<'_, A, D> - where - S: Data, + where S: Data { let axis_index = axis.index(); @@ -1562,7 +1543,8 @@ where } // Return (length, stride) for diagonal - fn diag_params(&self) -> (Ix, Ixs) { + fn diag_params(&self) -> (Ix, Ixs) + { /* empty shape has len 1 */ let len = self.dim.slice().iter().cloned().min().unwrap_or(1); let stride = self.strides().iter().sum(); @@ -1574,27 +1556,24 @@ where /// The diagonal is simply the sequence indexed by *(0, 0, .., 0)*, /// *(1, 1, ..., 1)* etc as long as all axes have elements. pub fn diag(&self) -> ArrayView1<'_, A> - where - S: Data, + where S: Data { self.view().into_diag() } /// Return a read-write view over the diagonal elements of the array. 
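// Illustrative usage sketch, not part of the patch, assuming the public `ndarray`
// crate: `diag_mut` is a writable 1-D view of the elements at (0, 0), (1, 1), ...,
// so filling it with ones builds an identity matrix.
use ndarray::Array2;

fn main()
{
    let mut a = Array2::<f64>::zeros((3, 3));
    a.diag_mut().fill(1.0);
    assert_eq!(a.diag().sum(), 3.0);
    assert_eq!(a[[1, 1]], 1.0);
    assert_eq!(a[[0, 1]], 0.0);
}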
pub fn diag_mut(&mut self) -> ArrayViewMut1<'_, A> - where - S: DataMut, + where S: DataMut { self.view_mut().into_diag() } /// Return the diagonal as a one-dimensional array. - pub fn into_diag(self) -> ArrayBase { + pub fn into_diag(self) -> ArrayBase + { let (len, stride) = self.diag_params(); // safe because new len stride allows access to a subset of the current elements - unsafe { - self.with_strides_dim(Ix1(stride as Ix), Ix1(len)) - } + unsafe { self.with_strides_dim(Ix1(stride as Ix), Ix1(len)) } } /// Try to make the array unshared. @@ -1603,8 +1582,7 @@ where /// /// This method is mostly only useful with unsafe code. fn try_ensure_unique(&mut self) - where - S: RawDataMut, + where S: RawDataMut { debug_assert!(self.pointer_is_inbounds()); S::try_ensure_unique(self); @@ -1615,8 +1593,7 @@ where /// /// This method is mostly only useful with unsafe code. fn ensure_unique(&mut self) - where - S: DataMut, + where S: DataMut { debug_assert!(self.pointer_is_inbounds()); S::ensure_unique(self); @@ -1628,12 +1605,14 @@ where /// /// Return `false` otherwise, i.e. the array is possibly not /// contiguous in memory, it has custom strides, etc. - pub fn is_standard_layout(&self) -> bool { + pub fn is_standard_layout(&self) -> bool + { dimension::is_layout_c(&self.dim, &self.strides) } /// Return true if the array is known to be contiguous. - pub(crate) fn is_contiguous(&self) -> bool { + pub(crate) fn is_contiguous(&self) -> bool + { D::is_contiguous(&self.dim, &self.strides) } @@ -1689,7 +1668,8 @@ where /// /// where *d* is `self.ndim()`. #[inline(always)] - pub fn as_ptr(&self) -> *const A { + pub fn as_ptr(&self) -> *const A + { self.ptr.as_ptr() as *const A } @@ -1705,8 +1685,7 @@ where /// the data may change the strides. #[inline(always)] pub fn as_mut_ptr(&mut self) -> *mut A - where - S: RawDataMut, + where S: RawDataMut { self.try_ensure_unique(); // for ArcArray self.ptr.as_ptr() @@ -1714,7 +1693,8 @@ where /// Return a raw view of the array. #[inline] - pub fn raw_view(&self) -> RawArrayView { + pub fn raw_view(&self) -> RawArrayView + { unsafe { RawArrayView::new(self.ptr, self.dim.clone(), self.strides.clone()) } } @@ -1724,8 +1704,7 @@ where /// data is guaranteed to be uniquely held on return. #[inline] pub fn raw_view_mut(&mut self) -> RawArrayViewMut - where - S: RawDataMut, + where S: RawDataMut { self.try_ensure_unique(); // for ArcArray unsafe { RawArrayViewMut::new(self.ptr, self.dim.clone(), self.strides.clone()) } @@ -1736,8 +1715,7 @@ where /// Safety: The caller must ensure that the owned array is unshared when this is called #[inline] pub(crate) unsafe fn raw_view_mut_unchecked(&mut self) -> RawArrayViewMut - where - S: DataOwned, + where S: DataOwned { RawArrayViewMut::new(self.ptr, self.dim.clone(), self.strides.clone()) } @@ -1748,8 +1726,7 @@ where /// If this function returns `Some(_)`, then the element order in the slice /// corresponds to the logical order of the array’s elements. pub fn as_slice(&self) -> Option<&[A]> - where - S: Data, + where S: Data { if self.is_standard_layout() { unsafe { Some(slice::from_raw_parts(self.ptr.as_ptr(), self.len())) } @@ -1761,8 +1738,7 @@ where /// Return the array’s data as a slice, if it is contiguous and in standard order. /// Return `None` otherwise. 
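// Illustrative usage sketch, not part of the patch, assuming the public `ndarray`
// crate: `as_slice` succeeds only for contiguous arrays in standard (C) order,
// while `as_slice_memory_order` accepts any contiguous memory layout.
use ndarray::Array2;

fn main()
{
    let a = Array2::<i32>::zeros((2, 3));
    assert!(a.as_slice().is_some());

    let t = a.t(); // transposed view: still contiguous, but not standard layout
    assert!(t.as_slice().is_none());
    assert!(t.as_slice_memory_order().is_some());
}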
pub fn as_slice_mut(&mut self) -> Option<&mut [A]> - where - S: DataMut, + where S: DataMut { if self.is_standard_layout() { self.ensure_unique(); @@ -1778,17 +1754,11 @@ where /// If this function returns `Some(_)`, then the elements in the slice /// have whatever order the elements have in memory. pub fn as_slice_memory_order(&self) -> Option<&[A]> - where - S: Data, + where S: Data { if self.is_contiguous() { let offset = offset_from_low_addr_ptr_to_logical_ptr(&self.dim, &self.strides); - unsafe { - Some(slice::from_raw_parts( - self.ptr.sub(offset).as_ptr(), - self.len(), - )) - } + unsafe { Some(slice::from_raw_parts(self.ptr.sub(offset).as_ptr(), self.len())) } } else { None } @@ -1801,8 +1771,7 @@ where /// method unshares the data if necessary, but it preserves the existing /// strides. pub fn as_slice_memory_order_mut(&mut self) -> Option<&mut [A]> - where - S: DataMut, + where S: DataMut { self.try_as_slice_memory_order_mut().ok() } @@ -1810,18 +1779,12 @@ where /// Return the array’s data as a slice if it is contiguous, otherwise /// return `self` in the `Err` variant. pub(crate) fn try_as_slice_memory_order_mut(&mut self) -> Result<&mut [A], &mut Self> - where - S: DataMut, + where S: DataMut { if self.is_contiguous() { self.ensure_unique(); let offset = offset_from_low_addr_ptr_to_logical_ptr(&self.dim, &self.strides); - unsafe { - Ok(slice::from_raw_parts_mut( - self.ptr.sub(offset).as_ptr(), - self.len(), - )) - } + unsafe { Ok(slice::from_raw_parts_mut(self.ptr.sub(offset).as_ptr(), self.len())) } } else { Err(self) } @@ -1892,8 +1855,7 @@ where self.to_shape_order(shape, order.unwrap_or(Order::RowMajor)) } - fn to_shape_order(&self, shape: E, order: Order) - -> Result, ShapeError> + fn to_shape_order(&self, shape: E, order: Order) -> Result, ShapeError> where E: Dimension, A: Clone, @@ -1915,11 +1877,11 @@ where match reshape_dim(&self.dim, &self.strides, &shape, order) { Ok(to_strides) => unsafe { return Ok(CowArray::from(ArrayView::new(self.ptr, shape, to_strides))); - } + }, Err(err) if err.kind() == ErrorKind::IncompatibleShape => { return Err(error::incompatible_shapes(&self.dim, &shape)); } - _otherwise => { } + _otherwise => {} } // otherwise create a new array and copy the elements @@ -1928,8 +1890,7 @@ where Order::RowMajor => (shape.set_f(false), self.view()), Order::ColumnMajor => (shape.set_f(true), self.t()), }; - Ok(CowArray::from(Array::from_shape_trusted_iter_unchecked( - shape, view.into_iter(), A::clone))) + Ok(CowArray::from(Array::from_shape_trusted_iter_unchecked(shape, view.into_iter(), A::clone))) } } @@ -1979,17 +1940,14 @@ where /// ); /// ``` pub fn into_shape_with_order(self, shape: E) -> Result, ShapeError> - where - E: ShapeArg, + where E: ShapeArg { let (shape, order) = shape.into_shape_and_order(); self.into_shape_with_order_impl(shape, order.unwrap_or(Order::RowMajor)) } - fn into_shape_with_order_impl(self, shape: E, order: Order) - -> Result, ShapeError> - where - E: Dimension, + fn into_shape_with_order_impl(self, shape: E, order: Order) -> Result, ShapeError> + where E: Dimension { let shape = shape.into_dimension(); if size_of_shape_checked(&shape) != Ok(self.dim.size()) { @@ -2000,13 +1958,11 @@ where unsafe { // safe because arrays are contiguous and len is unchanged match order { - Order::RowMajor if self.is_standard_layout() => { - Ok(self.with_strides_dim(shape.default_strides(), shape)) - } - Order::ColumnMajor if self.raw_view().reversed_axes().is_standard_layout() => { - Ok(self.with_strides_dim(shape.fortran_strides(), shape)) 
- } - _otherwise => Err(error::from_kind(error::ErrorKind::IncompatibleLayout)) + Order::RowMajor if self.is_standard_layout() => + Ok(self.with_strides_dim(shape.default_strides(), shape)), + Order::ColumnMajor if self.raw_view().reversed_axes().is_standard_layout() => + Ok(self.with_strides_dim(shape.fortran_strides(), shape)), + _otherwise => Err(error::from_kind(error::ErrorKind::IncompatibleLayout)), } } } @@ -2036,8 +1992,7 @@ where /// ``` #[deprecated = "Use `.into_shape_with_order()` or `.to_shape()`"] pub fn into_shape(self, shape: E) -> Result, ShapeError> - where - E: IntoDimension, + where E: IntoDimension { let shape = shape.into_dimension(); if size_of_shape_checked(&shape) != Ok(self.dim.size()) { @@ -2082,8 +2037,7 @@ where self.into_shape_clone_order(shape, order) } - fn into_shape_clone_order(self, shape: E, order: Order) - -> Result, ShapeError> + fn into_shape_clone_order(self, shape: E, order: Order) -> Result, ShapeError> where S: DataOwned, A: Clone, @@ -2105,11 +2059,11 @@ where match reshape_dim(&self.dim, &self.strides, &shape, order) { Ok(to_strides) => unsafe { return Ok(self.with_strides_dim(to_strides, shape)); - } + }, Err(err) if err.kind() == ErrorKind::IncompatibleShape => { return Err(error::incompatible_shapes(&self.dim, &shape)); } - _otherwise => { } + _otherwise => {} } // otherwise, clone and allocate a new array @@ -2119,8 +2073,7 @@ where Order::ColumnMajor => (shape.set_f(true), self.t()), }; - Ok(ArrayBase::from_shape_trusted_iter_unchecked( - shape, view.into_iter(), A::clone)) + Ok(ArrayBase::from_shape_trusted_iter_unchecked(shape, view.into_iter(), A::clone)) } } @@ -2148,7 +2101,10 @@ where /// ); /// ``` #[track_caller] - #[deprecated(note="Obsolete, use `to_shape` or `into_shape_with_order` instead.", since="0.15.2")] + #[deprecated( + note = "Obsolete, use `to_shape` or `into_shape_with_order` instead.", + since = "0.15.2" + )] pub fn reshape(&self, shape: E) -> ArrayBase where S: DataShared + DataOwned, @@ -2167,9 +2123,7 @@ where if self.is_standard_layout() { let cl = self.clone(); // safe because array is contiguous and shape has equal number of elements - unsafe { - cl.with_strides_dim(shape.default_strides(), shape) - } + unsafe { cl.with_strides_dim(shape.default_strides(), shape) } } else { let v = self.iter().cloned().collect::>(); unsafe { ArrayBase::from_shape_vec_unchecked(shape, v) } @@ -2185,11 +2139,11 @@ where /// let array: ArrayD = arr2(&[[1, 2], /// [3, 4]]).into_dyn(); /// ``` - pub fn into_dyn(self) -> ArrayBase { + pub fn into_dyn(self) -> ArrayBase + { // safe because new dims equivalent unsafe { - ArrayBase::from_data_ptr(self.data, self.ptr) - .with_strides_dim(self.strides.into_dyn(), self.dim.into_dyn()) + ArrayBase::from_data_ptr(self.data, self.ptr).with_strides_dim(self.strides.into_dyn(), self.dim.into_dyn()) } } @@ -2210,17 +2164,16 @@ where /// assert!(array.into_dimensionality::().is_ok()); /// ``` pub fn into_dimensionality(self) -> Result, ShapeError> - where - D2: Dimension, + where D2: Dimension { unsafe { if D::NDIM == D2::NDIM { // safe because D == D2 let dim = unlimited_transmute::(self.dim); let strides = unlimited_transmute::(self.strides); - return Ok(ArrayBase::from_data_ptr(self.data, self.ptr) - .with_strides_dim(strides, dim)); - } else if D::NDIM.is_none() || D2::NDIM.is_none() { // one is dynamic dim + return Ok(ArrayBase::from_data_ptr(self.data, self.ptr).with_strides_dim(strides, dim)); + } else if D::NDIM.is_none() || D2::NDIM.is_none() { + // one is dynamic dim // safe because dim, 
strides are equivalent under a different type if let Some(dim) = D2::from_dimension(&self.dim) { if let Some(strides) = D2::from_dimension(&self.strides) { @@ -2274,7 +2227,8 @@ where /// /// **Note:** Cannot be used for mutable iterators, since repeating /// elements would create aliasing pointers. - fn upcast(to: &D, from: &E, stride: &E) -> Option { + fn upcast(to: &D, from: &E, stride: &E) -> Option + { // Make sure the product of non-zero axis lengths does not exceed // `isize::MAX`. This is the only safety check we need to perform // because all the other constraints of `ArrayBase` are guaranteed @@ -2331,28 +2285,34 @@ where /// /// Return `ShapeError` if their shapes can not be broadcast together. #[allow(clippy::type_complexity)] - pub(crate) fn broadcast_with<'a, 'b, B, S2, E>(&'a self, other: &'b ArrayBase) -> - Result<(ArrayView<'a, A, DimMaxOf>, ArrayView<'b, B, DimMaxOf>), ShapeError> + pub(crate) fn broadcast_with<'a, 'b, B, S2, E>( + &'a self, other: &'b ArrayBase, + ) -> Result<(ArrayView<'a, A, DimMaxOf>, ArrayView<'b, B, DimMaxOf>), ShapeError> where - S: Data, - S2: Data, + S: Data, + S2: Data, D: Dimension + DimMax, E: Dimension, { let shape = co_broadcast::>::Output>(&self.dim, &other.dim)?; let view1 = if shape.slice() == self.dim.slice() { - self.view().into_dimensionality::<>::Output>().unwrap() + self.view() + .into_dimensionality::<>::Output>() + .unwrap() } else if let Some(view1) = self.broadcast(shape.clone()) { view1 } else { - return Err(from_kind(ErrorKind::IncompatibleShape)) + return Err(from_kind(ErrorKind::IncompatibleShape)); }; let view2 = if shape.slice() == other.dim.slice() { - other.view().into_dimensionality::<>::Output>().unwrap() + other + .view() + .into_dimensionality::<>::Output>() + .unwrap() } else if let Some(view2) = other.broadcast(shape) { view2 } else { - return Err(from_kind(ErrorKind::IncompatibleShape)) + return Err(from_kind(ErrorKind::IncompatibleShape)); }; Ok((view1, view2)) } @@ -2374,7 +2334,8 @@ where /// ); /// ``` #[track_caller] - pub fn swap_axes(&mut self, ax: usize, bx: usize) { + pub fn swap_axes(&mut self, ax: usize, bx: usize) + { self.dim.slice_mut().swap(ax, bx); self.strides.slice_mut().swap(ax, bx); } @@ -2403,8 +2364,7 @@ where /// ``` #[track_caller] pub fn permuted_axes(self, axes: T) -> ArrayBase - where - T: IntoDimension, + where T: IntoDimension { let axes = axes.into_dimension(); // Ensure that each axis is used exactly once. @@ -2427,16 +2387,15 @@ where } } // safe because axis invariants are checked above; they are a permutation of the old - unsafe { - self.with_strides_dim(new_strides, new_dim) - } + unsafe { self.with_strides_dim(new_strides, new_dim) } } /// Transpose the array by reversing axes. /// /// Transposition reverses the order of the axes (dimensions and strides) /// while retaining the same data. - pub fn reversed_axes(mut self) -> ArrayBase { + pub fn reversed_axes(mut self) -> ArrayBase + { self.dim.slice_mut().reverse(); self.strides.slice_mut().reverse(); self @@ -2448,14 +2407,14 @@ where /// /// See also the more general methods `.reversed_axes()` and `.swap_axes()`. pub fn t(&self) -> ArrayView<'_, A, D> - where - S: Data, + where S: Data { self.view().reversed_axes() } /// Return an iterator over the length and stride of each axis. - pub fn axes(&self) -> Axes<'_, D> { + pub fn axes(&self) -> Axes<'_, D> + { axes_of(&self.dim, &self.strides) } @@ -2468,7 +2427,8 @@ where /// Return the axis with the greatest stride (by absolute value), /// preferring axes with len > 1. 
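Aside: a brief sketch of the axis-reordering methods touched in the hunks above (`t`, `swap_axes`, `permuted_axes`); all of them only rearrange dims and strides, no element data is copied. Illustrative only:

```rust
use ndarray::array;

fn main() {
    let a = array![[1, 2, 3], [4, 5, 6]];

    // `t()` borrows a transposed view; no elements are moved or copied.
    assert_eq!(a.t().shape(), &[3, 2]);
    assert_eq!(a.t()[[2, 0]], 3);

    // `swap_axes` mutates dims/strides in place; `permuted_axes` consumes the array.
    let mut b = a.clone();
    b.swap_axes(0, 1);
    assert_eq!(b.shape(), &[3, 2]);
    assert_eq!(a.permuted_axes([1, 0]), b);
}
```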
- pub fn max_stride_axis(&self) -> Axis { + pub fn max_stride_axis(&self) -> Axis + { self.dim.max_stride_axis(&self.strides) } @@ -2476,7 +2436,8 @@ where /// /// ***Panics*** if the axis is out of bounds. #[track_caller] - pub fn invert_axis(&mut self, axis: Axis) { + pub fn invert_axis(&mut self, axis: Axis) + { unsafe { let s = self.strides.axis(axis) as Ixs; let m = self.dim.axis(axis); @@ -2523,7 +2484,8 @@ where /// /// ***Panics*** if an axis is out of bounds. #[track_caller] - pub fn merge_axes(&mut self, take: Axis, into: Axis) -> bool { + pub fn merge_axes(&mut self, take: Axis, into: Axis) -> bool + { merge_axes(&mut self.dim, &mut self.strides, take, into) } @@ -2549,7 +2511,8 @@ where /// /// ***Panics*** if the axis is out of bounds. #[track_caller] - pub fn insert_axis(self, axis: Axis) -> ArrayBase { + pub fn insert_axis(self, axis: Axis) -> ArrayBase + { assert!(axis.index() <= self.ndim()); // safe because a new axis of length one does not affect memory layout unsafe { @@ -2567,13 +2530,13 @@ where /// **Panics** if the axis is out of bounds or its length is zero. #[track_caller] pub fn remove_axis(self, axis: Axis) -> ArrayBase - where - D: RemoveAxis, + where D: RemoveAxis { self.index_axis_move(axis, 0) } - pub(crate) fn pointer_is_inbounds(&self) -> bool { + pub(crate) fn pointer_is_inbounds(&self) -> bool + { self.data._is_pointer_inbounds(self.as_ptr()) } @@ -2606,8 +2569,7 @@ where P::Item: AssignElem, A: Clone, { - Zip::from(self) - .map_assign_into(to, A::clone); + Zip::from(self).map_assign_into(to, A::clone); } /// Perform an elementwise assigment to `self` from element `x`. @@ -2744,7 +2706,9 @@ where if let Some(slc) = self.as_slice_memory_order() { ArrayBase::from_shape_trusted_iter_unchecked( self.dim.clone().strides(self.strides.clone()), - slc.iter(), f) + slc.iter(), + f, + ) } else { ArrayBase::from_shape_trusted_iter_unchecked(self.dim.clone(), self.iter(), f) } @@ -2767,8 +2731,7 @@ where if self.is_contiguous() { let strides = self.strides.clone(); let slc = self.as_slice_memory_order_mut().unwrap(); - unsafe { ArrayBase::from_shape_trusted_iter_unchecked(dim.strides(strides), - slc.iter_mut(), f) } + unsafe { ArrayBase::from_shape_trusted_iter_unchecked(dim.strides(strides), slc.iter_mut(), f) } } else { unsafe { ArrayBase::from_shape_trusted_iter_unchecked(dim, self.iter_mut(), f) } } @@ -2921,7 +2884,7 @@ where /// on each element. /// /// Elements are visited in arbitrary order. - #[deprecated(note="Renamed to .for_each()", since="0.15.0")] + #[deprecated(note = "Renamed to .for_each()", since = "0.15.0")] pub fn visit<'a, F>(&'a self, f: F) where F: FnMut(&'a A), @@ -3016,8 +2979,7 @@ where /// ***Panics*** if `axis` is out of bounds
/// ***Panics*** if not `index < self.len_of(axis)`. pub fn remove_index(&mut self, axis: Axis, index: usize) - where - S: DataOwned + DataMut, + where S: DataOwned + DataMut { assert!(index < self.len_of(axis), "index {} must be less than length of Axis({})", index, axis.index()); @@ -3085,7 +3047,6 @@ where } } - /// Transmute from A to B. /// /// Like transmute, but does not have the compile-time size check which blocks @@ -3094,7 +3055,8 @@ where /// **Panics** if the size of A and B are different. #[track_caller] #[inline] -unsafe fn unlimited_transmute(data: A) -> B { +unsafe fn unlimited_transmute(data: A) -> B +{ // safe when sizes are equal and caller guarantees that representations are equal assert_eq!(size_of::
(), size_of::()); let old_data = ManuallyDrop::new(data); diff --git a/src/impl_ops.rs b/src/impl_ops.rs index 8d02364d1..46ea18a7c 100644 --- a/src/impl_ops.rs +++ b/src/impl_ops.rs @@ -288,21 +288,25 @@ impl<'a, S, D> $trt<&'a ArrayBase> for $scalar ); } -mod arithmetic_ops { +mod arithmetic_ops +{ use super::*; use crate::imp_prelude::*; use std::ops::*; - fn clone_opf(f: impl Fn(A, B) -> C) -> impl FnMut(&A, &B) -> C { + fn clone_opf(f: impl Fn(A, B) -> C) -> impl FnMut(&A, &B) -> C + { move |x, y| f(x.clone(), y.clone()) } - fn clone_iopf(f: impl Fn(A, B) -> A) -> impl FnMut(&mut A, &B) { + fn clone_iopf(f: impl Fn(A, B) -> A) -> impl FnMut(&mut A, &B) + { move |x, y| *x = f(x.clone(), y.clone()) } - fn clone_iopf_rev(f: impl Fn(A, B) -> B) -> impl FnMut(&mut B, &A) { + fn clone_iopf_rev(f: impl Fn(A, B) -> B) -> impl FnMut(&mut B, &A) + { move |x, y| *x = f(y.clone(), x.clone()) } @@ -378,7 +382,8 @@ mod arithmetic_ops { { type Output = Self; /// Perform an elementwise negation of `self` and return the result. - fn neg(mut self) -> Self { + fn neg(mut self) -> Self + { self.map_inplace(|elt| { *elt = -elt.clone(); }); @@ -395,7 +400,8 @@ mod arithmetic_ops { type Output = Array; /// Perform an elementwise negation of reference `self` and return the /// result as a new `Array`. - fn neg(self) -> Array { + fn neg(self) -> Array + { self.map(Neg::neg) } } @@ -408,7 +414,8 @@ mod arithmetic_ops { { type Output = Self; /// Perform an elementwise unary not of `self` and return the result. - fn not(mut self) -> Self { + fn not(mut self) -> Self + { self.map_inplace(|elt| { *elt = !elt.clone(); }); @@ -425,13 +432,15 @@ mod arithmetic_ops { type Output = Array; /// Perform an elementwise unary not of reference `self` and return the /// result as a new `Array`. - fn not(self) -> Array { + fn not(self) -> Array + { self.map(Not::not) } } } -mod assign_ops { +mod assign_ops +{ use super::*; use crate::imp_prelude::*; diff --git a/src/impl_owned_array.rs b/src/impl_owned_array.rs index 3e9001132..53be9e48c 100644 --- a/src/impl_owned_array.rs +++ b/src/impl_owned_array.rs @@ -1,4 +1,3 @@ - #[cfg(not(feature = "std"))] use alloc::vec::Vec; use std::mem; @@ -18,7 +17,8 @@ use crate::Zip; /// Methods specific to `Array0`. /// /// ***See also all methods for [`ArrayBase`]*** -impl Array { +impl Array +{ /// Returns the single element in the array without cloning it. /// /// ``` @@ -32,7 +32,8 @@ impl Array { /// let scalar: Foo = array.into_scalar(); /// assert_eq!(scalar, Foo); /// ``` - pub fn into_scalar(self) -> A { + pub fn into_scalar(self) -> A + { let size = mem::size_of::(); if size == 0 { // Any index in the `Vec` is fine since all elements are identical. @@ -56,15 +57,15 @@ impl Array { /// /// ***See also all methods for [`ArrayBase`]*** impl Array -where - D: Dimension, +where D: Dimension { /// Return a vector of the elements in the array, in the way they are /// stored internally. /// /// If the array is in standard memory layout, the logical element order /// of the array (`.iter()` order) and of the returned vector will be the same. - pub fn into_raw_vec(self) -> Vec { + pub fn into_raw_vec(self) -> Vec + { self.data.into_vec() } } @@ -72,7 +73,8 @@ where /// Methods specific to `Array2`. /// /// ***See also all methods for [`ArrayBase`]*** -impl Array { +impl Array +{ /// Append a row to an array /// /// The elements from `row` are cloned and added as a new row in the array. 
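Aside: a minimal sketch of the owned-array helpers reformatted above; `into_scalar` moves the single element out of an `Array0`, and `into_raw_vec` hands back the backing `Vec` in memory order. Illustrative only, assuming ndarray's public API at this revision:

```rust
use ndarray::{arr0, array};

fn main() {
    // `Array0` holds exactly one element; `into_scalar` moves it out without cloning.
    let zero_d = arr0(7);
    assert_eq!(zero_d.into_scalar(), 7);

    // For a standard-layout array, memory order equals logical (row-major) order.
    let a = array![[1, 2], [3, 4]];
    assert_eq!(a.into_raw_vec(), vec![1, 2, 3, 4]);
}
```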
@@ -113,8 +115,7 @@ impl Array { /// [-1., -2., -3., -4.]]); /// ``` pub fn push_row(&mut self, row: ArrayView) -> Result<(), ShapeError> - where - A: Clone, + where A: Clone { self.append(Axis(0), row.insert_axis(Axis(0))) } @@ -159,15 +160,14 @@ impl Array { /// [2., -2.]]); /// ``` pub fn push_column(&mut self, column: ArrayView) -> Result<(), ShapeError> - where - A: Clone, + where A: Clone { self.append(Axis(1), column.insert_axis(Axis(1))) } } impl Array - where D: Dimension +where D: Dimension { /// Move all elements from self into `new_array`, which must be of the same shape but /// can have a different memory layout. The destination is overwritten completely. @@ -199,18 +199,18 @@ impl Array } else { // If `A` doesn't need drop, we can overwrite the destination. // Safe because: move_into_uninit only writes initialized values - unsafe { - self.move_into_uninit(new_array.into_maybe_uninit()) - } + unsafe { self.move_into_uninit(new_array.into_maybe_uninit()) } } } - fn move_into_needs_drop(mut self, new_array: ArrayViewMut) { + fn move_into_needs_drop(mut self, new_array: ArrayViewMut) + { // Simple case where `A` has a destructor: just swap values between self and new_array. // Afterwards, `self` drops full of initialized values and dropping works as usual. // This avoids moving out of owned values in `self` while at the same time managing // the dropping if the values being overwritten in `new_array`. - Zip::from(&mut self).and(new_array) + Zip::from(&mut self) + .and(new_array) .for_each(|src, dst| mem::swap(src, dst)); } @@ -249,7 +249,8 @@ impl Array self.move_into_impl(new_array.into()) } - fn move_into_impl(mut self, new_array: ArrayViewMut, D>) { + fn move_into_impl(mut self, new_array: ArrayViewMut, D>) + { unsafe { // Safety: copy_to_nonoverlapping cannot panic let guard = AbortIfPanic(&"move_into: moving out of owned value"); @@ -274,7 +275,8 @@ impl Array /// # Safety /// /// This is a panic critical section since `self` is already moved-from. 
- fn drop_unreachable_elements(mut self) -> OwnedRepr { + fn drop_unreachable_elements(mut self) -> OwnedRepr + { let self_len = self.len(); // "deconstruct" self; the owned repr releases ownership of all elements and we @@ -294,7 +296,8 @@ impl Array #[inline(never)] #[cold] - fn drop_unreachable_elements_slow(mut self) -> OwnedRepr { + fn drop_unreachable_elements_slow(mut self) -> OwnedRepr + { // "deconstruct" self; the owned repr releases ownership of all elements and we // carry on with raw view methods let data_len = self.data.len(); @@ -315,7 +318,8 @@ impl Array /// Create an empty array with an all-zeros shape /// /// ***Panics*** if D is zero-dimensional, because it can't be empty - pub(crate) fn empty() -> Array { + pub(crate) fn empty() -> Array + { assert_ne!(D::NDIM, Some(0)); let ndim = D::NDIM.unwrap_or(1); Array::from_shape_simple_fn(D::zeros(ndim), || unreachable!()) @@ -323,7 +327,8 @@ impl Array /// Create new_array with the right layout for appending to `growing_axis` #[cold] - fn change_to_contig_append_layout(&mut self, growing_axis: Axis) { + fn change_to_contig_append_layout(&mut self, growing_axis: Axis) + { let ndim = self.ndim(); let mut dim = self.raw_dim(); @@ -402,8 +407,7 @@ impl Array /// [0., 0., 0., 0.], /// [1., 1., 1., 1.]]); /// ``` - pub fn push(&mut self, axis: Axis, array: ArrayView) - -> Result<(), ShapeError> + pub fn push(&mut self, axis: Axis, array: ArrayView) -> Result<(), ShapeError> where A: Clone, D: RemoveAxis, @@ -412,7 +416,6 @@ impl Array self.append(axis, array.insert_axis(axis).into_dimensionality::().unwrap()) } - /// Append an array to the array along an axis. /// /// The elements of `array` are cloned and extend the axis `axis` in the present array; @@ -463,8 +466,7 @@ impl Array /// [1., 1., 1., 1.], /// [1., 1., 1., 1.]]); /// ``` - pub fn append(&mut self, axis: Axis, mut array: ArrayView) - -> Result<(), ShapeError> + pub fn append(&mut self, axis: Axis, mut array: ArrayView) -> Result<(), ShapeError> where A: Clone, D: RemoveAxis, @@ -557,7 +559,11 @@ impl Array acc } else { let this_ax = ax.len as isize * ax.stride.abs(); - if this_ax > acc { this_ax } else { acc } + if this_ax > acc { + this_ax + } else { + acc + } } }); let mut strides = self.strides.clone(); @@ -575,7 +581,10 @@ impl Array 0 }; debug_assert!(data_to_array_offset >= 0); - self.ptr = self.data.reserve(len_to_append).offset(data_to_array_offset); + self.ptr = self + .data + .reserve(len_to_append) + .offset(data_to_array_offset); // clone elements from view to the array now // @@ -612,19 +621,22 @@ impl Array debug_assert!(tail_view.is_standard_layout(), "not std layout dim: {:?}, strides: {:?}", tail_view.shape(), tail_view.strides()); - } + } // Keep track of currently filled length of `self.data` and update it // on scope exit (panic or loop finish). This "indirect" way to // write the length is used to help the compiler, the len store to self.data may // otherwise be mistaken to alias with other stores in the loop. 
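Aside (the `append` hunk continues below): a small usage sketch of the growth API whose implementation is being reformatted here, assuming the `push`/`append` signatures shown above. Illustrative only:

```rust
use ndarray::{array, aview1, Array2, Axis};

fn main() {
    let mut a = Array2::<i32>::zeros((0, 3)); // zero rows, three columns

    // `push` appends a single (n-1)-D lane along `axis`; `append` appends a whole block.
    a.push(Axis(0), aview1(&[1, 2, 3])).unwrap();
    a.append(Axis(0), array![[4, 5, 6], [7, 8, 9]].view()).unwrap();

    assert_eq!(a.shape(), &[3, 3]);
    assert_eq!(a.row(2), aview1(&[7, 8, 9]));
}
```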
- struct SetLenOnDrop<'a, A: 'a> { + struct SetLenOnDrop<'a, A: 'a> + { len: usize, data: &'a mut OwnedRepr, } - impl Drop for SetLenOnDrop<'_, A> { - fn drop(&mut self) { + impl Drop for SetLenOnDrop<'_, A> + { + fn drop(&mut self) + { unsafe { self.data.set_len(self.len); } @@ -636,7 +648,6 @@ impl Array data: &mut self.data, }; - // Safety: tail_view is constructed to have the same shape as array Zip::from(tail_view) .and_unchecked(array) @@ -667,8 +678,7 @@ impl Array /// This is an internal function for use by move_into and IntoIter only, safety invariants may need /// to be upheld across the calls from those implementations. pub(crate) unsafe fn drop_unreachable_raw(mut self_: RawArrayViewMut, data_ptr: *mut A, data_len: usize) -where - D: Dimension, +where D: Dimension { let self_len = self_.len(); @@ -751,8 +761,7 @@ where } fn sort_axes1_impl(adim: &mut D, astrides: &mut D) -where - D: Dimension, +where D: Dimension { debug_assert!(adim.ndim() > 1); debug_assert_eq!(adim.ndim(), astrides.ndim()); @@ -775,7 +784,6 @@ where } } - /// Sort axes to standard order, i.e Axis(0) has biggest stride and Axis(n - 1) least stride /// /// Axes in a and b are sorted by the strides of `a`, and `a`'s axes should have stride >= 0 before @@ -793,8 +801,7 @@ where } fn sort_axes2_impl(adim: &mut D, astrides: &mut D, bdim: &mut D, bstrides: &mut D) -where - D: Dimension, +where D: Dimension { debug_assert!(adim.ndim() > 1); debug_assert_eq!(adim.ndim(), bdim.ndim()); diff --git a/src/impl_raw_views.rs b/src/impl_raw_views.rs index 9f026318c..aeee75cb2 100644 --- a/src/impl_raw_views.rs +++ b/src/impl_raw_views.rs @@ -6,23 +6,23 @@ use crate::dimension::{self, stride_offset}; use crate::extension::nonnull::nonnull_debug_checked_from_ptr; use crate::imp_prelude::*; use crate::is_aligned; -use crate::shape_builder::{Strides, StrideShape}; +use crate::shape_builder::{StrideShape, Strides}; impl RawArrayView -where - D: Dimension, +where D: Dimension { /// Create a new `RawArrayView`. /// /// Unsafe because caller is responsible for ensuring that the array will /// meet all of the invariants of the `ArrayBase` type. #[inline] - pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self { - RawArrayView::from_data_ptr(RawViewRepr::new(), ptr) - .with_strides_dim(strides, dim) + pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self + { + RawArrayView::from_data_ptr(RawViewRepr::new(), ptr).with_strides_dim(strides, dim) } - unsafe fn new_(ptr: *const A, dim: D, strides: D) -> Self { + unsafe fn new_(ptr: *const A, dim: D, strides: D) -> Self + { Self::new(nonnull_debug_checked_from_ptr(ptr as *mut A), dim, strides) } @@ -59,7 +59,7 @@ where /// [`.offset()`] regardless of the starting point due to past offsets. /// /// * The product of non-zero axis lengths must not exceed `isize::MAX`. - /// + /// /// * Strides must be non-negative. /// /// This function can use debug assertions to check some of these requirements, @@ -67,8 +67,7 @@ where /// /// [`.offset()`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset pub unsafe fn from_shape_ptr(shape: Sh, ptr: *const A) -> Self - where - Sh: Into>, + where Sh: Into> { let shape = shape.into(); let dim = shape.dim; @@ -94,7 +93,8 @@ where /// data is valid, ensure that the pointer is aligned, and choose the /// correct lifetime. 
#[inline] - pub unsafe fn deref_into_view<'a>(self) -> ArrayView<'a, A, D> { + pub unsafe fn deref_into_view<'a>(self) -> ArrayView<'a, A, D> + { debug_assert!( is_aligned(self.ptr.as_ptr()), "The pointer must be aligned." @@ -107,7 +107,8 @@ where /// /// **Panics** if `axis` or `index` is out of bounds. #[track_caller] - pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) { + pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) + { assert!(index <= self.len_of(axis)); let left_ptr = self.ptr.as_ptr(); let right_ptr = if index == self.len_of(axis) { @@ -141,7 +142,8 @@ where /// casts are safe, access through the produced raw view is only possible /// in an unsafe block or function. #[track_caller] - pub fn cast(self) -> RawArrayView { + pub fn cast(self) -> RawArrayView + { assert_eq!( mem::size_of::(), mem::size_of::(), @@ -153,12 +155,12 @@ where } impl RawArrayView, D> -where - D: Dimension, +where D: Dimension { /// Splits the view into views of the real and imaginary components of the /// elements. - pub fn split_complex(self) -> Complex> { + pub fn split_complex(self) -> Complex> + { // Check that the size and alignment of `Complex` are as expected. // These assertions should always pass, for arbitrary `T`. assert_eq!( @@ -220,20 +222,20 @@ where } impl RawArrayViewMut -where - D: Dimension, +where D: Dimension { /// Create a new `RawArrayViewMut`. /// /// Unsafe because caller is responsible for ensuring that the array will /// meet all of the invariants of the `ArrayBase` type. #[inline] - pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self { - RawArrayViewMut::from_data_ptr(RawViewRepr::new(), ptr) - .with_strides_dim(strides, dim) + pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self + { + RawArrayViewMut::from_data_ptr(RawViewRepr::new(), ptr).with_strides_dim(strides, dim) } - unsafe fn new_(ptr: *mut A, dim: D, strides: D) -> Self { + unsafe fn new_(ptr: *mut A, dim: D, strides: D) -> Self + { Self::new(nonnull_debug_checked_from_ptr(ptr), dim, strides) } @@ -268,7 +270,7 @@ where /// address by moving along all axes must not exceed `isize::MAX`. This /// constraint prevents overflow when calculating the `count` parameter to /// [`.offset()`] regardless of the starting point due to past offsets. - /// + /// /// * The product of non-zero axis lengths must not exceed `isize::MAX`. /// /// * Strides must be non-negative. @@ -278,8 +280,7 @@ where /// /// [`.offset()`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset pub unsafe fn from_shape_ptr(shape: Sh, ptr: *mut A) -> Self - where - Sh: Into>, + where Sh: Into> { let shape = shape.into(); let dim = shape.dim; @@ -298,7 +299,8 @@ where /// Converts to a non-mutable `RawArrayView`. #[inline] - pub(crate) fn into_raw_view(self) -> RawArrayView { + pub(crate) fn into_raw_view(self) -> RawArrayView + { unsafe { RawArrayView::new(self.ptr, self.dim, self.strides) } } @@ -311,7 +313,8 @@ where /// data is valid, ensure that the pointer is aligned, and choose the /// correct lifetime. #[inline] - pub unsafe fn deref_into_view<'a>(self) -> ArrayView<'a, A, D> { + pub unsafe fn deref_into_view<'a>(self) -> ArrayView<'a, A, D> + { debug_assert!( is_aligned(self.ptr.as_ptr()), "The pointer must be aligned." @@ -328,7 +331,8 @@ where /// data is valid, ensure that the pointer is aligned, and choose the /// correct lifetime. 
#[inline] - pub unsafe fn deref_into_view_mut<'a>(self) -> ArrayViewMut<'a, A, D> { + pub unsafe fn deref_into_view_mut<'a>(self) -> ArrayViewMut<'a, A, D> + { debug_assert!( is_aligned(self.ptr.as_ptr()), "The pointer must be aligned." @@ -341,14 +345,10 @@ where /// /// **Panics** if `axis` or `index` is out of bounds. #[track_caller] - pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) { + pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) + { let (left, right) = self.into_raw_view().split_at(axis, index); - unsafe { - ( - Self::new(left.ptr, left.dim, left.strides), - Self::new(right.ptr, right.dim, right.strides), - ) - } + unsafe { (Self::new(left.ptr, left.dim, left.strides), Self::new(right.ptr, right.dim, right.strides)) } } /// Cast the raw pointer of the raw array view to a different type @@ -362,7 +362,8 @@ where /// casts are safe, access through the produced raw view is only possible /// in an unsafe block or function. #[track_caller] - pub fn cast(self) -> RawArrayViewMut { + pub fn cast(self) -> RawArrayViewMut + { assert_eq!( mem::size_of::(), mem::size_of::(), @@ -374,12 +375,12 @@ where } impl RawArrayViewMut, D> -where - D: Dimension, +where D: Dimension { /// Splits the view into views of the real and imaginary components of the /// elements. - pub fn split_complex(self) -> Complex> { + pub fn split_complex(self) -> Complex> + { let Complex { re, im } = self.into_raw_view().split_complex(); unsafe { Complex { diff --git a/src/impl_special_element_types.rs b/src/impl_special_element_types.rs index 5d5f18491..e430b20bc 100644 --- a/src/impl_special_element_types.rs +++ b/src/impl_special_element_types.rs @@ -11,13 +11,12 @@ use std::mem::MaybeUninit; use crate::imp_prelude::*; use crate::RawDataSubst; - /// Methods specific to arrays with `MaybeUninit` elements. /// /// ***See also all methods for [`ArrayBase`]*** impl ArrayBase where - S: RawDataSubst>, + S: RawDataSubst>, D: Dimension, { /// **Promise** that the array's elements are all fully initialized, and convert @@ -32,8 +31,14 @@ where /// Note that for owned and shared ownership arrays, the promise must include all of the /// array's storage; it is for example possible to slice these in place, but that must /// only be done after all elements have been initialized. - pub unsafe fn assume_init(self) -> ArrayBase<>::Output, D> { - let ArrayBase { data, ptr, dim, strides } = self; + pub unsafe fn assume_init(self) -> ArrayBase<>::Output, D> + { + let ArrayBase { + data, + ptr, + dim, + strides, + } = self; // "transmute" from storage of MaybeUninit to storage of A let data = S::data_subst(data); diff --git a/src/impl_views/constructors.rs b/src/impl_views/constructors.rs index 98cb81fcc..33c7b15be 100644 --- a/src/impl_views/constructors.rs +++ b/src/impl_views/constructors.rs @@ -9,16 +9,15 @@ use std::ptr::NonNull; use crate::dimension; +use crate::dimension::offset_from_low_addr_ptr_to_logical_ptr; use crate::error::ShapeError; use crate::extension::nonnull::nonnull_debug_checked_from_ptr; use crate::imp_prelude::*; use crate::{is_aligned, StrideShape}; -use crate::dimension::offset_from_low_addr_ptr_to_logical_ptr; /// Methods for read-only array views. impl<'a, A, D> ArrayView<'a, A, D> -where - D: Dimension, +where D: Dimension { /// Create a read-only array view borrowing its data from a slice. 
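Aside: a minimal sketch of the `MaybeUninit` workflow whose `assume_init` hunk appears above. The `Array2::<f64>::uninit((2, 3))` constructor used here is the companion API in this crate and is an assumption of the sketch, not something shown in this hunk. Illustrative only:

```rust
use ndarray::Array2;

fn main() {
    // Allocate uninitialized storage (assumed companion API), then write every element.
    let mut a = Array2::<f64>::uninit((2, 3));
    a.map_inplace(|elt| {
        elt.write(1.0);
    });
    // Safety: every element was initialized just above, so the promise holds.
    let a = unsafe { a.assume_init() };
    assert_eq!(a.sum(), 6.0);
}
```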
/// @@ -46,18 +45,25 @@ where /// assert!(a.strides() == &[1, 4, 2]); /// ``` pub fn from_shape(shape: Sh, xs: &'a [A]) -> Result - where - Sh: Into>, + where Sh: Into> { // eliminate the type parameter Sh as soon as possible Self::from_shape_impl(shape.into(), xs) } - fn from_shape_impl(shape: StrideShape, xs: &'a [A]) -> Result { + fn from_shape_impl(shape: StrideShape, xs: &'a [A]) -> Result + { let dim = shape.dim; dimension::can_index_slice_with_strides(xs, &dim, &shape.strides)?; let strides = shape.strides.strides_for_dim(&dim); - unsafe { Ok(Self::new_(xs.as_ptr().add(offset_from_low_addr_ptr_to_logical_ptr(&dim, &strides)), dim, strides)) } + unsafe { + Ok(Self::new_( + xs.as_ptr() + .add(offset_from_low_addr_ptr_to_logical_ptr(&dim, &strides)), + dim, + strides, + )) + } } /// Create an `ArrayView` from shape information and a raw pointer to @@ -105,8 +111,7 @@ where /// /// [`.offset()`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset pub unsafe fn from_shape_ptr(shape: Sh, ptr: *const A) -> Self - where - Sh: Into>, + where Sh: Into> { RawArrayView::from_shape_ptr(shape, ptr).deref_into_view() } @@ -114,8 +119,7 @@ where /// Methods for read-write array views. impl<'a, A, D> ArrayViewMut<'a, A, D> -where - D: Dimension, +where D: Dimension { /// Create a read-write array view borrowing its data from a slice. /// @@ -143,18 +147,25 @@ where /// assert!(a.strides() == &[1, 4, 2]); /// ``` pub fn from_shape(shape: Sh, xs: &'a mut [A]) -> Result - where - Sh: Into>, + where Sh: Into> { // eliminate the type parameter Sh as soon as possible Self::from_shape_impl(shape.into(), xs) } - fn from_shape_impl(shape: StrideShape, xs: &'a mut [A]) -> Result { + fn from_shape_impl(shape: StrideShape, xs: &'a mut [A]) -> Result + { let dim = shape.dim; dimension::can_index_slice_with_strides(xs, &dim, &shape.strides)?; let strides = shape.strides.strides_for_dim(&dim); - unsafe { Ok(Self::new_(xs.as_mut_ptr().add(offset_from_low_addr_ptr_to_logical_ptr(&dim, &strides)), dim, strides)) } + unsafe { + Ok(Self::new_( + xs.as_mut_ptr() + .add(offset_from_low_addr_ptr_to_logical_ptr(&dim, &strides)), + dim, + strides, + )) + } } /// Create an `ArrayViewMut` from shape information and a @@ -202,8 +213,7 @@ where /// /// [`.offset()`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset pub unsafe fn from_shape_ptr(shape: Sh, ptr: *mut A) -> Self - where - Sh: Into>, + where Sh: Into> { RawArrayViewMut::from_shape_ptr(shape, ptr).deref_into_view_mut() } @@ -211,8 +221,7 @@ where /// Convert the view into an `ArrayViewMut<'b, A, D>` where `'b` is a lifetime /// outlived by `'a'`. pub fn reborrow<'b>(self) -> ArrayViewMut<'b, A, D> - where - 'a: 'b, + where 'a: 'b { unsafe { ArrayViewMut::new(self.ptr, self.dim, self.strides) } } @@ -220,14 +229,14 @@ where /// Private array view methods impl<'a, A, D> ArrayView<'a, A, D> -where - D: Dimension, +where D: Dimension { /// Create a new `ArrayView` /// /// Unsafe because: `ptr` must be valid for the given dimension and strides. #[inline(always)] - pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self { + pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self + { if cfg!(debug_assertions) { assert!(is_aligned(ptr.as_ptr()), "The pointer must be aligned."); dimension::max_abs_offset_check_overflow::(&dim, &strides).unwrap(); @@ -237,20 +246,21 @@ where /// Unsafe because: `ptr` must be valid for the given dimension and strides. 
#[inline] - pub(crate) unsafe fn new_(ptr: *const A, dim: D, strides: D) -> Self { + pub(crate) unsafe fn new_(ptr: *const A, dim: D, strides: D) -> Self + { Self::new(nonnull_debug_checked_from_ptr(ptr as *mut A), dim, strides) } } impl<'a, A, D> ArrayViewMut<'a, A, D> -where - D: Dimension, +where D: Dimension { /// Create a new `ArrayView` /// /// Unsafe because: `ptr` must be valid for the given dimension and strides. #[inline(always)] - pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self { + pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self + { if cfg!(debug_assertions) { assert!(is_aligned(ptr.as_ptr()), "The pointer must be aligned."); dimension::max_abs_offset_check_overflow::(&dim, &strides).unwrap(); @@ -262,7 +272,8 @@ where /// /// Unsafe because: `ptr` must be valid for the given dimension and strides. #[inline(always)] - pub(crate) unsafe fn new_(ptr: *mut A, dim: D, strides: D) -> Self { + pub(crate) unsafe fn new_(ptr: *mut A, dim: D, strides: D) -> Self + { Self::new(nonnull_debug_checked_from_ptr(ptr), dim, strides) } } diff --git a/src/impl_views/conversions.rs b/src/impl_views/conversions.rs index ca571a761..f545ebdd0 100644 --- a/src/impl_views/conversions.rs +++ b/src/impl_views/conversions.rs @@ -21,14 +21,12 @@ use crate::IndexLonger; /// Methods for read-only array views. impl<'a, A, D> ArrayView<'a, A, D> -where - D: Dimension, +where D: Dimension { /// Convert the view into an `ArrayView<'b, A, D>` where `'b` is a lifetime /// outlived by `'a'`. pub fn reborrow<'b>(self) -> ArrayView<'b, A, D> - where - 'a: 'b, + where 'a: 'b { unsafe { ArrayView::new(self.ptr, self.dim, self.strides) } } @@ -38,7 +36,8 @@ where /// /// Note that while the method is similar to [`ArrayBase::as_slice()`], this method transfers /// the view's lifetime to the slice, so it is a bit more powerful. - pub fn to_slice(&self) -> Option<&'a [A]> { + pub fn to_slice(&self) -> Option<&'a [A]> + { if self.is_standard_layout() { unsafe { Some(slice::from_raw_parts(self.ptr.as_ptr(), self.len())) } } else { @@ -52,22 +51,19 @@ where /// Note that while the method is similar to /// [`ArrayBase::as_slice_memory_order()`], this method transfers the view's /// lifetime to the slice, so it is a bit more powerful. - pub fn to_slice_memory_order(&self) -> Option<&'a [A]> { + pub fn to_slice_memory_order(&self) -> Option<&'a [A]> + { if self.is_contiguous() { let offset = offset_from_low_addr_ptr_to_logical_ptr(&self.dim, &self.strides); - unsafe { - Some(slice::from_raw_parts( - self.ptr.sub(offset).as_ptr(), - self.len(), - )) - } + unsafe { Some(slice::from_raw_parts(self.ptr.sub(offset).as_ptr(), self.len())) } } else { None } } /// Converts to a raw array view. - pub(crate) fn into_raw_view(self) -> RawArrayView { + pub(crate) fn into_raw_view(self) -> RawArrayView + { unsafe { RawArrayView::new(self.ptr, self.dim, self.strides) } } } @@ -75,7 +71,8 @@ where /// Methods specific to `ArrayView0`. /// /// ***See also all methods for [`ArrayView`] and [`ArrayBase`]*** -impl<'a, A> ArrayView<'a, A, Ix0> { +impl<'a, A> ArrayView<'a, A, Ix0> +{ /// Consume the view and return a reference to the single element in the array. 
/// /// The lifetime of the returned reference matches the lifetime of the data @@ -93,7 +90,8 @@ impl<'a, A> ArrayView<'a, A, Ix0> { /// let scalar: &Foo = view.into_scalar(); /// assert_eq!(scalar, &Foo); /// ``` - pub fn into_scalar(self) -> &'a A { + pub fn into_scalar(self) -> &'a A + { self.index(Ix0()) } } @@ -101,7 +99,8 @@ impl<'a, A> ArrayView<'a, A, Ix0> { /// Methods specific to `ArrayViewMut0`. /// /// ***See also all methods for [`ArrayViewMut`] and [`ArrayBase`]*** -impl<'a, A> ArrayViewMut<'a, A, Ix0> { +impl<'a, A> ArrayViewMut<'a, A, Ix0> +{ /// Consume the mutable view and return a mutable reference to the single element in the array. /// /// The lifetime of the returned reference matches the lifetime of the data @@ -117,22 +116,23 @@ impl<'a, A> ArrayViewMut<'a, A, Ix0> { /// assert_eq!(scalar, &7.); /// assert_eq!(array[()], 7.); /// ``` - pub fn into_scalar(self) -> &'a mut A { + pub fn into_scalar(self) -> &'a mut A + { self.index(Ix0()) } } /// Methods for read-write array views. impl<'a, A, D> ArrayViewMut<'a, A, D> -where - D: Dimension, +where D: Dimension { /// Return the array’s data as a slice, if it is contiguous and in standard order. /// Return `None` otherwise. /// /// Note that while this is similar to [`ArrayBase::as_slice_mut()`], this method transfers the /// view's lifetime to the slice. - pub fn into_slice(self) -> Option<&'a mut [A]> { + pub fn into_slice(self) -> Option<&'a mut [A]> + { self.try_into_slice().ok() } @@ -142,7 +142,8 @@ where /// Note that while this is similar to /// [`ArrayBase::as_slice_memory_order_mut()`], this method transfers the /// view's lifetime to the slice. - pub fn into_slice_memory_order(self) -> Option<&'a mut [A]> { + pub fn into_slice_memory_order(self) -> Option<&'a mut [A]> + { self.try_into_slice_memory_order().ok() } @@ -152,12 +153,15 @@ where /// /// The view acts "as if" the elements are temporarily in cells, and elements /// can be changed through shared references using the regular cell methods. - pub fn into_cell_view(self) -> ArrayView<'a, MathCell, D> { + pub fn into_cell_view(self) -> ArrayView<'a, MathCell, D> + { // safety: valid because // A and MathCell have the same representation // &'a mut T is interchangeable with &'a Cell -- see method Cell::from_mut in std unsafe { - self.into_raw_view_mut().cast::>().deref_into_view() + self.into_raw_view_mut() + .cast::>() + .deref_into_view() } } @@ -174,52 +178,57 @@ where /// This method allows writing uninitialized data into the view, which could leave any /// original array that we borrow from in an inconsistent state. This is not allowed /// when using the resulting array view. - pub(crate) unsafe fn into_maybe_uninit(self) -> ArrayViewMut<'a, MaybeUninit, D> { + pub(crate) unsafe fn into_maybe_uninit(self) -> ArrayViewMut<'a, MaybeUninit, D> + { // Safe because: A and MaybeUninit have the same representation; // and we can go from initialized to (maybe) not unconditionally in terms of // representation. However, the user must be careful to not write uninit elements // through the view. 
- self.into_raw_view_mut().cast::>().deref_into_view_mut() + self.into_raw_view_mut() + .cast::>() + .deref_into_view_mut() } } /// Private raw array view methods impl RawArrayView -where - D: Dimension, +where D: Dimension { #[inline] - pub(crate) fn into_base_iter(self) -> Baseiter { + pub(crate) fn into_base_iter(self) -> Baseiter + { unsafe { Baseiter::new(self.ptr.as_ptr(), self.dim, self.strides) } } } impl RawArrayViewMut -where - D: Dimension, +where D: Dimension { #[inline] - pub(crate) fn into_base_iter(self) -> Baseiter { + pub(crate) fn into_base_iter(self) -> Baseiter + { unsafe { Baseiter::new(self.ptr.as_ptr(), self.dim, self.strides) } } } /// Private array view methods impl<'a, A, D> ArrayView<'a, A, D> -where - D: Dimension, +where D: Dimension { #[inline] - pub(crate) fn into_base_iter(self) -> Baseiter { + pub(crate) fn into_base_iter(self) -> Baseiter + { unsafe { Baseiter::new(self.ptr.as_ptr(), self.dim, self.strides) } } #[inline] - pub(crate) fn into_elements_base(self) -> ElementsBase<'a, A, D> { + pub(crate) fn into_elements_base(self) -> ElementsBase<'a, A, D> + { ElementsBase::new(self) } - pub(crate) fn into_iter_(self) -> Iter<'a, A, D> { + pub(crate) fn into_iter_(self) -> Iter<'a, A, D> + { Iter::new(self) } @@ -227,40 +236,43 @@ where #[doc(hidden)] // not official #[deprecated(note = "This method will be replaced.")] pub fn into_outer_iter(self) -> iter::AxisIter<'a, A, D::Smaller> - where - D: RemoveAxis, + where D: RemoveAxis { AxisIter::new(self, Axis(0)) } } impl<'a, A, D> ArrayViewMut<'a, A, D> -where - D: Dimension, +where D: Dimension { // Convert into a read-only view - pub(crate) fn into_view(self) -> ArrayView<'a, A, D> { + pub(crate) fn into_view(self) -> ArrayView<'a, A, D> + { unsafe { ArrayView::new(self.ptr, self.dim, self.strides) } } /// Converts to a mutable raw array view. - pub(crate) fn into_raw_view_mut(self) -> RawArrayViewMut { + pub(crate) fn into_raw_view_mut(self) -> RawArrayViewMut + { unsafe { RawArrayViewMut::new(self.ptr, self.dim, self.strides) } } #[inline] - pub(crate) fn into_base_iter(self) -> Baseiter { + pub(crate) fn into_base_iter(self) -> Baseiter + { unsafe { Baseiter::new(self.ptr.as_ptr(), self.dim, self.strides) } } #[inline] - pub(crate) fn into_elements_base(self) -> ElementsBaseMut<'a, A, D> { + pub(crate) fn into_elements_base(self) -> ElementsBaseMut<'a, A, D> + { ElementsBaseMut::new(self) } /// Return the array’s data as a slice, if it is contiguous and in standard order. /// Otherwise return self in the Err branch of the result. - pub(crate) fn try_into_slice(self) -> Result<&'a mut [A], Self> { + pub(crate) fn try_into_slice(self) -> Result<&'a mut [A], Self> + { if self.is_standard_layout() { unsafe { Ok(slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len())) } } else { @@ -270,21 +282,18 @@ where /// Return the array’s data as a slice, if it is contiguous. /// Otherwise return self in the Err branch of the result. 
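Aside: a short sketch of the lifetime-transferring slice conversions reformatted in the hunks above; `into_slice` returns a slice that borrows the underlying data for the view's full lifetime. Illustrative only:

```rust
use ndarray::array;

fn main() {
    let mut a = array![[1., 2.], [3., 4.]];
    // The mutable view's borrow of `a` is transferred to the returned slice.
    let slice: &mut [f64] = a.view_mut().into_slice().unwrap();
    slice[0] = 10.;
    assert_eq!(a[[0, 0]], 10.);
}
```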
- fn try_into_slice_memory_order(self) -> Result<&'a mut [A], Self> { + fn try_into_slice_memory_order(self) -> Result<&'a mut [A], Self> + { if self.is_contiguous() { let offset = offset_from_low_addr_ptr_to_logical_ptr(&self.dim, &self.strides); - unsafe { - Ok(slice::from_raw_parts_mut( - self.ptr.sub(offset).as_ptr(), - self.len(), - )) - } + unsafe { Ok(slice::from_raw_parts_mut(self.ptr.sub(offset).as_ptr(), self.len())) } } else { Err(self) } } - pub(crate) fn into_iter_(self) -> IterMut<'a, A, D> { + pub(crate) fn into_iter_(self) -> IterMut<'a, A, D> + { IterMut::new(self) } @@ -292,8 +301,7 @@ where #[doc(hidden)] // not official #[deprecated(note = "This method will be replaced.")] pub fn into_outer_iter(self) -> iter::AxisIterMut<'a, A, D::Smaller> - where - D: RemoveAxis, + where D: RemoveAxis { AxisIterMut::new(self, Axis(0)) } diff --git a/src/impl_views/indexing.rs b/src/impl_views/indexing.rs index 3494b91db..2b72c2142 100644 --- a/src/impl_views/indexing.rs +++ b/src/impl_views/indexing.rs @@ -46,7 +46,8 @@ use crate::NdIndex; /// assert_eq!(long_life_ref, &0.); /// /// ``` -pub trait IndexLonger { +pub trait IndexLonger +{ /// The type of the reference to the element that is produced, including /// its lifetime. type Output; @@ -119,12 +120,14 @@ where /// /// **Panics** if index is out of bounds. #[track_caller] - fn index(self, index: I) -> &'a A { + fn index(self, index: I) -> &'a A + { debug_bounds_check!(self, index); unsafe { &*self.get_ptr(index).unwrap_or_else(|| array_out_of_bounds()) } } - fn get(self, index: I) -> Option<&'a A> { + fn get(self, index: I) -> Option<&'a A> + { unsafe { self.get_ptr(index).map(|ptr| &*ptr) } } @@ -139,7 +142,8 @@ where /// [1]: ArrayBase::uget /// /// **Note:** only unchecked for non-debug builds of ndarray. - unsafe fn uget(self, index: I) -> &'a A { + unsafe fn uget(self, index: I) -> &'a A + { debug_bounds_check!(self, index); &*self.as_ptr().offset(index.index_unchecked(&self.strides)) } @@ -165,7 +169,8 @@ where /// /// **Panics** if index is out of bounds. #[track_caller] - fn index(mut self, index: I) -> &'a mut A { + fn index(mut self, index: I) -> &'a mut A + { debug_bounds_check!(self, index); unsafe { match self.get_mut_ptr(index) { @@ -183,7 +188,8 @@ where /// /// [1]: ArrayBase::get_mut /// - fn get(mut self, index: I) -> Option<&'a mut A> { + fn get(mut self, index: I) -> Option<&'a mut A> + { debug_bounds_check!(self, index); unsafe { match self.get_mut_ptr(index) { @@ -202,7 +208,8 @@ where /// [1]: ArrayBase::uget_mut /// /// **Note:** only unchecked for non-debug builds of ndarray. - unsafe fn uget(mut self, index: I) -> &'a mut A { + unsafe fn uget(mut self, index: I) -> &'a mut A + { debug_bounds_check!(self, index); &mut *self .as_mut_ptr() diff --git a/src/impl_views/splitting.rs b/src/impl_views/splitting.rs index 2ccc3ee91..e26900984 100644 --- a/src/impl_views/splitting.rs +++ b/src/impl_views/splitting.rs @@ -12,8 +12,7 @@ use num_complex::Complex; /// Methods for read-only array views. impl<'a, A, D> ArrayView<'a, A, D> -where - D: Dimension, +where D: Dimension { /// Split the array view along `axis` and return one view strictly before the /// split and one view after the split. 
@@ -89,7 +88,8 @@ where /// along Axis(1) /// ``` #[track_caller] - pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) { + pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) + { unsafe { let (left, right) = self.into_raw_view().split_at(axis, index); (left.deref_into_view(), right.deref_into_view()) @@ -98,8 +98,7 @@ where } impl<'a, T, D> ArrayView<'a, Complex, D> -where - D: Dimension, +where D: Dimension { /// Splits the view into views of the real and imaginary components of the /// elements. @@ -117,7 +116,8 @@ where /// assert_eq!(re, array![[1., 3.], [5., 7.], [9., 11.]]); /// assert_eq!(im, array![[2., 4.], [6., 8.], [10., 12.]]); /// ``` - pub fn split_complex(self) -> Complex> { + pub fn split_complex(self) -> Complex> + { unsafe { let Complex { re, im } = self.into_raw_view().split_complex(); Complex { @@ -130,15 +130,15 @@ where /// Methods for read-write array views. impl<'a, A, D> ArrayViewMut<'a, A, D> -where - D: Dimension, +where D: Dimension { /// Split the array view along `axis` and return one mutable view strictly /// before the split and one mutable view after the split. /// /// **Panics** if `axis` or `index` is out of bounds. #[track_caller] - pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) { + pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) + { unsafe { let (left, right) = self.into_raw_view_mut().split_at(axis, index); (left.deref_into_view_mut(), right.deref_into_view_mut()) @@ -164,16 +164,14 @@ where /// * if `D` is `IxDyn` and `info` does not match the number of array axes #[track_caller] pub fn multi_slice_move(self, info: M) -> M::Output - where - M: MultiSliceArg<'a, A, D>, + where M: MultiSliceArg<'a, A, D> { info.multi_slice_move(self) } } impl<'a, T, D> ArrayViewMut<'a, Complex, D> -where - D: Dimension, +where D: Dimension { /// Splits the view into views of the real and imaginary components of the /// elements. @@ -198,7 +196,8 @@ where /// assert_eq!(arr[[0, 1]], Complex64::new(13., 4.)); /// assert_eq!(arr[[2, 0]], Complex64::new(9., 14.)); /// ``` - pub fn split_complex(self) -> Complex> { + pub fn split_complex(self) -> Complex> + { unsafe { let Complex { re, im } = self.into_raw_view_mut().split_complex(); Complex { diff --git a/src/indexes.rs b/src/indexes.rs index 541570184..0fa2b50fb 100644 --- a/src/indexes.rs +++ b/src/indexes.rs @@ -7,8 +7,8 @@ // except according to those terms. use super::Dimension; use crate::dimension::IntoDimension; -use crate::zip::Offset; use crate::split_at::SplitAt; +use crate::zip::Offset; use crate::Axis; use crate::Layout; use crate::NdProducer; @@ -18,7 +18,8 @@ use crate::{ArrayBase, Data}; /// /// Iterator element type is `D`. #[derive(Clone)] -pub struct IndicesIter { +pub struct IndicesIter +{ dim: D, index: Option, } @@ -28,8 +29,7 @@ pub struct IndicesIter { /// *Note:* prefer higher order methods, arithmetic operations and /// non-indexed iteration before using indices. 
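Aside: a minimal sketch of the `indices` producer documented above, iterating index patterns of a shape in row-major order. Illustrative only, assuming ndarray's public API at this revision:

```rust
use ndarray::indices;

fn main() {
    // Visit all (row, col) index pairs of a 2 x 3 shape in row-major order.
    let all: Vec<(usize, usize)> = indices((2, 3)).into_iter().collect();
    assert_eq!(all.len(), 6);
    assert_eq!(all[0], (0, 0));
    assert_eq!(all[5], (1, 2));
}
```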
pub fn indices(shape: E) -> Indices -where - E: IntoDimension, +where E: IntoDimension { let dim = shape.into_dimension(); Indices { @@ -51,12 +51,12 @@ where } impl Iterator for IndicesIter -where - D: Dimension, +where D: Dimension { type Item = D::Pattern; #[inline] - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { let index = match self.index { None => return None, Some(ref ix) => ix.clone(), @@ -65,7 +65,8 @@ where Some(index.into_pattern()) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { let l = match self.index { None => 0, Some(ref ix) => { @@ -83,8 +84,7 @@ where } fn fold(self, init: B, mut f: F) -> B - where - F: FnMut(B, D::Pattern) -> B, + where F: FnMut(B, D::Pattern) -> B { let IndicesIter { mut index, dim } = self; let ndim = dim.ndim(); @@ -112,18 +112,15 @@ where impl ExactSizeIterator for IndicesIter where D: Dimension {} impl IntoIterator for Indices -where - D: Dimension, +where D: Dimension { type Item = D::Pattern; type IntoIter = IndicesIter; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { let sz = self.dim.size(); let index = if sz != 0 { Some(self.start) } else { None }; - IndicesIter { - index, - dim: self.dim, - } + IndicesIter { index, dim: self.dim } } } @@ -132,26 +129,26 @@ where /// `Indices` is an `NdProducer` that produces the indices of an array shape. #[derive(Copy, Clone, Debug)] pub struct Indices -where - D: Dimension, +where D: Dimension { start: D, dim: D, } #[derive(Copy, Clone, Debug)] -pub struct IndexPtr { +pub struct IndexPtr +{ index: D, } impl Offset for IndexPtr -where - D: Dimension + Copy, +where D: Dimension + Copy { // stride: The axis to increment type Stride = usize; - unsafe fn stride_offset(mut self, stride: Self::Stride, index: usize) -> Self { + unsafe fn stride_offset(mut self, stride: Self::Stride, index: usize) -> Self + { self.index[stride] += index; self } @@ -172,7 +169,8 @@ where // [0, 0, 0].stride_offset(1, 10) => [0, 10, 0] axis 1 is incremented by 10. // // .as_ref() converts the Ptr value to an Item. For example [0, 10, 0] => (0, 10, 0) -impl NdProducer for Indices { +impl NdProducer for Indices +{ type Item = D::Pattern; type Dim = D; type Ptr = IndexPtr; @@ -180,19 +178,23 @@ impl NdProducer for Indices { private_impl! 
{} - fn raw_dim(&self) -> Self::Dim { + fn raw_dim(&self) -> Self::Dim + { self.dim } - fn equal_dim(&self, dim: &Self::Dim) -> bool { + fn equal_dim(&self, dim: &Self::Dim) -> bool + { self.dim.equal(dim) } - fn as_ptr(&self) -> Self::Ptr { + fn as_ptr(&self) -> Self::Ptr + { IndexPtr { index: self.start } } - fn layout(&self) -> Layout { + fn layout(&self) -> Layout + { if self.dim.ndim() <= 1 { Layout::one_dimensional() } else { @@ -200,40 +202,36 @@ impl NdProducer for Indices { } } - unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item { + unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item + { ptr.index.into_pattern() } - unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr + { let mut index = *i; index += &self.start; IndexPtr { index } } - fn stride_of(&self, axis: Axis) -> Self::Stride { + fn stride_of(&self, axis: Axis) -> Self::Stride + { axis.index() } #[inline(always)] - fn contiguous_stride(&self) -> Self::Stride { + fn contiguous_stride(&self) -> Self::Stride + { 0 } - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) + { let start_a = self.start; let mut start_b = start_a; let (a, b) = self.dim.split_at(axis, index); start_b[axis.index()] += index; - ( - Indices { - start: start_a, - dim: a, - }, - Indices { - start: start_b, - dim: b, - }, - ) + (Indices { start: start_a, dim: a }, Indices { start: start_b, dim: b }) } } @@ -241,15 +239,15 @@ impl NdProducer for Indices { /// /// Iterator element type is `D`. #[derive(Clone)] -pub struct IndicesIterF { +pub struct IndicesIterF +{ dim: D, index: D, has_remaining: bool, } pub fn indices_iter_f(shape: E) -> IndicesIterF -where - E: IntoDimension, +where E: IntoDimension { let dim = shape.into_dimension(); let zero = E::Dim::zeros(dim.ndim()); @@ -261,12 +259,12 @@ where } impl Iterator for IndicesIterF -where - D: Dimension, +where D: Dimension { type Item = D::Pattern; #[inline] - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { if !self.has_remaining { None } else { @@ -276,7 +274,8 @@ where } } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { if !self.has_remaining { return (0, Some(0)); } @@ -295,12 +294,14 @@ where impl ExactSizeIterator for IndicesIterF where D: Dimension {} #[cfg(test)] -mod tests { +mod tests +{ use super::indices; use super::indices_iter_f; #[test] - fn test_indices_iter_c_size_hint() { + fn test_indices_iter_c_size_hint() + { let dim = (3, 4); let mut it = indices(dim).into_iter(); let mut len = dim.0 * dim.1; @@ -313,7 +314,8 @@ mod tests { } #[test] - fn test_indices_iter_c_fold() { + fn test_indices_iter_c_fold() + { macro_rules! run_test { ($dim:expr) => { for num_consume in 0..3 { @@ -341,7 +343,8 @@ mod tests { } #[test] - fn test_indices_iter_f_size_hint() { + fn test_indices_iter_f_size_hint() + { let dim = (3, 4); let mut it = indices_iter_f(dim); let mut len = dim.0 * dim.1; diff --git a/src/iterators/chunks.rs b/src/iterators/chunks.rs index 9cf06b55f..465428968 100644 --- a/src/iterators/chunks.rs +++ b/src/iterators/chunks.rs @@ -30,20 +30,21 @@ impl_ndproducer! { /// See [`.exact_chunks()`](ArrayBase::exact_chunks) for more /// information. 
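Aside: a usage sketch for the exact-chunks producer documented above; `into_shape_with_order` from an earlier hunk in this patch is used to build the input and is assumed to be available at this revision. Illustrative only:

```rust
use ndarray::Array;

fn main() {
    let a = Array::from_iter(0..16).into_shape_with_order((4, 4)).unwrap();
    // Non-overlapping 2 x 2 chunks, visited in row-major order over the chunk grid.
    let sums: Vec<i32> = a.exact_chunks((2, 2)).into_iter().map(|c| c.sum()).collect();
    assert_eq!(sums, vec![10, 18, 42, 50]);
}
```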
//#[derive(Debug)] -pub struct ExactChunks<'a, A, D> { +pub struct ExactChunks<'a, A, D> +{ base: RawArrayView, life: PhantomData<&'a A>, chunk: D, inner_strides: D, } -impl<'a, A, D: Dimension> ExactChunks<'a, A, D> { +impl<'a, A, D: Dimension> ExactChunks<'a, A, D> +{ /// Creates a new exact chunks producer. /// /// **Panics** if any chunk dimension is zero pub(crate) fn new(a: ArrayView<'a, A, D>, chunk: E) -> Self - where - E: IntoDimension, + where E: IntoDimension { let mut a = a.into_raw_view(); let chunk = chunk.into_dimension(); @@ -79,7 +80,8 @@ where { type Item = ::Item; type IntoIter = ExactChunksIter<'a, A, D>; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { ExactChunksIter { iter: self.base.into_base_iter(), life: self.life, @@ -93,7 +95,8 @@ where /// /// See [`.exact_chunks()`](ArrayBase::exact_chunks) for more /// information. -pub struct ExactChunksIter<'a, A, D> { +pub struct ExactChunksIter<'a, A, D> +{ iter: Baseiter, life: PhantomData<&'a A>, chunk: D, @@ -126,20 +129,21 @@ impl_ndproducer! { /// See [`.exact_chunks_mut()`](ArrayBase::exact_chunks_mut) /// for more information. //#[derive(Debug)] -pub struct ExactChunksMut<'a, A, D> { +pub struct ExactChunksMut<'a, A, D> +{ base: RawArrayViewMut, life: PhantomData<&'a mut A>, chunk: D, inner_strides: D, } -impl<'a, A, D: Dimension> ExactChunksMut<'a, A, D> { +impl<'a, A, D: Dimension> ExactChunksMut<'a, A, D> +{ /// Creates a new exact chunks producer. /// /// **Panics** if any chunk dimension is zero pub(crate) fn new(a: ArrayViewMut<'a, A, D>, chunk: E) -> Self - where - E: IntoDimension, + where E: IntoDimension { let mut a = a.into_raw_view_mut(); let chunk = chunk.into_dimension(); @@ -175,7 +179,8 @@ where { type Item = ::Item; type IntoIter = ExactChunksIterMut<'a, A, D>; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { ExactChunksIterMut { iter: self.base.into_base_iter(), life: self.life, @@ -234,7 +239,8 @@ impl_iterator! { /// /// See [`.exact_chunks_mut()`](ArrayBase::exact_chunks_mut) /// for more information. 
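The mutable variant hands out non-overlapping `ArrayViewMut` chunks; a small sketch of how it might be used (values illustrative):

```rust
use ndarray::Array2;

let mut a = Array2::<i32>::zeros((4, 6));
// Fill each non-overlapping 2×3 chunk with its own marker value.
for (k, mut chunk) in a.exact_chunks_mut((2, 3)).into_iter().enumerate() {
    chunk.fill(k as i32);
}
// Four chunks of six elements each, holding 0, 1, 2 and 3.
assert_eq!(a.sum(), 6 * (0 + 1 + 2 + 3));
```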
-pub struct ExactChunksIterMut<'a, A, D> { +pub struct ExactChunksIterMut<'a, A, D> +{ iter: Baseiter, life: PhantomData<&'a mut A>, chunk: D, diff --git a/src/iterators/into_iter.rs b/src/iterators/into_iter.rs index cfa48299a..fcc2e4b8c 100644 --- a/src/iterators/into_iter.rs +++ b/src/iterators/into_iter.rs @@ -15,11 +15,9 @@ use crate::OwnedRepr; use super::Baseiter; use crate::impl_owned_array::drop_unreachable_raw; - /// By-value iterator for an array pub struct IntoIter -where - D: Dimension, +where D: Dimension { array_data: OwnedRepr, inner: Baseiter, @@ -31,12 +29,12 @@ where has_unreachable_elements: bool, } -impl IntoIter -where - D: Dimension, +impl IntoIter +where D: Dimension { /// Create a new by-value iterator that consumes `array` - pub(crate) fn new(mut array: Array) -> Self { + pub(crate) fn new(mut array: Array) -> Self + { unsafe { let array_head_ptr = array.ptr; let ptr = array.as_mut_ptr(); @@ -57,39 +55,45 @@ where } } -impl Iterator for IntoIter { +impl Iterator for IntoIter +{ type Item = A; #[inline] - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { self.inner.next().map(|p| unsafe { p.read() }) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { self.inner.size_hint() } } -impl ExactSizeIterator for IntoIter { - fn len(&self) -> usize { self.inner.len() } +impl ExactSizeIterator for IntoIter +{ + fn len(&self) -> usize + { + self.inner.len() + } } impl Drop for IntoIter -where - D: Dimension +where D: Dimension { - fn drop(&mut self) { + fn drop(&mut self) + { if !self.has_unreachable_elements || mem::size_of::() == 0 || !mem::needs_drop::() { return; } // iterate til the end - while let Some(_) = self.next() { } + while let Some(_) = self.next() {} unsafe { let data_ptr = self.array_data.as_ptr_mut(); - let view = RawArrayViewMut::new(self.array_head_ptr, self.inner.dim.clone(), - self.inner.strides.clone()); + let view = RawArrayViewMut::new(self.array_head_ptr, self.inner.dim.clone(), self.inner.strides.clone()); debug_assert!(self.inner.dim.size() < self.data_len, "data_len {} and dim size {}", self.data_len, self.inner.dim.size()); drop_unreachable_raw(view, data_ptr, self.data_len); @@ -98,13 +102,13 @@ where } impl IntoIterator for Array -where - D: Dimension +where D: Dimension { type Item = A; type IntoIter = IntoIter; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { IntoIter::new(self) } } @@ -117,7 +121,8 @@ where type Item = A; type IntoIter = IntoIter; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { IntoIter::new(self.into_owned()) } } @@ -130,7 +135,8 @@ where type Item = A; type IntoIter = IntoIter; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { IntoIter::new(self.into_owned()) } } diff --git a/src/iterators/iter.rs b/src/iterators/iter.rs index 3f8b05009..5c5acb9d7 100644 --- a/src/iterators/iter.rs +++ b/src/iterators/iter.rs @@ -19,9 +19,9 @@ pub use crate::iterators::{ ExactChunksMut, IndexedIter, IndexedIterMut, + IntoIter, Iter, IterMut, - IntoIter, Lanes, LanesIter, LanesIterMut, diff --git a/src/iterators/lanes.rs b/src/iterators/lanes.rs index 7286e0696..11c83d002 100644 --- a/src/iterators/lanes.rs +++ b/src/iterators/lanes.rs @@ -25,16 +25,17 @@ impl_ndproducer! { /// See [`.lanes()`](ArrayBase::lanes) /// for more information. 
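For reference, the lanes along `Axis(1)` of a 2-D array are its rows; a brief sketch (values illustrative):

```rust
use ndarray::{arr2, Axis};

let a = arr2(&[[1., 2., 3.], [4., 5., 6.]]);
let row_sums: Vec<f64> = a.lanes(Axis(1)).into_iter().map(|lane| lane.sum()).collect();
assert_eq!(row_sums, vec![6., 15.]);
```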
-pub struct Lanes<'a, A, D> { +pub struct Lanes<'a, A, D> +{ base: ArrayView<'a, A, D>, inner_len: Ix, inner_stride: Ixs, } -impl<'a, A, D: Dimension> Lanes<'a, A, D> { +impl<'a, A, D: Dimension> Lanes<'a, A, D> +{ pub(crate) fn new(v: ArrayView<'a, A, Di>, axis: Axis) -> Self - where - Di: Dimension, + where Di: Dimension { let ndim = v.ndim(); let len; @@ -76,12 +77,12 @@ impl_ndproducer! { } impl<'a, A, D> IntoIterator for Lanes<'a, A, D> -where - D: Dimension, +where D: Dimension { type Item = ::Item; type IntoIter = LanesIter<'a, A, D>; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { LanesIter { iter: self.base.into_base_iter(), inner_len: self.inner_len, @@ -93,16 +94,17 @@ where /// See [`.lanes_mut()`](ArrayBase::lanes_mut) /// for more information. -pub struct LanesMut<'a, A, D> { +pub struct LanesMut<'a, A, D> +{ base: ArrayViewMut<'a, A, D>, inner_len: Ix, inner_stride: Ixs, } -impl<'a, A, D: Dimension> LanesMut<'a, A, D> { +impl<'a, A, D: Dimension> LanesMut<'a, A, D> +{ pub(crate) fn new(v: ArrayViewMut<'a, A, Di>, axis: Axis) -> Self - where - Di: Dimension, + where Di: Dimension { let ndim = v.ndim(); let len; @@ -126,12 +128,12 @@ impl<'a, A, D: Dimension> LanesMut<'a, A, D> { } impl<'a, A, D> IntoIterator for LanesMut<'a, A, D> -where - D: Dimension, +where D: Dimension { type Item = ::Item; type IntoIter = LanesIterMut<'a, A, D>; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { LanesIterMut { iter: self.base.into_base_iter(), inner_len: self.inner_len, diff --git a/src/iterators/macros.rs b/src/iterators/macros.rs index 7fbe410fe..78697ec25 100644 --- a/src/iterators/macros.rs +++ b/src/iterators/macros.rs @@ -123,7 +123,7 @@ expand_if!(@nonempty [$($cloneparm)*] } ); - } + }; } macro_rules! impl_iterator { @@ -170,5 +170,5 @@ macro_rules! impl_iterator { self.$base.size_hint() } } - } + }; } diff --git a/src/iterators/mod.rs b/src/iterators/mod.rs index 3e1d6fe43..4851b2827 100644 --- a/src/iterators/mod.rs +++ b/src/iterators/mod.rs @@ -36,19 +36,22 @@ use std::slice::{self, Iter as SliceIter, IterMut as SliceIterMut}; /// /// Iterator element type is `*mut A`. #[derive(Debug)] -pub struct Baseiter { +pub struct Baseiter +{ ptr: *mut A, dim: D, strides: D, index: Option, } -impl Baseiter { +impl Baseiter +{ /// Creating a Baseiter is unsafe because shape and stride parameters need /// to be correct to avoid performing an unsafe pointer offset while /// iterating. 
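The invariant behind that safety comment is that the element at logical index (i, j) sits at offset `i * strides[0] + j * strides[1]` elements from the first element; a sketch using the public API to illustrate it (the unsafe dereference is sound only because the offset stays in bounds):

```rust
use ndarray::arr2;

let a = arr2(&[[1, 2, 3], [4, 5, 6]]);
let s = a.strides(); // element strides, e.g. [3, 1] for this layout
let base = &a[[0, 0]] as *const i32;
let elem = unsafe { *base.offset(1 * s[0] + 2 * s[1]) };
assert_eq!(elem, a[[1, 2]]);
```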
#[inline] - pub unsafe fn new(ptr: *mut A, len: D, stride: D) -> Baseiter { + pub unsafe fn new(ptr: *mut A, len: D, stride: D) -> Baseiter + { Baseiter { ptr, index: len.first_index(), @@ -58,11 +61,13 @@ impl Baseiter { } } -impl Iterator for Baseiter { +impl Iterator for Baseiter +{ type Item = *mut A; #[inline] - fn next(&mut self) -> Option<*mut A> { + fn next(&mut self) -> Option<*mut A> + { let index = match self.index { None => return None, Some(ref ix) => ix.clone(), @@ -72,14 +77,14 @@ impl Iterator for Baseiter { unsafe { Some(self.ptr.offset(offset)) } } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { let len = self.len(); (len, Some(len)) } fn fold(mut self, init: Acc, mut g: G) -> Acc - where - G: FnMut(Acc, *mut A) -> Acc, + where G: FnMut(Acc, *mut A) -> Acc { let ndim = self.dim.ndim(); debug_assert_ne!(ndim, 0); @@ -105,8 +110,10 @@ impl Iterator for Baseiter { } } -impl ExactSizeIterator for Baseiter { - fn len(&self) -> usize { +impl ExactSizeIterator for Baseiter +{ + fn len(&self) -> usize + { match self.index { None => 0, Some(ref ix) => { @@ -123,9 +130,11 @@ impl ExactSizeIterator for Baseiter { } } -impl DoubleEndedIterator for Baseiter { +impl DoubleEndedIterator for Baseiter +{ #[inline] - fn next_back(&mut self) -> Option<*mut A> { + fn next_back(&mut self) -> Option<*mut A> + { let index = match self.index { None => return None, Some(ix) => ix, @@ -139,7 +148,8 @@ impl DoubleEndedIterator for Baseiter { unsafe { Some(self.ptr.offset(offset)) } } - fn nth_back(&mut self, n: usize) -> Option<*mut A> { + fn nth_back(&mut self, n: usize) -> Option<*mut A> + { let index = self.index?; let len = self.dim[0] - index[0]; if n < len { @@ -156,8 +166,7 @@ impl DoubleEndedIterator for Baseiter { } fn rfold(mut self, init: Acc, mut g: G) -> Acc - where - G: FnMut(Acc, *mut A) -> Acc, + where G: FnMut(Acc, *mut A) -> Acc { let mut accum = init; if let Some(index) = self.index { @@ -200,8 +209,10 @@ clone_bounds!( } ); -impl<'a, A, D: Dimension> ElementsBase<'a, A, D> { - pub fn new(v: ArrayView<'a, A, D>) -> Self { +impl<'a, A, D: Dimension> ElementsBase<'a, A, D> +{ + pub fn new(v: ArrayView<'a, A, D>) -> Self + { ElementsBase { inner: v.into_base_iter(), life: PhantomData, @@ -209,44 +220,47 @@ impl<'a, A, D: Dimension> ElementsBase<'a, A, D> { } } -impl<'a, A, D: Dimension> Iterator for ElementsBase<'a, A, D> { +impl<'a, A, D: Dimension> Iterator for ElementsBase<'a, A, D> +{ type Item = &'a A; #[inline] - fn next(&mut self) -> Option<&'a A> { + fn next(&mut self) -> Option<&'a A> + { self.inner.next().map(|p| unsafe { &*p }) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { self.inner.size_hint() } fn fold(self, init: Acc, mut g: G) -> Acc - where - G: FnMut(Acc, Self::Item) -> Acc, + where G: FnMut(Acc, Self::Item) -> Acc { unsafe { self.inner.fold(init, move |acc, ptr| g(acc, &*ptr)) } } } -impl<'a, A> DoubleEndedIterator for ElementsBase<'a, A, Ix1> { +impl<'a, A> DoubleEndedIterator for ElementsBase<'a, A, Ix1> +{ #[inline] - fn next_back(&mut self) -> Option<&'a A> { + fn next_back(&mut self) -> Option<&'a A> + { self.inner.next_back().map(|p| unsafe { &*p }) } fn rfold(self, init: Acc, mut g: G) -> Acc - where - G: FnMut(Acc, Self::Item) -> Acc, + where G: FnMut(Acc, Self::Item) -> Acc { unsafe { self.inner.rfold(init, move |acc, ptr| g(acc, &*ptr)) } } } impl<'a, A, D> ExactSizeIterator for ElementsBase<'a, A, D> -where - D: Dimension, +where D: Dimension { - fn len(&self) -> usize { 
+ fn len(&self) -> usize + { self.inner.len() } } @@ -279,10 +293,10 @@ clone_bounds!( ); impl<'a, A, D> Iter<'a, A, D> -where - D: Dimension, +where D: Dimension { - pub(crate) fn new(self_: ArrayView<'a, A, D>) -> Self { + pub(crate) fn new(self_: ArrayView<'a, A, D>) -> Self + { Iter { inner: if let Some(slc) = self_.to_slice() { ElementsRepr::Slice(slc.iter()) @@ -294,10 +308,10 @@ where } impl<'a, A, D> IterMut<'a, A, D> -where - D: Dimension, +where D: Dimension { - pub(crate) fn new(self_: ArrayViewMut<'a, A, D>) -> Self { + pub(crate) fn new(self_: ArrayViewMut<'a, A, D>) -> Self + { IterMut { inner: match self_.try_into_slice() { Ok(x) => ElementsRepr::Slice(x.iter_mut()), @@ -308,7 +322,8 @@ where } #[derive(Clone, Debug)] -pub enum ElementsRepr { +pub enum ElementsRepr +{ Slice(S), Counted(C), } @@ -319,13 +334,15 @@ pub enum ElementsRepr { /// /// See [`.iter()`](ArrayBase::iter) for more information. #[derive(Debug)] -pub struct Iter<'a, A, D> { +pub struct Iter<'a, A, D> +{ inner: ElementsRepr, ElementsBase<'a, A, D>>, } /// Counted read only iterator #[derive(Debug)] -pub struct ElementsBase<'a, A, D> { +pub struct ElementsBase<'a, A, D> +{ inner: Baseiter, life: PhantomData<&'a A>, } @@ -336,7 +353,8 @@ pub struct ElementsBase<'a, A, D> { /// /// See [`.iter_mut()`](ArrayBase::iter_mut) for more information. #[derive(Debug)] -pub struct IterMut<'a, A, D> { +pub struct IterMut<'a, A, D> +{ inner: ElementsRepr, ElementsBaseMut<'a, A, D>>, } @@ -344,13 +362,16 @@ pub struct IterMut<'a, A, D> { /// /// Iterator element type is `&'a mut A`. #[derive(Debug)] -pub struct ElementsBaseMut<'a, A, D> { +pub struct ElementsBaseMut<'a, A, D> +{ inner: Baseiter, life: PhantomData<&'a mut A>, } -impl<'a, A, D: Dimension> ElementsBaseMut<'a, A, D> { - pub fn new(v: ArrayViewMut<'a, A, D>) -> Self { +impl<'a, A, D: Dimension> ElementsBaseMut<'a, A, D> +{ + pub fn new(v: ArrayViewMut<'a, A, D>) -> Self + { ElementsBaseMut { inner: v.into_base_iter(), life: PhantomData, @@ -369,127 +390,130 @@ pub struct IndexedIter<'a, A, D>(ElementsBase<'a, A, D>); pub struct IndexedIterMut<'a, A, D>(ElementsBaseMut<'a, A, D>); impl<'a, A, D> IndexedIter<'a, A, D> -where - D: Dimension, +where D: Dimension { - pub(crate) fn new(x: ElementsBase<'a, A, D>) -> Self { + pub(crate) fn new(x: ElementsBase<'a, A, D>) -> Self + { IndexedIter(x) } } impl<'a, A, D> IndexedIterMut<'a, A, D> -where - D: Dimension, +where D: Dimension { - pub(crate) fn new(x: ElementsBaseMut<'a, A, D>) -> Self { + pub(crate) fn new(x: ElementsBaseMut<'a, A, D>) -> Self + { IndexedIterMut(x) } } -impl<'a, A, D: Dimension> Iterator for Iter<'a, A, D> { +impl<'a, A, D: Dimension> Iterator for Iter<'a, A, D> +{ type Item = &'a A; #[inline] - fn next(&mut self) -> Option<&'a A> { + fn next(&mut self) -> Option<&'a A> + { either_mut!(self.inner, iter => iter.next()) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { either!(self.inner, ref iter => iter.size_hint()) } fn fold(self, init: Acc, g: G) -> Acc - where - G: FnMut(Acc, Self::Item) -> Acc, + where G: FnMut(Acc, Self::Item) -> Acc { either!(self.inner, iter => iter.fold(init, g)) } - fn nth(&mut self, n: usize) -> Option { + fn nth(&mut self, n: usize) -> Option + { either_mut!(self.inner, iter => iter.nth(n)) } fn collect(self) -> B - where - B: FromIterator, + where B: FromIterator { either!(self.inner, iter => iter.collect()) } fn all(&mut self, f: F) -> bool - where - F: FnMut(Self::Item) -> bool, + where F: FnMut(Self::Item) -> bool { 
either_mut!(self.inner, iter => iter.all(f)) } fn any(&mut self, f: F) -> bool - where - F: FnMut(Self::Item) -> bool, + where F: FnMut(Self::Item) -> bool { either_mut!(self.inner, iter => iter.any(f)) } fn find
<P>
(&mut self, predicate: P) -> Option - where - P: FnMut(&Self::Item) -> bool, + where P: FnMut(&Self::Item) -> bool { either_mut!(self.inner, iter => iter.find(predicate)) } fn find_map(&mut self, f: F) -> Option - where - F: FnMut(Self::Item) -> Option, + where F: FnMut(Self::Item) -> Option { either_mut!(self.inner, iter => iter.find_map(f)) } - fn count(self) -> usize { + fn count(self) -> usize + { either!(self.inner, iter => iter.count()) } - fn last(self) -> Option { + fn last(self) -> Option + { either!(self.inner, iter => iter.last()) } fn position
<P>
(&mut self, predicate: P) -> Option - where - P: FnMut(Self::Item) -> bool, + where P: FnMut(Self::Item) -> bool { either_mut!(self.inner, iter => iter.position(predicate)) } } -impl<'a, A> DoubleEndedIterator for Iter<'a, A, Ix1> { +impl<'a, A> DoubleEndedIterator for Iter<'a, A, Ix1> +{ #[inline] - fn next_back(&mut self) -> Option<&'a A> { + fn next_back(&mut self) -> Option<&'a A> + { either_mut!(self.inner, iter => iter.next_back()) } - fn nth_back(&mut self, n: usize) -> Option<&'a A> { + fn nth_back(&mut self, n: usize) -> Option<&'a A> + { either_mut!(self.inner, iter => iter.nth_back(n)) } fn rfold(self, init: Acc, g: G) -> Acc - where - G: FnMut(Acc, Self::Item) -> Acc, + where G: FnMut(Acc, Self::Item) -> Acc { either!(self.inner, iter => iter.rfold(init, g)) } } impl<'a, A, D> ExactSizeIterator for Iter<'a, A, D> -where - D: Dimension, +where D: Dimension { - fn len(&self) -> usize { + fn len(&self) -> usize + { either!(self.inner, ref iter => iter.len()) } } -impl<'a, A, D: Dimension> Iterator for IndexedIter<'a, A, D> { +impl<'a, A, D: Dimension> Iterator for IndexedIter<'a, A, D> +{ type Item = (D::Pattern, &'a A); #[inline] - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { let index = match self.0.inner.index { None => return None, Some(ref ix) => ix.clone(), @@ -500,166 +524,173 @@ impl<'a, A, D: Dimension> Iterator for IndexedIter<'a, A, D> { } } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { self.0.size_hint() } } impl<'a, A, D> ExactSizeIterator for IndexedIter<'a, A, D> -where - D: Dimension, +where D: Dimension { - fn len(&self) -> usize { + fn len(&self) -> usize + { self.0.inner.len() } } -impl<'a, A, D: Dimension> Iterator for IterMut<'a, A, D> { +impl<'a, A, D: Dimension> Iterator for IterMut<'a, A, D> +{ type Item = &'a mut A; #[inline] - fn next(&mut self) -> Option<&'a mut A> { + fn next(&mut self) -> Option<&'a mut A> + { either_mut!(self.inner, iter => iter.next()) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { either!(self.inner, ref iter => iter.size_hint()) } fn fold(self, init: Acc, g: G) -> Acc - where - G: FnMut(Acc, Self::Item) -> Acc, + where G: FnMut(Acc, Self::Item) -> Acc { either!(self.inner, iter => iter.fold(init, g)) } - fn nth(&mut self, n: usize) -> Option { + fn nth(&mut self, n: usize) -> Option + { either_mut!(self.inner, iter => iter.nth(n)) } fn collect(self) -> B - where - B: FromIterator, + where B: FromIterator { either!(self.inner, iter => iter.collect()) } fn all(&mut self, f: F) -> bool - where - F: FnMut(Self::Item) -> bool, + where F: FnMut(Self::Item) -> bool { either_mut!(self.inner, iter => iter.all(f)) } fn any(&mut self, f: F) -> bool - where - F: FnMut(Self::Item) -> bool, + where F: FnMut(Self::Item) -> bool { either_mut!(self.inner, iter => iter.any(f)) } fn find
<P>
(&mut self, predicate: P) -> Option - where - P: FnMut(&Self::Item) -> bool, + where P: FnMut(&Self::Item) -> bool { either_mut!(self.inner, iter => iter.find(predicate)) } fn find_map(&mut self, f: F) -> Option - where - F: FnMut(Self::Item) -> Option, + where F: FnMut(Self::Item) -> Option { either_mut!(self.inner, iter => iter.find_map(f)) } - fn count(self) -> usize { + fn count(self) -> usize + { either!(self.inner, iter => iter.count()) } - fn last(self) -> Option { + fn last(self) -> Option + { either!(self.inner, iter => iter.last()) } fn position
<P>
(&mut self, predicate: P) -> Option - where - P: FnMut(Self::Item) -> bool, + where P: FnMut(Self::Item) -> bool { either_mut!(self.inner, iter => iter.position(predicate)) } } -impl<'a, A> DoubleEndedIterator for IterMut<'a, A, Ix1> { +impl<'a, A> DoubleEndedIterator for IterMut<'a, A, Ix1> +{ #[inline] - fn next_back(&mut self) -> Option<&'a mut A> { + fn next_back(&mut self) -> Option<&'a mut A> + { either_mut!(self.inner, iter => iter.next_back()) } - fn nth_back(&mut self, n: usize) -> Option<&'a mut A> { + fn nth_back(&mut self, n: usize) -> Option<&'a mut A> + { either_mut!(self.inner, iter => iter.nth_back(n)) } fn rfold(self, init: Acc, g: G) -> Acc - where - G: FnMut(Acc, Self::Item) -> Acc, + where G: FnMut(Acc, Self::Item) -> Acc { either!(self.inner, iter => iter.rfold(init, g)) } } impl<'a, A, D> ExactSizeIterator for IterMut<'a, A, D> -where - D: Dimension, +where D: Dimension { - fn len(&self) -> usize { + fn len(&self) -> usize + { either!(self.inner, ref iter => iter.len()) } } -impl<'a, A, D: Dimension> Iterator for ElementsBaseMut<'a, A, D> { +impl<'a, A, D: Dimension> Iterator for ElementsBaseMut<'a, A, D> +{ type Item = &'a mut A; #[inline] - fn next(&mut self) -> Option<&'a mut A> { + fn next(&mut self) -> Option<&'a mut A> + { self.inner.next().map(|p| unsafe { &mut *p }) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { self.inner.size_hint() } fn fold(self, init: Acc, mut g: G) -> Acc - where - G: FnMut(Acc, Self::Item) -> Acc, + where G: FnMut(Acc, Self::Item) -> Acc { unsafe { self.inner.fold(init, move |acc, ptr| g(acc, &mut *ptr)) } } } -impl<'a, A> DoubleEndedIterator for ElementsBaseMut<'a, A, Ix1> { +impl<'a, A> DoubleEndedIterator for ElementsBaseMut<'a, A, Ix1> +{ #[inline] - fn next_back(&mut self) -> Option<&'a mut A> { + fn next_back(&mut self) -> Option<&'a mut A> + { self.inner.next_back().map(|p| unsafe { &mut *p }) } fn rfold(self, init: Acc, mut g: G) -> Acc - where - G: FnMut(Acc, Self::Item) -> Acc, + where G: FnMut(Acc, Self::Item) -> Acc { unsafe { self.inner.rfold(init, move |acc, ptr| g(acc, &mut *ptr)) } } } impl<'a, A, D> ExactSizeIterator for ElementsBaseMut<'a, A, D> -where - D: Dimension, +where D: Dimension { - fn len(&self) -> usize { + fn len(&self) -> usize + { self.inner.len() } } -impl<'a, A, D: Dimension> Iterator for IndexedIterMut<'a, A, D> { +impl<'a, A, D: Dimension> Iterator for IndexedIterMut<'a, A, D> +{ type Item = (D::Pattern, &'a mut A); #[inline] - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { let index = match self.0.inner.index { None => return None, Some(ref ix) => ix.clone(), @@ -670,16 +701,17 @@ impl<'a, A, D: Dimension> Iterator for IndexedIterMut<'a, A, D> { } } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { self.0.size_hint() } } impl<'a, A, D> ExactSizeIterator for IndexedIterMut<'a, A, D> -where - D: Dimension, +where D: Dimension { - fn len(&self) -> usize { + fn len(&self) -> usize + { self.0.inner.len() } } @@ -688,7 +720,8 @@ where /// each lane along that axis. /// /// See [`.lanes()`](ArrayBase::lanes) for more information. 
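The indexed iterators in the hunks above pair each element with its index pattern, which for a 2-D array is `(row, col)`; for example:

```rust
use ndarray::arr2;

let a = arr2(&[[10, 20], [30, 40]]);
let pairs: Vec<((usize, usize), i32)> = a.indexed_iter().map(|(ix, &x)| (ix, x)).collect();
assert_eq!(pairs[3], ((1, 1), 40));
```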
-pub struct LanesIter<'a, A, D> { +pub struct LanesIter<'a, A, D> +{ inner_len: Ix, inner_stride: Ixs, iter: Baseiter, @@ -708,36 +741,38 @@ clone_bounds!( ); impl<'a, A, D> Iterator for LanesIter<'a, A, D> -where - D: Dimension, +where D: Dimension { type Item = ArrayView<'a, A, Ix1>; - fn next(&mut self) -> Option { - self.iter.next().map(|ptr| unsafe { - ArrayView::new_(ptr, Ix1(self.inner_len), Ix1(self.inner_stride as Ix)) - }) + fn next(&mut self) -> Option + { + self.iter + .next() + .map(|ptr| unsafe { ArrayView::new_(ptr, Ix1(self.inner_len), Ix1(self.inner_stride as Ix)) }) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { self.iter.size_hint() } } impl<'a, A, D> ExactSizeIterator for LanesIter<'a, A, D> -where - D: Dimension, +where D: Dimension { - fn len(&self) -> usize { + fn len(&self) -> usize + { self.iter.len() } } impl<'a, A> DoubleEndedIterator for LanesIter<'a, A, Ix1> { - fn next_back(&mut self) -> Option { - self.iter.next_back().map(|ptr| unsafe { - ArrayView::new_(ptr, Ix1(self.inner_len), Ix1(self.inner_stride as Ix)) - }) + fn next_back(&mut self) -> Option + { + self.iter + .next_back() + .map(|ptr| unsafe { ArrayView::new_(ptr, Ix1(self.inner_len), Ix1(self.inner_stride as Ix)) }) } } @@ -749,7 +784,8 @@ impl<'a, A> DoubleEndedIterator for LanesIter<'a, A, Ix1> /// /// See [`.lanes_mut()`](ArrayBase::lanes_mut) /// for more information. -pub struct LanesIterMut<'a, A, D> { +pub struct LanesIterMut<'a, A, D> +{ inner_len: Ix, inner_stride: Ixs, iter: Baseiter, @@ -757,41 +793,44 @@ pub struct LanesIterMut<'a, A, D> { } impl<'a, A, D> Iterator for LanesIterMut<'a, A, D> -where - D: Dimension, +where D: Dimension { type Item = ArrayViewMut<'a, A, Ix1>; - fn next(&mut self) -> Option { - self.iter.next().map(|ptr| unsafe { - ArrayViewMut::new_(ptr, Ix1(self.inner_len), Ix1(self.inner_stride as Ix)) - }) + fn next(&mut self) -> Option + { + self.iter + .next() + .map(|ptr| unsafe { ArrayViewMut::new_(ptr, Ix1(self.inner_len), Ix1(self.inner_stride as Ix)) }) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { self.iter.size_hint() } } impl<'a, A, D> ExactSizeIterator for LanesIterMut<'a, A, D> -where - D: Dimension, +where D: Dimension { - fn len(&self) -> usize { + fn len(&self) -> usize + { self.iter.len() } } impl<'a, A> DoubleEndedIterator for LanesIterMut<'a, A, Ix1> { - fn next_back(&mut self) -> Option { - self.iter.next_back().map(|ptr| unsafe { - ArrayViewMut::new_(ptr, Ix1(self.inner_len), Ix1(self.inner_stride as Ix)) - }) + fn next_back(&mut self) -> Option + { + self.iter + .next_back() + .map(|ptr| unsafe { ArrayViewMut::new_(ptr, Ix1(self.inner_len), Ix1(self.inner_stride as Ix)) }) } } #[derive(Debug)] -pub struct AxisIterCore { +pub struct AxisIterCore +{ /// Index along the axis of the value of `.next()`, relative to the start /// of the axis. index: Ix, @@ -822,7 +861,8 @@ clone_bounds!( } ); -impl AxisIterCore { +impl AxisIterCore +{ /// Constructs a new iterator over the specified axis. fn new(v: ArrayBase, axis: Axis) -> Self where @@ -840,7 +880,8 @@ impl AxisIterCore { } #[inline] - unsafe fn offset(&self, index: usize) -> *mut A { + unsafe fn offset(&self, index: usize) -> *mut A + { debug_assert!( index < self.end, "index={}, end={}, stride={}", @@ -859,7 +900,8 @@ impl AxisIterCore { /// **Panics** if `index` is strictly greater than the iterator's remaining /// length. 
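`AxisIterCore` backs the public `axis_iter`/`outer_iter` family; a quick sketch of the user-facing behaviour (shapes illustrative):

```rust
use ndarray::{Array3, Axis};

let a = Array3::<f64>::zeros((2, 3, 4));
// One (2 × 4) subview per index along axis 1.
let mut it = a.axis_iter(Axis(1));
assert_eq!(it.len(), 3);
assert_eq!(it.next().unwrap().shape(), &[2, 4]);
```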
#[track_caller] - fn split_at(self, index: usize) -> (Self, Self) { + fn split_at(self, index: usize) -> (Self, Self) + { assert!(index <= self.len()); let mid = self.index + index; let left = AxisIterCore { @@ -883,25 +925,27 @@ impl AxisIterCore { /// Does the same thing as `.next()` but also returns the index of the item /// relative to the start of the axis. - fn next_with_index(&mut self) -> Option<(usize, *mut A)> { + fn next_with_index(&mut self) -> Option<(usize, *mut A)> + { let index = self.index; self.next().map(|ptr| (index, ptr)) } /// Does the same thing as `.next_back()` but also returns the index of the /// item relative to the start of the axis. - fn next_back_with_index(&mut self) -> Option<(usize, *mut A)> { + fn next_back_with_index(&mut self) -> Option<(usize, *mut A)> + { self.next_back().map(|ptr| (self.end, ptr)) } } impl Iterator for AxisIterCore -where - D: Dimension, +where D: Dimension { type Item = *mut A; - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { if self.index >= self.end { None } else { @@ -911,17 +955,18 @@ where } } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { let len = self.len(); (len, Some(len)) } } impl DoubleEndedIterator for AxisIterCore -where - D: Dimension, +where D: Dimension { - fn next_back(&mut self) -> Option { + fn next_back(&mut self) -> Option + { if self.index >= self.end { None } else { @@ -933,10 +978,10 @@ where } impl ExactSizeIterator for AxisIterCore -where - D: Dimension, +where D: Dimension { - fn len(&self) -> usize { + fn len(&self) -> usize + { self.end - self.index } } @@ -956,7 +1001,8 @@ where /// or [`.axis_iter()`](ArrayBase::axis_iter) /// for more information. #[derive(Debug)] -pub struct AxisIter<'a, A, D> { +pub struct AxisIter<'a, A, D> +{ iter: AxisIterCore, life: PhantomData<&'a A>, } @@ -971,11 +1017,11 @@ clone_bounds!( } ); -impl<'a, A, D: Dimension> AxisIter<'a, A, D> { +impl<'a, A, D: Dimension> AxisIter<'a, A, D> +{ /// Creates a new iterator over the specified axis. pub(crate) fn new(v: ArrayView<'a, A, Di>, axis: Axis) -> Self - where - Di: RemoveAxis, + where Di: RemoveAxis { AxisIter { iter: AxisIterCore::new(v, axis), @@ -991,7 +1037,8 @@ impl<'a, A, D: Dimension> AxisIter<'a, A, D> { /// **Panics** if `index` is strictly greater than the iterator's remaining /// length. 
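`split_at` divides the remaining iterator into two independent halves, which is what makes parallel splitting possible; for instance:

```rust
use ndarray::{Array2, Axis};

let a = Array2::<f64>::zeros((5, 3));
// First two rows on one side, the remaining three on the other.
let (front, back) = a.axis_iter(Axis(0)).split_at(2);
assert_eq!((front.len(), back.len()), (2, 3));
```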
#[track_caller] - pub fn split_at(self, index: usize) -> (Self, Self) { + pub fn split_at(self, index: usize) -> (Self, Self) + { let (left, right) = self.iter.split_at(index); ( AxisIter { @@ -1007,34 +1054,35 @@ impl<'a, A, D: Dimension> AxisIter<'a, A, D> { } impl<'a, A, D> Iterator for AxisIter<'a, A, D> -where - D: Dimension, +where D: Dimension { type Item = ArrayView<'a, A, D>; - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { self.iter.next().map(|ptr| unsafe { self.as_ref(ptr) }) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { self.iter.size_hint() } } impl<'a, A, D> DoubleEndedIterator for AxisIter<'a, A, D> -where - D: Dimension, +where D: Dimension { - fn next_back(&mut self) -> Option { + fn next_back(&mut self) -> Option + { self.iter.next_back().map(|ptr| unsafe { self.as_ref(ptr) }) } } impl<'a, A, D> ExactSizeIterator for AxisIter<'a, A, D> -where - D: Dimension, +where D: Dimension { - fn len(&self) -> usize { + fn len(&self) -> usize + { self.iter.len() } } @@ -1053,16 +1101,17 @@ where /// See [`.outer_iter_mut()`](ArrayBase::outer_iter_mut) /// or [`.axis_iter_mut()`](ArrayBase::axis_iter_mut) /// for more information. -pub struct AxisIterMut<'a, A, D> { +pub struct AxisIterMut<'a, A, D> +{ iter: AxisIterCore, life: PhantomData<&'a mut A>, } -impl<'a, A, D: Dimension> AxisIterMut<'a, A, D> { +impl<'a, A, D: Dimension> AxisIterMut<'a, A, D> +{ /// Creates a new iterator over the specified axis. pub(crate) fn new(v: ArrayViewMut<'a, A, Di>, axis: Axis) -> Self - where - Di: RemoveAxis, + where Di: RemoveAxis { AxisIterMut { iter: AxisIterCore::new(v, axis), @@ -1078,7 +1127,8 @@ impl<'a, A, D: Dimension> AxisIterMut<'a, A, D> { /// **Panics** if `index` is strictly greater than the iterator's remaining /// length. #[track_caller] - pub fn split_at(self, index: usize) -> (Self, Self) { + pub fn split_at(self, index: usize) -> (Self, Self) + { let (left, right) = self.iter.split_at(index); ( AxisIterMut { @@ -1094,53 +1144,58 @@ impl<'a, A, D: Dimension> AxisIterMut<'a, A, D> { } impl<'a, A, D> Iterator for AxisIterMut<'a, A, D> -where - D: Dimension, +where D: Dimension { type Item = ArrayViewMut<'a, A, D>; - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { self.iter.next().map(|ptr| unsafe { self.as_ref(ptr) }) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { self.iter.size_hint() } } impl<'a, A, D> DoubleEndedIterator for AxisIterMut<'a, A, D> -where - D: Dimension, +where D: Dimension { - fn next_back(&mut self) -> Option { + fn next_back(&mut self) -> Option + { self.iter.next_back().map(|ptr| unsafe { self.as_ref(ptr) }) } } impl<'a, A, D> ExactSizeIterator for AxisIterMut<'a, A, D> -where - D: Dimension, +where D: Dimension { - fn len(&self) -> usize { + fn len(&self) -> usize + { self.iter.len() } } -impl<'a, A, D: Dimension> NdProducer for AxisIter<'a, A, D> { +impl<'a, A, D: Dimension> NdProducer for AxisIter<'a, A, D> +{ type Item = ::Item; type Dim = Ix1; type Ptr = *mut A; type Stride = isize; - fn layout(&self) -> crate::Layout { + fn layout(&self) -> crate::Layout + { crate::Layout::one_dimensional() } - fn raw_dim(&self) -> Self::Dim { + fn raw_dim(&self) -> Self::Dim + { Ix1(self.len()) } - fn as_ptr(&self) -> Self::Ptr { + fn as_ptr(&self) -> Self::Ptr + { if self.len() > 0 { // `self.iter.index` is guaranteed to be in-bounds if any of the // iterator remains (i.e. if `self.len() > 0`). 
@@ -1153,48 +1208,53 @@ impl<'a, A, D: Dimension> NdProducer for AxisIter<'a, A, D> { } } - fn contiguous_stride(&self) -> isize { + fn contiguous_stride(&self) -> isize + { self.iter.stride } - unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item { - ArrayView::new_( - ptr, - self.iter.inner_dim.clone(), - self.iter.inner_strides.clone(), - ) + unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item + { + ArrayView::new_(ptr, self.iter.inner_dim.clone(), self.iter.inner_strides.clone()) } - unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr + { self.iter.offset(self.iter.index + i[0]) } - fn stride_of(&self, _axis: Axis) -> isize { + fn stride_of(&self, _axis: Axis) -> isize + { self.contiguous_stride() } - fn split_at(self, _axis: Axis, index: usize) -> (Self, Self) { + fn split_at(self, _axis: Axis, index: usize) -> (Self, Self) + { self.split_at(index) } private_impl! {} } -impl<'a, A, D: Dimension> NdProducer for AxisIterMut<'a, A, D> { +impl<'a, A, D: Dimension> NdProducer for AxisIterMut<'a, A, D> +{ type Item = ::Item; type Dim = Ix1; type Ptr = *mut A; type Stride = isize; - fn layout(&self) -> crate::Layout { + fn layout(&self) -> crate::Layout + { crate::Layout::one_dimensional() } - fn raw_dim(&self) -> Self::Dim { + fn raw_dim(&self) -> Self::Dim + { Ix1(self.len()) } - fn as_ptr(&self) -> Self::Ptr { + fn as_ptr(&self) -> Self::Ptr + { if self.len() > 0 { // `self.iter.index` is guaranteed to be in-bounds if any of the // iterator remains (i.e. if `self.len() > 0`). @@ -1207,27 +1267,28 @@ impl<'a, A, D: Dimension> NdProducer for AxisIterMut<'a, A, D> { } } - fn contiguous_stride(&self) -> isize { + fn contiguous_stride(&self) -> isize + { self.iter.stride } - unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item { - ArrayViewMut::new_( - ptr, - self.iter.inner_dim.clone(), - self.iter.inner_strides.clone(), - ) + unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item + { + ArrayViewMut::new_(ptr, self.iter.inner_dim.clone(), self.iter.inner_strides.clone()) } - unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr + { self.iter.offset(self.iter.index + i[0]) } - fn stride_of(&self, _axis: Axis) -> isize { + fn stride_of(&self, _axis: Axis) -> isize + { self.contiguous_stride() } - fn split_at(self, _axis: Axis, index: usize) -> (Self, Self) { + fn split_at(self, _axis: Axis, index: usize) -> (Self, Self) + { self.split_at(index) } @@ -1244,7 +1305,8 @@ impl<'a, A, D: Dimension> NdProducer for AxisIterMut<'a, A, D> { /// Iterator element type is `ArrayView<'a, A, D>`. /// /// See [`.axis_chunks_iter()`](ArrayBase::axis_chunks_iter) for more information. -pub struct AxisChunksIter<'a, A, D> { +pub struct AxisChunksIter<'a, A, D> +{ iter: AxisIterCore, /// Index of the partial chunk (the chunk smaller than the specified chunk /// size due to the axis length not being evenly divisible). If the axis @@ -1277,11 +1339,9 @@ clone_bounds!( /// /// **Panics** if `size == 0`. 
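The partial-chunk bookkeeping above is what lets the last chunk be shorter when the axis length is not a multiple of the chunk size:

```rust
use ndarray::{Array1, Axis};

let a = Array1::from(vec![0, 1, 2, 3, 4, 5, 6]);
// Chunk size 3 over length 7: two full chunks and one partial chunk.
let lens: Vec<usize> = a.axis_chunks_iter(Axis(0), 3).map(|c| c.len()).collect();
assert_eq!(lens, vec![3, 3, 1]);
```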
#[track_caller] -fn chunk_iter_parts( - v: ArrayView<'_, A, D>, - axis: Axis, - size: usize, -) -> (AxisIterCore, usize, D) { +fn chunk_iter_parts(v: ArrayView<'_, A, D>, axis: Axis, size: usize) + -> (AxisIterCore, usize, D) +{ assert_ne!(size, 0, "Chunk size must be nonzero."); let axis_len = v.len_of(axis); let n_whole_chunks = axis_len / size; @@ -1318,8 +1378,10 @@ fn chunk_iter_parts( (iter, partial_chunk_index, partial_chunk_dim) } -impl<'a, A, D: Dimension> AxisChunksIter<'a, A, D> { - pub(crate) fn new(v: ArrayView<'a, A, D>, axis: Axis, size: usize) -> Self { +impl<'a, A, D: Dimension> AxisChunksIter<'a, A, D> +{ + pub(crate) fn new(v: ArrayView<'a, A, D>, axis: Axis, size: usize) -> Self + { let (iter, partial_chunk_index, partial_chunk_dim) = chunk_iter_parts(v, axis, size); AxisChunksIter { iter, @@ -1426,17 +1488,19 @@ macro_rules! chunk_iter_impl { /// /// See [`.axis_chunks_iter_mut()`](ArrayBase::axis_chunks_iter_mut) /// for more information. -pub struct AxisChunksIterMut<'a, A, D> { +pub struct AxisChunksIterMut<'a, A, D> +{ iter: AxisIterCore, partial_chunk_index: usize, partial_chunk_dim: D, life: PhantomData<&'a mut A>, } -impl<'a, A, D: Dimension> AxisChunksIterMut<'a, A, D> { - pub(crate) fn new(v: ArrayViewMut<'a, A, D>, axis: Axis, size: usize) -> Self { - let (iter, partial_chunk_index, partial_chunk_dim) = - chunk_iter_parts(v.into_view(), axis, size); +impl<'a, A, D: Dimension> AxisChunksIterMut<'a, A, D> +{ + pub(crate) fn new(v: ArrayViewMut<'a, A, D>, axis: Axis, size: usize) -> Self + { + let (iter, partial_chunk_index, partial_chunk_dim) = chunk_iter_parts(v.into_view(), axis, size); AxisChunksIterMut { iter, partial_chunk_index, @@ -1495,8 +1559,7 @@ unsafe impl TrustedIterator for IntoIter where D: Dimension {} /// Like Iterator::collect, but only for trusted length iterators pub fn to_vec(iter: I) -> Vec -where - I: TrustedIterator + ExactSizeIterator, +where I: TrustedIterator + ExactSizeIterator { to_vec_mapped(iter, |x| x) } diff --git a/src/iterators/windows.rs b/src/iterators/windows.rs index 9140f43b9..ec1afb634 100644 --- a/src/iterators/windows.rs +++ b/src/iterators/windows.rs @@ -11,17 +11,18 @@ use crate::Slice; /// /// See [`.windows()`](ArrayBase::windows) for more /// information. -pub struct Windows<'a, A, D> { +pub struct Windows<'a, A, D> +{ base: RawArrayView, life: PhantomData<&'a A>, window: D, strides: D, } -impl<'a, A, D: Dimension> Windows<'a, A, D> { +impl<'a, A, D: Dimension> Windows<'a, A, D> +{ pub(crate) fn new(a: ArrayView<'a, A, D>, window_size: E) -> Self - where - E: IntoDimension, + where E: IntoDimension { let window = window_size.into_dimension(); let ndim = window.ndim(); @@ -33,8 +34,7 @@ impl<'a, A, D: Dimension> Windows<'a, A, D> { } pub(crate) fn new_with_stride(a: ArrayView<'a, A, D>, window_size: E, axis_strides: E) -> Self - where - E: IntoDimension, + where E: IntoDimension { let window = window_size.into_dimension(); @@ -112,7 +112,8 @@ where { type Item = ::Item; type IntoIter = WindowsIter<'a, A, D>; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { WindowsIter { iter: self.base.into_base_iter(), life: self.life, @@ -126,7 +127,8 @@ where /// /// See [`.windows()`](ArrayBase::windows) for more /// information. 
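Windows, unlike exact chunks, overlap: a d1 × d2 array has (d1 - w1 + 1) * (d2 - w2 + 1) windows of size w1 × w2. Briefly:

```rust
use ndarray::arr2;

let a = arr2(&[[1, 2, 3], [4, 5, 6]]);
let windows: Vec<_> = a.windows((2, 2)).into_iter().collect();
assert_eq!(windows.len(), 2);
assert_eq!(windows[0], arr2(&[[1, 2], [4, 5]]));
```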
-pub struct WindowsIter<'a, A, D> { +pub struct WindowsIter<'a, A, D> +{ iter: Baseiter, life: PhantomData<&'a A>, window: D, diff --git a/src/itertools.rs b/src/itertools.rs index 8edfd75eb..d3562e687 100644 --- a/src/itertools.rs +++ b/src/itertools.rs @@ -23,8 +23,7 @@ use std::iter; /// } /// ``` pub(crate) fn enumerate(iterable: I) -> iter::Enumerate -where - I: IntoIterator, +where I: IntoIterator { iterable.into_iter().enumerate() } diff --git a/src/layout/layoutfmt.rs b/src/layout/layoutfmt.rs index 3d7fad00a..f20f0caaa 100644 --- a/src/layout/layoutfmt.rs +++ b/src/layout/layoutfmt.rs @@ -12,8 +12,10 @@ const LAYOUT_NAMES: &[&str] = &["C", "F", "c", "f"]; use std::fmt; -impl fmt::Debug for Layout { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { +impl fmt::Debug for Layout +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { if self.0 == 0 { write!(f, "Custom")? } else { diff --git a/src/layout/mod.rs b/src/layout/mod.rs index 9eecf016d..026688d63 100644 --- a/src/layout/mod.rs +++ b/src/layout/mod.rs @@ -8,72 +8,82 @@ mod layoutfmt; #[derive(Copy, Clone)] pub struct Layout(u32); -impl Layout { +impl Layout +{ pub(crate) const CORDER: u32 = 0b01; pub(crate) const FORDER: u32 = 0b10; pub(crate) const CPREFER: u32 = 0b0100; pub(crate) const FPREFER: u32 = 0b1000; #[inline(always)] - pub(crate) fn is(self, flag: u32) -> bool { + pub(crate) fn is(self, flag: u32) -> bool + { self.0 & flag != 0 } /// Return layout common to both inputs #[inline(always)] - pub(crate) fn intersect(self, other: Layout) -> Layout { + pub(crate) fn intersect(self, other: Layout) -> Layout + { Layout(self.0 & other.0) } /// Return a layout that simultaneously "is" what both of the inputs are #[inline(always)] - pub(crate) fn also(self, other: Layout) -> Layout { + pub(crate) fn also(self, other: Layout) -> Layout + { Layout(self.0 | other.0) } #[inline(always)] - pub(crate) fn one_dimensional() -> Layout { + pub(crate) fn one_dimensional() -> Layout + { Layout::c().also(Layout::f()) } #[inline(always)] - pub(crate) fn c() -> Layout { + pub(crate) fn c() -> Layout + { Layout(Layout::CORDER | Layout::CPREFER) } #[inline(always)] - pub(crate) fn f() -> Layout { + pub(crate) fn f() -> Layout + { Layout(Layout::FORDER | Layout::FPREFER) } #[inline(always)] - pub(crate) fn cpref() -> Layout { + pub(crate) fn cpref() -> Layout + { Layout(Layout::CPREFER) } #[inline(always)] - pub(crate) fn fpref() -> Layout { + pub(crate) fn fpref() -> Layout + { Layout(Layout::FPREFER) } #[inline(always)] - pub(crate) fn none() -> Layout { + pub(crate) fn none() -> Layout + { Layout(0) } /// A simple "score" method which scores positive for preferring C-order, negative for F-order /// Subject to change when we can describe other layouts #[inline] - pub(crate) fn tendency(self) -> i32 { - (self.is(Layout::CORDER) as i32 - self.is(Layout::FORDER) as i32) + - (self.is(Layout::CPREFER) as i32 - self.is(Layout::FPREFER) as i32) - + pub(crate) fn tendency(self) -> i32 + { + (self.is(Layout::CORDER) as i32 - self.is(Layout::FORDER) as i32) + + (self.is(Layout::CPREFER) as i32 - self.is(Layout::FPREFER) as i32) } } - #[cfg(test)] -mod tests { +mod tests +{ use super::*; use crate::imp_prelude::*; use crate::NdProducer; @@ -91,7 +101,7 @@ mod tests { $mat, stringify!($layout)); )* - }} + }}; } macro_rules! 
assert_not_layouts { @@ -103,11 +113,12 @@ mod tests { $mat, stringify!($layout)); )* - }} + }}; } #[test] - fn contig_layouts() { + fn contig_layouts() + { let a = M::zeros((5, 5)); let b = M::zeros((5, 5).f()); let ac = a.view().layout(); @@ -119,7 +130,8 @@ mod tests { } #[test] - fn contig_cf_layouts() { + fn contig_cf_layouts() + { let a = M::zeros((5, 1)); let b = M::zeros((1, 5).f()); assert_layouts!(a, CORDER, CPREFER, FORDER, FPREFER); @@ -147,7 +159,8 @@ mod tests { } #[test] - fn stride_layouts() { + fn stride_layouts() + { let a = M::zeros((5, 5)); { @@ -174,7 +187,8 @@ mod tests { } #[test] - fn no_layouts() { + fn no_layouts() + { let a = M::zeros((5, 5)); let b = M::zeros((5, 5).f()); @@ -202,7 +216,8 @@ mod tests { } #[test] - fn skip_layouts() { + fn skip_layouts() + { let a = M::zeros((5, 5)); { let v1 = a.slice(s![..;2, ..]).layout(); diff --git a/src/lib.rs b/src/lib.rs index b15b0ea88..37af0adfe 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -112,13 +112,12 @@ //! For conversion between `ndarray`, [`nalgebra`](https://crates.io/crates/nalgebra) and //! [`image`](https://crates.io/crates/image) check out [`nshare`](https://crates.io/crates/nshare). - extern crate alloc; -#[cfg(feature = "std")] -extern crate std; #[cfg(not(feature = "std"))] extern crate core as std; +#[cfg(feature = "std")] +extern crate std; #[cfg(feature = "blas")] extern crate cblas_sys; @@ -126,8 +125,8 @@ extern crate cblas_sys; #[cfg(feature = "docs")] pub mod doc; -use std::marker::PhantomData; use alloc::sync::Arc; +use std::marker::PhantomData; pub use crate::dimension::dim::*; pub use crate::dimension::{Axis, AxisDescription, Dimension, IntoDimension, RemoveAxis}; @@ -138,24 +137,22 @@ pub use crate::dimension::NdIndex; pub use crate::error::{ErrorKind, ShapeError}; pub use crate::indexes::{indices, indices_of}; pub use crate::order::Order; -pub use crate::slice::{ - MultiSliceArg, NewAxis, Slice, SliceArg, SliceInfo, SliceInfoElem, SliceNextDim, -}; +pub use crate::slice::{MultiSliceArg, NewAxis, Slice, SliceArg, SliceInfo, SliceInfoElem, SliceNextDim}; use crate::iterators::Baseiter; use crate::iterators::{ElementsBase, ElementsBaseMut, Iter, IterMut}; pub use crate::arraytraits::AsArray; +pub use crate::linalg_traits::LinalgScalar; #[cfg(feature = "std")] pub use crate::linalg_traits::NdFloat; -pub use crate::linalg_traits::LinalgScalar; #[allow(deprecated)] // stack_new_axis pub use crate::stacking::{concatenate, stack, stack_new_axis}; -pub use crate::math_cell::MathCell; pub use crate::impl_views::IndexLonger; -pub use crate::shape_builder::{Shape, ShapeBuilder, ShapeArg, StrideShape}; +pub use crate::math_cell::MathCell; +pub use crate::shape_builder::{Shape, ShapeArg, ShapeBuilder, StrideShape}; #[macro_use] mod macro_utils; @@ -175,10 +172,7 @@ mod data_traits; pub use crate::aliases::*; -pub use crate::data_traits::{ - Data, DataMut, DataOwned, DataShared, RawData, RawDataClone, RawDataMut, - RawDataSubst, -}; +pub use crate::data_traits::{Data, DataMut, DataOwned, DataShared, RawData, RawDataClone, RawDataMut, RawDataSubst}; mod free_functions; pub use crate::free_functions::*; @@ -193,10 +187,10 @@ mod layout; mod linalg_traits; mod linspace; #[cfg(feature = "std")] -pub use crate::linspace::{Linspace, linspace, range}; +pub use crate::linspace::{linspace, range, Linspace}; mod logspace; #[cfg(feature = "std")] -pub use crate::logspace::{Logspace, logspace}; +pub use crate::logspace::{logspace, Logspace}; mod math_cell; mod numeric_util; mod order; @@ -217,13 +211,24 @@ pub use 
crate::zip::{FoldWhile, IntoNdProducer, NdProducer, Zip}; pub use crate::layout::Layout; /// Implementation's prelude. Common types used everywhere. -mod imp_prelude { +mod imp_prelude +{ pub use crate::dimension::DimensionExt; pub use crate::prelude::*; pub use crate::ArcArray; pub use crate::{ - CowRepr, Data, DataMut, DataOwned, DataShared, Ix, Ixs, RawData, RawDataMut, RawViewRepr, - RemoveAxis, ViewRepr, + CowRepr, + Data, + DataMut, + DataOwned, + DataShared, + Ix, + Ixs, + RawData, + RawDataMut, + RawViewRepr, + RemoveAxis, + ViewRepr, }; } @@ -1271,8 +1276,7 @@ pub type Ixs = isize; // // [`.offset()`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset-1 pub struct ArrayBase -where - S: RawData, +where S: RawData { /// Data buffer / ownership information. (If owned, contains the data /// buffer; if borrowed, contains the lifetime and mutability.) @@ -1432,8 +1436,10 @@ pub use data_repr::OwnedRepr; #[derive(Debug)] pub struct OwnedArcRepr(Arc>); -impl Clone for OwnedArcRepr { - fn clone(&self) -> Self { +impl Clone for OwnedArcRepr +{ + fn clone(&self) -> Self + { OwnedArcRepr(self.0.clone()) } } @@ -1444,13 +1450,16 @@ impl Clone for OwnedArcRepr { /// [`RawArrayView`] / [`RawArrayViewMut`] for the array type!* #[derive(Copy, Clone)] // This is just a marker type, to carry the mutability and element type. -pub struct RawViewRepr { +pub struct RawViewRepr +{ ptr: PhantomData, } -impl RawViewRepr { +impl RawViewRepr +{ #[inline(always)] - const fn new() -> Self { + const fn new() -> Self + { RawViewRepr { ptr: PhantomData } } } @@ -1461,13 +1470,16 @@ impl RawViewRepr { /// [`ArrayView`] / [`ArrayViewMut`] for the array type!* #[derive(Copy, Clone)] // This is just a marker type, to carry the lifetime parameter. -pub struct ViewRepr { +pub struct ViewRepr +{ life: PhantomData, } -impl ViewRepr { +impl ViewRepr +{ #[inline(always)] - const fn new() -> Self { + const fn new() -> Self + { ViewRepr { life: PhantomData } } } @@ -1476,16 +1488,19 @@ impl ViewRepr { /// /// *Don't use this type directly—use the type alias /// [`CowArray`] for the array type!* -pub enum CowRepr<'a, A> { +pub enum CowRepr<'a, A> +{ /// Borrowed data. View(ViewRepr<&'a A>), /// Owned data. Owned(OwnedRepr), } -impl<'a, A> CowRepr<'a, A> { +impl<'a, A> CowRepr<'a, A> +{ /// Returns `true` iff the data is the `View` variant. - pub fn is_view(&self) -> bool { + pub fn is_view(&self) -> bool + { match self { CowRepr::View(_) => true, CowRepr::Owned(_) => false, @@ -1493,7 +1508,8 @@ impl<'a, A> CowRepr<'a, A> { } /// Returns `true` iff the data is the `Owned` variant. - pub fn is_owned(&self) -> bool { + pub fn is_owned(&self) -> bool + { match self { CowRepr::View(_) => false, CowRepr::Owned(_) => true, @@ -1521,8 +1537,7 @@ where { #[inline] fn broadcast_unwrap(&self, dim: E) -> ArrayView<'_, A, E> - where - E: Dimension, + where E: Dimension { #[cold] #[inline(never)] @@ -1548,8 +1563,7 @@ where // (Checked in debug assertions). #[inline] fn broadcast_assume(&self, dim: E) -> ArrayView<'_, A, E> - where - E: Dimension, + where E: Dimension { let dim = dim.into_dimension(); debug_assert_eq!(self.shape(), dim.slice()); @@ -1560,13 +1574,12 @@ where } /// Remove array axis `axis` and return the result. 
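The `CowRepr` variants carry the clone-on-write behaviour of `CowArray`: it stays a view until the first write forces an owned copy. A sketch, assuming the usual `CowArray` conversions and the `is_view`/`is_owned` helpers:

```rust
use ndarray::{arr2, CowArray};

let arr = arr2(&[[1., 2.], [3., 4.]]);
let mut cow = CowArray::from(arr.view());
assert!(cow.is_view());
cow[[0, 0]] = 10.;           // first write clones the data into an owned buffer
assert!(cow.is_owned());
assert_eq!(arr[[0, 0]], 1.); // the original array is untouched
```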
- fn try_remove_axis(self, axis: Axis) -> ArrayBase { + fn try_remove_axis(self, axis: Axis) -> ArrayBase + { let d = self.dim.try_remove_axis(axis); let s = self.strides.try_remove_axis(axis); // safe because new dimension, strides allow access to a subset of old data - unsafe { - self.with_strides_dim(s, d) - } + unsafe { self.with_strides_dim(s, d) } } } @@ -1600,6 +1613,7 @@ mod impl_raw_views; mod impl_cow; /// Returns `true` if the pointer is aligned. -pub(crate) fn is_aligned(ptr: *const T) -> bool { +pub(crate) fn is_aligned(ptr: *const T) -> bool +{ (ptr as usize) % ::std::mem::align_of::() == 0 } diff --git a/src/linalg/impl_linalg.rs b/src/linalg/impl_linalg.rs index dc8ef3428..bcfcba94e 100644 --- a/src/linalg/impl_linalg.rs +++ b/src/linalg/impl_linalg.rs @@ -14,10 +14,10 @@ use crate::numeric_util; use crate::{LinalgScalar, Zip}; -use std::any::TypeId; -use std::mem::MaybeUninit; #[cfg(not(feature = "std"))] use alloc::vec::Vec; +use std::any::TypeId; +use std::mem::MaybeUninit; use num_complex::Complex; use num_complex::{Complex32 as c32, Complex64 as c64}; @@ -45,8 +45,7 @@ const GEMM_BLAS_CUTOFF: usize = 7; type blas_index = c_int; // blas index type impl ArrayBase -where - S: Data, +where S: Data { /// Perform dot product or matrix multiplication of arrays `self` and `rhs`. /// @@ -67,8 +66,7 @@ where /// layout allows. #[track_caller] pub fn dot(&self, rhs: &Rhs) -> >::Output - where - Self: Dot, + where Self: Dot { Dot::dot(self, rhs) } @@ -147,11 +145,8 @@ where /// which agrees with our pointer for non-negative strides, but /// is at the opposite end for negative strides. #[cfg(feature = "blas")] -unsafe fn blas_1d_params( - ptr: *const A, - len: usize, - stride: isize, -) -> (*const A, blas_index, blas_index) { +unsafe fn blas_1d_params(ptr: *const A, len: usize, stride: isize) -> (*const A, blas_index, blas_index) +{ // [x x x x] // ^--ptr // stride = -1 @@ -168,7 +163,8 @@ unsafe fn blas_1d_params( /// /// For two-dimensional arrays, the dot method computes the matrix /// multiplication. -pub trait Dot { +pub trait Dot +{ /// The result of the operation. /// /// For two-dimensional arrays: a rectangular array. @@ -193,7 +189,8 @@ where /// *Note:* If enabled, uses blas `dot` for elements of `f32, f64` when memory /// layout allows. #[track_caller] - fn dot(&self, rhs: &ArrayBase) -> A { + fn dot(&self, rhs: &ArrayBase) -> A + { self.dot_impl(rhs) } } @@ -216,14 +213,14 @@ where /// /// **Panics** if shapes are incompatible. #[track_caller] - fn dot(&self, rhs: &ArrayBase) -> Array { + fn dot(&self, rhs: &ArrayBase) -> Array + { rhs.t().dot(self) } } impl ArrayBase -where - S: Data, +where S: Data { /// Perform matrix multiplication of rectangular arrays `self` and `rhs`. /// @@ -256,8 +253,7 @@ where /// ``` #[track_caller] pub fn dot(&self, rhs: &Rhs) -> >::Output - where - Self: Dot, + where Self: Dot { Dot::dot(self, rhs) } @@ -270,7 +266,8 @@ where A: LinalgScalar, { type Output = Array2; - fn dot(&self, b: &ArrayBase) -> Array2 { + fn dot(&self, b: &ArrayBase) -> Array2 + { let a = self.view(); let b = b.view(); let ((m, k), (k2, n)) = (a.dim(), b.dim()); @@ -296,7 +293,8 @@ where /// Assumes that `m` and `n` are ≤ `isize::MAX`. #[cold] #[inline(never)] -fn dot_shape_error(m: usize, k: usize, k2: usize, n: usize) -> ! { +fn dot_shape_error(m: usize, k: usize, k2: usize, n: usize) -> ! 
+{ match m.checked_mul(n) { Some(len) if len <= ::std::isize::MAX as usize => {} _ => panic!("ndarray: shape {} × {} overflows isize", m, n), @@ -309,7 +307,8 @@ fn dot_shape_error(m: usize, k: usize, k2: usize, n: usize) -> ! { #[cold] #[inline(never)] -fn general_dot_shape_error(m: usize, k: usize, k2: usize, n: usize, c1: usize, c2: usize) -> ! { +fn general_dot_shape_error(m: usize, k: usize, k2: usize, n: usize, c1: usize, c2: usize) -> ! +{ panic!("ndarray: inputs {} × {}, {} × {}, and output {} × {} are not compatible for matrix multiplication", m, k, k2, n, c1, c2); } @@ -331,7 +330,8 @@ where { type Output = Array; #[track_caller] - fn dot(&self, rhs: &ArrayBase) -> Array { + fn dot(&self, rhs: &ArrayBase) -> Array + { let ((m, a), n) = (self.dim(), rhs.dim()); if a != n { dot_shape_error(m, a, n, 1); @@ -375,6 +375,7 @@ where #[cfg(not(feature = "blas"))] use self::mat_mul_general as mat_mul_impl; +#[rustfmt::skip] #[cfg(feature = "blas")] fn mat_mul_impl( alpha: A, @@ -495,13 +496,8 @@ fn mat_mul_impl( /// C ← α A B + β C fn mat_mul_general( - alpha: A, - lhs: &ArrayView2<'_, A>, - rhs: &ArrayView2<'_, A>, - beta: A, - c: &mut ArrayViewMut2<'_, A>, -) where - A: LinalgScalar, + alpha: A, lhs: &ArrayView2<'_, A>, rhs: &ArrayView2<'_, A>, beta: A, c: &mut ArrayViewMut2<'_, A>, +) where A: LinalgScalar { let ((m, k), (_, n)) = (lhs.dim(), rhs.dim()); @@ -606,11 +602,8 @@ fn mat_mul_general( loop { unsafe { let elt = c.uget_mut((i, j)); - *elt = *elt * beta - + alpha - * (0..k).fold(A::zero(), move |s, x| { - s + *lhs.uget((i, x)) * *rhs.uget((x, j)) - }); + *elt = + *elt * beta + alpha * (0..k).fold(A::zero(), move |s, x| s + *lhs.uget((i, x)) * *rhs.uget((x, j))); } j += 1; if j == n { @@ -637,11 +630,7 @@ fn mat_mul_general( /// `f32, f64` for all memory layouts. #[track_caller] pub fn general_mat_mul( - alpha: A, - a: &ArrayBase, - b: &ArrayBase, - beta: A, - c: &mut ArrayBase, + alpha: A, a: &ArrayBase, b: &ArrayBase, beta: A, c: &mut ArrayBase, ) where S1: Data, S2: Data, @@ -670,11 +659,7 @@ pub fn general_mat_mul( #[track_caller] #[allow(clippy::collapsible_if)] pub fn general_mat_vec_mul( - alpha: A, - a: &ArrayBase, - x: &ArrayBase, - beta: A, - y: &mut ArrayBase, + alpha: A, a: &ArrayBase, x: &ArrayBase, beta: A, y: &mut ArrayBase, ) where S1: Data, S2: Data, @@ -694,11 +679,7 @@ pub fn general_mat_vec_mul( /// the destination may be uninitialized iff beta is zero. #[allow(clippy::collapsible_else_if)] unsafe fn general_mat_vec_mul_impl( - alpha: A, - a: &ArrayBase, - x: &ArrayBase, - beta: A, - y: RawArrayViewMut, + alpha: A, a: &ArrayBase, x: &ArrayBase, beta: A, y: RawArrayViewMut, ) where S1: Data, S2: Data, @@ -776,7 +757,6 @@ unsafe fn general_mat_vec_mul_impl( } } - /// Kronecker product of 2D matrices. /// /// The kronecker product of a LxN matrix A and a MxR matrix B is a (L*M)x(N*R) @@ -811,14 +791,16 @@ where #[inline(always)] /// Return `true` if `A` and `B` are the same type -fn same_type() -> bool { +fn same_type() -> bool +{ TypeId::of::() == TypeId::of::() } // Read pointer to type `A` as type `B`. 
// // **Panics** if `A` and `B` are not the same type -fn cast_as(a: &A) -> B { +fn cast_as(a: &A) -> B +{ assert!(same_type::(), "expect type {} and {} to match", std::any::type_name::(), std::any::type_name::()); unsafe { ::std::ptr::read(a as *const _ as *const B) } @@ -826,7 +808,8 @@ fn cast_as(a: &A) -> B { /// Return the complex in the form of an array [re, im] #[inline] -fn complex_array(z: Complex) -> [A; 2] { +fn complex_array(z: Complex) -> [A; 2] +{ [z.re, z.im] } @@ -844,17 +827,15 @@ where return false; } let stride = a.strides()[0]; - if stride == 0 - || stride > blas_index::max_value() as isize - || stride < blas_index::min_value() as isize - { + if stride == 0 || stride > blas_index::max_value() as isize || stride < blas_index::min_value() as isize { return false; } true } #[cfg(feature = "blas")] -enum MemoryOrder { +enum MemoryOrder +{ C, F, } @@ -886,7 +867,8 @@ where } #[cfg(feature = "blas")] -fn is_blas_2d(dim: &Ix2, stride: &Ix2, order: MemoryOrder) -> bool { +fn is_blas_2d(dim: &Ix2, stride: &Ix2, order: MemoryOrder) -> bool +{ let (m, n) = dim.into_pattern(); let s0 = stride[0] as isize; let s1 = stride[1] as isize; @@ -929,32 +911,37 @@ where #[cfg(test)] #[cfg(feature = "blas")] -mod blas_tests { +mod blas_tests +{ use super::*; #[test] - fn blas_row_major_2d_normal_matrix() { + fn blas_row_major_2d_normal_matrix() + { let m: Array2 = Array2::zeros((3, 5)); assert!(blas_row_major_2d::(&m)); assert!(!blas_column_major_2d::(&m)); } #[test] - fn blas_row_major_2d_row_matrix() { + fn blas_row_major_2d_row_matrix() + { let m: Array2 = Array2::zeros((1, 5)); assert!(blas_row_major_2d::(&m)); assert!(blas_column_major_2d::(&m)); } #[test] - fn blas_row_major_2d_column_matrix() { + fn blas_row_major_2d_column_matrix() + { let m: Array2 = Array2::zeros((5, 1)); assert!(blas_row_major_2d::(&m)); assert!(blas_column_major_2d::(&m)); } #[test] - fn blas_row_major_2d_transposed_row_matrix() { + fn blas_row_major_2d_transposed_row_matrix() + { let m: Array2 = Array2::zeros((1, 5)); let m_t = m.t(); assert!(blas_row_major_2d::(&m_t)); @@ -962,7 +949,8 @@ mod blas_tests { } #[test] - fn blas_row_major_2d_transposed_column_matrix() { + fn blas_row_major_2d_transposed_column_matrix() + { let m: Array2 = Array2::zeros((5, 1)); let m_t = m.t(); assert!(blas_row_major_2d::(&m_t)); @@ -970,7 +958,8 @@ mod blas_tests { } #[test] - fn blas_column_major_2d_normal_matrix() { + fn blas_column_major_2d_normal_matrix() + { let m: Array2 = Array2::zeros((3, 5).f()); assert!(!blas_row_major_2d::(&m)); assert!(blas_column_major_2d::(&m)); diff --git a/src/linalg/mod.rs b/src/linalg/mod.rs index abd7b2b9d..dc6964f9b 100644 --- a/src/linalg/mod.rs +++ b/src/linalg/mod.rs @@ -10,7 +10,7 @@ pub use self::impl_linalg::general_mat_mul; pub use self::impl_linalg::general_mat_vec_mul; -pub use self::impl_linalg::Dot; pub use self::impl_linalg::kron; +pub use self::impl_linalg::Dot; mod impl_linalg; diff --git a/src/linalg_traits.rs b/src/linalg_traits.rs index d52e7f54f..65d264c40 100644 --- a/src/linalg_traits.rs +++ b/src/linalg_traits.rs @@ -24,28 +24,12 @@ use crate::ScalarOperand; /// `'static` for type-based specialization, `Copy` so that they don't need move /// semantics or destructors, and the rest are numerical traits. 
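`LinalgScalar` is the bound used by `dot`, `general_mat_mul` and friends, so generic numeric helpers can be written against it; a small sketch (the `gram` helper is illustrative, not part of the crate):

```rust
use ndarray::{arr1, arr2, Array2, LinalgScalar};

fn gram<A: LinalgScalar>(m: &Array2<A>) -> Array2<A> {
    // Works for any element type with Copy, Zero, One and the arithmetic ops.
    m.t().dot(m)
}

let a = arr2(&[[1., 2.], [3., 4.], [5., 6.]]);
assert_eq!(gram(&a), arr2(&[[35., 44.], [44., 56.]]));
// For 1-D operands, `dot` is the scalar product.
assert_eq!(arr1(&[1., 2., 3.]).dot(&arr1(&[4., 5., 6.])), 32.);
```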
pub trait LinalgScalar: - 'static - + Copy - + Zero - + One - + Add - + Sub - + Mul - + Div + 'static + Copy + Zero + One + Add + Sub + Mul + Div { } -impl LinalgScalar for T where - T: 'static - + Copy - + Zero - + One - + Add - + Sub - + Mul - + Div -{ -} +impl LinalgScalar for T where T: 'static + Copy + Zero + One + Add + Sub + Mul + Div +{} /// Floating-point element types `f32` and `f64`. /// @@ -78,4 +62,3 @@ pub trait NdFloat: impl NdFloat for f32 {} #[cfg(feature = "std")] impl NdFloat for f64 {} - diff --git a/src/linspace.rs b/src/linspace.rs index 513044e00..411c480db 100644 --- a/src/linspace.rs +++ b/src/linspace.rs @@ -11,7 +11,8 @@ use num_traits::Float; /// An iterator of a sequence of evenly spaced floats. /// /// Iterator element type is `F`. -pub struct Linspace { +pub struct Linspace +{ start: F, step: F, index: usize, @@ -19,13 +20,13 @@ pub struct Linspace { } impl Iterator for Linspace -where - F: Float, +where F: Float { type Item = F; #[inline] - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { if self.index >= self.len { None } else { @@ -37,18 +38,19 @@ where } #[inline] - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { let n = self.len - self.index; (n, Some(n)) } } impl DoubleEndedIterator for Linspace -where - F: Float, +where F: Float { #[inline] - fn next_back(&mut self) -> Option { + fn next_back(&mut self) -> Option + { if self.index >= self.len { None } else { @@ -72,8 +74,7 @@ impl ExactSizeIterator for Linspace where Linspace: Iterator {} /// **Panics** if converting `n - 1` to type `F` fails. #[inline] pub fn linspace(a: F, b: F, n: usize) -> Linspace -where - F: Float, +where F: Float { let step = if n > 1 { let num_steps = F::from(n - 1).expect("Converting number of steps to `A` must not fail."); @@ -100,8 +101,7 @@ where /// **Panics** if converting `((b - a) / step).ceil()` to type `F` fails. #[inline] pub fn range(a: F, b: F, step: F) -> Linspace -where - F: Float, +where F: Float { let len = b - a; let steps = F::ceil(len / step); diff --git a/src/logspace.rs b/src/logspace.rs index 53be034b5..6f8de885d 100644 --- a/src/logspace.rs +++ b/src/logspace.rs @@ -11,7 +11,8 @@ use num_traits::Float; /// An iterator of a sequence of logarithmically spaced number. /// /// Iterator element type is `F`. -pub struct Logspace { +pub struct Logspace +{ sign: F, base: F, start: F, @@ -21,13 +22,13 @@ pub struct Logspace { } impl Iterator for Logspace -where - F: Float, +where F: Float { type Item = F; #[inline] - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { if self.index >= self.len { None } else { @@ -40,18 +41,19 @@ where } #[inline] - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { let n = self.len - self.index; (n, Some(n)) } } impl DoubleEndedIterator for Logspace -where - F: Float, +where F: Float { #[inline] - fn next_back(&mut self) -> Option { + fn next_back(&mut self) -> Option + { if self.index >= self.len { None } else { @@ -78,8 +80,7 @@ impl ExactSizeIterator for Logspace where Logspace: Iterator {} /// **Panics** if converting `n - 1` to type `F` fails. 
#[inline] pub fn logspace(base: F, a: F, b: F, n: usize) -> Logspace -where - F: Float, +where F: Float { let step = if n > 1 { let num_steps = F::from(n - 1).expect("Converting number of steps to `A` must not fail."); @@ -98,12 +99,14 @@ where } #[cfg(test)] -mod tests { +mod tests +{ use super::logspace; #[test] #[cfg(feature = "approx")] - fn valid() { + fn valid() + { use crate::{arr1, Array1}; use approx::assert_abs_diff_eq; @@ -121,7 +124,8 @@ mod tests { } #[test] - fn iter_forward() { + fn iter_forward() + { let mut iter = logspace(10.0f64, 0.0, 3.0, 4); assert!(iter.size_hint() == (4, Some(4))); @@ -136,7 +140,8 @@ mod tests { } #[test] - fn iter_backward() { + fn iter_backward() + { let mut iter = logspace(10.0f64, 0.0, 3.0, 4); assert!(iter.size_hint() == (4, Some(4))); diff --git a/src/low_level_util.rs b/src/low_level_util.rs index b61b06f0d..5a615a187 100644 --- a/src/low_level_util.rs +++ b/src/low_level_util.rs @@ -6,7 +6,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. - /// Guard value that will abort if it is dropped. /// To defuse, this value must be forgotten before the end of the scope. /// @@ -14,24 +13,28 @@ #[must_use] pub(crate) struct AbortIfPanic(pub(crate) &'static &'static str); -impl AbortIfPanic { +impl AbortIfPanic +{ /// Defuse the AbortIfPanic guard. This *must* be done when finished. #[inline] - pub(crate) fn defuse(self) { + pub(crate) fn defuse(self) + { std::mem::forget(self); } } -impl Drop for AbortIfPanic { +impl Drop for AbortIfPanic +{ // The compiler should be able to remove this, if it can see through that there // is no panic in the code section. - fn drop(&mut self) { - #[cfg(feature="std")] + fn drop(&mut self) + { + #[cfg(feature = "std")] { eprintln!("ndarray: panic in no-panic section, aborting: {}", self.0); std::process::abort() } - #[cfg(not(feature="std"))] + #[cfg(not(feature = "std"))] { // no-std uses panic-in-panic (should abort) panic!("ndarray: panic in no-panic section, bailing out: {}", self.0); diff --git a/src/macro_utils.rs b/src/macro_utils.rs index 0480b7c91..75360de37 100644 --- a/src/macro_utils.rs +++ b/src/macro_utils.rs @@ -9,7 +9,7 @@ macro_rules! copy_and_clone { }; ($type_:ty) => { copy_and_clone!{ [] $type_ } - } + }; } macro_rules! clone_bounds { @@ -38,7 +38,7 @@ macro_rules! clone_bounds { /// debug assertions are enabled). #[cfg(debug_assertions)] macro_rules! 
ndassert { - ($e:expr, $($t:tt)*) => { assert!($e, $($t)*) } + ($e:expr, $($t:tt)*) => { assert!($e, $($t)*) }; } #[cfg(not(debug_assertions))] diff --git a/src/math_cell.rs b/src/math_cell.rs index f0f8da40b..6ed1ed71f 100644 --- a/src/math_cell.rs +++ b/src/math_cell.rs @@ -1,4 +1,3 @@ - use std::cell::Cell; use std::cmp::Ordering; use std::fmt; @@ -14,87 +13,119 @@ use std::ops::{Deref, DerefMut}; #[derive(Default)] pub struct MathCell(Cell); -impl MathCell { +impl MathCell +{ /// Create a new cell with the given value #[inline(always)] - pub const fn new(value: T) -> Self { MathCell(Cell::new(value)) } + pub const fn new(value: T) -> Self + { + MathCell(Cell::new(value)) + } /// Return the inner value - pub fn into_inner(self) -> T { Cell::into_inner(self.0) } + pub fn into_inner(self) -> T + { + Cell::into_inner(self.0) + } /// Swap value with another cell - pub fn swap(&self, other: &Self) { + pub fn swap(&self, other: &Self) + { Cell::swap(&self.0, &other.0) } } -impl Deref for MathCell { +impl Deref for MathCell +{ type Target = Cell; #[inline(always)] - fn deref(&self) -> &Self::Target { &self.0 } + fn deref(&self) -> &Self::Target + { + &self.0 + } } -impl DerefMut for MathCell { +impl DerefMut for MathCell +{ #[inline(always)] - fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } + fn deref_mut(&mut self) -> &mut Self::Target + { + &mut self.0 + } } impl Clone for MathCell - where T: Copy +where T: Copy { - fn clone(&self) -> Self { + fn clone(&self) -> Self + { MathCell::new(self.get()) } } impl PartialEq for MathCell - where T: Copy + PartialEq +where T: Copy + PartialEq { - fn eq(&self, rhs: &Self) -> bool { + fn eq(&self, rhs: &Self) -> bool + { self.get() == rhs.get() } } -impl Eq for MathCell - where T: Copy + Eq -{ } +impl Eq for MathCell where T: Copy + Eq {} impl PartialOrd for MathCell - where T: Copy + PartialOrd +where T: Copy + PartialOrd { - fn partial_cmp(&self, rhs: &Self) -> Option { + fn partial_cmp(&self, rhs: &Self) -> Option + { self.get().partial_cmp(&rhs.get()) } - fn lt(&self, rhs: &Self) -> bool { self.get().lt(&rhs.get()) } - fn le(&self, rhs: &Self) -> bool { self.get().le(&rhs.get()) } - fn gt(&self, rhs: &Self) -> bool { self.get().gt(&rhs.get()) } - fn ge(&self, rhs: &Self) -> bool { self.get().ge(&rhs.get()) } + fn lt(&self, rhs: &Self) -> bool + { + self.get().lt(&rhs.get()) + } + fn le(&self, rhs: &Self) -> bool + { + self.get().le(&rhs.get()) + } + fn gt(&self, rhs: &Self) -> bool + { + self.get().gt(&rhs.get()) + } + fn ge(&self, rhs: &Self) -> bool + { + self.get().ge(&rhs.get()) + } } impl Ord for MathCell - where T: Copy + Ord +where T: Copy + Ord { - fn cmp(&self, rhs: &Self) -> Ordering { + fn cmp(&self, rhs: &Self) -> Ordering + { self.get().cmp(&rhs.get()) } } impl fmt::Debug for MathCell - where T: Copy + fmt::Debug +where T: Copy + fmt::Debug { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result + { self.get().fmt(f) } } - #[cfg(test)] -mod tests { +mod tests +{ use super::MathCell; #[test] - fn test_basic() { + fn test_basic() + { let c = &MathCell::new(0); c.set(1); assert_eq!(c.get(), 1); diff --git a/src/numeric/impl_float_maths.rs b/src/numeric/impl_float_maths.rs index 4b3208800..54fed49c2 100644 --- a/src/numeric/impl_float_maths.rs +++ b/src/numeric/impl_float_maths.rs @@ -137,7 +137,8 @@ where /// Square (two powers) of each element. 
#[must_use = "method returns a new array and does not mutate the original value"] - pub fn pow2(&self) -> Array { + pub fn pow2(&self) -> Array + { self.mapv(|v: A| v * v) } } @@ -161,7 +162,8 @@ where /// # Panics /// /// Panics if `!(min <= max)`. - pub fn clamp(&self, min: A, max: A) -> Array { + pub fn clamp(&self, min: A, max: A) -> Array + { assert!(min <= max, "min must be less than or equal to max"); self.mapv(|a| num_traits::clamp(a, min.clone(), max.clone())) } diff --git a/src/numeric/impl_numeric.rs b/src/numeric/impl_numeric.rs index 55cd0cdfe..ca6f24bbe 100644 --- a/src/numeric/impl_numeric.rs +++ b/src/numeric/impl_numeric.rs @@ -30,8 +30,7 @@ where /// assert_eq!(a.sum(), 10.); /// ``` pub fn sum(&self) -> A - where - A: Clone + Add + num_traits::Zero, + where A: Clone + Add + num_traits::Zero { if let Some(slc) = self.as_slice_memory_order() { return numeric_util::unrolled_fold(slc, A::zero, A::add); @@ -50,10 +49,9 @@ where /// Return the sum of all elements in the array. /// /// *This method has been renamed to `.sum()`* - #[deprecated(note="renamed to `sum`", since="0.15.0")] + #[deprecated(note = "renamed to `sum`", since = "0.15.0")] pub fn scalar_sum(&self) -> A - where - A: Clone + Add + num_traits::Zero, + where A: Clone + Add + num_traits::Zero { self.sum() } @@ -72,15 +70,13 @@ where /// /// [arithmetic mean]: https://en.wikipedia.org/wiki/Arithmetic_mean pub fn mean(&self) -> Option - where - A: Clone + FromPrimitive + Add + Div + Zero, + where A: Clone + FromPrimitive + Add + Div + Zero { let n_elements = self.len(); if n_elements == 0 { None } else { - let n_elements = A::from_usize(n_elements) - .expect("Converting number of elements to `A` must not fail."); + let n_elements = A::from_usize(n_elements).expect("Converting number of elements to `A` must not fail."); Some(self.sum() / n_elements) } } @@ -95,8 +91,7 @@ where /// assert_eq!(a.product(), 24.); /// ``` pub fn product(&self) -> A - where - A: Clone + Mul + num_traits::One, + where A: Clone + Mul + num_traits::One { if let Some(slc) = self.as_slice_memory_order() { return numeric_util::unrolled_fold(slc, A::one, A::mul); @@ -154,8 +149,7 @@ where #[track_caller] #[cfg(feature = "std")] pub fn var(&self, ddof: A) -> A - where - A: Float + FromPrimitive, + where A: Float + FromPrimitive { let zero = A::from_usize(0).expect("Converting 0 to `A` must not fail."); let n = A::from_usize(self.len()).expect("Converting length to `A` must not fail."); @@ -220,8 +214,7 @@ where #[track_caller] #[cfg(feature = "std")] pub fn std(&self, ddof: A) -> A - where - A: Float + FromPrimitive, + where A: Float + FromPrimitive { self.var(ddof).sqrt() } @@ -289,8 +282,7 @@ where if axis_length == 0 { None } else { - let axis_length = - A::from_usize(axis_length).expect("Converting axis length to `A` must not fail."); + let axis_length = A::from_usize(axis_length).expect("Converting axis length to `A` must not fail."); let sum = self.sum_axis(axis); Some(sum / aview0(&axis_length)) } diff --git a/src/numeric_util.rs b/src/numeric_util.rs index b06850fd0..9d5ce66c5 100644 --- a/src/numeric_util.rs +++ b/src/numeric_util.rs @@ -20,16 +20,8 @@ where // eightfold unrolled so that floating point can be vectorized // (even with strict floating point accuracy semantics) let mut acc = init(); - let (mut p0, mut p1, mut p2, mut p3, mut p4, mut p5, mut p6, mut p7) = ( - init(), - init(), - init(), - init(), - init(), - init(), - init(), - init(), - ); + let (mut p0, mut p1, mut p2, mut p3, mut p4, mut p5, mut p6, mut p7) = + (init(), 
init(), init(), init(), init(), init(), init(), init()); while xs.len() >= 8 { p0 = f(p0, xs[0].clone()); p1 = f(p1, xs[1].clone()); @@ -62,8 +54,7 @@ where /// /// `xs` and `ys` must be the same length pub fn unrolled_dot(xs: &[A], ys: &[A]) -> A -where - A: LinalgScalar, +where A: LinalgScalar { debug_assert_eq!(xs.len(), ys.len()); // eightfold unrolled so that floating point can be vectorized @@ -72,16 +63,8 @@ where let mut xs = &xs[..len]; let mut ys = &ys[..len]; let mut sum = A::zero(); - let (mut p0, mut p1, mut p2, mut p3, mut p4, mut p5, mut p6, mut p7) = ( - A::zero(), - A::zero(), - A::zero(), - A::zero(), - A::zero(), - A::zero(), - A::zero(), - A::zero(), - ); + let (mut p0, mut p1, mut p2, mut p3, mut p4, mut p5, mut p6, mut p7) = + (A::zero(), A::zero(), A::zero(), A::zero(), A::zero(), A::zero(), A::zero(), A::zero()); while xs.len() >= 8 { p0 = p0 + xs[0] * ys[0]; p1 = p1 + xs[1] * ys[1]; @@ -113,8 +96,7 @@ where /// /// `xs` and `ys` must be the same length pub fn unrolled_eq(xs: &[A], ys: &[B]) -> bool -where - A: PartialEq, +where A: PartialEq { debug_assert_eq!(xs.len(), ys.len()); // eightfold unrolled for performance (this is not done by llvm automatically) diff --git a/src/order.rs b/src/order.rs index e8d9c8db1..a52a32e2c 100644 --- a/src/order.rs +++ b/src/order.rs @@ -1,4 +1,3 @@ - /// Array order /// /// Order refers to indexing order, or how a linear sequence is translated @@ -31,14 +30,16 @@ /// or "Fortran" order. #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[non_exhaustive] -pub enum Order { +pub enum Order +{ /// Row major or "C" order RowMajor, /// Column major or "F" order ColumnMajor, } -impl Order { +impl Order +{ /// "C" is an alias for row major ordering pub const C: Order = Order::RowMajor; @@ -47,7 +48,8 @@ impl Order { /// Return true if input is Order::RowMajor, false otherwise #[inline] - pub fn is_row_major(self) -> bool { + pub fn is_row_major(self) -> bool + { match self { Order::RowMajor => true, Order::ColumnMajor => false, @@ -56,25 +58,33 @@ impl Order { /// Return true if input is Order::ColumnMajor, false otherwise #[inline] - pub fn is_column_major(self) -> bool { + pub fn is_column_major(self) -> bool + { !self.is_row_major() } /// Return Order::RowMajor if the input is true, Order::ColumnMajor otherwise #[inline] - pub fn row_major(row_major: bool) -> Order { - if row_major { Order::RowMajor } else { Order::ColumnMajor } + pub fn row_major(row_major: bool) -> Order + { + if row_major { + Order::RowMajor + } else { + Order::ColumnMajor + } } /// Return Order::ColumnMajor if the input is true, Order::RowMajor otherwise #[inline] - pub fn column_major(column_major: bool) -> Order { + pub fn column_major(column_major: bool) -> Order + { Self::row_major(!column_major) } /// Return the transpose: row major becomes column major and vice versa. 
#[inline] - pub fn transpose(self) -> Order { + pub fn transpose(self) -> Order + { match self { Order::RowMajor => Order::ColumnMajor, Order::ColumnMajor => Order::RowMajor, diff --git a/src/parallel/impl_par_methods.rs b/src/parallel/impl_par_methods.rs index ed0dcad7a..b3fbdedc8 100644 --- a/src/parallel/impl_par_methods.rs +++ b/src/parallel/impl_par_methods.rs @@ -1,9 +1,9 @@ -use crate::{Array, ArrayBase, DataMut, Dimension, IntoNdProducer, NdProducer, Zip}; use crate::AssignElem; +use crate::{Array, ArrayBase, DataMut, Dimension, IntoNdProducer, NdProducer, Zip}; -use crate::parallel::prelude::*; -use crate::parallel::par::ParallelSplits; use super::send_producer::SendProducer; +use crate::parallel::par::ParallelSplits; +use crate::parallel::prelude::*; use crate::partial::Partial; @@ -22,8 +22,7 @@ where /// /// Elements are visited in arbitrary order. pub fn par_map_inplace(&mut self, f: F) - where - F: Fn(&mut A) + Sync + Send, + where F: Fn(&mut A) + Sync + Send { self.view_mut().into_par_iter().for_each(f) } @@ -230,7 +229,7 @@ macro_rules! zip_impl { ); } )+ - } + }; } zip_impl! { diff --git a/src/parallel/into_impls.rs b/src/parallel/into_impls.rs index c1a5388fd..75bded7de 100644 --- a/src/parallel/into_impls.rs +++ b/src/parallel/into_impls.rs @@ -11,7 +11,8 @@ where { type Item = &'a A; type Iter = Parallel>; - fn into_par_iter(self) -> Self::Iter { + fn into_par_iter(self) -> Self::Iter + { self.view().into_par_iter() } } @@ -25,7 +26,8 @@ where { type Item = &'a A; type Iter = Parallel>; - fn into_par_iter(self) -> Self::Iter { + fn into_par_iter(self) -> Self::Iter + { self.view().into_par_iter() } } @@ -38,7 +40,8 @@ where { type Item = &'a mut A; type Iter = Parallel>; - fn into_par_iter(self) -> Self::Iter { + fn into_par_iter(self) -> Self::Iter + { self.view_mut().into_par_iter() } } @@ -52,7 +55,8 @@ where { type Item = &'a mut A; type Iter = Parallel>; - fn into_par_iter(self) -> Self::Iter { + fn into_par_iter(self) -> Self::Iter + { self.view_mut().into_par_iter() } } diff --git a/src/parallel/mod.rs b/src/parallel/mod.rs index 552515f11..0c84baa91 100644 --- a/src/parallel/mod.rs +++ b/src/parallel/mod.rs @@ -118,28 +118,20 @@ //! ``` #[allow(unused_imports)] // used by rustdoc links -use crate::{ - ArrayBase, - Array, - ArcArray, - ArrayView, - ArrayViewMut, - Zip, -}; +use crate::iter::{AxisChunksIter, AxisChunksIterMut, AxisIter, AxisIterMut}; #[allow(unused_imports)] // used by rustdoc links -use crate::iter::{ - AxisIter, - AxisIterMut, - AxisChunksIter, - AxisChunksIterMut, -}; +use crate::{ArcArray, Array, ArrayBase, ArrayView, ArrayViewMut, Zip}; /// Into- traits for creating parallelized iterators and/or using [`par_azip!`] -pub mod prelude { +pub mod prelude +{ #[doc(no_inline)] pub use rayon::prelude::{ - IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator, - IntoParallelRefMutIterator, ParallelIterator, + IndexedParallelIterator, + IntoParallelIterator, + IntoParallelRefIterator, + IntoParallelRefMutIterator, + ParallelIterator, }; pub use super::par_azip; diff --git a/src/parallel/par.rs b/src/parallel/par.rs index cc905b5cf..b59af4c8e 100644 --- a/src/parallel/par.rs +++ b/src/parallel/par.rs @@ -13,13 +13,14 @@ use crate::iter::AxisChunksIter; use crate::iter::AxisChunksIterMut; use crate::iter::AxisIter; use crate::iter::AxisIterMut; +use crate::split_at::SplitPreference; use crate::Dimension; use crate::{ArrayView, ArrayViewMut}; -use crate::split_at::SplitPreference; /// Parallel iterator wrapper. 
#[derive(Copy, Clone, Debug)] -pub struct Parallel { +pub struct Parallel +{ iter: I, min_len: usize, } @@ -114,7 +115,7 @@ macro_rules! par_iter_wrapper { } } - } + }; } par_iter_wrapper!(AxisIter, [Sync]); @@ -216,7 +217,7 @@ macro_rules! par_iter_view_wrapper { } } - } + }; } par_iter_view_wrapper!(ArrayView, [Sync]); @@ -296,7 +297,7 @@ macro_rules! zip_impl { } } )+ - } + }; } zip_impl! { @@ -309,69 +310,71 @@ zip_impl! { } impl Parallel> -where - D: Dimension, +where D: Dimension { /// Sets the minimum number of elements desired to process in each job. This will not be /// split any smaller than this length, but of course a producer could already be smaller /// to begin with. /// /// ***Panics*** if `min_len` is zero. - pub fn with_min_len(self, min_len: usize) -> Self { + pub fn with_min_len(self, min_len: usize) -> Self + { assert_ne!(min_len, 0, "Minimum number of elements must at least be one to avoid splitting off empty tasks."); - Self { - min_len, - ..self - } + Self { min_len, ..self } } } /// A parallel iterator (unindexed) that produces the splits of the array /// or producer `P`. -pub(crate) struct ParallelSplits

<P> { +pub(crate) struct ParallelSplits<P>
+{ pub(crate) iter: P, pub(crate) max_splits: usize, } impl

<P> ParallelIterator for ParallelSplits<P>
- where P: SplitPreference + Send, +where P: SplitPreference + Send { type Item = P; fn drive_unindexed(self, consumer: C) -> C::Result - where C: UnindexedConsumer + where C: UnindexedConsumer { bridge_unindexed(self, consumer) } - fn opt_len(&self) -> Option { + fn opt_len(&self) -> Option + { None } } impl

<P> UnindexedProducer for ParallelSplits<P>
- where P: SplitPreference + Send, +where P: SplitPreference + Send { type Item = P; - fn split(self) -> (Self, Option) { + fn split(self) -> (Self, Option) + { if self.max_splits == 0 || !self.iter.can_split() { - return (self, None) + return (self, None); } let (a, b) = self.iter.split(); - (ParallelSplits { - iter: a, - max_splits: self.max_splits - 1, - }, - Some(ParallelSplits { - iter: b, - max_splits: self.max_splits - 1, - })) + ( + ParallelSplits { + iter: a, + max_splits: self.max_splits - 1, + }, + Some(ParallelSplits { + iter: b, + max_splits: self.max_splits - 1, + }), + ) } fn fold_with(self, folder: Fold) -> Fold - where Fold: Folder, + where Fold: Folder { folder.consume(self.iter) } diff --git a/src/parallel/send_producer.rs b/src/parallel/send_producer.rs index 5324b3490..ecfb77af0 100644 --- a/src/parallel/send_producer.rs +++ b/src/parallel/send_producer.rs @@ -1,32 +1,44 @@ - use crate::imp_prelude::*; use crate::{Layout, NdProducer}; use std::ops::{Deref, DerefMut}; /// An NdProducer that is unconditionally `Send`. #[repr(transparent)] -pub(crate) struct SendProducer { - inner: T +pub(crate) struct SendProducer +{ + inner: T, } -impl SendProducer { +impl SendProducer +{ /// Create an unconditionally `Send` ndproducer from the producer - pub(crate) unsafe fn new(producer: T) -> Self { Self { inner: producer } } + pub(crate) unsafe fn new(producer: T) -> Self + { + Self { inner: producer } + } } -unsafe impl

<P: Send> Send for SendProducer<P>
{ } +unsafe impl

<P: Send> Send for SendProducer<P>
{} -impl

<P> Deref for SendProducer<P>
{ +impl

<P> Deref for SendProducer<P>
+{ type Target = P; - fn deref(&self) -> &P { &self.inner } + fn deref(&self) -> &P + { + &self.inner + } } -impl

<P> DerefMut for SendProducer<P> {
{ - fn deref_mut(&mut self) -> &mut P { &mut self.inner } +impl

<P> DerefMut for SendProducer<P> +{ + fn deref_mut(&mut self) -> &mut P + { + &mut self.inner + } } impl<P> NdProducer for SendProducer<P>
- where P: NdProducer, +where P: NdProducer { type Item = P::Item; type Dim = P::Dim; @@ -36,48 +48,56 @@ impl<P> NdProducer for SendProducer<P>
private_impl! {} #[inline(always)] - fn raw_dim(&self) -> Self::Dim { + fn raw_dim(&self) -> Self::Dim + { self.inner.raw_dim() } #[inline(always)] - fn equal_dim(&self, dim: &Self::Dim) -> bool { + fn equal_dim(&self, dim: &Self::Dim) -> bool + { self.inner.equal_dim(dim) } #[inline(always)] - fn as_ptr(&self) -> Self::Ptr { + fn as_ptr(&self) -> Self::Ptr + { self.inner.as_ptr() } #[inline(always)] - fn layout(&self) -> Layout { + fn layout(&self) -> Layout + { self.inner.layout() } #[inline(always)] - unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item { + unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item + { self.inner.as_ref(ptr) } #[inline(always)] - unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr + { self.inner.uget_ptr(i) } #[inline(always)] - fn stride_of(&self, axis: Axis) -> Self::Stride { + fn stride_of(&self, axis: Axis) -> Self::Stride + { self.inner.stride_of(axis) } #[inline(always)] - fn contiguous_stride(&self) -> Self::Stride { + fn contiguous_stride(&self) -> Self::Stride + { self.inner.contiguous_stride() } - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) + { let (a, b) = self.inner.split_at(axis, index); (Self { inner: a }, Self { inner: b }) } } - diff --git a/src/partial.rs b/src/partial.rs index a835081c4..99aba75a8 100644 --- a/src/partial.rs +++ b/src/partial.rs @@ -12,14 +12,16 @@ use std::ptr; /// it is the owner of the elements, but not the allocation, /// and will drop the elements on drop. #[must_use] -pub(crate) struct Partial { +pub(crate) struct Partial +{ /// Data pointer ptr: *mut T, /// Current length pub(crate) len: usize, } -impl Partial { +impl Partial +{ /// Create an empty partial for this data pointer /// /// ## Safety @@ -29,25 +31,29 @@ impl Partial { /// the `len` elements following it valid. /// /// The Partial has an accessible length field which must only be modified in trusted code. - pub(crate) unsafe fn new(ptr: *mut T) -> Self { - Self { - ptr, - len: 0, - } + pub(crate) unsafe fn new(ptr: *mut T) -> Self + { + Self { ptr, len: 0 } } #[cfg(feature = "rayon")] - pub(crate) fn stub() -> Self { - Self { len: 0, ptr: ptr::null_mut() } + pub(crate) fn stub() -> Self + { + Self { + len: 0, + ptr: ptr::null_mut(), + } } #[cfg(feature = "rayon")] - pub(crate) fn is_stub(&self) -> bool { + pub(crate) fn is_stub(&self) -> bool + { self.ptr.is_null() } /// Release Partial's ownership of the written elements, and return the current length - pub(crate) fn release_ownership(mut self) -> usize { + pub(crate) fn release_ownership(mut self) -> usize + { let ret = self.len; self.len = 0; ret @@ -56,7 +62,8 @@ impl Partial { #[cfg(feature = "rayon")] /// Merge if they are in order (left to right) and contiguous. /// Skips merge if T does not need drop. - pub(crate) fn try_merge(mut left: Self, right: Self) -> Self { + pub(crate) fn try_merge(mut left: Self, right: Self) -> Self + { if !std::mem::needs_drop::() { return left; } @@ -75,10 +82,12 @@ impl Partial { } } -unsafe impl Send for Partial where T: Send { } +unsafe impl Send for Partial where T: Send {} -impl Drop for Partial { - fn drop(&mut self) { +impl Drop for Partial +{ + fn drop(&mut self) + { if !self.ptr.is_null() { unsafe { ptr::drop_in_place(alloc::slice::from_raw_parts_mut(self.ptr, self.len)); diff --git a/src/prelude.rs b/src/prelude.rs index a25fc8780..acf39da1a 100644 --- a/src/prelude.rs +++ b/src/prelude.rs @@ -18,9 +18,7 @@ //! 
``` #[doc(no_inline)] -pub use crate::{ - ArcArray, Array, ArrayBase, ArrayView, ArrayViewMut, CowArray, RawArrayView, RawArrayViewMut, -}; +pub use crate::{ArcArray, Array, ArrayBase, ArrayView, ArrayViewMut, CowArray, RawArrayView, RawArrayViewMut}; #[doc(no_inline)] pub use crate::{Axis, Dim, Dimension}; @@ -29,14 +27,18 @@ pub use crate::{Axis, Dim, Dimension}; pub use crate::{Array0, Array1, Array2, Array3, Array4, Array5, Array6, ArrayD}; #[doc(no_inline)] -pub use crate::{ - ArrayView0, ArrayView1, ArrayView2, ArrayView3, ArrayView4, ArrayView5, ArrayView6, ArrayViewD, -}; +pub use crate::{ArrayView0, ArrayView1, ArrayView2, ArrayView3, ArrayView4, ArrayView5, ArrayView6, ArrayViewD}; #[doc(no_inline)] pub use crate::{ - ArrayViewMut0, ArrayViewMut1, ArrayViewMut2, ArrayViewMut3, ArrayViewMut4, ArrayViewMut5, - ArrayViewMut6, ArrayViewMutD, + ArrayViewMut0, + ArrayViewMut1, + ArrayViewMut2, + ArrayViewMut3, + ArrayViewMut4, + ArrayViewMut5, + ArrayViewMut6, + ArrayViewMutD, }; #[doc(no_inline)] diff --git a/src/private.rs b/src/private.rs index 1fa779ff0..9dade0c48 100644 --- a/src/private.rs +++ b/src/private.rs @@ -13,7 +13,7 @@ macro_rules! private_decl { /// impossible to implement outside the crate. #[doc(hidden)] fn __private__(&self) -> crate::private::PrivateMarker; - } + }; } macro_rules! private_impl { diff --git a/src/shape_builder.rs b/src/shape_builder.rs index 877102b5c..8b25f71e7 100644 --- a/src/shape_builder.rs +++ b/src/shape_builder.rs @@ -1,12 +1,13 @@ use crate::dimension::IntoDimension; -use crate::Dimension; use crate::order::Order; +use crate::Dimension; /// A contiguous array shape of n dimensions. /// /// Either c- or f- memory ordered (*c* a.k.a *row major* is the default). #[derive(Copy, Clone, Debug)] -pub struct Shape { +pub struct Shape +{ /// Shape (axis lengths) pub(crate) dim: D, /// Strides can only be C or F here @@ -16,36 +17,41 @@ pub struct Shape { #[derive(Copy, Clone, Debug)] pub(crate) enum Contiguous {} -impl Shape { - pub(crate) fn is_c(&self) -> bool { +impl Shape +{ + pub(crate) fn is_c(&self) -> bool + { matches!(self.strides, Strides::C) } } /// An array shape of n dimensions in c-order, f-order or custom strides. #[derive(Copy, Clone, Debug)] -pub struct StrideShape { +pub struct StrideShape +{ pub(crate) dim: D, pub(crate) strides: Strides, } impl StrideShape -where - D: Dimension, +where D: Dimension { /// Return a reference to the dimension - pub fn raw_dim(&self) -> &D { + pub fn raw_dim(&self) -> &D + { &self.dim } /// Return the size of the shape in number of elements - pub fn size(&self) -> usize { + pub fn size(&self) -> usize + { self.dim.size() } } /// Stride description #[derive(Copy, Clone, Debug)] -pub(crate) enum Strides { +pub(crate) enum Strides +{ /// Row-major ("C"-order) C, /// Column-major ("F"-order) @@ -54,11 +60,11 @@ pub(crate) enum Strides { Custom(D), } -impl Strides { +impl Strides +{ /// Return strides for `dim` (computed from dimension if c/f, else return the custom stride) pub(crate) fn strides_for_dim(self, dim: &D) -> D - where - D: Dimension, + where D: Dimension { match self { Strides::C => dim.default_strides(), @@ -76,7 +82,8 @@ impl Strides { } } - pub(crate) fn is_custom(&self) -> bool { + pub(crate) fn is_custom(&self) -> bool + { matches!(*self, Strides::Custom(_)) } } @@ -86,7 +93,8 @@ impl Strides { /// /// This trait is used together with array constructor methods like /// `Array::from_shape_vec`. 
-pub trait ShapeBuilder { +pub trait ShapeBuilder +{ type Dim: Dimension; type Strides; @@ -97,11 +105,11 @@ pub trait ShapeBuilder { } impl From for Shape -where - D: Dimension, +where D: Dimension { /// Create a `Shape` from `dimension`, using the default memory layout. - fn from(dimension: D) -> Shape { + fn from(dimension: D) -> Shape + { dimension.into_shape_with_order() } } @@ -111,7 +119,8 @@ where D: Dimension, T: ShapeBuilder, { - fn from(value: T) -> Self { + fn from(value: T) -> Self + { let shape = value.into_shape_with_order(); let st = if shape.is_c() { Strides::C } else { Strides::F }; StrideShape { @@ -122,49 +131,55 @@ where } impl ShapeBuilder for T -where - T: IntoDimension, +where T: IntoDimension { type Dim = T::Dim; type Strides = T; - fn into_shape_with_order(self) -> Shape { + fn into_shape_with_order(self) -> Shape + { Shape { dim: self.into_dimension(), strides: Strides::C, } } - fn f(self) -> Shape { + fn f(self) -> Shape + { self.set_f(true) } - fn set_f(self, is_f: bool) -> Shape { + fn set_f(self, is_f: bool) -> Shape + { self.into_shape_with_order().set_f(is_f) } - fn strides(self, st: T) -> StrideShape { + fn strides(self, st: T) -> StrideShape + { self.into_shape_with_order().strides(st.into_dimension()) } } impl ShapeBuilder for Shape -where - D: Dimension, +where D: Dimension { type Dim = D; type Strides = D; - fn into_shape_with_order(self) -> Shape { + fn into_shape_with_order(self) -> Shape + { self } - fn f(self) -> Self { + fn f(self) -> Self + { self.set_f(true) } - fn set_f(mut self, is_f: bool) -> Self { + fn set_f(mut self, is_f: bool) -> Self + { self.strides = if !is_f { Strides::C } else { Strides::F }; self } - fn strides(self, st: D) -> StrideShape { + fn strides(self, st: D) -> StrideShape + { StrideShape { dim: self.dim, strides: Strides::Custom(st), @@ -173,20 +188,20 @@ where } impl Shape -where - D: Dimension, +where D: Dimension { /// Return a reference to the dimension - pub fn raw_dim(&self) -> &D { + pub fn raw_dim(&self) -> &D + { &self.dim } /// Return the size of the shape in number of elements - pub fn size(&self) -> usize { + pub fn size(&self) -> usize + { self.dim.size() } } - /// Array shape argument with optional order parameter /// /// Shape or array dimension argument, with optional [`Order`] parameter. @@ -195,23 +210,30 @@ where /// (optionally) an ordering argument. /// /// See for example [`.to_shape()`](crate::ArrayBase::to_shape). -pub trait ShapeArg { +pub trait ShapeArg +{ type Dim: Dimension; fn into_shape_and_order(self) -> (Self::Dim, Option); } -impl ShapeArg for T where T: IntoDimension { +impl ShapeArg for T +where T: IntoDimension +{ type Dim = T::Dim; - fn into_shape_and_order(self) -> (Self::Dim, Option) { + fn into_shape_and_order(self) -> (Self::Dim, Option) + { (self.into_dimension(), None) } } -impl ShapeArg for (T, Order) where T: IntoDimension { +impl ShapeArg for (T, Order) +where T: IntoDimension +{ type Dim = T::Dim; - fn into_shape_and_order(self) -> (Self::Dim, Option) { + fn into_shape_and_order(self) -> (Self::Dim, Option) + { (self.0.into_dimension(), Some(self.1)) } } diff --git a/src/slice.rs b/src/slice.rs index 14ab0dd67..9e6acc449 100644 --- a/src/slice.rs +++ b/src/slice.rs @@ -36,7 +36,8 @@ use std::ops::{Deref, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, Rang /// reverse order. It can also be created with `Slice::from(a..).step_by(-1)`. /// The Python equivalent is `[a::-1]`. 
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub struct Slice { +pub struct Slice +{ /// start index; negative are counted from the back of the axis pub start: isize, /// end index; negative are counted from the back of the axis; when not present @@ -46,7 +47,8 @@ pub struct Slice { pub step: isize, } -impl Slice { +impl Slice +{ /// Create a new `Slice` with the given extents. /// /// See also the `From` impls, converting from ranges; for example @@ -54,7 +56,8 @@ impl Slice { /// /// `step` must be nonzero. /// (This method checks with a debug assertion that `step` is not zero.) - pub fn new(start: isize, end: Option, step: isize) -> Slice { + pub fn new(start: isize, end: Option, step: isize) -> Slice + { debug_assert_ne!(step, 0, "Slice::new: step must be nonzero"); Slice { start, end, step } } @@ -65,7 +68,8 @@ impl Slice { /// `step` must be nonzero. /// (This method checks with a debug assertion that `step` is not zero.) #[inline] - pub fn step_by(self, step: isize) -> Self { + pub fn step_by(self, step: isize) -> Self + { debug_assert_ne!(step, 0, "Slice::step_by: step must be nonzero"); Slice { step: self.step * step, @@ -109,11 +113,13 @@ pub struct NewAxis; /// with `SliceInfoElem::from(NewAxis)`. The Python equivalent is /// `[np.newaxis]`. The macro equivalent is `s![NewAxis]`. #[derive(Debug, PartialEq, Eq, Hash)] -pub enum SliceInfoElem { +pub enum SliceInfoElem +{ /// A range with step size. `end` is an exclusive index. Negative `start` /// or `end` indexes are counted from the back of the axis. If `end` is /// `None`, the slice extends to the end of the axis. - Slice { + Slice + { /// start index; negative are counted from the back of the axis start: isize, /// end index; negative are counted from the back of the axis; when not present @@ -130,25 +136,31 @@ pub enum SliceInfoElem { copy_and_clone! {SliceInfoElem} -impl SliceInfoElem { +impl SliceInfoElem +{ /// Returns `true` if `self` is a `Slice` value. - pub fn is_slice(&self) -> bool { + pub fn is_slice(&self) -> bool + { matches!(self, SliceInfoElem::Slice { .. }) } /// Returns `true` if `self` is an `Index` value. - pub fn is_index(&self) -> bool { + pub fn is_index(&self) -> bool + { matches!(self, SliceInfoElem::Index(_)) } /// Returns `true` if `self` is a `NewAxis` value. 
- pub fn is_new_axis(&self) -> bool { + pub fn is_new_axis(&self) -> bool + { matches!(self, SliceInfoElem::NewAxis) } } -impl fmt::Display for SliceInfoElem { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { +impl fmt::Display for SliceInfoElem +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { match *self { SliceInfoElem::Index(index) => write!(f, "{}", index)?, SliceInfoElem::Slice { start, end, step } => { @@ -236,9 +248,11 @@ impl_slice_variant_from_range!(SliceInfoElem, SliceInfoElem::Slice, isize); impl_slice_variant_from_range!(SliceInfoElem, SliceInfoElem::Slice, usize); impl_slice_variant_from_range!(SliceInfoElem, SliceInfoElem::Slice, i32); -impl From for Slice { +impl From for Slice +{ #[inline] - fn from(_: RangeFull) -> Slice { + fn from(_: RangeFull) -> Slice + { Slice { start: 0, end: None, @@ -247,9 +261,11 @@ impl From for Slice { } } -impl From for SliceInfoElem { +impl From for SliceInfoElem +{ #[inline] - fn from(_: RangeFull) -> SliceInfoElem { + fn from(_: RangeFull) -> SliceInfoElem + { SliceInfoElem::Slice { start: 0, end: None, @@ -258,9 +274,11 @@ impl From for SliceInfoElem { } } -impl From for SliceInfoElem { +impl From for SliceInfoElem +{ #[inline] - fn from(s: Slice) -> SliceInfoElem { + fn from(s: Slice) -> SliceInfoElem + { SliceInfoElem::Slice { start: s.start, end: s.end, @@ -283,9 +301,11 @@ impl_sliceinfoelem_from_index!(isize); impl_sliceinfoelem_from_index!(usize); impl_sliceinfoelem_from_index!(i32); -impl From for SliceInfoElem { +impl From for SliceInfoElem +{ #[inline] - fn from(_: NewAxis) -> SliceInfoElem { + fn from(_: NewAxis) -> SliceInfoElem + { SliceInfoElem::NewAxis } } @@ -297,7 +317,8 @@ impl From for SliceInfoElem { /// consistent with the `&[SliceInfoElem]` returned by `self.as_ref()` and that /// `self.as_ref()` always returns the same value when called multiple times. #[allow(clippy::missing_safety_doc)] // not implementable downstream -pub unsafe trait SliceArg: AsRef<[SliceInfoElem]> { +pub unsafe trait SliceArg: AsRef<[SliceInfoElem]> +{ /// Dimensionality of the output array. type OutDim: Dimension; @@ -317,11 +338,13 @@ where { type OutDim = T::OutDim; - fn in_ndim(&self) -> usize { + fn in_ndim(&self) -> usize + { T::in_ndim(self) } - fn out_ndim(&self) -> usize { + fn out_ndim(&self) -> usize + { T::out_ndim(self) } @@ -365,25 +388,30 @@ where { type OutDim = Dout; - fn in_ndim(&self) -> usize { + fn in_ndim(&self) -> usize + { self.in_ndim() } - fn out_ndim(&self) -> usize { + fn out_ndim(&self) -> usize + { self.out_ndim() } private_impl! {} } -unsafe impl SliceArg for [SliceInfoElem] { +unsafe impl SliceArg for [SliceInfoElem] +{ type OutDim = IxDyn; - fn in_ndim(&self) -> usize { + fn in_ndim(&self) -> usize + { self.iter().filter(|s| !s.is_new_axis()).count() } - fn out_ndim(&self) -> usize { + fn out_ndim(&self) -> usize + { self.iter().filter(|s| !s.is_index()).count() } @@ -401,7 +429,8 @@ unsafe impl SliceArg for [SliceInfoElem] { /// /// [`.slice()`]: crate::ArrayBase::slice #[derive(Debug)] -pub struct SliceInfo { +pub struct SliceInfo +{ in_dim: PhantomData, out_dim: PhantomData, indices: T, @@ -413,7 +442,8 @@ where Dout: Dimension, { type Target = T; - fn deref(&self) -> &Self::Target { + fn deref(&self) -> &Self::Target + { &self.indices } } @@ -453,10 +483,9 @@ where /// when called multiple times. 
#[doc(hidden)] pub unsafe fn new_unchecked( - indices: T, - in_dim: PhantomData, - out_dim: PhantomData, - ) -> SliceInfo { + indices: T, in_dim: PhantomData, out_dim: PhantomData, + ) -> SliceInfo + { if cfg!(debug_assertions) { check_dims_for_sliceinfo::(indices.as_ref()) .expect("`Din` and `Dout` must be consistent with `indices`."); @@ -478,7 +507,8 @@ where /// /// The caller must ensure `indices.as_ref()` always returns the same value /// when called multiple times. - pub unsafe fn new(indices: T) -> Result, ShapeError> { + pub unsafe fn new(indices: T) -> Result, ShapeError> + { check_dims_for_sliceinfo::(indices.as_ref())?; Ok(SliceInfo { in_dim: PhantomData, @@ -493,7 +523,8 @@ where /// If `Din` is a fixed-size dimension type, then this is equivalent to /// `Din::NDIM.unwrap()`. Otherwise, the value is calculated by iterating /// over the `SliceInfoElem` elements. - pub fn in_ndim(&self) -> usize { + pub fn in_ndim(&self) -> usize + { if let Some(ndim) = Din::NDIM { ndim } else { @@ -508,7 +539,8 @@ where /// If `Dout` is a fixed-size dimension type, then this is equivalent to /// `Dout::NDIM.unwrap()`. Otherwise, the value is calculated by iterating /// over the `SliceInfoElem` elements. - pub fn out_ndim(&self) -> usize { + pub fn out_ndim(&self) -> usize + { if let Some(ndim) = Dout::NDIM { ndim } else { @@ -524,9 +556,8 @@ where { type Error = ShapeError; - fn try_from( - indices: &'a [SliceInfoElem], - ) -> Result, ShapeError> { + fn try_from(indices: &'a [SliceInfoElem]) -> Result, ShapeError> + { unsafe { // This is okay because `&[SliceInfoElem]` always returns the same // value for `.as_ref()`. @@ -542,9 +573,8 @@ where { type Error = ShapeError; - fn try_from( - indices: Vec, - ) -> Result, Din, Dout>, ShapeError> { + fn try_from(indices: Vec) -> Result, Din, Dout>, ShapeError> + { unsafe { // This is okay because `Vec` always returns the same value for // `.as_ref()`. @@ -591,19 +621,20 @@ where Din: Dimension, Dout: Dimension, { - fn as_ref(&self) -> &[SliceInfoElem] { + fn as_ref(&self) -> &[SliceInfoElem] + { self.indices.as_ref() } } -impl<'a, T, Din, Dout> From<&'a SliceInfo> - for SliceInfo<&'a [SliceInfoElem], Din, Dout> +impl<'a, T, Din, Dout> From<&'a SliceInfo> for SliceInfo<&'a [SliceInfoElem], Din, Dout> where T: AsRef<[SliceInfoElem]>, Din: Dimension, Dout: Dimension, { - fn from(info: &'a SliceInfo) -> SliceInfo<&'a [SliceInfoElem], Din, Dout> { + fn from(info: &'a SliceInfo) -> SliceInfo<&'a [SliceInfoElem], Din, Dout> + { SliceInfo { in_dim: info.in_dim, out_dim: info.out_dim, @@ -626,7 +657,8 @@ where Din: Dimension, Dout: Dimension, { - fn clone(&self) -> Self { + fn clone(&self) -> Self + { SliceInfo { in_dim: PhantomData, out_dim: PhantomData, @@ -637,22 +669,21 @@ where /// Trait for determining dimensionality of input and output for [`s!`] macro. #[doc(hidden)] -pub trait SliceNextDim { +pub trait SliceNextDim +{ /// Number of dimensions that this slicing argument consumes in the input array. type InDim: Dimension; /// Number of dimensions that this slicing argument produces in the output array. 
type OutDim: Dimension; fn next_in_dim(&self, _: PhantomData) -> PhantomData<>::Output> - where - D: Dimension + DimAdd, + where D: Dimension + DimAdd { PhantomData } fn next_out_dim(&self, _: PhantomData) -> PhantomData<>::Output> - where - D: Dimension + DimAdd, + where D: Dimension + DimAdd { PhantomData } @@ -915,7 +946,8 @@ where { type Output = (ArrayViewMut<'a, A, I0::OutDim>,); - fn multi_slice_move(&self, view: ArrayViewMut<'a, A, D>) -> Self::Output { + fn multi_slice_move(&self, view: ArrayViewMut<'a, A, D>) -> Self::Output + { (view.slice_move(&self.0),) } @@ -977,7 +1009,8 @@ where { type Output = T::Output; - fn multi_slice_move(&self, view: ArrayViewMut<'a, A, D>) -> Self::Output { + fn multi_slice_move(&self, view: ArrayViewMut<'a, A, D>) -> Self::Output + { T::multi_slice_move(self, view) } diff --git a/src/split_at.rs b/src/split_at.rs index 50466afdf..4af1403c0 100644 --- a/src/split_at.rs +++ b/src/split_at.rs @@ -1,26 +1,30 @@ - use crate::imp_prelude::*; /// Arrays and similar that can be split along an axis -pub(crate) trait SplitAt { - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) where Self: Sized; +pub(crate) trait SplitAt +{ + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) + where Self: Sized; } -pub(crate) trait SplitPreference : SplitAt { +pub(crate) trait SplitPreference: SplitAt +{ #[allow(dead_code)] // used only when Rayon support is enabled fn can_split(&self) -> bool; fn split_preference(&self) -> (Axis, usize); - fn split(self) -> (Self, Self) where Self: Sized { + fn split(self) -> (Self, Self) + where Self: Sized + { let (axis, index) = self.split_preference(); self.split_at(axis, index) } } impl SplitAt for D -where - D: Dimension, +where D: Dimension { - fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) { + fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) + { let mut d1 = self; let mut d2 = d1.clone(); let i = axis.index(); @@ -32,18 +36,19 @@ where } impl<'a, A, D> SplitAt for ArrayViewMut<'a, A, D> - where D: Dimension +where D: Dimension { - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) + { self.split_at(axis, index) } } - impl SplitAt for RawArrayViewMut - where D: Dimension +where D: Dimension { - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) + { self.split_at(axis, index) } } diff --git a/src/stacking.rs b/src/stacking.rs index 16058f39d..120baad14 100644 --- a/src/stacking.rs +++ b/src/stacking.rs @@ -37,10 +37,7 @@ use crate::imp_prelude::*; /// ); /// # } /// ``` -pub fn stack( - axis: Axis, - arrays: &[ArrayView], -) -> Result, ShapeError> +pub fn stack(axis: Axis, arrays: &[ArrayView]) -> Result, ShapeError> where A: Clone, D: Dimension, @@ -109,7 +106,7 @@ where Ok(res) } -#[deprecated(note="Use under the name stack instead.", since="0.15.0")] +#[deprecated(note = "Use under the name stack instead.", since = "0.15.0")] /// Stack arrays along the new axis. /// /// ***Errors*** if the arrays have mismatching shapes. @@ -134,10 +131,7 @@ where /// ); /// # } /// ``` -pub fn stack_new_axis( - axis: Axis, - arrays: &[ArrayView], -) -> Result, ShapeError> +pub fn stack_new_axis(axis: Axis, arrays: &[ArrayView]) -> Result, ShapeError> where A: Clone, D: Dimension, @@ -292,9 +286,9 @@ macro_rules! 
concatenate { /// # } /// ``` #[macro_export] -#[deprecated(note="Use under the name stack instead.", since="0.15.0")] +#[deprecated(note = "Use under the name stack instead.", since = "0.15.0")] macro_rules! stack_new_axis { ($axis:expr, $( $array:expr ),+ ) => { $crate::stack_new_axis($axis, &[ $($crate::ArrayView::from(&$array) ),* ]).unwrap() - } + }; } diff --git a/src/zip/mod.rs b/src/zip/mod.rs index a94f74518..aadc93032 100644 --- a/src/zip/mod.rs +++ b/src/zip/mod.rs @@ -14,16 +14,16 @@ mod ndproducer; use std::mem::MaybeUninit; use crate::imp_prelude::*; +use crate::partial::Partial; use crate::AssignElem; use crate::IntoDimension; use crate::Layout; -use crate::partial::Partial; -use crate::indexes::{indices, Indices}; -use crate::split_at::{SplitPreference, SplitAt}; use crate::dimension; +use crate::indexes::{indices, Indices}; +use crate::split_at::{SplitAt, SplitPreference}; -pub use self::ndproducer::{NdProducer, IntoNdProducer, Offset}; +pub use self::ndproducer::{IntoNdProducer, NdProducer, Offset}; /// Return if the expression is a break value. macro_rules! fold_while { @@ -39,8 +39,7 @@ macro_rules! fold_while { /// /// See [broadcasting](ArrayBase#broadcasting) for more information. trait Broadcast -where - E: IntoDimension, +where E: IntoDimension { type Output: NdProducer; /// Broadcast the array to the new dimensions `shape`. @@ -52,7 +51,8 @@ where } /// Compute `Layout` hints for array shape dim, strides -fn array_layout(dim: &D, strides: &D) -> Layout { +fn array_layout(dim: &D, strides: &D) -> Layout +{ let n = dim.ndim(); if dimension::is_layout_c(dim, strides) { // effectively one-dimensional => C and F layout compatible @@ -81,7 +81,8 @@ where S: RawData, D: Dimension, { - pub(crate) fn layout_impl(&self) -> Layout { + pub(crate) fn layout_impl(&self) -> Layout + { array_layout(&self.dim, &self.strides) } } @@ -92,7 +93,8 @@ where D: Dimension, { type Output = ArrayView<'a, A, E::Dim>; - fn broadcast_unwrap(self, shape: E) -> Self::Output { + fn broadcast_unwrap(self, shape: E) -> Self::Output + { #[allow(clippy::needless_borrow)] let res: ArrayView<'_, A, E::Dim> = (&self).broadcast_unwrap(shape.into_dimension()); unsafe { ArrayView::new(res.ptr, res.dim, res.strides) } @@ -100,7 +102,8 @@ where private_impl! {} } -trait ZippableTuple: Sized { +trait ZippableTuple: Sized +{ type Item; type Ptr: OffsetTuple + Copy; type Dim: Dimension; @@ -189,7 +192,8 @@ trait ZippableTuple: Sized { /// ``` #[derive(Debug, Clone)] #[must_use = "zipping producers is lazy and does nothing unless consumed"] -pub struct Zip { +pub struct Zip +{ parts: Parts, dimension: D, layout: Layout, @@ -198,7 +202,6 @@ pub struct Zip { layout_tendency: i32, } - impl Zip<(P,), D> where D: Dimension, @@ -209,8 +212,7 @@ where /// The Zip will take the exact dimension of `p` and all inputs /// must have the same dimensions (or be broadcast to them). pub fn from(p: IP) -> Self - where - IP: IntoNdProducer, + where IP: IntoNdProducer { let array = p.into_producer(); let dim = array.raw_dim(); @@ -235,8 +237,7 @@ where /// /// *Note:* Indexed zip has overhead. 
pub fn indexed(p: IP) -> Self - where - IP: IntoNdProducer, + where IP: IntoNdProducer { let array = p.into_producer(); let dim = array.raw_dim(); @@ -258,13 +259,12 @@ where ); } - impl Zip -where - D: Dimension, +where D: Dimension { /// Return a the number of element tuples in the Zip - pub fn size(&self) -> usize { + pub fn size(&self) -> usize + { self.dimension.size() } @@ -272,30 +272,30 @@ where /// /// ***Panics*** if `axis` is out of bounds. #[track_caller] - fn len_of(&self, axis: Axis) -> usize { + fn len_of(&self, axis: Axis) -> usize + { self.dimension[axis.index()] } - fn prefer_f(&self) -> bool { - !self.layout.is(Layout::CORDER) && - (self.layout.is(Layout::FORDER) || self.layout_tendency < 0) + fn prefer_f(&self) -> bool + { + !self.layout.is(Layout::CORDER) && (self.layout.is(Layout::FORDER) || self.layout_tendency < 0) } /// Return an *approximation* to the max stride axis; if /// component arrays disagree, there may be no choice better than the /// others. - fn max_stride_axis(&self) -> Axis { + fn max_stride_axis(&self) -> Axis + { let i = if self.prefer_f() { - self - .dimension + self.dimension .slice() .iter() .rposition(|&len| len > 1) .unwrap_or(self.dimension.ndim() - 1) } else { /* corder or default */ - self - .dimension + self.dimension .slice() .iter() .position(|&len| len > 1) @@ -306,8 +306,7 @@ where } impl Zip -where - D: Dimension, +where D: Dimension { fn for_each_core(&mut self, acc: Acc, mut function: F) -> FoldWhile where @@ -332,9 +331,7 @@ where let size = self.dimension.size(); let ptrs = self.parts.as_ptr(); let inner_strides = self.parts.contiguous_stride(); - unsafe { - self.inner(acc, ptrs, inner_strides, size, &mut function) - } + unsafe { self.inner(acc, ptrs, inner_strides, size, &mut function) } } /// The innermost loop of the Zip for_each methods @@ -345,11 +342,12 @@ where /// `strides`: strides for the elements in this stretch /// `len`: number of elements /// `function`: closure - unsafe fn inner(&self, mut acc: Acc, ptr: P::Ptr, strides: P::Stride, - len: usize, function: &mut F) -> FoldWhile + unsafe fn inner( + &self, mut acc: Acc, ptr: P::Ptr, strides: P::Stride, len: usize, function: &mut F, + ) -> FoldWhile where F: FnMut(Acc, P::Item) -> FoldWhile, - P: ZippableTuple + P: ZippableTuple, { let mut i = 0; while i < len { @@ -360,7 +358,6 @@ where FoldWhile::Continue(acc) } - fn for_each_core_strided(&mut self, acc: Acc, function: F) -> FoldWhile where F: FnMut(Acc, P::Item) -> FoldWhile, @@ -439,13 +436,14 @@ where impl Zip<(P1, P2), D> where D: Dimension, - P1: NdProducer, - P1: NdProducer, + P1: NdProducer, + P1: NdProducer, { /// Debug assert traversal order is like c (including 1D case) // Method placement: only used for binary Zip at the moment. 
#[inline] - pub(crate) fn debug_assert_c_order(self) -> Self { + pub(crate) fn debug_assert_c_order(self) -> Self + { debug_assert!(self.layout.is(Layout::CORDER) || self.layout_tendency >= 0 || self.dimension.slice().iter().filter(|&&d| d > 1).count() <= 1, "Assertion failed: traversal is not c-order or 1D for \ @@ -455,7 +453,6 @@ where } } - /* trait Offset : Copy { unsafe fn offset(self, off: isize) -> Self; @@ -471,14 +468,17 @@ impl Offset for *mut T { } */ -trait OffsetTuple { +trait OffsetTuple +{ type Args; unsafe fn stride_offset(self, stride: Self::Args, index: usize) -> Self; } -impl OffsetTuple for *mut T { +impl OffsetTuple for *mut T +{ type Args = isize; - unsafe fn stride_offset(self, stride: Self::Args, index: usize) -> Self { + unsafe fn stride_offset(self, stride: Self::Args, index: usize) -> Self + { self.offset(index as isize * stride) } } @@ -496,7 +496,7 @@ macro_rules! offset_impl { } } )+ - } + }; } offset_impl! { @@ -555,7 +555,7 @@ macro_rules! zipt_impl { } } )+ - } + }; } zipt_impl! { @@ -938,7 +938,7 @@ macro_rules! map_impl { } )+ - } + }; } map_impl! { @@ -952,23 +952,27 @@ map_impl! { /// Value controlling the execution of `.fold_while` on `Zip`. #[derive(Debug, Copy, Clone)] -pub enum FoldWhile { +pub enum FoldWhile +{ /// Continue folding with this value Continue(T), /// Fold is complete and will return this value Done(T), } -impl FoldWhile { +impl FoldWhile +{ /// Return the inner value - pub fn into_inner(self) -> T { + pub fn into_inner(self) -> T + { match self { FoldWhile::Continue(x) | FoldWhile::Done(x) => x, } } /// Return true if it is `Done`, false if `Continue` - pub fn is_done(&self) -> bool { + pub fn is_done(&self) -> bool + { match *self { FoldWhile::Continue(_) => false, FoldWhile::Done(_) => true, diff --git a/src/zip/ndproducer.rs b/src/zip/ndproducer.rs index 4eb986d37..1d1b3391b 100644 --- a/src/zip/ndproducer.rs +++ b/src/zip/ndproducer.rs @@ -9,7 +9,8 @@ use alloc::vec::Vec; /// Slices and vectors can be used (equivalent to 1-dimensional array views). /// /// This trait is like `IntoIterator` for `NdProducers` instead of iterators. -pub trait IntoNdProducer { +pub trait IntoNdProducer +{ /// The element produced per iteration. type Item; /// Dimension type of the producer @@ -20,13 +21,13 @@ pub trait IntoNdProducer { } impl
<P>
IntoNdProducer for P -where - P: NdProducer, +where P: NdProducer { type Item = P::Item; type Dim = P::Dim; type Output = Self; - fn into_producer(self) -> Self::Output { + fn into_producer(self) -> Self::Output + { self } } @@ -51,7 +52,8 @@ where /// *producing* multidimensional items). /// /// See also [`IntoNdProducer`] -pub trait NdProducer { +pub trait NdProducer +{ /// The element produced per iteration. type Item; // Internal use / Pointee type @@ -74,7 +76,8 @@ pub trait NdProducer { /// Return the shape of the producer. fn raw_dim(&self) -> Self::Dim; #[doc(hidden)] - fn equal_dim(&self, dim: &Self::Dim) -> bool { + fn equal_dim(&self, dim: &Self::Dim) -> bool + { self.raw_dim() == *dim } #[doc(hidden)] @@ -89,29 +92,33 @@ pub trait NdProducer { fn contiguous_stride(&self) -> Self::Stride; #[doc(hidden)] fn split_at(self, axis: Axis, index: usize) -> (Self, Self) - where - Self: Sized; + where Self: Sized; private_decl! {} } -pub trait Offset: Copy { +pub trait Offset: Copy +{ type Stride: Copy; unsafe fn stride_offset(self, s: Self::Stride, index: usize) -> Self; private_decl! {} } -impl Offset for *const T { +impl Offset for *const T +{ type Stride = isize; - unsafe fn stride_offset(self, s: Self::Stride, index: usize) -> Self { + unsafe fn stride_offset(self, s: Self::Stride, index: usize) -> Self + { self.offset(s * (index as isize)) } private_impl! {} } -impl Offset for *mut T { +impl Offset for *mut T +{ type Stride = isize; - unsafe fn stride_offset(self, s: Self::Stride, index: usize) -> Self { + unsafe fn stride_offset(self, s: Self::Stride, index: usize) -> Self + { self.offset(s * (index as isize)) } private_impl! {} @@ -127,7 +134,8 @@ where type Item = &'a A; type Dim = D; type Output = ArrayView<'a, A, D>; - fn into_producer(self) -> Self::Output { + fn into_producer(self) -> Self::Output + { self.view() } } @@ -142,72 +150,86 @@ where type Item = &'a mut A; type Dim = D; type Output = ArrayViewMut<'a, A, D>; - fn into_producer(self) -> Self::Output { + fn into_producer(self) -> Self::Output + { self.view_mut() } } /// A slice is a one-dimensional producer -impl<'a, A: 'a> IntoNdProducer for &'a [A] { +impl<'a, A: 'a> IntoNdProducer for &'a [A] +{ type Item = ::Item; type Dim = Ix1; type Output = ArrayView1<'a, A>; - fn into_producer(self) -> Self::Output { + fn into_producer(self) -> Self::Output + { <_>::from(self) } } /// A mutable slice is a mutable one-dimensional producer -impl<'a, A: 'a> IntoNdProducer for &'a mut [A] { +impl<'a, A: 'a> IntoNdProducer for &'a mut [A] +{ type Item = ::Item; type Dim = Ix1; type Output = ArrayViewMut1<'a, A>; - fn into_producer(self) -> Self::Output { + fn into_producer(self) -> Self::Output + { <_>::from(self) } } /// A one-dimensional array is a one-dimensional producer -impl<'a, A: 'a, const N: usize> IntoNdProducer for &'a [A; N] { +impl<'a, A: 'a, const N: usize> IntoNdProducer for &'a [A; N] +{ type Item = ::Item; type Dim = Ix1; type Output = ArrayView1<'a, A>; - fn into_producer(self) -> Self::Output { + fn into_producer(self) -> Self::Output + { <_>::from(self) } } /// A mutable one-dimensional array is a mutable one-dimensional producer -impl<'a, A: 'a, const N: usize> IntoNdProducer for &'a mut [A; N] { +impl<'a, A: 'a, const N: usize> IntoNdProducer for &'a mut [A; N] +{ type Item = ::Item; type Dim = Ix1; type Output = ArrayViewMut1<'a, A>; - fn into_producer(self) -> Self::Output { + fn into_producer(self) -> Self::Output + { <_>::from(self) } } /// A Vec is a one-dimensional producer -impl<'a, A: 'a> 
IntoNdProducer for &'a Vec { +impl<'a, A: 'a> IntoNdProducer for &'a Vec +{ type Item = ::Item; type Dim = Ix1; type Output = ArrayView1<'a, A>; - fn into_producer(self) -> Self::Output { + fn into_producer(self) -> Self::Output + { <_>::from(self) } } /// A mutable Vec is a mutable one-dimensional producer -impl<'a, A: 'a> IntoNdProducer for &'a mut Vec { +impl<'a, A: 'a> IntoNdProducer for &'a mut Vec +{ type Item = ::Item; type Dim = Ix1; type Output = ArrayViewMut1<'a, A>; - fn into_producer(self) -> Self::Output { + fn into_producer(self) -> Self::Output + { <_>::from(self) } } -impl<'a, A, D: Dimension> NdProducer for ArrayView<'a, A, D> { +impl<'a, A, D: Dimension> NdProducer for ArrayView<'a, A, D> +{ type Item = &'a A; type Dim = D; type Ptr = *mut A; @@ -215,45 +237,55 @@ impl<'a, A, D: Dimension> NdProducer for ArrayView<'a, A, D> { private_impl! {} - fn raw_dim(&self) -> Self::Dim { + fn raw_dim(&self) -> Self::Dim + { self.raw_dim() } - fn equal_dim(&self, dim: &Self::Dim) -> bool { + fn equal_dim(&self, dim: &Self::Dim) -> bool + { self.dim.equal(dim) } - fn as_ptr(&self) -> *mut A { + fn as_ptr(&self) -> *mut A + { self.as_ptr() as _ } - fn layout(&self) -> Layout { + fn layout(&self) -> Layout + { self.layout_impl() } - unsafe fn as_ref(&self, ptr: *mut A) -> Self::Item { + unsafe fn as_ref(&self, ptr: *mut A) -> Self::Item + { &*ptr } - unsafe fn uget_ptr(&self, i: &Self::Dim) -> *mut A { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> *mut A + { self.ptr.as_ptr().offset(i.index_unchecked(&self.strides)) } - fn stride_of(&self, axis: Axis) -> isize { + fn stride_of(&self, axis: Axis) -> isize + { self.stride_of(axis) } #[inline(always)] - fn contiguous_stride(&self) -> Self::Stride { + fn contiguous_stride(&self) -> Self::Stride + { 1 } - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) + { self.split_at(axis, index) } } -impl<'a, A, D: Dimension> NdProducer for ArrayViewMut<'a, A, D> { +impl<'a, A, D: Dimension> NdProducer for ArrayViewMut<'a, A, D> +{ type Item = &'a mut A; type Dim = D; type Ptr = *mut A; @@ -261,45 +293,55 @@ impl<'a, A, D: Dimension> NdProducer for ArrayViewMut<'a, A, D> { private_impl! {} - fn raw_dim(&self) -> Self::Dim { + fn raw_dim(&self) -> Self::Dim + { self.raw_dim() } - fn equal_dim(&self, dim: &Self::Dim) -> bool { + fn equal_dim(&self, dim: &Self::Dim) -> bool + { self.dim.equal(dim) } - fn as_ptr(&self) -> *mut A { + fn as_ptr(&self) -> *mut A + { self.as_ptr() as _ } - fn layout(&self) -> Layout { + fn layout(&self) -> Layout + { self.layout_impl() } - unsafe fn as_ref(&self, ptr: *mut A) -> Self::Item { + unsafe fn as_ref(&self, ptr: *mut A) -> Self::Item + { &mut *ptr } - unsafe fn uget_ptr(&self, i: &Self::Dim) -> *mut A { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> *mut A + { self.ptr.as_ptr().offset(i.index_unchecked(&self.strides)) } - fn stride_of(&self, axis: Axis) -> isize { + fn stride_of(&self, axis: Axis) -> isize + { self.stride_of(axis) } #[inline(always)] - fn contiguous_stride(&self) -> Self::Stride { + fn contiguous_stride(&self) -> Self::Stride + { 1 } - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) + { self.split_at(axis, index) } } -impl NdProducer for RawArrayView { +impl NdProducer for RawArrayView +{ type Item = *const A; type Dim = D; type Ptr = *const A; @@ -307,45 +349,55 @@ impl NdProducer for RawArrayView { private_impl! 
{} - fn raw_dim(&self) -> Self::Dim { + fn raw_dim(&self) -> Self::Dim + { self.raw_dim() } - fn equal_dim(&self, dim: &Self::Dim) -> bool { + fn equal_dim(&self, dim: &Self::Dim) -> bool + { self.dim.equal(dim) } - fn as_ptr(&self) -> *const A { + fn as_ptr(&self) -> *const A + { self.as_ptr() } - fn layout(&self) -> Layout { + fn layout(&self) -> Layout + { self.layout_impl() } - unsafe fn as_ref(&self, ptr: *const A) -> *const A { + unsafe fn as_ref(&self, ptr: *const A) -> *const A + { ptr } - unsafe fn uget_ptr(&self, i: &Self::Dim) -> *const A { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> *const A + { self.ptr.as_ptr().offset(i.index_unchecked(&self.strides)) } - fn stride_of(&self, axis: Axis) -> isize { + fn stride_of(&self, axis: Axis) -> isize + { self.stride_of(axis) } #[inline(always)] - fn contiguous_stride(&self) -> Self::Stride { + fn contiguous_stride(&self) -> Self::Stride + { 1 } - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) + { self.split_at(axis, index) } } -impl NdProducer for RawArrayViewMut { +impl NdProducer for RawArrayViewMut +{ type Item = *mut A; type Dim = D; type Ptr = *mut A; @@ -353,40 +405,49 @@ impl NdProducer for RawArrayViewMut { private_impl! {} - fn raw_dim(&self) -> Self::Dim { + fn raw_dim(&self) -> Self::Dim + { self.raw_dim() } - fn equal_dim(&self, dim: &Self::Dim) -> bool { + fn equal_dim(&self, dim: &Self::Dim) -> bool + { self.dim.equal(dim) } - fn as_ptr(&self) -> *mut A { + fn as_ptr(&self) -> *mut A + { self.as_ptr() as _ } - fn layout(&self) -> Layout { + fn layout(&self) -> Layout + { self.layout_impl() } - unsafe fn as_ref(&self, ptr: *mut A) -> *mut A { + unsafe fn as_ref(&self, ptr: *mut A) -> *mut A + { ptr } - unsafe fn uget_ptr(&self, i: &Self::Dim) -> *mut A { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> *mut A + { self.ptr.as_ptr().offset(i.index_unchecked(&self.strides)) } - fn stride_of(&self, axis: Axis) -> isize { + fn stride_of(&self, axis: Axis) -> isize + { self.stride_of(axis) } #[inline(always)] - fn contiguous_stride(&self) -> Self::Stride { + fn contiguous_stride(&self) -> Self::Stride + { 1 } - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) + { self.split_at(axis, index) } } diff --git a/tests/append.rs b/tests/append.rs index cbb10d853..78ea243b2 100644 --- a/tests/append.rs +++ b/tests/append.rs @@ -1,9 +1,9 @@ - use ndarray::prelude::*; -use ndarray::{ShapeError, ErrorKind}; +use ndarray::{ErrorKind, ShapeError}; #[test] -fn push_row() { +fn push_row() +{ let mut a = Array::zeros((0, 4)); a.push_row(aview1(&[0., 1., 2., 3.])).unwrap(); a.push_row(aview1(&[4., 5., 6., 7.])).unwrap(); @@ -25,7 +25,8 @@ fn push_row() { } #[test] -fn push_row_wrong_layout() { +fn push_row_wrong_layout() +{ let mut a = Array::zeros((0, 4)); a.push_row(aview1(&[0., 1., 2., 3.])).unwrap(); a.push_row(aview1(&[4., 5., 6., 7.])).unwrap(); @@ -44,7 +45,6 @@ fn push_row_wrong_layout() { [4., 5., 6., 7., 2.]]); assert_eq!(a2.strides(), &[1, 2]); - // Clone the array let mut dim = a.raw_dim(); @@ -58,7 +58,8 @@ fn push_row_wrong_layout() { } #[test] -fn push_row_neg_stride_1() { +fn push_row_neg_stride_1() +{ let mut a = Array::zeros((0, 4)); a.push_row(aview1(&[0., 1., 2., 3.])).unwrap(); a.push_row(aview1(&[4., 5., 6., 7.])).unwrap(); @@ -101,7 +102,8 @@ fn push_row_neg_stride_1() { } #[test] -fn push_row_neg_stride_2() { +fn push_row_neg_stride_2() +{ let mut a = Array::zeros((0, 4)); 
a.push_row(aview1(&[0., 1., 2., 3.])).unwrap(); a.push_row(aview1(&[4., 5., 6., 7.])).unwrap(); @@ -144,7 +146,8 @@ fn push_row_neg_stride_2() { } #[test] -fn push_row_error() { +fn push_row_error() +{ let mut a = Array::zeros((3, 4)); assert_eq!(a.push_row(aview1(&[1.])), @@ -162,7 +165,8 @@ fn push_row_error() { } #[test] -fn push_row_existing() { +fn push_row_existing() +{ let mut a = Array::zeros((1, 4)); a.push_row(aview1(&[0., 1., 2., 3.])).unwrap(); a.push_row(aview1(&[4., 5., 6., 7.])).unwrap(); @@ -186,7 +190,8 @@ fn push_row_existing() { } #[test] -fn push_row_col_len_1() { +fn push_row_col_len_1() +{ // Test appending 1 row and then cols from shape 1 x 1 let mut a = Array::zeros((1, 1)); a.push_row(aview1(&[1.])).unwrap(); // shape 2 x 1 @@ -203,7 +208,8 @@ fn push_row_col_len_1() { } #[test] -fn push_column() { +fn push_column() +{ let mut a = Array::zeros((4, 0)); a.push_column(aview1(&[0., 1., 2., 3.])).unwrap(); a.push_column(aview1(&[4., 5., 6., 7.])).unwrap(); @@ -215,7 +221,8 @@ fn push_column() { } #[test] -fn append_array1() { +fn append_array1() +{ let mut a = Array::zeros((0, 4)); a.append(Axis(0), aview2(&[[0., 1., 2., 3.]])).unwrap(); println!("{:?}", a); @@ -228,7 +235,8 @@ fn append_array1() { array![[0., 1., 2., 3.], [4., 5., 6., 7.]]); - a.append(Axis(0), aview2(&[[5., 5., 4., 4.], [3., 3., 2., 2.]])).unwrap(); + a.append(Axis(0), aview2(&[[5., 5., 4., 4.], [3., 3., 2., 2.]])) + .unwrap(); println!("{:?}", a); assert_eq!(a, array![[0., 1., 2., 3.], @@ -238,7 +246,8 @@ fn append_array1() { } #[test] -fn append_array_3d() { +fn append_array_3d() +{ let mut a = Array::zeros((0, 2, 2)); a.append(Axis(0), array![[[0, 1], [2, 3]]].view()).unwrap(); println!("{:?}", a); @@ -279,11 +288,16 @@ fn append_array_3d() { } #[test] -fn test_append_2d() { +fn test_append_2d() +{ // create an empty array and append let mut a = Array::zeros((0, 4)); - let ones = ArrayView::from(&[1.; 12]).into_shape_with_order((3, 4)).unwrap(); - let zeros = ArrayView::from(&[0.; 8]).into_shape_with_order((2, 4)).unwrap(); + let ones = ArrayView::from(&[1.; 12]) + .into_shape_with_order((3, 4)) + .unwrap(); + let zeros = ArrayView::from(&[0.; 8]) + .into_shape_with_order((2, 4)) + .unwrap(); a.append(Axis(0), ones).unwrap(); a.append(Axis(0), zeros).unwrap(); a.append(Axis(0), ones).unwrap(); @@ -311,24 +325,54 @@ fn test_append_2d() { } #[test] -fn test_append_middle_axis() { +fn test_append_middle_axis() +{ // ensure we can append to Axis(1) by letting it become outermost let mut a = Array::::zeros((3, 0, 2)); - a.append(Axis(1), Array::from_iter(0..12).into_shape_with_order((3, 2, 2)).unwrap().view()).unwrap(); + a.append( + Axis(1), + Array::from_iter(0..12) + .into_shape_with_order((3, 2, 2)) + .unwrap() + .view(), + ) + .unwrap(); println!("{:?}", a); - a.append(Axis(1), Array::from_iter(12..24).into_shape_with_order((3, 2, 2)).unwrap().view()).unwrap(); + a.append( + Axis(1), + Array::from_iter(12..24) + .into_shape_with_order((3, 2, 2)) + .unwrap() + .view(), + ) + .unwrap(); println!("{:?}", a); // ensure we can append to Axis(1) by letting it become outermost let mut a = Array::::zeros((3, 1, 2)); - a.append(Axis(1), Array::from_iter(0..12).into_shape_with_order((3, 2, 2)).unwrap().view()).unwrap(); + a.append( + Axis(1), + Array::from_iter(0..12) + .into_shape_with_order((3, 2, 2)) + .unwrap() + .view(), + ) + .unwrap(); println!("{:?}", a); - a.append(Axis(1), Array::from_iter(12..24).into_shape_with_order((3, 2, 2)).unwrap().view()).unwrap(); + a.append( + Axis(1), + 
Array::from_iter(12..24) + .into_shape_with_order((3, 2, 2)) + .unwrap() + .view(), + ) + .unwrap(); println!("{:?}", a); } #[test] -fn test_append_zero_size() { +fn test_append_zero_size() +{ { let mut a = Array::::zeros((0, 0)); a.append(Axis(0), aview2(&[[]])).unwrap(); @@ -339,15 +383,18 @@ fn test_append_zero_size() { { let mut a = Array::::zeros((0, 0)); - a.append(Axis(1), ArrayView::from(&[]).into_shape_with_order((0, 1)).unwrap()).unwrap(); - a.append(Axis(1), ArrayView::from(&[]).into_shape_with_order((0, 1)).unwrap()).unwrap(); + a.append(Axis(1), ArrayView::from(&[]).into_shape_with_order((0, 1)).unwrap()) + .unwrap(); + a.append(Axis(1), ArrayView::from(&[]).into_shape_with_order((0, 1)).unwrap()) + .unwrap(); assert_eq!(a.len(), 0); assert_eq!(a.shape(), &[0, 2]); } } #[test] -fn push_row_neg_stride_3() { +fn push_row_neg_stride_3() +{ let mut a = Array::zeros((0, 4)); a.push_row(aview1(&[0., 1., 2., 3.])).unwrap(); a.invert_axis(Axis(1)); @@ -358,13 +405,13 @@ fn push_row_neg_stride_3() { } #[test] -fn push_row_ignore_strides_length_one_axes() { +fn push_row_ignore_strides_length_one_axes() +{ let strides = &[0, 1, 10, 20]; for invert in &[vec![], vec![0], vec![1], vec![0, 1]] { for &stride0 in strides { for &stride1 in strides { - let mut a = - Array::from_shape_vec([1, 1].strides([stride0, stride1]), vec![0.]).unwrap(); + let mut a = Array::from_shape_vec([1, 1].strides([stride0, stride1]), vec![0.]).unwrap(); for &ax in invert { a.invert_axis(Axis(ax)); } @@ -379,20 +426,23 @@ fn push_row_ignore_strides_length_one_axes() { #[test] #[should_panic(expected = "IncompatibleShape")] -fn zero_dimensional_error1() { +fn zero_dimensional_error1() +{ let mut a = Array::zeros(()).into_dyn(); a.append(Axis(0), arr0(0).into_dyn().view()).unwrap(); } #[test] #[should_panic(expected = "IncompatibleShape")] -fn zero_dimensional_error2() { +fn zero_dimensional_error2() +{ let mut a = Array::zeros(()).into_dyn(); a.push(Axis(0), arr0(0).into_dyn().view()).unwrap(); } #[test] -fn zero_dimensional_ok() { +fn zero_dimensional_ok() +{ let mut a = Array::zeros(0); let one = aview0(&1); let two = aview0(&2); diff --git a/tests/array-construct.rs b/tests/array-construct.rs index a3949fcab..f7339dff6 100644 --- a/tests/array-construct.rs +++ b/tests/array-construct.rs @@ -1,26 +1,23 @@ #![allow( - clippy::many_single_char_names, - clippy::deref_addrof, - clippy::unreadable_literal, - clippy::many_single_char_names + clippy::many_single_char_names, clippy::deref_addrof, clippy::unreadable_literal, clippy::many_single_char_names )] use defmac::defmac; -use ndarray::prelude::*; use ndarray::arr3; +use ndarray::prelude::*; use ndarray::Zip; #[test] -fn test_from_shape_fn() { +fn test_from_shape_fn() +{ let step = 3.1; - let h = Array::from_shape_fn((5, 5), |(i, j)| { - f64::sin(i as f64 / step) * f64::cos(j as f64 / step) - }); + let h = Array::from_shape_fn((5, 5), |(i, j)| f64::sin(i as f64 / step) * f64::cos(j as f64 / step)); assert_eq!(h.shape(), &[5, 5]); } #[test] -fn test_dimension_zero() { +fn test_dimension_zero() +{ let a: Array2 = Array2::from(vec![[], [], []]); assert_eq!(vec![0.; 0], a.into_raw_vec()); let a: Array3 = Array3::from(vec![[[]], [[]], [[]]]); @@ -29,7 +26,8 @@ fn test_dimension_zero() { #[test] #[cfg(feature = "approx")] -fn test_arc_into_owned() { +fn test_arc_into_owned() +{ use approx::assert_abs_diff_ne; let a = Array2::from_elem((5, 5), 1.).into_shared(); @@ -42,7 +40,8 @@ fn test_arc_into_owned() { } #[test] -fn test_arcarray_thread_safe() { +fn 
test_arcarray_thread_safe() +{ fn is_send(_t: &T) {} fn is_sync(_t: &T) {} let a = Array2::from_elem((5, 5), 1.).into_shared(); @@ -54,7 +53,8 @@ fn test_arcarray_thread_safe() { #[test] #[cfg(feature = "std")] #[allow(deprecated)] // uninitialized -fn test_uninit() { +fn test_uninit() +{ unsafe { let mut a = Array::::uninitialized((3, 4).f()); assert_eq!(a.dim(), (3, 4)); @@ -69,7 +69,8 @@ fn test_uninit() { } #[test] -fn test_from_fn_c0() { +fn test_from_fn_c0() +{ let a = Array::from_shape_fn((), |i| i); assert_eq!(a[()], ()); assert_eq!(a.len(), 1); @@ -77,7 +78,8 @@ fn test_from_fn_c0() { } #[test] -fn test_from_fn_c1() { +fn test_from_fn_c1() +{ let a = Array::from_shape_fn(28, |i| i); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt); @@ -85,7 +87,8 @@ fn test_from_fn_c1() { } #[test] -fn test_from_fn_c() { +fn test_from_fn_c() +{ let a = Array::from_shape_fn((4, 7), |i| i); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt); @@ -93,7 +96,8 @@ fn test_from_fn_c() { } #[test] -fn test_from_fn_c3() { +fn test_from_fn_c3() +{ let a = Array::from_shape_fn((4, 3, 7), |i| i); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt); @@ -101,7 +105,8 @@ fn test_from_fn_c3() { } #[test] -fn test_from_fn_f0() { +fn test_from_fn_f0() +{ let a = Array::from_shape_fn(().f(), |i| i); assert_eq!(a[()], ()); assert_eq!(a.len(), 1); @@ -109,7 +114,8 @@ fn test_from_fn_f0() { } #[test] -fn test_from_fn_f1() { +fn test_from_fn_f1() +{ let a = Array::from_shape_fn(28.f(), |i| i); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt); @@ -117,7 +123,8 @@ fn test_from_fn_f1() { } #[test] -fn test_from_fn_f() { +fn test_from_fn_f() +{ let a = Array::from_shape_fn((4, 7).f(), |i| i); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt); @@ -125,7 +132,8 @@ fn test_from_fn_f() { } #[test] -fn test_from_fn_f_with_zero() { +fn test_from_fn_f_with_zero() +{ defmac!(test_from_fn_f_with_zero shape => { let a = Array::from_shape_fn(shape.f(), |i| i); assert_eq!(a.len(), 0); @@ -140,7 +148,8 @@ fn test_from_fn_f_with_zero() { } #[test] -fn test_from_fn_f3() { +fn test_from_fn_f3() +{ let a = Array::from_shape_fn((4, 2, 7).f(), |i| i); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt); @@ -148,7 +157,8 @@ fn test_from_fn_f3() { } #[test] -fn deny_wraparound_from_vec() { +fn deny_wraparound_from_vec() +{ let five = vec![0; 5]; let five_large = Array::from_shape_vec((3, 7, 29, 36760123, 823996703), five.clone()); println!("{:?}", five_large); @@ -158,7 +168,8 @@ fn deny_wraparound_from_vec() { } #[test] -fn test_ones() { +fn test_ones() +{ let mut a = Array::::zeros((2, 3, 4)); a.fill(1.0); let b = Array::::ones((2, 3, 4)); @@ -166,7 +177,8 @@ fn test_ones() { } #[test] -fn test_from_shape_empty_with_neg_stride() { +fn test_from_shape_empty_with_neg_stride() +{ // Issue #998, negative strides for an axis where it doesn't matter. let s = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]; let v = s[..12].to_vec(); @@ -177,7 +189,8 @@ fn test_from_shape_empty_with_neg_stride() { } #[test] -fn test_from_shape_with_neg_stride() { +fn test_from_shape_with_neg_stride() +{ // Issue #998, negative strides for an axis where it doesn't matter. let s = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]; let v = s[..12].to_vec(); @@ -189,7 +202,8 @@ fn test_from_shape_with_neg_stride() { } #[test] -fn test_from_shape_2_2_2_with_neg_stride() { +fn test_from_shape_2_2_2_with_neg_stride() +{ // Issue #998, negative strides for an axis where it doesn't matter. 
let s = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]; let v = s[..12].to_vec(); @@ -204,35 +218,42 @@ fn test_from_shape_2_2_2_with_neg_stride() { #[should_panic] #[test] -fn deny_wraparound_zeros() { +fn deny_wraparound_zeros() +{ //2^64 + 5 = 18446744073709551621 = 3×7×29×36760123×823996703 (5 distinct prime factors) let _five_large = Array::::zeros((3, 7, 29, 36760123, 823996703)); } #[should_panic] #[test] -fn deny_wraparound_reshape() { +fn deny_wraparound_reshape() +{ //2^64 + 5 = 18446744073709551621 = 3×7×29×36760123×823996703 (5 distinct prime factors) let five = Array::::zeros(5); - let _five_large = five.into_shape_with_order((3, 7, 29, 36760123, 823996703)).unwrap(); + let _five_large = five + .into_shape_with_order((3, 7, 29, 36760123, 823996703)) + .unwrap(); } #[should_panic] #[test] -fn deny_wraparound_default() { +fn deny_wraparound_default() +{ let _five_large = Array::::default((3, 7, 29, 36760123, 823996703)); } #[should_panic] #[test] -fn deny_wraparound_from_shape_fn() { +fn deny_wraparound_from_shape_fn() +{ let _five_large = Array::::from_shape_fn((3, 7, 29, 36760123, 823996703), |_| 0.); } #[should_panic] #[test] #[allow(deprecated)] // uninitialized -fn deny_wraparound_uninitialized() { +fn deny_wraparound_uninitialized() +{ unsafe { let _five_large = Array::::uninitialized((3, 7, 29, 36760123, 823996703)); } @@ -240,36 +261,42 @@ fn deny_wraparound_uninitialized() { #[should_panic] #[test] -fn deny_wraparound_uninit() { +fn deny_wraparound_uninit() +{ let _five_large = Array::::uninit((3, 7, 29, 36760123, 823996703)); } #[should_panic] #[test] -fn deny_slice_with_too_many_rows_to_arrayview2() { +fn deny_slice_with_too_many_rows_to_arrayview2() +{ let _view = ArrayView2::from(&[[0u8; 0]; usize::MAX][..]); } #[should_panic] #[test] -fn deny_slice_with_too_many_zero_sized_elems_to_arrayview2() { +fn deny_slice_with_too_many_zero_sized_elems_to_arrayview2() +{ let _view = ArrayView2::from(&[[(); isize::MAX as usize]; isize::MAX as usize][..]); } #[should_panic] #[test] -fn deny_slice_with_too_many_rows_to_arrayviewmut2() { +fn deny_slice_with_too_many_rows_to_arrayviewmut2() +{ let _view = ArrayViewMut2::from(&mut [[0u8; 0]; usize::MAX][..]); } #[should_panic] #[test] -fn deny_slice_with_too_many_zero_sized_elems_to_arrayviewmut2() { +fn deny_slice_with_too_many_zero_sized_elems_to_arrayviewmut2() +{ let _view = ArrayViewMut2::from(&mut [[(); isize::MAX as usize]; isize::MAX as usize][..]); } #[test] -fn maybe_uninit_1() { +fn maybe_uninit_1() +{ use std::mem::MaybeUninit; unsafe { @@ -299,13 +326,10 @@ fn maybe_uninit_1() { // RawArrayViewMut let mut a = Mat::uninit((10, 10)); let v = a.raw_view_mut(); - Zip::from(v) - .for_each(|ptr| *(*ptr).as_mut_ptr() = 1.); + Zip::from(v).for_each(|ptr| *(*ptr).as_mut_ptr() = 1.); let u = a.raw_view_mut().assume_init(); - Zip::from(u) - .for_each(|ptr| assert_eq!(*ptr, 1.)); - + Zip::from(u).for_each(|ptr| assert_eq!(*ptr, 1.)); } } diff --git a/tests/array.rs b/tests/array.rs index 561284869..3f2c38a62 100644 --- a/tests/array.rs +++ b/tests/array.rs @@ -1,9 +1,6 @@ #![allow(non_snake_case)] #![allow( - clippy::many_single_char_names, - clippy::deref_addrof, - clippy::unreadable_literal, - clippy::many_single_char_names, + clippy::many_single_char_names, clippy::deref_addrof, clippy::unreadable_literal, clippy::many_single_char_names, clippy::float_cmp )] @@ -11,9 +8,9 @@ use approx::assert_relative_eq; use defmac::defmac; #[allow(deprecated)] use itertools::{zip, Itertools}; +use ndarray::indices; use ndarray::prelude::*; 
use ndarray::{arr3, rcarr2}; -use ndarray::indices; use ndarray::{Slice, SliceInfo, SliceInfoElem}; use num_complex::Complex; use std::convert::TryFrom; @@ -33,7 +30,8 @@ macro_rules! assert_panics { } #[test] -fn test_matmul_arcarray() { +fn test_matmul_arcarray() +{ let mut A = ArcArray::::zeros((2, 3)); for (i, elt) in A.iter_mut().enumerate() { *elt = i; @@ -49,8 +47,7 @@ fn test_matmul_arcarray() { println!("B = \n{:?}", B); println!("A x B = \n{:?}", c); unsafe { - let result = - ArcArray::from_shape_vec_unchecked((2, 4), vec![20, 23, 26, 29, 56, 68, 80, 92]); + let result = ArcArray::from_shape_vec_unchecked((2, 4), vec![20, 23, 26, 29, 56, 68, 80, 92]); assert_eq!(c.shape(), result.shape()); assert!(c.iter().zip(result.iter()).all(|(a, b)| a == b)); assert!(c == result); @@ -58,23 +55,26 @@ fn test_matmul_arcarray() { } #[allow(unused)] -fn arrayview_shrink_lifetime<'a, 'b: 'a>(view: ArrayView1<'b, f64>) -> ArrayView1<'a, f64> { +fn arrayview_shrink_lifetime<'a, 'b: 'a>(view: ArrayView1<'b, f64>) -> ArrayView1<'a, f64> +{ view.reborrow() } #[allow(unused)] -fn arrayviewmut_shrink_lifetime<'a, 'b: 'a>( - view: ArrayViewMut1<'b, f64>, -) -> ArrayViewMut1<'a, f64> { +fn arrayviewmut_shrink_lifetime<'a, 'b: 'a>(view: ArrayViewMut1<'b, f64>) -> ArrayViewMut1<'a, f64> +{ view.reborrow() } #[test] #[cfg(feature = "std")] -fn test_mat_mul() { +fn test_mat_mul() +{ // smoke test, a big matrix multiplication of uneven size let (n, m) = (45, 33); - let a = ArcArray::linspace(0., ((n * m) - 1) as f32, n as usize * m as usize).into_shape_with_order((n, m)).unwrap(); + let a = ArcArray::linspace(0., ((n * m) - 1) as f32, n as usize * m as usize) + .into_shape_with_order((n, m)) + .unwrap(); let b = ArcArray::eye(m); assert_eq!(a.dot(&b), a); let c = ArcArray::eye(n); @@ -83,7 +83,8 @@ fn test_mat_mul() { #[deny(unsafe_code)] #[test] -fn test_slice() { +fn test_slice() +{ let mut A = ArcArray::::zeros((3, 4, 5)); for (i, elt) in A.iter_mut().enumerate() { *elt = i; @@ -98,13 +99,15 @@ fn test_slice() { #[deny(unsafe_code)] #[test] -fn test_slice_ix0() { +fn test_slice_ix0() +{ let arr = arr0(5); assert_eq!(arr.slice(s![]), aview0(&5)); } #[test] -fn test_slice_edge_cases() { +fn test_slice_edge_cases() +{ let mut arr = Array3::::zeros((3, 4, 5)); arr.slice_collapse(s![0..0;-1, .., ..]); assert_eq!(arr.shape(), &[0, 4, 5]); @@ -114,7 +117,8 @@ fn test_slice_edge_cases() { } #[test] -fn test_slice_inclusive_range() { +fn test_slice_inclusive_range() +{ let arr = array![[1, 2, 3], [4, 5, 6]]; assert_eq!(arr.slice(s![1..=1, 1..=2]), array![[5, 6]]); assert_eq!(arr.slice(s![1..=-1, -2..=2;-1]), array![[6, 5]]); @@ -128,7 +132,8 @@ fn test_slice_inclusive_range() { /// `ArrayView1` and `ArrayView2`, so the compiler needs to determine which /// type is the correct result for the `.slice()` call. 
#[test] -fn test_slice_infer() { +fn test_slice_infer() +{ let a = array![1., 2.]; let b = array![[3., 4.], [5., 6.]]; b.slice(s![..-1, ..]).dot(&a); @@ -136,7 +141,8 @@ fn test_slice_infer() { } #[test] -fn test_slice_with_many_dim() { +fn test_slice_with_many_dim() +{ let mut A = ArcArray::::zeros(&[3, 1, 4, 1, 3, 2, 1][..]); for (i, elt) in A.iter_mut().enumerate() { *elt = i; @@ -163,14 +169,16 @@ fn test_slice_with_many_dim() { } #[test] -fn test_slice_range_variable() { +fn test_slice_range_variable() +{ let range = 1..4; let arr = array![0, 1, 2, 3, 4]; assert_eq!(arr.slice(s![range]), array![1, 2, 3]); } #[test] -fn test_slice_args_eval_range_once() { +fn test_slice_args_eval_range_once() +{ let mut eval_count = 0; { let mut range = || { @@ -184,7 +192,8 @@ fn test_slice_args_eval_range_once() { } #[test] -fn test_slice_args_eval_step_once() { +fn test_slice_args_eval_step_once() +{ let mut eval_count = 0; { let mut step = || { @@ -198,7 +207,8 @@ fn test_slice_args_eval_step_once() { } #[test] -fn test_slice_array_fixed() { +fn test_slice_array_fixed() +{ let mut arr = Array3::::zeros((5, 2, 5)); let info = s![1.., 1, NewAxis, ..;2]; arr.slice(info); @@ -209,7 +219,8 @@ fn test_slice_array_fixed() { } #[test] -fn test_slice_dyninput_array_fixed() { +fn test_slice_dyninput_array_fixed() +{ let mut arr = Array3::::zeros((5, 2, 5)).into_dyn(); let info = s![1.., 1, NewAxis, ..;2]; arr.slice(info); @@ -220,7 +231,8 @@ fn test_slice_dyninput_array_fixed() { } #[test] -fn test_slice_array_dyn() { +fn test_slice_array_dyn() +{ let mut arr = Array3::::zeros((5, 2, 5)); let info = SliceInfo::<_, Ix3, IxDyn>::try_from([ SliceInfoElem::from(1..), @@ -242,7 +254,8 @@ fn test_slice_array_dyn() { } #[test] -fn test_slice_dyninput_array_dyn() { +fn test_slice_dyninput_array_dyn() +{ let mut arr = Array3::::zeros((5, 2, 5)).into_dyn(); let info = SliceInfo::<_, Ix3, IxDyn>::try_from([ SliceInfoElem::from(1..), @@ -264,7 +277,8 @@ fn test_slice_dyninput_array_dyn() { } #[test] -fn test_slice_dyninput_vec_fixed() { +fn test_slice_dyninput_vec_fixed() +{ let mut arr = Array3::::zeros((5, 2, 5)).into_dyn(); let info = &SliceInfo::<_, Ix3, Ix3>::try_from(vec![ SliceInfoElem::from(1..), @@ -286,7 +300,8 @@ fn test_slice_dyninput_vec_fixed() { } #[test] -fn test_slice_dyninput_vec_dyn() { +fn test_slice_dyninput_vec_dyn() +{ let mut arr = Array3::::zeros((5, 2, 5)).into_dyn(); let info = &SliceInfo::<_, Ix3, IxDyn>::try_from(vec![ SliceInfoElem::from(1..), @@ -308,7 +323,8 @@ fn test_slice_dyninput_vec_dyn() { } #[test] -fn test_slice_with_subview_and_new_axis() { +fn test_slice_with_subview_and_new_axis() +{ let mut arr = ArcArray::::zeros((3, 5, 4)); for (i, elt) in arr.iter_mut().enumerate() { *elt = i; @@ -345,7 +361,8 @@ fn test_slice_with_subview_and_new_axis() { } #[test] -fn test_slice_collapse_with_indices() { +fn test_slice_collapse_with_indices() +{ let mut arr = ArcArray::::zeros((3, 5, 4)); for (i, elt) in arr.iter_mut().enumerate() { *elt = i; @@ -384,13 +401,15 @@ fn test_slice_collapse_with_indices() { #[test] #[should_panic] -fn test_slice_collapse_with_newaxis() { +fn test_slice_collapse_with_newaxis() +{ let mut arr = Array2::::zeros((2, 3)); arr.slice_collapse(s![0, 0, NewAxis]); } #[test] -fn test_multislice() { +fn test_multislice() +{ macro_rules! 
do_test { ($arr:expr, $($s:expr),*) => { { @@ -404,7 +423,9 @@ fn test_multislice() { }; } - let mut arr = Array1::from_iter(0..48).into_shape_with_order((8, 6)).unwrap(); + let mut arr = Array1::from_iter(0..48) + .into_shape_with_order((8, 6)) + .unwrap(); assert_eq!( (arr.clone().view_mut(),), @@ -426,7 +447,8 @@ fn test_multislice() { } #[test] -fn test_multislice_intersecting() { +fn test_multislice_intersecting() +{ assert_panics!({ let mut arr = Array2::::zeros((8, 6)); arr.multi_slice_mut((s![3, .., NewAxis], s![3, ..])); @@ -467,34 +489,39 @@ fn test_multislice_intersecting() { #[should_panic] #[test] -fn index_out_of_bounds() { +fn index_out_of_bounds() +{ let mut a = Array::::zeros((3, 4)); a[[3, 2]] = 1; } #[should_panic] #[test] -fn slice_oob() { +fn slice_oob() +{ let a = ArcArray::::zeros((3, 4)); let _vi = a.slice(s![..10, ..]); } #[should_panic] #[test] -fn slice_axis_oob() { +fn slice_axis_oob() +{ let a = ArcArray::::zeros((3, 4)); let _vi = a.slice_axis(Axis(0), Slice::new(0, Some(10), 1)); } #[should_panic] #[test] -fn slice_wrong_dim() { +fn slice_wrong_dim() +{ let a = ArcArray::::zeros(vec![3, 4, 5]); let _vi = a.slice(s![.., ..]); } #[test] -fn test_index() { +fn test_index() +{ let mut A = ArcArray::::zeros((2, 3)); for (i, elt) in A.iter_mut().enumerate() { *elt = i; @@ -515,7 +542,8 @@ fn test_index() { } #[test] -fn test_index_arrays() { +fn test_index_arrays() +{ let a = Array1::from_iter(0..12); assert_eq!(a[1], a[[1]]); let v = a.view().into_shape_with_order((3, 4)).unwrap(); @@ -526,7 +554,8 @@ fn test_index_arrays() { #[test] #[allow(clippy::assign_op_pattern)] -fn test_add() { +fn test_add() +{ let mut A = ArcArray::::zeros((2, 2)); for (i, elt) in A.iter_mut().enumerate() { *elt = i; @@ -541,8 +570,11 @@ fn test_add() { } #[test] -fn test_multidim() { - let mut mat = ArcArray::zeros(2 * 3 * 4 * 5 * 6).into_shape_with_order((2, 3, 4, 5, 6)).unwrap(); +fn test_multidim() +{ + let mut mat = ArcArray::zeros(2 * 3 * 4 * 5 * 6) + .into_shape_with_order((2, 3, 4, 5, 6)) + .unwrap(); mat[(0, 0, 0, 0, 0)] = 22u8; { for (i, elt) in mat.iter_mut().enumerate() { @@ -564,7 +596,8 @@ array([[[ 7, 6], [ 9, 8]]]) */ #[test] -fn test_negative_stride_arcarray() { +fn test_negative_stride_arcarray() +{ let mut mat = ArcArray::zeros((2, 4, 2)); mat[[0, 0, 0]] = 1.0f32; for (i, elt) in mat.iter_mut().enumerate() { @@ -575,9 +608,7 @@ fn test_negative_stride_arcarray() { let vi = mat.slice(s![.., ..;-1, ..;-1]); assert_eq!(vi.shape(), &[2, 4, 2]); // Test against sequential iterator - let seq = [ - 7f32, 6., 5., 4., 3., 2., 1., 0., 15., 14., 13., 12., 11., 10., 9., 8., - ]; + let seq = [7f32, 6., 5., 4., 3., 2., 1., 0., 15., 14., 13., 12., 11., 10., 9., 8.]; for (a, b) in vi.iter().zip(seq.iter()) { assert_eq!(*a, *b); } @@ -592,7 +623,8 @@ fn test_negative_stride_arcarray() { } #[test] -fn test_cow() { +fn test_cow() +{ let mut mat = ArcArray::zeros((2, 2)); mat[[0, 0]] = 1; let n = mat.clone(); @@ -624,7 +656,8 @@ fn test_cow() { } #[test] -fn test_cow_shrink() { +fn test_cow_shrink() +{ // A test for clone-on-write in the case that // mutation shrinks the array and gives it different strides // @@ -659,41 +692,44 @@ fn test_cow_shrink() { #[test] #[cfg(feature = "std")] -fn test_sub() { - let mat = ArcArray::linspace(0., 15., 16).into_shape_with_order((2, 4, 2)).unwrap(); +fn test_sub() +{ + let mat = ArcArray::linspace(0., 15., 16) + .into_shape_with_order((2, 4, 2)) + .unwrap(); let s1 = mat.index_axis(Axis(0), 0); let s2 = mat.index_axis(Axis(0), 1); 
assert_eq!(s1.shape(), &[4, 2]); assert_eq!(s2.shape(), &[4, 2]); - let n = ArcArray::linspace(8., 15., 8).into_shape_with_order((4, 2)).unwrap(); + let n = ArcArray::linspace(8., 15., 8) + .into_shape_with_order((4, 2)) + .unwrap(); assert_eq!(n, s2); - let m = ArcArray::from(vec![2., 3., 10., 11.]).into_shape_with_order((2, 2)).unwrap(); + let m = ArcArray::from(vec![2., 3., 10., 11.]) + .into_shape_with_order((2, 2)) + .unwrap(); assert_eq!(m, mat.index_axis(Axis(1), 1)); } #[should_panic] #[test] #[cfg(feature = "std")] -fn test_sub_oob_1() { - let mat = ArcArray::linspace(0., 15., 16).into_shape_with_order((2, 4, 2)).unwrap(); +fn test_sub_oob_1() +{ + let mat = ArcArray::linspace(0., 15., 16) + .into_shape_with_order((2, 4, 2)) + .unwrap(); mat.index_axis(Axis(0), 2); } #[test] #[cfg(feature = "approx")] -fn test_select() { +fn test_select() +{ use approx::assert_abs_diff_eq; // test for 2-d array - let x = arr2(&[ - [0., 1.], - [1., 0.], - [1., 0.], - [1., 0.], - [1., 0.], - [0., 1.], - [0., 1.], - ]); + let x = arr2(&[[0., 1.], [1., 0.], [1., 0.], [1., 0.], [1., 0.], [0., 1.], [0., 1.]]); let r = x.select(Axis(0), &[1, 3, 5]); let c = x.select(Axis(1), &[1]); let r_target = arr2(&[[1., 0.], [1., 0.], [0., 1.]]); @@ -702,10 +738,7 @@ fn test_select() { assert_abs_diff_eq!(c, c_target.t()); // test for 3-d array - let y = arr3(&[ - [[1., 2., 3.], [1.5, 1.5, 3.]], - [[1., 2., 8.], [1., 2.5, 3.]], - ]); + let y = arr3(&[[[1., 2., 3.], [1.5, 1.5, 3.]], [[1., 2., 8.], [1., 2.5, 3.]]]); let r = y.select(Axis(1), &[1]); let c = y.select(Axis(2), &[1]); let r_target = arr3(&[[[1.5, 1.5, 3.]], [[1., 2.5, 3.]]]); @@ -715,7 +748,8 @@ fn test_select() { } #[test] -fn test_select_1d() { +fn test_select_1d() +{ let x = arr1(&[0, 1, 2, 3, 4, 5, 6]); let r1 = x.select(Axis(0), &[1, 3, 4, 2, 2, 5]); assert_eq!(r1, arr1(&[1, 3, 4, 2, 2, 5])); @@ -728,7 +762,8 @@ fn test_select_1d() { } #[test] -fn diag() { +fn diag() +{ let d = arr2(&[[1., 2., 3.0f32]]).into_diag(); assert_eq!(d.dim(), 1); let a = arr2(&[[1., 2., 3.0f32], [0., 0., 0.]]); @@ -745,7 +780,8 @@ fn diag() { /// Note that this does not check the strides in the "merged" case! #[test] #[allow(clippy::cognitive_complexity)] -fn merge_axes() { +fn merge_axes() +{ macro_rules! 
assert_merged { ($arr:expr, $slice:expr, $take:expr, $into:expr) => { let mut v = $arr.slice($slice); @@ -833,7 +869,8 @@ fn merge_axes() { } #[test] -fn swapaxes() { +fn swapaxes() +{ let mut a = arr2(&[[1., 2.], [3., 4.0f32]]); let b = arr2(&[[1., 3.], [2., 4.0f32]]); assert!(a != b); @@ -846,7 +883,8 @@ fn swapaxes() { } #[test] -fn permuted_axes() { +fn permuted_axes() +{ let a = array![1].index_axis_move(Axis(0), 0); let permuted = a.view().permuted_axes([]); assert_eq!(a, permuted); @@ -855,7 +893,9 @@ fn permuted_axes() { let permuted = a.view().permuted_axes([0]); assert_eq!(a, permuted); - let a = Array::from_iter(0..24).into_shape_with_order((2, 3, 4)).unwrap(); + let a = Array::from_iter(0..24) + .into_shape_with_order((2, 3, 4)) + .unwrap(); let permuted = a.view().permuted_axes([2, 1, 0]); for ((i0, i1, i2), elem) in a.indexed_iter() { assert_eq!(*elem, permuted[(i2, i1, i0)]); @@ -865,7 +905,9 @@ fn permuted_axes() { assert_eq!(*elem, permuted[&[i0, i2, i1][..]]); } - let a = Array::from_iter(0..120).into_shape_with_order((2, 3, 4, 5)).unwrap(); + let a = Array::from_iter(0..120) + .into_shape_with_order((2, 3, 4, 5)) + .unwrap(); let permuted = a.view().permuted_axes([1, 0, 3, 2]); for ((i0, i1, i2, i3), elem) in a.indexed_iter() { assert_eq!(*elem, permuted[(i1, i0, i3, i2)]); @@ -878,14 +920,18 @@ fn permuted_axes() { #[should_panic] #[test] -fn permuted_axes_repeated_axis() { - let a = Array::from_iter(0..24).into_shape_with_order((2, 3, 4)).unwrap(); +fn permuted_axes_repeated_axis() +{ + let a = Array::from_iter(0..24) + .into_shape_with_order((2, 3, 4)) + .unwrap(); a.view().permuted_axes([1, 0, 1]); } #[should_panic] #[test] -fn permuted_axes_missing_axis() { +fn permuted_axes_missing_axis() +{ let a = Array::from_iter(0..24) .into_shape_with_order((2, 3, 4)) .unwrap() @@ -895,13 +941,17 @@ fn permuted_axes_missing_axis() { #[should_panic] #[test] -fn permuted_axes_oob() { - let a = Array::from_iter(0..24).into_shape_with_order((2, 3, 4)).unwrap(); +fn permuted_axes_oob() +{ + let a = Array::from_iter(0..24) + .into_shape_with_order((2, 3, 4)) + .unwrap(); a.view().permuted_axes([1, 0, 3]); } #[test] -fn standard_layout() { +fn standard_layout() +{ let mut a = arr2(&[[1., 2.], [3., 4.0]]); assert!(a.is_standard_layout()); a.swap_axes(0, 1); @@ -919,7 +969,8 @@ fn standard_layout() { } #[test] -fn iter_size_hint() { +fn iter_size_hint() +{ let mut a = arr2(&[[1., 2.], [3., 4.]]); { let mut it = a.iter(); @@ -954,7 +1005,8 @@ fn iter_size_hint() { } #[test] -fn zero_axes() { +fn zero_axes() +{ let mut a = arr1::(&[]); for _ in a.iter() { panic!(); @@ -972,7 +1024,8 @@ fn zero_axes() { } #[test] -fn equality() { +fn equality() +{ let a = arr2(&[[1., 2.], [3., 4.]]); let mut b = arr2(&[[1., 2.], [2., 4.]]); assert!(a != b); @@ -985,7 +1038,8 @@ fn equality() { } #[test] -fn map1() { +fn map1() +{ let a = arr2(&[[1., 2.], [3., 4.]]); let b = a.map(|&x| (x / 3.) 
as isize); assert_eq!(b, arr2(&[[0, 0], [1, 1]])); @@ -995,21 +1049,24 @@ fn map1() { } #[test] -fn mapv_into_any_same_type() { +fn mapv_into_any_same_type() +{ let a: Array = array![[1., 2., 3.], [4., 5., 6.]]; let a_plus_one: Array = array![[2., 3., 4.], [5., 6., 7.]]; assert_eq!(a.mapv_into_any(|a| a + 1.), a_plus_one); } #[test] -fn mapv_into_any_diff_types() { +fn mapv_into_any_diff_types() +{ let a: Array = array![[1., 2., 3.], [4., 5., 6.]]; let a_even: Array = array![[false, true, false], [true, false, true]]; assert_eq!(a.mapv_into_any(|a| a.round() as i32 % 2 == 0), a_even); } #[test] -fn as_slice_memory_order_mut_arcarray() { +fn as_slice_memory_order_mut_arcarray() +{ // Test that mutation breaks sharing for `ArcArray`. let a = rcarr2(&[[1., 2.], [3., 4.0f32]]); let mut b = a.clone(); @@ -1020,7 +1077,8 @@ fn as_slice_memory_order_mut_arcarray() { } #[test] -fn as_slice_memory_order_mut_cowarray() { +fn as_slice_memory_order_mut_cowarray() +{ // Test that mutation breaks sharing for `CowArray`. let a = arr2(&[[1., 2.], [3., 4.0f32]]); let mut b = CowArray::from(a.view()); @@ -1031,7 +1089,8 @@ fn as_slice_memory_order_mut_cowarray() { } #[test] -fn as_slice_memory_order_mut_contiguous_arcarray() { +fn as_slice_memory_order_mut_contiguous_arcarray() +{ // Test that unsharing preserves the strides in the contiguous case for `ArcArray`. let a = rcarr2(&[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]]).reversed_axes(); let mut b = a.clone().slice_move(s![.., ..2]); @@ -1041,7 +1100,8 @@ fn as_slice_memory_order_mut_contiguous_arcarray() { } #[test] -fn as_slice_memory_order_mut_contiguous_cowarray() { +fn as_slice_memory_order_mut_contiguous_cowarray() +{ // Test that unsharing preserves the strides in the contiguous case for `CowArray`. let a = arr2(&[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]]).reversed_axes(); let mut b = CowArray::from(a.slice(s![.., ..2])); @@ -1052,10 +1112,13 @@ fn as_slice_memory_order_mut_contiguous_cowarray() { } #[test] -fn to_slice_memory_order() { +fn to_slice_memory_order() +{ for shape in vec![[2, 0, 3, 5], [2, 1, 3, 5], [2, 4, 3, 5]] { let data: Vec = (0..shape.iter().product()).collect(); - let mut orig = Array1::from(data.clone()).into_shape_with_order(shape).unwrap(); + let mut orig = Array1::from(data.clone()) + .into_shape_with_order(shape) + .unwrap(); for perm in vec![[0, 1, 2, 3], [0, 2, 1, 3], [2, 0, 1, 3]] { let mut a = orig.view_mut().permuted_axes(perm); assert_eq!(a.as_slice_memory_order().unwrap(), &data); @@ -1067,7 +1130,8 @@ fn to_slice_memory_order() { } #[test] -fn to_slice_memory_order_discontiguous() { +fn to_slice_memory_order_discontiguous() +{ let mut orig = Array3::::zeros([3, 2, 4]); assert!(orig .slice(s![.., 1.., ..]) @@ -1088,7 +1152,8 @@ fn to_slice_memory_order_discontiguous() { } #[test] -fn array0_into_scalar() { +fn array0_into_scalar() +{ // With this kind of setup, the `Array`'s pointer is not the same as the // underlying `Vec`'s pointer. let a: Array0 = array![4, 5, 6, 7].index_axis_move(Axis(0), 2); @@ -1103,7 +1168,8 @@ fn array0_into_scalar() { } #[test] -fn array_view0_into_scalar() { +fn array_view0_into_scalar() +{ // With this kind of setup, the `Array`'s pointer is not the same as the // underlying `Vec`'s pointer. let a: Array0 = array![4, 5, 6, 7].index_axis_move(Axis(0), 2); @@ -1118,7 +1184,8 @@ fn array_view0_into_scalar() { } #[test] -fn array_view_mut0_into_scalar() { +fn array_view_mut0_into_scalar() +{ // With this kind of setup, the `Array`'s pointer is not the same as the // underlying `Vec`'s pointer. 
let a: Array0 = array![4, 5, 6, 7].index_axis_move(Axis(0), 2); @@ -1133,7 +1200,8 @@ fn array_view_mut0_into_scalar() { } #[test] -fn owned_array1() { +fn owned_array1() +{ let mut a = Array::from(vec![1, 2, 3, 4]); for elt in a.iter_mut() { *elt = 2; @@ -1158,7 +1226,8 @@ fn owned_array1() { } #[test] -fn owned_array_with_stride() { +fn owned_array_with_stride() +{ let v: Vec<_> = (0..12).collect(); let dim = (2, 3, 2); let strides = (1, 4, 2); @@ -1168,7 +1237,8 @@ fn owned_array_with_stride() { } #[test] -fn owned_array_discontiguous() { +fn owned_array_discontiguous() +{ use std::iter::repeat; let v: Vec<_> = (0..12).flat_map(|x| repeat(x).take(2)).collect(); let dim = (3, 2, 2); @@ -1181,14 +1251,17 @@ fn owned_array_discontiguous() { } #[test] -fn owned_array_discontiguous_drop() { +fn owned_array_discontiguous_drop() +{ use std::cell::RefCell; use std::collections::BTreeSet; use std::rc::Rc; struct InsertOnDrop(Rc>>, Option); - impl Drop for InsertOnDrop { - fn drop(&mut self) { + impl Drop for InsertOnDrop + { + fn drop(&mut self) + { let InsertOnDrop(ref set, ref mut value) = *self; set.borrow_mut().insert(value.take().expect("double drop!")); } @@ -1222,13 +1295,15 @@ macro_rules! assert_matches { } #[test] -fn from_vec_dim_stride_empty_1d() { +fn from_vec_dim_stride_empty_1d() +{ let empty: [f32; 0] = []; assert_matches!(Array::from_shape_vec(0.strides(1), empty.to_vec()), Ok(_)); } #[test] -fn from_vec_dim_stride_0d() { +fn from_vec_dim_stride_0d() +{ let empty: [f32; 0] = []; let one = [1.]; let two = [1., 2.]; @@ -1244,7 +1319,8 @@ fn from_vec_dim_stride_0d() { } #[test] -fn from_vec_dim_stride_2d_1() { +fn from_vec_dim_stride_2d_1() +{ let two = [1., 2.]; let d = Ix2(2, 1); let s = d.default_strides(); @@ -1252,7 +1328,8 @@ fn from_vec_dim_stride_2d_1() { } #[test] -fn from_vec_dim_stride_2d_2() { +fn from_vec_dim_stride_2d_2() +{ let two = [1., 2.]; let d = Ix2(1, 2); let s = d.default_strides(); @@ -1260,7 +1337,8 @@ fn from_vec_dim_stride_2d_2() { } #[test] -fn from_vec_dim_stride_2d_3() { +fn from_vec_dim_stride_2d_3() +{ let a = arr3(&[[[1]], [[2]], [[3]]]); let d = a.raw_dim(); let s = d.default_strides(); @@ -1271,7 +1349,8 @@ fn from_vec_dim_stride_2d_3() { } #[test] -fn from_vec_dim_stride_2d_4() { +fn from_vec_dim_stride_2d_4() +{ let a = arr3(&[[[1]], [[2]], [[3]]]); let d = a.raw_dim(); let s = d.fortran_strides(); @@ -1282,7 +1361,8 @@ fn from_vec_dim_stride_2d_4() { } #[test] -fn from_vec_dim_stride_2d_5() { +fn from_vec_dim_stride_2d_5() +{ let a = arr3(&[[[1, 2, 3]]]); let d = a.raw_dim(); let s = d.fortran_strides(); @@ -1293,7 +1373,8 @@ fn from_vec_dim_stride_2d_5() { } #[test] -fn from_vec_dim_stride_2d_6() { +fn from_vec_dim_stride_2d_6() +{ let a = [1., 2., 3., 4., 5., 6.]; let d = (2, 1, 1); let s = (2, 2, 1); @@ -1305,7 +1386,8 @@ fn from_vec_dim_stride_2d_6() { } #[test] -fn from_vec_dim_stride_2d_7() { +fn from_vec_dim_stride_2d_7() +{ // empty arrays can have 0 strides let a: [f32; 0] = []; // [[]] shape=[4, 0], strides=[0, 1] @@ -1315,7 +1397,8 @@ fn from_vec_dim_stride_2d_7() { } #[test] -fn from_vec_dim_stride_2d_8() { +fn from_vec_dim_stride_2d_8() +{ // strides of length 1 axes can be zero let a = [1.]; let d = (1, 1); @@ -1324,7 +1407,8 @@ fn from_vec_dim_stride_2d_8() { } #[test] -fn from_vec_dim_stride_2d_rejects() { +fn from_vec_dim_stride_2d_rejects() +{ let two = [1., 2.]; let d = (2, 2); let s = (1, 0); @@ -1336,8 +1420,11 @@ fn from_vec_dim_stride_2d_rejects() { } #[test] -fn views() { - let a = ArcArray::from(vec![1, 2, 3, 
4]).into_shape_with_order((2, 2)).unwrap(); +fn views() +{ + let a = ArcArray::from(vec![1, 2, 3, 4]) + .into_shape_with_order((2, 2)) + .unwrap(); let b = a.view(); assert_eq!(a, b); assert_eq!(a.shape(), b.shape()); @@ -1353,8 +1440,11 @@ fn views() { } #[test] -fn view_mut() { - let mut a = ArcArray::from(vec![1, 2, 3, 4]).into_shape_with_order((2, 2)).unwrap(); +fn view_mut() +{ + let mut a = ArcArray::from(vec![1, 2, 3, 4]) + .into_shape_with_order((2, 2)) + .unwrap(); for elt in &mut a.view_mut() { *elt = 0; } @@ -1372,8 +1462,11 @@ fn view_mut() { } #[test] -fn slice_mut() { - let mut a = ArcArray::from(vec![1, 2, 3, 4]).into_shape_with_order((2, 2)).unwrap(); +fn slice_mut() +{ + let mut a = ArcArray::from(vec![1, 2, 3, 4]) + .into_shape_with_order((2, 2)) + .unwrap(); for elt in a.slice_mut(s![.., ..]) { *elt = 0; } @@ -1394,7 +1487,8 @@ fn slice_mut() { } #[test] -fn assign_ops() { +fn assign_ops() +{ let mut a = arr2(&[[1., 2.], [3., 4.]]); let b = arr2(&[[1., 3.], [2., 4.]]); (*&mut a.view_mut()) += &b; @@ -1412,7 +1506,8 @@ fn assign_ops() { } #[test] -fn aview() { +fn aview() +{ let a = arr2(&[[1., 2., 3.], [4., 5., 6.]]); let data = [[1., 2., 3.], [4., 5., 6.]]; let b = aview2(&data); @@ -1421,7 +1516,8 @@ fn aview() { } #[test] -fn aview_mut() { +fn aview_mut() +{ let mut data = [0; 16]; { let mut a = aview_mut1(&mut data).into_shape_with_order((4, 4)).unwrap(); @@ -1434,7 +1530,8 @@ fn aview_mut() { } #[test] -fn transpose_view() { +fn transpose_view() +{ let a = arr2(&[[1, 2], [3, 4]]); let at = a.view().reversed_axes(); assert_eq!(at, arr2(&[[1, 3], [2, 4]])); @@ -1445,7 +1542,8 @@ fn transpose_view() { } #[test] -fn transpose_view_mut() { +fn transpose_view_mut() +{ let mut a = arr2(&[[1, 2], [3, 4]]); let mut at = a.view_mut().reversed_axes(); at[[0, 1]] = 5; @@ -1459,7 +1557,8 @@ fn transpose_view_mut() { #[test] #[allow(clippy::cognitive_complexity)] -fn insert_axis() { +fn insert_axis() +{ defmac!(test_insert orig, index, new => { let res = orig.insert_axis(Axis(index)); assert_eq!(res, new); @@ -1554,7 +1653,8 @@ fn insert_axis() { } #[test] -fn insert_axis_f() { +fn insert_axis_f() +{ defmac!(test_insert_f orig, index, new => { let res = orig.insert_axis(Axis(index)); assert_eq!(res, new); @@ -1601,7 +1701,8 @@ fn insert_axis_f() { } #[test] -fn insert_axis_view() { +fn insert_axis_view() +{ let a = array![[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]; assert_eq!( @@ -1619,7 +1720,8 @@ fn insert_axis_view() { } #[test] -fn arithmetic_broadcast() { +fn arithmetic_broadcast() +{ let mut a = arr2(&[[1., 2.], [3., 4.]]); let b = a.clone() * aview0(&1.); assert_eq!(a, b); @@ -1678,14 +1780,18 @@ fn arithmetic_broadcast() { } #[test] -fn char_array() { +fn char_array() +{ // test compilation & basics of non-numerical array - let cc = ArcArray::from_iter("alphabet".chars()).into_shape_with_order((4, 2)).unwrap(); + let cc = ArcArray::from_iter("alphabet".chars()) + .into_shape_with_order((4, 2)) + .unwrap(); assert!(cc.index_axis(Axis(1), 0) == ArcArray::from_iter("apae".chars())); } #[test] -fn scalar_ops() { +fn scalar_ops() +{ let a = Array::::zeros((5, 5)); let b = &a + 1; let c = (&a + &a + 2) - 3; @@ -1723,7 +1829,8 @@ fn scalar_ops() { #[test] #[cfg(feature = "std")] -fn split_at() { +fn split_at() +{ let mut a = arr2(&[[1., 2.], [3., 4.]]); { @@ -1740,7 +1847,9 @@ fn split_at() { } assert_eq!(a, arr2(&[[1., 5.], [8., 4.]])); - let b = ArcArray::linspace(0., 59., 60).into_shape_with_order((3, 4, 5)).unwrap(); + let b = ArcArray::linspace(0., 59., 60) + 
.into_shape_with_order((3, 4, 5)) + .unwrap(); let (left, right) = b.view().split_at(Axis(2), 2); assert_eq!(left.shape(), [3, 4, 2]); @@ -1761,21 +1870,24 @@ fn split_at() { #[test] #[should_panic] -fn deny_split_at_axis_out_of_bounds() { +fn deny_split_at_axis_out_of_bounds() +{ let a = arr2(&[[1., 2.], [3., 4.]]); a.view().split_at(Axis(2), 0); } #[test] #[should_panic] -fn deny_split_at_index_out_of_bounds() { +fn deny_split_at_index_out_of_bounds() +{ let a = arr2(&[[1., 2.], [3., 4.]]); a.view().split_at(Axis(1), 3); } #[test] #[cfg(feature = "std")] -fn test_range() { +fn test_range() +{ let a = Array::range(0., 5., 1.); assert_eq!(a.len(), 5); assert_eq!(a[0], 0.); @@ -1804,7 +1916,8 @@ fn test_range() { } #[test] -fn test_f_order() { +fn test_f_order() +{ // Test that arrays are logically equal in every way, // even if the underlying memory order is different let c = arr2(&[[1, 2, 3], [4, 5, 6]]); @@ -1826,7 +1939,8 @@ fn test_f_order() { } #[test] -fn to_owned_memory_order() { +fn to_owned_memory_order() +{ // check that .to_owned() makes f-contiguous arrays out of f-contiguous // input. let c = arr2(&[[1, 2, 3], [4, 5, 6]]); @@ -1846,7 +1960,8 @@ fn to_owned_memory_order() { } #[test] -fn to_owned_neg_stride() { +fn to_owned_neg_stride() +{ let mut c = arr2(&[[1, 2, 3], [4, 5, 6]]); c.slice_collapse(s![.., ..;-1]); let co = c.to_owned(); @@ -1855,7 +1970,8 @@ fn to_owned_neg_stride() { } #[test] -fn discontiguous_owned_to_owned() { +fn discontiguous_owned_to_owned() +{ let mut c = arr2(&[[1, 2, 3], [4, 5, 6]]); c.slice_collapse(s![.., ..;2]); @@ -1866,7 +1982,8 @@ fn discontiguous_owned_to_owned() { } #[test] -fn map_memory_order() { +fn map_memory_order() +{ let a = arr3(&[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [0, -1, -2]]]); let mut v = a.view(); v.swap_axes(0, 1); @@ -1876,7 +1993,8 @@ fn map_memory_order() { } #[test] -fn map_mut_with_unsharing() { +fn map_mut_with_unsharing() +{ // Fortran-layout `ArcArray`. 
let a = rcarr2(&[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]]).reversed_axes(); assert_eq!(a.shape(), &[2, 5]); @@ -1903,10 +2021,13 @@ fn map_mut_with_unsharing() { } #[test] -fn test_view_from_shape() { +fn test_view_from_shape() +{ let s = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]; let a = ArrayView::from_shape((2, 3, 2), &s).unwrap(); - let mut answer = Array::from(s.to_vec()).into_shape_with_order((2, 3, 2)).unwrap(); + let mut answer = Array::from(s.to_vec()) + .into_shape_with_order((2, 3, 2)) + .unwrap(); assert_eq!(a, answer); // custom strides (row major) @@ -1924,7 +2045,8 @@ fn test_view_from_shape() { } #[test] -fn test_contiguous() { +fn test_contiguous() +{ let c = arr3(&[[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 7, 7]]]); assert!(c.is_standard_layout()); assert!(c.as_slice_memory_order().is_some()); @@ -1974,7 +2096,8 @@ fn test_contiguous_single_element() } #[test] -fn test_contiguous_neg_strides() { +fn test_contiguous_neg_strides() +{ let s = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]; let a = ArrayView::from_shape((2, 3, 2).strides((1, 4, 2)), &s).unwrap(); assert_eq!( @@ -2032,7 +2155,8 @@ fn test_contiguous_neg_strides() { } #[test] -fn test_swap() { +fn test_swap() +{ let mut a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9]]); let b = a.clone(); @@ -2045,7 +2169,8 @@ fn test_swap() { } #[test] -fn test_uswap() { +fn test_uswap() +{ let mut a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9]]); let b = a.clone(); @@ -2058,7 +2183,8 @@ fn test_uswap() { } #[test] -fn test_shape() { +fn test_shape() +{ let data = [0, 1, 2, 3, 4, 5]; let a = Array::from_shape_vec((1, 2, 3), data.to_vec()).unwrap(); let b = Array::from_shape_vec((1, 2, 3).f(), data.to_vec()).unwrap(); @@ -2072,7 +2198,8 @@ fn test_shape() { } #[test] -fn test_view_from_shape_ptr() { +fn test_view_from_shape_ptr() +{ let data = [0, 1, 2, 3, 4, 5]; let view = unsafe { ArrayView::from_shape_ptr((2, 3), data.as_ptr()) }; assert_eq!(view, aview2(&[[0, 1, 2], [3, 4, 5]])); @@ -2088,45 +2215,42 @@ fn test_view_from_shape_ptr() { #[should_panic(expected = "Unsupported")] #[cfg(debug_assertions)] #[test] -fn test_view_from_shape_ptr_deny_neg_strides() { +fn test_view_from_shape_ptr_deny_neg_strides() +{ let data = [0, 1, 2, 3, 4, 5]; - let _view = unsafe { - ArrayView::from_shape_ptr((2, 3).strides((-3isize as usize, 1)), data.as_ptr()) - }; + let _view = unsafe { ArrayView::from_shape_ptr((2, 3).strides((-3isize as usize, 1)), data.as_ptr()) }; } #[should_panic(expected = "Unsupported")] #[cfg(debug_assertions)] #[test] -fn test_view_mut_from_shape_ptr_deny_neg_strides() { +fn test_view_mut_from_shape_ptr_deny_neg_strides() +{ let mut data = [0, 1, 2, 3, 4, 5]; - let _view = unsafe { - ArrayViewMut::from_shape_ptr((2, 3).strides((-3isize as usize, 1)), data.as_mut_ptr()) - }; + let _view = unsafe { ArrayViewMut::from_shape_ptr((2, 3).strides((-3isize as usize, 1)), data.as_mut_ptr()) }; } #[should_panic(expected = "Unsupported")] #[cfg(debug_assertions)] #[test] -fn test_raw_view_from_shape_ptr_deny_neg_strides() { +fn test_raw_view_from_shape_ptr_deny_neg_strides() +{ let data = [0, 1, 2, 3, 4, 5]; - let _view = unsafe { - RawArrayView::from_shape_ptr((2, 3).strides((-3isize as usize, 1)), data.as_ptr()) - }; + let _view = unsafe { RawArrayView::from_shape_ptr((2, 3).strides((-3isize as usize, 1)), data.as_ptr()) }; } #[should_panic(expected = "Unsupported")] #[cfg(debug_assertions)] #[test] -fn test_raw_view_mut_from_shape_ptr_deny_neg_strides() { +fn test_raw_view_mut_from_shape_ptr_deny_neg_strides() +{ let mut data = [0, 1, 2, 
3, 4, 5]; - let _view = unsafe { - RawArrayViewMut::from_shape_ptr((2, 3).strides((-3isize as usize, 1)), data.as_mut_ptr()) - }; + let _view = unsafe { RawArrayViewMut::from_shape_ptr((2, 3).strides((-3isize as usize, 1)), data.as_mut_ptr()) }; } #[test] -fn test_default() { +fn test_default() +{ let a = as Default>::default(); assert_eq!(a, aview2(&[[0.0; 0]; 0])); @@ -2137,14 +2261,16 @@ fn test_default() { } #[test] -fn test_default_ixdyn() { +fn test_default_ixdyn() +{ let a = as Default>::default(); let b = >::zeros(IxDyn(&[0])); assert_eq!(a, b); } #[test] -fn test_map_axis() { +fn test_map_axis() +{ let a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]); let b = a.map_axis(Axis(0), |view| view.sum()); @@ -2177,7 +2303,8 @@ fn test_map_axis() { } #[test] -fn test_accumulate_axis_inplace_noop() { +fn test_accumulate_axis_inplace_noop() +{ let mut a = Array2::::zeros((0, 3)); a.accumulate_axis_inplace(Axis(0), |&prev, curr| *curr += prev); assert_eq!(a, Array2::zeros((0, 3))); @@ -2219,7 +2346,8 @@ fn test_accumulate_axis_inplace_nonstandard_layout() { } #[test] -fn test_to_vec() { +fn test_to_vec() +{ let mut a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]); a.slice_collapse(s![..;-1, ..]); @@ -2230,7 +2358,8 @@ fn test_to_vec() { } #[test] -fn test_array_clone_unalias() { +fn test_array_clone_unalias() +{ let a = Array::::zeros((3, 3)); let mut b = a.clone(); b.fill(1); @@ -2239,15 +2368,19 @@ fn test_array_clone_unalias() { } #[test] -fn test_array_clone_same_view() { - let mut a = Array::from_iter(0..9).into_shape_with_order((3, 3)).unwrap(); +fn test_array_clone_same_view() +{ + let mut a = Array::from_iter(0..9) + .into_shape_with_order((3, 3)) + .unwrap(); a.slice_collapse(s![..;-1, ..;-1]); let b = a.clone(); assert_eq!(a, b); } #[test] -fn test_array2_from_diag() { +fn test_array2_from_diag() +{ let diag = arr1(&[0, 1, 2]); let x = Array2::from_diag(&diag); let x_exp = arr2(&[[0, 0, 0], [0, 1, 0], [0, 0, 2]]); @@ -2261,7 +2394,8 @@ fn test_array2_from_diag() { } #[test] -fn array_macros() { +fn array_macros() +{ // array let a1 = array![1, 2, 3]; assert_eq!(a1, arr1(&[1, 2, 3])); @@ -2289,7 +2423,8 @@ fn array_macros() { } #[cfg(test)] -mod as_standard_layout_tests { +mod as_standard_layout_tests +{ use super::*; use ndarray::Data; use std::fmt::Debug; @@ -2308,7 +2443,8 @@ mod as_standard_layout_tests { } #[test] - fn test_f_layout() { + fn test_f_layout() + { let shape = (2, 2).f(); let arr = Array::::from_shape_vec(shape, vec![1, 2, 3, 4]).unwrap(); assert!(!arr.is_standard_layout()); @@ -2316,14 +2452,16 @@ mod as_standard_layout_tests { } #[test] - fn test_c_layout() { + fn test_c_layout() + { let arr = Array::::from_shape_vec((2, 2), vec![1, 2, 3, 4]).unwrap(); assert!(arr.is_standard_layout()); test_as_standard_layout_for(arr); } #[test] - fn test_f_layout_view() { + fn test_f_layout_view() + { let shape = (2, 2).f(); let arr = Array::::from_shape_vec(shape, vec![1, 2, 3, 4]).unwrap(); let arr_view = arr.view(); @@ -2332,7 +2470,8 @@ mod as_standard_layout_tests { } #[test] - fn test_c_layout_view() { + fn test_c_layout_view() + { let arr = Array::::from_shape_vec((2, 2), vec![1, 2, 3, 4]).unwrap(); let arr_view = arr.view(); assert!(arr_view.is_standard_layout()); @@ -2340,14 +2479,16 @@ mod as_standard_layout_tests { } #[test] - fn test_zero_dimensional_array() { + fn test_zero_dimensional_array() + { let arr_view = ArrayView1::::from(&[]); assert!(arr_view.is_standard_layout()); test_as_standard_layout_for(arr_view); } #[test] - fn 
test_custom_layout() { + fn test_custom_layout() + { let shape = (1, 2, 3, 2).strides((12, 1, 2, 6)); let arr_data: Vec = (0..12).collect(); let arr = Array::::from_shape_vec(shape, arr_data).unwrap(); @@ -2357,11 +2498,13 @@ mod as_standard_layout_tests { } #[cfg(test)] -mod array_cow_tests { +mod array_cow_tests +{ use super::*; #[test] - fn test_is_variant() { + fn test_is_variant() + { let arr: Array = array![[1, 2], [3, 4]]; let arr_cow = CowArray::::from(arr.view()); assert!(arr_cow.is_view()); @@ -2371,7 +2514,8 @@ mod array_cow_tests { assert!(!arr_cow.is_view()); } - fn run_with_various_layouts(mut f: impl FnMut(Array2)) { + fn run_with_various_layouts(mut f: impl FnMut(Array2)) + { for all in vec![ Array2::from_shape_vec((7, 8), (0..7 * 8).collect()).unwrap(), Array2::from_shape_vec((7, 8).f(), (0..7 * 8).collect()).unwrap(), @@ -2389,7 +2533,8 @@ mod array_cow_tests { } #[test] - fn test_element_mutation() { + fn test_element_mutation() + { run_with_various_layouts(|arr: Array2| { let mut expected = arr.clone(); expected[(1, 1)] = 2; @@ -2409,7 +2554,8 @@ mod array_cow_tests { } #[test] - fn test_clone() { + fn test_clone() + { run_with_various_layouts(|arr: Array2| { let arr_cow = CowArray::::from(arr.view()); let arr_cow_clone = arr_cow.clone(); @@ -2428,11 +2574,10 @@ mod array_cow_tests { } #[test] - fn test_clone_from() { - fn assert_eq_contents_and_layout( - arr1: &CowArray<'_, i32, Ix2>, - arr2: &CowArray<'_, i32, Ix2>, - ) { + fn test_clone_from() + { + fn assert_eq_contents_and_layout(arr1: &CowArray<'_, i32, Ix2>, arr2: &CowArray<'_, i32, Ix2>) + { assert_eq!(arr1, arr2); assert_eq!(arr1.dim(), arr2.dim()); assert_eq!(arr1.strides(), arr2.strides()); @@ -2468,7 +2613,8 @@ mod array_cow_tests { } #[test] - fn test_into_owned() { + fn test_into_owned() + { run_with_various_layouts(|arr: Array2| { let before = CowArray::::from(arr.view()); let after = before.into_owned(); @@ -2484,11 +2630,9 @@ mod array_cow_tests { } #[test] -fn test_remove_index() { - let mut a = arr2(&[[1, 2, 3], - [4, 5, 6], - [7, 8, 9], - [10,11,12]]); +fn test_remove_index() +{ + let mut a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]); a.remove_index(Axis(0), 1); a.remove_index(Axis(1), 2); assert_eq!(a.shape(), &[3, 2]); @@ -2497,10 +2641,7 @@ fn test_remove_index() { [7, 8], [10,11]]); - let mut a = arr2(&[[1, 2, 3], - [4, 5, 6], - [7, 8, 9], - [10,11,12]]); + let mut a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]); a.invert_axis(Axis(0)); a.remove_index(Axis(0), 1); a.remove_index(Axis(1), 2); @@ -2525,19 +2666,18 @@ fn test_remove_index() { []]); } -#[should_panic(expected="must be less")] +#[should_panic(expected = "must be less")] #[test] -fn test_remove_index_oob1() { - let mut a = arr2(&[[1, 2, 3], - [4, 5, 6], - [7, 8, 9], - [10,11,12]]); +fn test_remove_index_oob1() +{ + let mut a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]); a.remove_index(Axis(0), 4); } -#[should_panic(expected="must be less")] +#[should_panic(expected = "must be less")] #[test] -fn test_remove_index_oob2() { +fn test_remove_index_oob2() +{ let mut a = array![[10], [4], [1]]; a.remove_index(Axis(1), 0); assert_eq!(a.shape(), &[3, 0]); @@ -2552,41 +2692,37 @@ fn test_remove_index_oob2() { a.remove_index(Axis(1), 0); // oob } -#[should_panic(expected="index out of bounds")] +#[should_panic(expected = "index out of bounds")] #[test] -fn test_remove_index_oob3() { +fn test_remove_index_oob3() +{ let mut a = array![[10], [4], [1]]; a.remove_index(Axis(2), 0); } #[test] -fn 
test_split_complex_view() { - let a = Array3::from_shape_fn((3, 4, 5), |(i, j, k)| { - Complex::::new(i as f32 * j as f32, k as f32) - }); +fn test_split_complex_view() +{ + let a = Array3::from_shape_fn((3, 4, 5), |(i, j, k)| Complex::::new(i as f32 * j as f32, k as f32)); let Complex { re, im } = a.view().split_complex(); assert_relative_eq!(re.sum(), 90.); assert_relative_eq!(im.sum(), 120.); } #[test] -fn test_split_complex_view_roundtrip() { - let a_re = Array3::from_shape_fn((3,1,5), |(i, j, _k)| { - i * j - }); - let a_im = Array3::from_shape_fn((3,1,5), |(_i, _j, k)| { - k - }); - let a = Array3::from_shape_fn((3,1,5), |(i,j,k)| { - Complex::new(a_re[[i,j,k]], a_im[[i,j,k]]) - }); +fn test_split_complex_view_roundtrip() +{ + let a_re = Array3::from_shape_fn((3, 1, 5), |(i, j, _k)| i * j); + let a_im = Array3::from_shape_fn((3, 1, 5), |(_i, _j, k)| k); + let a = Array3::from_shape_fn((3, 1, 5), |(i, j, k)| Complex::new(a_re[[i, j, k]], a_im[[i, j, k]])); let Complex { re, im } = a.view().split_complex(); assert_eq!(a_re, re); assert_eq!(a_im, im); } #[test] -fn test_split_complex_view_mut() { +fn test_split_complex_view_mut() +{ let eye_scalar = Array2::::eye(4); let eye_complex = Array2::>::eye(4); let mut a = Array2::>::zeros((4, 4)); @@ -2597,7 +2733,8 @@ fn test_split_complex_view_mut() { } #[test] -fn test_split_complex_zerod() { +fn test_split_complex_zerod() +{ let mut a = Array0::from_elem((), Complex::new(42, 32)); let Complex { re, im } = a.view().split_complex(); assert_eq!(re.get(()), Some(&42)); @@ -2608,18 +2745,18 @@ fn test_split_complex_zerod() { } #[test] -fn test_split_complex_permuted() { - let a = Array3::from_shape_fn((3, 4, 5), |(i, j, k)| { - Complex::new(i * k + j, k) - }); - let permuted = a.view().permuted_axes([1,0,2]); +fn test_split_complex_permuted() +{ + let a = Array3::from_shape_fn((3, 4, 5), |(i, j, k)| Complex::new(i * k + j, k)); + let permuted = a.view().permuted_axes([1, 0, 2]); let Complex { re, im } = permuted.split_complex(); assert_eq!(re.get((3,2,4)).unwrap(), &11); assert_eq!(im.get((3,2,4)).unwrap(), &4); } #[test] -fn test_split_complex_invert_axis() { +fn test_split_complex_invert_axis() +{ let mut a = Array::from_shape_fn((2, 3, 2), |(i, j, k)| Complex::new(i as f64 + j as f64, i as f64 + k as f64)); a.invert_axis(Axis(1)); let cmplx = a.view().split_complex(); diff --git a/tests/assign.rs b/tests/assign.rs index 5c300943d..29a6b851a 100644 --- a/tests/assign.rs +++ b/tests/assign.rs @@ -3,7 +3,8 @@ use ndarray::prelude::*; use std::sync::atomic::{AtomicUsize, Ordering}; #[test] -fn assign() { +fn assign() +{ let mut a = arr2(&[[1., 2.], [3., 4.]]); let b = arr2(&[[1., 3.], [2., 4.]]); a.assign(&b); @@ -27,9 +28,9 @@ fn assign() { assert_eq!(a, arr2(&[[0, 0], [3, 4]])); } - #[test] -fn assign_to() { +fn assign_to() +{ let mut a = arr2(&[[1., 2.], [3., 4.]]); let b = arr2(&[[0., 3.], [2., 0.]]); b.assign_to(&mut a); @@ -37,7 +38,8 @@ fn assign_to() { } #[test] -fn move_into_copy() { +fn move_into_copy() +{ let a = arr2(&[[1., 2.], [3., 4.]]); let acopy = a.clone(); let mut b = Array::uninit(a.dim()); @@ -54,13 +56,14 @@ fn move_into_copy() { } #[test] -fn move_into_owned() { +fn move_into_owned() +{ // Test various memory layouts and holes while moving String elements. 
for &use_f_order in &[false, true] { - for &invert_axis in &[0b00, 0b01, 0b10, 0b11] { // bitmask for axis to invert + for &invert_axis in &[0b00, 0b01, 0b10, 0b11] { + // bitmask for axis to invert for &slice in &[false, true] { - let mut a = Array::from_shape_fn((5, 4).set_f(use_f_order), - |idx| format!("{:?}", idx)); + let mut a = Array::from_shape_fn((5, 4).set_f(use_f_order), |idx| format!("{:?}", idx)); if slice { a.slice_collapse(s![1..-1, ..;2]); } @@ -84,10 +87,12 @@ fn move_into_owned() { } #[test] -fn move_into_slicing() { +fn move_into_slicing() +{ // Count correct number of drops when using move_into_uninit and discontiguous arrays (with holes). for &use_f_order in &[false, true] { - for &invert_axis in &[0b00, 0b01, 0b10, 0b11] { // bitmask for axis to invert + for &invert_axis in &[0b00, 0b01, 0b10, 0b11] { + // bitmask for axis to invert let counter = DropCounter::default(); { let (m, n) = (5, 4); @@ -117,7 +122,8 @@ fn move_into_slicing() { } #[test] -fn move_into_diag() { +fn move_into_diag() +{ // Count correct number of drops when using move_into_uninit and discontiguous arrays (with holes). for &use_f_order in &[false, true] { let counter = DropCounter::default(); @@ -142,7 +148,8 @@ fn move_into_diag() { } #[test] -fn move_into_0dim() { +fn move_into_0dim() +{ // Count correct number of drops when using move_into_uninit and discontiguous arrays (with holes). for &use_f_order in &[false, true] { let counter = DropCounter::default(); @@ -169,7 +176,8 @@ fn move_into_0dim() { } #[test] -fn move_into_empty() { +fn move_into_empty() +{ // Count correct number of drops when using move_into_uninit and discontiguous arrays (with holes). for &use_f_order in &[false, true] { let counter = DropCounter::default(); @@ -195,13 +203,14 @@ fn move_into_empty() { } #[test] -fn move_into() { +fn move_into() +{ // Test various memory layouts and holes while moving String elements with move_into for &use_f_order in &[false, true] { - for &invert_axis in &[0b00, 0b01, 0b10, 0b11] { // bitmask for axis to invert + for &invert_axis in &[0b00, 0b01, 0b10, 0b11] { + // bitmask for axis to invert for &slice in &[false, true] { - let mut a = Array::from_shape_fn((5, 4).set_f(use_f_order), - |idx| format!("{:?}", idx)); + let mut a = Array::from_shape_fn((5, 4).set_f(use_f_order), |idx| format!("{:?}", idx)); if slice { a.slice_collapse(s![1..-1, ..;2]); } @@ -223,32 +232,37 @@ fn move_into() { } } - /// This counter can create elements, and then count and verify /// the number of which have actually been dropped again. 
#[derive(Default)] -struct DropCounter { +struct DropCounter +{ created: AtomicUsize, dropped: AtomicUsize, } struct Element<'a>(&'a AtomicUsize); -impl DropCounter { - fn created(&self) -> usize { +impl DropCounter +{ + fn created(&self) -> usize + { self.created.load(Ordering::Relaxed) } - fn dropped(&self) -> usize { + fn dropped(&self) -> usize + { self.dropped.load(Ordering::Relaxed) } - fn element(&self) -> Element<'_> { + fn element(&self) -> Element<'_> + { self.created.fetch_add(1, Ordering::Relaxed); Element(&self.dropped) } - fn assert_drop_count(&self) { + fn assert_drop_count(&self) + { assert_eq!( self.created(), self.dropped(), @@ -259,8 +273,10 @@ impl DropCounter { } } -impl<'a> Drop for Element<'a> { - fn drop(&mut self) { +impl<'a> Drop for Element<'a> +{ + fn drop(&mut self) + { self.0.fetch_add(1, Ordering::Relaxed); } } diff --git a/tests/azip.rs b/tests/azip.rs index d41c019dd..a4bb6ffac 100644 --- a/tests/azip.rs +++ b/tests/azip.rs @@ -1,8 +1,5 @@ #![allow( - clippy::many_single_char_names, - clippy::deref_addrof, - clippy::unreadable_literal, - clippy::many_single_char_names, + clippy::many_single_char_names, clippy::deref_addrof, clippy::unreadable_literal, clippy::many_single_char_names, clippy::float_cmp )] @@ -14,7 +11,8 @@ use itertools::{assert_equal, cloned}; use std::mem::swap; #[test] -fn test_azip1() { +fn test_azip1() +{ let mut a = Array::zeros(62); let mut x = 0; azip!((a in &mut a) { *a = x; x += 1; }); @@ -22,7 +20,8 @@ fn test_azip1() { } #[test] -fn test_azip2() { +fn test_azip2() +{ let mut a = Array::zeros((5, 7)); let b = Array::from_shape_fn(a.dim(), |(i, j)| 1. / (i + 2 * j) as f32); azip!((a in &mut a, &b in &b) *a = b); @@ -30,7 +29,8 @@ fn test_azip2() { } #[test] -fn test_azip2_1() { +fn test_azip2_1() +{ let mut a = Array::zeros((5, 7)); let b = Array::from_shape_fn((5, 10), |(i, j)| 1. / (i + 2 * j) as f32); let b = b.slice(s![..;-1, 3..]); @@ -39,7 +39,8 @@ fn test_azip2_1() { } #[test] -fn test_azip2_3() { +fn test_azip2_3() +{ let mut b = Array::from_shape_fn((5, 10), |(i, j)| 1. / (i + 2 * j) as f32); let mut c = Array::from_shape_fn((5, 10), |(i, j)| f32::exp((i + j) as f32)); let a = b.clone(); @@ -50,7 +51,8 @@ fn test_azip2_3() { #[test] #[cfg(feature = "approx")] -fn test_zip_collect() { +fn test_zip_collect() +{ use approx::assert_abs_diff_eq; // test Zip::map_collect and that it preserves c/f layout. 
@@ -78,7 +80,8 @@ fn test_zip_collect() { #[test] #[cfg(feature = "approx")] -fn test_zip_assign_into() { +fn test_zip_assign_into() +{ use approx::assert_abs_diff_eq; let mut a = Array::::zeros((5, 10)); @@ -92,7 +95,8 @@ fn test_zip_assign_into() { #[test] #[cfg(feature = "approx")] -fn test_zip_assign_into_cell() { +fn test_zip_assign_into_cell() +{ use approx::assert_abs_diff_eq; use std::cell::Cell; @@ -107,33 +111,40 @@ fn test_zip_assign_into_cell() { } #[test] -fn test_zip_collect_drop() { +fn test_zip_collect_drop() +{ use std::cell::RefCell; use std::panic; struct Recorddrop<'a>((usize, usize), &'a RefCell>); - impl<'a> Drop for Recorddrop<'a> { - fn drop(&mut self) { + impl<'a> Drop for Recorddrop<'a> + { + fn drop(&mut self) + { self.1.borrow_mut().push(self.0); } } #[derive(Copy, Clone)] - enum Config { + enum Config + { CC, CF, FF, } - impl Config { - fn a_is_f(self) -> bool { + impl Config + { + fn a_is_f(self) -> bool + { match self { Config::CC | Config::CF => false, _ => true, } } - fn b_is_f(self) -> bool { + fn b_is_f(self) -> bool + { match self { Config::CC => false, _ => true, @@ -178,9 +189,9 @@ fn test_zip_collect_drop() { } } - #[test] -fn test_azip_syntax_trailing_comma() { +fn test_azip_syntax_trailing_comma() +{ let mut b = Array::::zeros((5, 5)); let mut c = Array::::ones((5, 5)); let a = b.clone(); @@ -191,7 +202,8 @@ fn test_azip_syntax_trailing_comma() { #[test] #[cfg(feature = "approx")] -fn test_azip2_sum() { +fn test_azip2_sum() +{ use approx::assert_abs_diff_eq; let c = Array::from_shape_fn((5, 10), |(i, j)| f32::exp((i + j) as f32)); @@ -205,7 +217,8 @@ fn test_azip2_sum() { #[test] #[cfg(feature = "approx")] -fn test_azip3_slices() { +fn test_azip3_slices() +{ use approx::assert_abs_diff_eq; let mut a = [0.; 32]; @@ -225,7 +238,8 @@ fn test_azip3_slices() { #[test] #[cfg(feature = "approx")] -fn test_broadcast() { +fn test_broadcast() +{ use approx::assert_abs_diff_eq; let n = 16; @@ -250,7 +264,8 @@ fn test_broadcast() { #[should_panic] #[test] -fn test_zip_dim_mismatch_1() { +fn test_zip_dim_mismatch_1() +{ let mut a = Array::zeros((5, 7)); let mut d = a.raw_dim(); d[0] += 1; @@ -262,8 +277,11 @@ fn test_zip_dim_mismatch_1() { // Zip::from(A).and(B) // where A is F-contiguous and B contiguous but neither F nor C contiguous. 
#[test] -fn test_contiguous_but_not_c_or_f() { - let a = Array::from_iter(0..27).into_shape_with_order((3, 3, 3)).unwrap(); +fn test_contiguous_but_not_c_or_f() +{ + let a = Array::from_iter(0..27) + .into_shape_with_order((3, 3, 3)) + .unwrap(); // both F order let a = a.reversed_axes(); @@ -286,8 +304,11 @@ fn test_contiguous_but_not_c_or_f() { } #[test] -fn test_clone() { - let a = Array::from_iter(0..27).into_shape_with_order((3, 3, 3)).unwrap(); +fn test_clone() +{ + let a = Array::from_iter(0..27) + .into_shape_with_order((3, 3, 3)) + .unwrap(); let z = Zip::from(&a).and(a.exact_chunks((1, 1, 1))); let w = z.clone(); @@ -303,7 +324,8 @@ fn test_clone() { } #[test] -fn test_indices_0() { +fn test_indices_0() +{ let a1 = arr0(3); let mut count = 0; @@ -316,7 +338,8 @@ fn test_indices_0() { } #[test] -fn test_indices_1() { +fn test_indices_1() +{ let mut a1 = Array::default(12); for (i, elt) in a1.indexed_iter_mut() { *elt = i; @@ -346,7 +369,8 @@ fn test_indices_1() { } #[test] -fn test_indices_2() { +fn test_indices_2() +{ let mut a1 = Array::default((10, 12)); for (i, elt) in a1.indexed_iter_mut() { *elt = i; @@ -376,7 +400,8 @@ fn test_indices_2() { } #[test] -fn test_indices_3() { +fn test_indices_3() +{ let mut a1 = Array::default((4, 5, 6)); for (i, elt) in a1.indexed_iter_mut() { *elt = i; @@ -406,7 +431,8 @@ fn test_indices_3() { } #[test] -fn test_indices_split_1() { +fn test_indices_split_1() +{ for m in (0..4).chain(10..12) { for n in (0..4).chain(10..12) { let a1 = Array::::default((m, n)); @@ -438,7 +464,8 @@ fn test_indices_split_1() { } #[test] -fn test_zip_all() { +fn test_zip_all() +{ let a = Array::::zeros(62); let b = Array::::ones(62); let mut c = Array::::ones(62); @@ -449,7 +476,8 @@ fn test_zip_all() { } #[test] -fn test_zip_all_empty_array() { +fn test_zip_all_empty_array() +{ let a = Array::::zeros(0); let b = Array::::ones(0); assert_eq!(true, Zip::from(&a).and(&b).all(|&_x, &_y| true)); diff --git a/tests/broadcast.rs b/tests/broadcast.rs index 6dee901e2..288ccb38a 100644 --- a/tests/broadcast.rs +++ b/tests/broadcast.rs @@ -2,15 +2,22 @@ use ndarray::prelude::*; #[test] #[cfg(feature = "std")] -fn broadcast_1() { +fn broadcast_1() +{ let a_dim = Dim([2, 4, 2, 2]); let b_dim = Dim([2, 1, 2, 1]); - let a = ArcArray::linspace(0., 1., a_dim.size()).into_shape_with_order(a_dim).unwrap(); - let b = ArcArray::linspace(0., 1., b_dim.size()).into_shape_with_order(b_dim).unwrap(); + let a = ArcArray::linspace(0., 1., a_dim.size()) + .into_shape_with_order(a_dim) + .unwrap(); + let b = ArcArray::linspace(0., 1., b_dim.size()) + .into_shape_with_order(b_dim) + .unwrap(); assert!(b.broadcast(a.dim()).is_some()); let c_dim = Dim([2, 1]); - let c = ArcArray::linspace(0., 1., c_dim.size()).into_shape_with_order(c_dim).unwrap(); + let c = ArcArray::linspace(0., 1., c_dim.size()) + .into_shape_with_order(c_dim) + .unwrap(); assert!(c.broadcast(1).is_none()); assert!(c.broadcast(()).is_none()); assert!(c.broadcast((2, 1)).is_some()); @@ -28,11 +35,16 @@ fn broadcast_1() { #[test] #[cfg(feature = "std")] -fn test_add() { +fn test_add() +{ let a_dim = Dim([2, 4, 2, 2]); let b_dim = Dim([2, 1, 2, 1]); - let mut a = ArcArray::linspace(0.0, 1., a_dim.size()).into_shape_with_order(a_dim).unwrap(); - let b = ArcArray::linspace(0.0, 1., b_dim.size()).into_shape_with_order(b_dim).unwrap(); + let mut a = ArcArray::linspace(0.0, 1., a_dim.size()) + .into_shape_with_order(a_dim) + .unwrap(); + let b = ArcArray::linspace(0.0, 1., b_dim.size()) + .into_shape_with_order(b_dim) + .unwrap(); a 
+= &b; let t = ArcArray::from_elem((), 1.0f32); a += &t; @@ -41,15 +53,19 @@ fn test_add() { #[test] #[should_panic] #[cfg(feature = "std")] -fn test_add_incompat() { +fn test_add_incompat() +{ let a_dim = Dim([2, 4, 2, 2]); - let mut a = ArcArray::linspace(0.0, 1., a_dim.size()).into_shape_with_order(a_dim).unwrap(); + let mut a = ArcArray::linspace(0.0, 1., a_dim.size()) + .into_shape_with_order(a_dim) + .unwrap(); let incompat = ArcArray::from_elem(3, 1.0f32); a += &incompat; } #[test] -fn test_broadcast() { +fn test_broadcast() +{ let (_, n, k) = (16, 16, 16); let x1 = 1.; // b0 broadcast 1 -> n, k @@ -69,7 +85,8 @@ fn test_broadcast() { } #[test] -fn test_broadcast_1d() { +fn test_broadcast_1d() +{ let n = 16; let x1 = 1.; // b0 broadcast 1 -> n diff --git a/tests/clone.rs b/tests/clone.rs index e1914ba7f..4a7e50b8e 100644 --- a/tests/clone.rs +++ b/tests/clone.rs @@ -1,7 +1,8 @@ use ndarray::arr2; #[test] -fn test_clone_from() { +fn test_clone_from() +{ let a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9]]); let b = arr2(&[[7, 7, 7]]); let mut c = b.clone(); diff --git a/tests/complex.rs b/tests/complex.rs index 1b52b2671..824e296a4 100644 --- a/tests/complex.rs +++ b/tests/complex.rs @@ -3,12 +3,14 @@ use ndarray::{arr1, arr2, Axis}; use num_complex::Complex; use num_traits::Num; -fn c(re: T, im: T) -> Complex { +fn c(re: T, im: T) -> Complex +{ Complex::new(re, im) } #[test] -fn complex_mat_mul() { +fn complex_mat_mul() +{ let a = arr2(&[[c(3., 4.), c(2., 0.)], [c(0., -2.), c(3., 0.)]]); let b = (&a * c(3., 0.)).map(|c| 5. * c / c.norm_sqr()); println!("{:>8.2}", b); diff --git a/tests/dimension.rs b/tests/dimension.rs index 5dae5b5a3..6a9207e4c 100644 --- a/tests/dimension.rs +++ b/tests/dimension.rs @@ -7,7 +7,8 @@ use ndarray::{arr2, ArcArray, Array, Axis, Dim, Dimension, IxDyn, RemoveAxis}; use std::hash::{Hash, Hasher}; #[test] -fn insert_axis() { +fn insert_axis() +{ assert_eq!(Dim([]).insert_axis(Axis(0)), Dim([1])); assert_eq!(Dim([3]).insert_axis(Axis(0)), Dim([1, 3])); @@ -41,7 +42,8 @@ fn insert_axis() { } #[test] -fn remove_axis() { +fn remove_axis() +{ assert_eq!(Dim([3]).remove_axis(Axis(0)), Dim([])); assert_eq!(Dim([1, 2]).remove_axis(Axis(0)), Dim([2])); assert_eq!(Dim([4, 5, 6]).remove_axis(Axis(1)), Dim([4, 6])); @@ -55,14 +57,19 @@ fn remove_axis() { let a = ArcArray::::zeros(vec![4, 5, 6]); let _b = a .index_axis_move(Axis(1), 0) - .to_shape((4, 6)).unwrap() - .to_shape(vec![2, 3, 4]).unwrap(); + .to_shape((4, 6)) + .unwrap() + .to_shape(vec![2, 3, 4]) + .unwrap(); } #[test] #[allow(clippy::eq_op)] -fn dyn_dimension() { - let a = arr2(&[[1., 2.], [3., 4.0]]).into_shape_with_order(vec![2, 2]).unwrap(); +fn dyn_dimension() +{ + let a = arr2(&[[1., 2.], [3., 4.0]]) + .into_shape_with_order(vec![2, 2]) + .unwrap(); assert_eq!(&a - &a, Array::zeros(vec![2, 2])); assert_eq!(a[&[0, 0][..]], 1.); assert_eq!(a[[0, 0]], 1.); @@ -75,7 +82,8 @@ fn dyn_dimension() { } #[test] -fn dyn_insert() { +fn dyn_insert() +{ let mut v = vec![2, 3, 4, 5]; let mut dim = Dim(v.clone()); defmac!(test_insert index => { @@ -94,7 +102,8 @@ fn dyn_insert() { } #[test] -fn dyn_remove() { +fn dyn_remove() +{ let mut v = vec![1, 2, 3, 4, 5, 6, 7]; let mut dim = Dim(v.clone()); defmac!(test_remove index => { @@ -113,7 +122,8 @@ fn dyn_remove() { } #[test] -fn fastest_varying_order() { +fn fastest_varying_order() +{ let strides = Dim([2, 8, 4, 1]); let order = strides._fastest_varying_stride_order(); assert_eq!(order.slice(), &[3, 0, 2, 1]); @@ -186,7 +196,8 @@ fn min_stride_axis() { */ #[test] -fn 
max_stride_axis() { +fn max_stride_axis() +{ let a = ArrayF32::zeros(10); assert_eq!(a.max_stride_axis(), Axis(0)); @@ -213,7 +224,8 @@ fn max_stride_axis() { } #[test] -fn test_indexing() { +fn test_indexing() +{ let mut x = Dim([1, 2]); assert_eq!(x[0], 1); @@ -224,7 +236,8 @@ fn test_indexing() { } #[test] -fn test_operations() { +fn test_operations() +{ let mut x = Dim([1, 2]); let mut y = Dim([1, 1]); @@ -241,8 +254,10 @@ fn test_operations() { #[test] #[allow(clippy::cognitive_complexity)] -fn test_hash() { - fn calc_hash(value: &T) -> u64 { +fn test_hash() +{ + fn calc_hash(value: &T) -> u64 + { let mut hasher = std::collections::hash_map::DefaultHasher::new(); value.hash(&mut hasher); hasher.finish() @@ -277,8 +292,10 @@ fn test_hash() { } #[test] -fn test_generic_operations() { - fn test_dim(d: &D) { +fn test_generic_operations() +{ + fn test_dim(d: &D) + { let mut x = d.clone(); x[0] += 1; assert_eq!(x[0], 3); @@ -292,8 +309,10 @@ fn test_generic_operations() { } #[test] -fn test_array_view() { - fn test_dim(d: &D) { +fn test_array_view() +{ + fn test_dim(d: &D) + { assert_eq!(d.as_array_view().sum(), 7); assert_eq!(d.as_array_view().strides(), &[1]); } @@ -306,10 +325,11 @@ fn test_array_view() { #[test] #[cfg(feature = "std")] #[allow(clippy::cognitive_complexity)] -fn test_all_ndindex() { +fn test_all_ndindex() +{ use ndarray::IntoDimension; macro_rules! ndindex { - ($($i:expr),*) => { + ($($i:expr),*) => { for &rev in &[false, true] { // rev is for C / F order let size = $($i *)* 1; @@ -331,8 +351,8 @@ fn test_all_ndindex() { assert_eq!(elt, b[dim]); } } + }; } -} ndindex!(10); ndindex!(10, 4); ndindex!(10, 4, 3); diff --git a/tests/format.rs b/tests/format.rs index 4b21fe39d..35909871f 100644 --- a/tests/format.rs +++ b/tests/format.rs @@ -2,7 +2,8 @@ use ndarray::prelude::*; use ndarray::rcarr1; #[test] -fn formatting() { +fn formatting() +{ let a = rcarr1::(&[1., 2., 3., 4.]); assert_eq!(format!("{}", a), "[1, 2, 3, 4]"); assert_eq!(format!("{:4}", a), "[ 1, 2, 3, 4]"); @@ -55,7 +56,8 @@ fn formatting() { } #[test] -fn debug_format() { +fn debug_format() +{ let a = Array2::::zeros((3, 4)); assert_eq!( format!("{:?}", a), diff --git a/tests/higher_order_f.rs b/tests/higher_order_f.rs index c567eb3e0..72245412f 100644 --- a/tests/higher_order_f.rs +++ b/tests/higher_order_f.rs @@ -2,7 +2,8 @@ use ndarray::prelude::*; #[test] #[should_panic] -fn test_fold_axis_oob() { +fn test_fold_axis_oob() +{ let a = arr2(&[[1., 2.], [3., 4.]]); a.fold_axis(Axis(2), 0., |x, y| x + y); } diff --git a/tests/indices.rs b/tests/indices.rs index ca6ca9887..a9414f9a7 100644 --- a/tests/indices.rs +++ b/tests/indices.rs @@ -3,7 +3,8 @@ use ndarray::prelude::*; use ndarray::Order; #[test] -fn test_ixdyn_index_iterate() { +fn test_ixdyn_index_iterate() +{ for &order in &[Order::C, Order::F] { let mut a = Array::zeros((2, 3, 4).set_f(order.is_column_major())); let dim = a.shape().to_vec(); diff --git a/tests/into-ixdyn.rs b/tests/into-ixdyn.rs index a9383a0e6..6e7bf9607 100644 --- a/tests/into-ixdyn.rs +++ b/tests/into-ixdyn.rs @@ -1,20 +1,19 @@ #![allow( - clippy::many_single_char_names, - clippy::deref_addrof, - clippy::unreadable_literal, - clippy::many_single_char_names, + clippy::many_single_char_names, clippy::deref_addrof, clippy::unreadable_literal, clippy::many_single_char_names, clippy::float_cmp )] use ndarray::prelude::*; #[test] -fn test_arr0_into_dyn() { +fn test_arr0_into_dyn() +{ assert!(arr0(1.234).into_dyn()[IxDyn(&[])] == 1.234); } #[test] -fn 
test_arr2_into_arrd_nonstandard_strides() { +fn test_arr2_into_arrd_nonstandard_strides() +{ let arr = Array2::from_shape_fn((12, 34).f(), |(i, j)| i * 34 + j).into_dyn(); let brr = ArrayD::from_shape_fn(vec![12, 34], |d| d[0] * 34 + d[1]); diff --git a/tests/iterator_chunks.rs b/tests/iterator_chunks.rs index bf3af2d56..79b5403ef 100644 --- a/tests/iterator_chunks.rs +++ b/tests/iterator_chunks.rs @@ -1,8 +1,5 @@ #![allow( - clippy::many_single_char_names, - clippy::deref_addrof, - clippy::unreadable_literal, - clippy::many_single_char_names, + clippy::many_single_char_names, clippy::deref_addrof, clippy::unreadable_literal, clippy::many_single_char_names, clippy::float_cmp )] @@ -10,7 +7,8 @@ use ndarray::prelude::*; #[test] #[cfg(feature = "std")] -fn chunks() { +fn chunks() +{ use ndarray::NdProducer; let a = >::linspace(1., 100., 10 * 10) .into_shape_with_order((10, 10)) @@ -49,13 +47,15 @@ fn chunks() { #[should_panic] #[test] -fn chunks_different_size_1() { +fn chunks_different_size_1() +{ let a = Array::::zeros(vec![2, 3]); a.exact_chunks(vec![2]); } #[test] -fn chunks_ok_size() { +fn chunks_ok_size() +{ let mut a = Array::::zeros(vec![2, 3]); a.fill(1.); let mut c = 0; @@ -69,13 +69,15 @@ fn chunks_ok_size() { #[should_panic] #[test] -fn chunks_different_size_2() { +fn chunks_different_size_2() +{ let a = Array::::zeros(vec![2, 3]); a.exact_chunks(vec![2, 3, 4]); } #[test] -fn chunks_mut() { +fn chunks_mut() +{ let mut a = Array::zeros((7, 8)); for (i, mut chunk) in a.exact_chunks_mut((2, 3)).into_iter().enumerate() { chunk.fill(i); @@ -95,7 +97,8 @@ fn chunks_mut() { #[should_panic] #[test] -fn chunks_different_size_3() { +fn chunks_different_size_3() +{ let mut a = Array::::zeros(vec![2, 3]); a.exact_chunks_mut(vec![2, 3, 4]); } diff --git a/tests/iterators.rs b/tests/iterators.rs index b2cd58ecf..23175fd40 100644 --- a/tests/iterators.rs +++ b/tests/iterators.rs @@ -1,8 +1,5 @@ #![allow( - clippy::many_single_char_names, - clippy::deref_addrof, - clippy::unreadable_literal, - clippy::many_single_char_names + clippy::many_single_char_names, clippy::deref_addrof, clippy::unreadable_literal, clippy::many_single_char_names )] use ndarray::prelude::*; @@ -28,7 +25,8 @@ macro_rules! 
assert_panics { #[test] #[cfg(feature = "std")] -fn double_ended() { +fn double_ended() +{ let a = ArcArray::linspace(0., 7., 8); let mut it = a.iter().cloned(); assert_eq!(it.next(), Some(0.)); @@ -40,7 +38,8 @@ fn double_ended() { } #[test] -fn double_ended_rows() { +fn double_ended_rows() +{ let a = ArcArray::from_iter(0..8).into_shape_clone((4, 2)).unwrap(); let mut row_it = a.rows().into_iter(); assert_equal(row_it.next_back().unwrap(), &[6, 7]); @@ -50,15 +49,23 @@ fn double_ended_rows() { assert!(row_it.next().is_none()); assert!(row_it.next_back().is_none()); - for (row, check) in a.rows().into_iter().rev().zip(&[[6, 7], [4, 5], [2, 3], [0, 1]]) { + for (row, check) in a + .rows() + .into_iter() + .rev() + .zip(&[[6, 7], [4, 5], [2, 3], [0, 1]]) + { assert_equal(row, check); } } #[test] -fn iter_size_hint() { +fn iter_size_hint() +{ // Check that the size hint is correctly computed - let a = ArcArray::from_iter(0..24).into_shape_with_order((2, 3, 4)).unwrap(); + let a = ArcArray::from_iter(0..24) + .into_shape_with_order((2, 3, 4)) + .unwrap(); let mut data = [0; 24]; for (i, elt) in enumerate(&mut data) { *elt = i as i32; @@ -75,7 +82,8 @@ fn iter_size_hint() { #[test] #[cfg(feature = "std")] -fn indexed() { +fn indexed() +{ let a = ArcArray::linspace(0., 7., 8); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt as usize); @@ -95,7 +103,8 @@ fn indexed() { #[test] #[cfg(feature = "std")] -fn as_slice() { +fn as_slice() +{ use ndarray::Data; fn assert_slice_correct(v: &ArrayBase) @@ -152,7 +161,8 @@ fn as_slice() { } #[test] -fn inner_iter() { +fn inner_iter() +{ let a = ArcArray::from_iter(0..12); let a = a.into_shape_with_order((2, 3, 2)).unwrap(); // [[[0, 1], @@ -161,35 +171,30 @@ fn inner_iter() { // [[6, 7], // [8, 9], // ... - assert_equal( - a.rows(), - vec![ + assert_equal(a.rows(), vec![ aview1(&[0, 1]), aview1(&[2, 3]), aview1(&[4, 5]), aview1(&[6, 7]), aview1(&[8, 9]), aview1(&[10, 11]), - ], - ); + ]); let mut b = ArcArray::zeros((2, 3, 2)); b.swap_axes(0, 2); b.assign(&a); - assert_equal( - b.rows(), - vec![ + assert_equal(b.rows(), vec![ aview1(&[0, 1]), aview1(&[2, 3]), aview1(&[4, 5]), aview1(&[6, 7]), aview1(&[8, 9]), aview1(&[10, 11]), - ], - ); + ]); } #[test] -fn inner_iter_corner_cases() { +fn inner_iter_corner_cases() +{ let a0 = ArcArray::::zeros(()); assert_equal(a0.rows(), vec![aview1(&[0])]); @@ -201,9 +206,12 @@ fn inner_iter_corner_cases() { } #[test] -fn inner_iter_size_hint() { +fn inner_iter_size_hint() +{ // Check that the size hint is correctly computed - let a = ArcArray::from_iter(0..24).into_shape_with_order((2, 3, 4)).unwrap(); + let a = ArcArray::from_iter(0..24) + .into_shape_with_order((2, 3, 4)) + .unwrap(); let mut len = 6; let mut it = a.rows().into_iter(); assert_eq!(it.len(), len); @@ -216,7 +224,8 @@ fn inner_iter_size_hint() { #[allow(deprecated)] // into_outer_iter #[test] -fn outer_iter() { +fn outer_iter() +{ let a = ArcArray::from_iter(0..12); let a = a.into_shape_with_order((2, 3, 2)).unwrap(); // [[[0, 1], @@ -225,17 +234,11 @@ fn outer_iter() { // [[6, 7], // [8, 9], // ... 
- assert_equal( - a.outer_iter(), - vec![a.index_axis(Axis(0), 0), a.index_axis(Axis(0), 1)], - ); + assert_equal(a.outer_iter(), vec![a.index_axis(Axis(0), 0), a.index_axis(Axis(0), 1)]); let mut b = ArcArray::zeros((2, 3, 2)); b.swap_axes(0, 2); b.assign(&a); - assert_equal( - b.outer_iter(), - vec![a.index_axis(Axis(0), 0), a.index_axis(Axis(0), 1)], - ); + assert_equal(b.outer_iter(), vec![a.index_axis(Axis(0), 0), a.index_axis(Axis(0), 1)]); let mut found_rows = Vec::new(); for sub in b.outer_iter() { @@ -259,10 +262,7 @@ fn outer_iter() { let mut cv = c.slice_mut(s![..;-1, ..;-1, ..;-1]); cv.assign(&a); assert_eq!(&a, &cv); - assert_equal( - cv.outer_iter(), - vec![a.index_axis(Axis(0), 0), a.index_axis(Axis(0), 1)], - ); + assert_equal(cv.outer_iter(), vec![a.index_axis(Axis(0), 0), a.index_axis(Axis(0), 1)]); let mut found_rows = Vec::new(); for sub in cv.outer_iter() { @@ -275,7 +275,8 @@ fn outer_iter() { } #[test] -fn axis_iter() { +fn axis_iter() +{ let a = ArcArray::from_iter(0..12); let a = a.into_shape_with_order((2, 3, 2)).unwrap(); // [[[0, 1], @@ -284,18 +285,16 @@ fn axis_iter() { // [[6, 7], // [8, 9], // ... - assert_equal( - a.axis_iter(Axis(1)), - vec![ + assert_equal(a.axis_iter(Axis(1)), vec![ a.index_axis(Axis(1), 0), a.index_axis(Axis(1), 1), a.index_axis(Axis(1), 2), - ], - ); + ]); } #[test] -fn axis_iter_split_at() { +fn axis_iter_split_at() +{ let a = Array::from_iter(0..5); let iter = a.axis_iter(Axis(0)); let all: Vec<_> = iter.clone().collect(); @@ -307,7 +306,8 @@ fn axis_iter_split_at() { } #[test] -fn axis_iter_split_at_partially_consumed() { +fn axis_iter_split_at_partially_consumed() +{ let a = Array::from_iter(0..5); let mut iter = a.axis_iter(Axis(0)); while iter.next().is_some() { @@ -321,7 +321,8 @@ fn axis_iter_split_at_partially_consumed() { } #[test] -fn axis_iter_zip() { +fn axis_iter_zip() +{ let a = Array::from_iter(0..5); let iter = a.axis_iter(Axis(0)); let mut b = Array::zeros(5); @@ -330,20 +331,24 @@ fn axis_iter_zip() { } #[test] -fn axis_iter_zip_partially_consumed() { +fn axis_iter_zip_partially_consumed() +{ let a = Array::from_iter(0..5); let mut iter = a.axis_iter(Axis(0)); let mut consumed = 0; while iter.next().is_some() { consumed += 1; let mut b = Array::zeros(a.len() - consumed); - Zip::from(&mut b).and(iter.clone()).for_each(|b, a| *b = a[()]); + Zip::from(&mut b) + .and(iter.clone()) + .for_each(|b, a| *b = a[()]); assert_eq!(a.slice(s![consumed..]), b); } } #[test] -fn axis_iter_zip_partially_consumed_discontiguous() { +fn axis_iter_zip_partially_consumed_discontiguous() +{ let a = Array::from_iter(0..5); let mut iter = a.axis_iter(Axis(0)); let mut consumed = 0; @@ -351,13 +356,16 @@ fn axis_iter_zip_partially_consumed_discontiguous() { consumed += 1; let mut b = Array::zeros((a.len() - consumed) * 2); b.slice_collapse(s![..;2]); - Zip::from(&mut b).and(iter.clone()).for_each(|b, a| *b = a[()]); + Zip::from(&mut b) + .and(iter.clone()) + .for_each(|b, a| *b = a[()]); assert_eq!(a.slice(s![consumed..]), b); } } #[test] -fn outer_iter_corner_cases() { +fn outer_iter_corner_cases() +{ let a2 = ArcArray::::zeros((0, 3)); assert_equal(a2.outer_iter(), vec![aview1(&[]); 0]); @@ -367,7 +375,8 @@ fn outer_iter_corner_cases() { #[allow(deprecated)] #[test] -fn outer_iter_mut() { +fn outer_iter_mut() +{ let a = ArcArray::from_iter(0..12); let a = a.into_shape_with_order((2, 3, 2)).unwrap(); // [[[0, 1], @@ -379,10 +388,7 @@ fn outer_iter_mut() { let mut b = ArcArray::zeros((2, 3, 2)); b.swap_axes(0, 2); b.assign(&a); - 
assert_equal( - b.outer_iter_mut(), - vec![a.index_axis(Axis(0), 0), a.index_axis(Axis(0), 1)], - ); + assert_equal(b.outer_iter_mut(), vec![a.index_axis(Axis(0), 0), a.index_axis(Axis(0), 1)]); let mut found_rows = Vec::new(); for sub in b.outer_iter_mut() { @@ -394,7 +400,8 @@ fn outer_iter_mut() { } #[test] -fn axis_iter_mut() { +fn axis_iter_mut() +{ let a = ArcArray::from_iter(0..12); let a = a.into_shape_with_order((2, 3, 2)).unwrap(); // [[[0, 1], @@ -414,44 +421,36 @@ fn axis_iter_mut() { } #[test] -fn axis_chunks_iter() { +fn axis_chunks_iter() +{ let a = ArcArray::from_iter(0..24); let a = a.into_shape_with_order((2, 6, 2)).unwrap(); let it = a.axis_chunks_iter(Axis(1), 2); - assert_equal( - it, - vec![ + assert_equal(it, vec![ arr3(&[[[0, 1], [2, 3]], [[12, 13], [14, 15]]]), arr3(&[[[4, 5], [6, 7]], [[16, 17], [18, 19]]]), arr3(&[[[8, 9], [10, 11]], [[20, 21], [22, 23]]]), - ], - ); + ]); let a = ArcArray::from_iter(0..28); let a = a.into_shape_with_order((2, 7, 2)).unwrap(); let it = a.axis_chunks_iter(Axis(1), 2); - assert_equal( - it, - vec![ + assert_equal(it, vec![ arr3(&[[[0, 1], [2, 3]], [[14, 15], [16, 17]]]), arr3(&[[[4, 5], [6, 7]], [[18, 19], [20, 21]]]), arr3(&[[[8, 9], [10, 11]], [[22, 23], [24, 25]]]), arr3(&[[[12, 13]], [[26, 27]]]), - ], - ); + ]); let it = a.axis_chunks_iter(Axis(1), 2).rev(); - assert_equal( - it, - vec![ + assert_equal(it, vec![ arr3(&[[[12, 13]], [[26, 27]]]), arr3(&[[[8, 9], [10, 11]], [[22, 23], [24, 25]]]), arr3(&[[[4, 5], [6, 7]], [[18, 19], [20, 21]]]), arr3(&[[[0, 1], [2, 3]], [[14, 15], [16, 17]]]), - ], - ); + ]); let it = a.axis_chunks_iter(Axis(1), 7); assert_equal(it, vec![a.view()]); @@ -461,7 +460,8 @@ fn axis_chunks_iter() { } #[test] -fn axis_iter_mut_split_at() { +fn axis_iter_mut_split_at() +{ let mut a = Array::from_iter(0..5); let mut a_clone = a.clone(); let all: Vec<_> = a_clone.axis_iter_mut(Axis(0)).collect(); @@ -473,7 +473,8 @@ fn axis_iter_mut_split_at() { } #[test] -fn axis_iter_mut_split_at_partially_consumed() { +fn axis_iter_mut_split_at_partially_consumed() +{ let mut a = Array::from_iter(0..5); for consumed in 1..=a.len() { for mid in 0..=(a.len() - consumed) { @@ -499,7 +500,8 @@ fn axis_iter_mut_split_at_partially_consumed() { } #[test] -fn axis_iter_mut_zip() { +fn axis_iter_mut_zip() +{ let orig = Array::from_iter(0..5); let mut cloned = orig.clone(); let iter = cloned.axis_iter_mut(Axis(0)); @@ -513,7 +515,8 @@ fn axis_iter_mut_zip() { } #[test] -fn axis_iter_mut_zip_partially_consumed() { +fn axis_iter_mut_zip_partially_consumed() +{ let mut a = Array::from_iter(0..5); for consumed in 1..=a.len() { let remaining = a.len() - consumed; @@ -528,7 +531,8 @@ fn axis_iter_mut_zip_partially_consumed() { } #[test] -fn axis_iter_mut_zip_partially_consumed_discontiguous() { +fn axis_iter_mut_zip_partially_consumed_discontiguous() +{ let mut a = Array::from_iter(0..5); for consumed in 1..=a.len() { let remaining = a.len() - consumed; @@ -545,27 +549,27 @@ fn axis_iter_mut_zip_partially_consumed_discontiguous() { #[test] #[cfg(feature = "std")] -fn axis_chunks_iter_corner_cases() { +fn axis_chunks_iter_corner_cases() +{ // examples provided by @bluss in PR #65 // these tests highlight corner cases of the axis_chunks_iter implementation // and enable checking if no pointer offsetting is out of bounds. However // checking the absence of of out of bounds offsetting cannot (?) be // done automatically, so one has to launch this test in a debugger. 
- let a = ArcArray::::linspace(0., 7., 8).into_shape_with_order((8, 1)).unwrap(); + let a = ArcArray::::linspace(0., 7., 8) + .into_shape_with_order((8, 1)) + .unwrap(); let it = a.axis_chunks_iter(Axis(0), 4); assert_equal(it, vec![a.slice(s![..4, ..]), a.slice(s![4.., ..])]); let a = a.slice(s![..;-1,..]); let it = a.axis_chunks_iter(Axis(0), 8); assert_equal(it, vec![a.view()]); let it = a.axis_chunks_iter(Axis(0), 3); - assert_equal( - it, - vec![ + assert_equal(it, vec![ array![[7.], [6.], [5.]], array![[4.], [3.], [2.]], array![[1.], [0.]], - ], - ); + ]); let b = ArcArray::::zeros((8, 2)); let a = b.slice(s![1..;2,..]); @@ -577,10 +581,13 @@ fn axis_chunks_iter_corner_cases() { } #[test] -fn axis_chunks_iter_zero_stride() { +fn axis_chunks_iter_zero_stride() +{ { // stride 0 case - let b = Array::from(vec![0f32; 0]).into_shape_with_order((5, 0, 3)).unwrap(); + let b = Array::from(vec![0f32; 0]) + .into_shape_with_order((5, 0, 3)) + .unwrap(); let shapes: Vec<_> = b .axis_chunks_iter(Axis(0), 2) .map(|v| v.raw_dim()) @@ -590,7 +597,9 @@ fn axis_chunks_iter_zero_stride() { { // stride 0 case reverse - let b = Array::from(vec![0f32; 0]).into_shape_with_order((5, 0, 3)).unwrap(); + let b = Array::from(vec![0f32; 0]) + .into_shape_with_order((5, 0, 3)) + .unwrap(); let shapes: Vec<_> = b .axis_chunks_iter(Axis(0), 2) .rev() @@ -609,19 +618,22 @@ fn axis_chunks_iter_zero_stride() { #[should_panic] #[test] -fn axis_chunks_iter_zero_chunk_size() { +fn axis_chunks_iter_zero_chunk_size() +{ let a = Array::from_iter(0..5); a.axis_chunks_iter(Axis(0), 0); } #[test] -fn axis_chunks_iter_zero_axis_len() { +fn axis_chunks_iter_zero_axis_len() +{ let a = Array::from_iter(0..0); assert!(a.axis_chunks_iter(Axis(0), 5).next().is_none()); } #[test] -fn axis_chunks_iter_split_at() { +fn axis_chunks_iter_split_at() +{ let mut a = Array2::::zeros((11, 3)); a.iter_mut().enumerate().for_each(|(i, elt)| *elt = i); for source in &[ @@ -648,7 +660,8 @@ fn axis_chunks_iter_split_at() { } #[test] -fn axis_chunks_iter_mut() { +fn axis_chunks_iter_mut() +{ let a = ArcArray::from_iter(0..24); let mut a = a.into_shape_with_order((2, 6, 2)).unwrap(); @@ -660,21 +673,26 @@ fn axis_chunks_iter_mut() { #[should_panic] #[test] -fn axis_chunks_iter_mut_zero_chunk_size() { +fn axis_chunks_iter_mut_zero_chunk_size() +{ let mut a = Array::from_iter(0..5); a.axis_chunks_iter_mut(Axis(0), 0); } #[test] -fn axis_chunks_iter_mut_zero_axis_len() { +fn axis_chunks_iter_mut_zero_axis_len() +{ let mut a = Array::from_iter(0..0); assert!(a.axis_chunks_iter_mut(Axis(0), 5).next().is_none()); } #[test] -fn outer_iter_size_hint() { +fn outer_iter_size_hint() +{ // Check that the size hint is correctly computed - let a = ArcArray::from_iter(0..24).into_shape_with_order((4, 3, 2)).unwrap(); + let a = ArcArray::from_iter(0..24) + .into_shape_with_order((4, 3, 2)) + .unwrap(); let mut len = 4; let mut it = a.outer_iter(); assert_eq!(it.len(), len); @@ -705,8 +723,11 @@ fn outer_iter_size_hint() { } #[test] -fn outer_iter_split_at() { - let a = ArcArray::from_iter(0..30).into_shape_with_order((5, 3, 2)).unwrap(); +fn outer_iter_split_at() +{ + let a = ArcArray::from_iter(0..30) + .into_shape_with_order((5, 3, 2)) + .unwrap(); let it = a.outer_iter(); let (mut itl, mut itr) = it.clone().split_at(2); @@ -727,16 +748,22 @@ fn outer_iter_split_at() { #[test] #[should_panic] -fn outer_iter_split_at_panics() { - let a = ArcArray::from_iter(0..30).into_shape_with_order((5, 3, 2)).unwrap(); +fn outer_iter_split_at_panics() +{ + let a = 
ArcArray::from_iter(0..30) + .into_shape_with_order((5, 3, 2)) + .unwrap(); let it = a.outer_iter(); it.split_at(6); } #[test] -fn outer_iter_mut_split_at() { - let mut a = ArcArray::from_iter(0..30).into_shape_with_order((5, 3, 2)).unwrap(); +fn outer_iter_mut_split_at() +{ + let mut a = ArcArray::from_iter(0..30) + .into_shape_with_order((5, 3, 2)) + .unwrap(); { let it = a.outer_iter_mut(); @@ -755,12 +782,15 @@ fn outer_iter_mut_split_at() { } #[test] -fn iterators_are_send_sync() { +fn iterators_are_send_sync() +{ // When the element type is Send + Sync, then the iterators and views // are too. fn _send_sync(_: &T) {} - let mut a = ArcArray::from_iter(0..30).into_shape_with_order((5, 3, 2)).unwrap(); + let mut a = ArcArray::from_iter(0..30) + .into_shape_with_order((5, 3, 2)) + .unwrap(); _send_sync(&a.view()); _send_sync(&a.view_mut()); @@ -785,7 +815,8 @@ fn iterators_are_send_sync() { #[test] #[allow(clippy::unnecessary_fold)] -fn test_fold() { +fn test_fold() +{ let mut a = Array2::::default((20, 20)); a += 1; let mut iter = a.iter(); @@ -798,7 +829,8 @@ fn test_fold() { } #[test] -fn nth_back_examples() { +fn nth_back_examples() +{ let mut a: Array1 = (0..256).collect(); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); assert_eq!(a.iter().nth_back(0), Some(&a[a.len() - 1])); @@ -811,7 +843,8 @@ fn nth_back_examples() { } #[test] -fn nth_back_zero_n() { +fn nth_back_zero_n() +{ let mut a: Array1 = (0..256).collect(); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); let mut iter1 = a.iter(); @@ -823,7 +856,8 @@ fn nth_back_zero_n() { } #[test] -fn nth_back_nonzero_n() { +fn nth_back_nonzero_n() +{ let mut a: Array1 = (0..256).collect(); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); let mut iter1 = a.iter(); @@ -839,7 +873,8 @@ fn nth_back_nonzero_n() { } #[test] -fn nth_back_past_end() { +fn nth_back_past_end() +{ let mut a: Array1 = (0..256).collect(); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); let mut iter = a.iter(); @@ -848,7 +883,8 @@ fn nth_back_past_end() { } #[test] -fn nth_back_partially_consumed() { +fn nth_back_partially_consumed() +{ let mut a: Array1 = (0..256).collect(); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); let mut iter = a.iter(); @@ -866,7 +902,8 @@ fn nth_back_partially_consumed() { } #[test] -fn test_rfold() { +fn test_rfold() +{ { let mut a = Array1::::default(256); a += 1; @@ -912,32 +949,40 @@ fn test_rfold() { } #[test] -fn test_into_iter() { +fn test_into_iter() +{ let a = Array1::from(vec![1, 2, 3, 4]); let v = a.into_iter().collect::>(); assert_eq!(v, [1, 2, 3, 4]); } #[test] -fn test_into_iter_2d() { - let a = Array1::from(vec![1, 2, 3, 4]).into_shape_with_order((2, 2)).unwrap(); +fn test_into_iter_2d() +{ + let a = Array1::from(vec![1, 2, 3, 4]) + .into_shape_with_order((2, 2)) + .unwrap(); let v = a.into_iter().collect::>(); assert_eq!(v, [1, 2, 3, 4]); - let a = Array1::from(vec![1, 2, 3, 4]).into_shape_with_order((2, 2)).unwrap().reversed_axes(); + let a = Array1::from(vec![1, 2, 3, 4]) + .into_shape_with_order((2, 2)) + .unwrap() + .reversed_axes(); let v = a.into_iter().collect::>(); assert_eq!(v, [1, 3, 2, 4]); } #[test] -fn test_into_iter_sliced() { +fn test_into_iter_sliced() +{ let (m, n) = (4, 5); let drops = Cell::new(0); for i in 0..m - 1 { for j in 0..n - 1 { - for i2 in i + 1 .. m { - for j2 in j + 1 .. 
n { + for i2 in i + 1..m { + for j2 in j + 1..n { for invert in 0..3 { drops.set(0); let i = i as isize; @@ -946,7 +991,8 @@ fn test_into_iter_sliced() { let j2 = j2 as isize; let mut a = Array1::from_iter(0..(m * n) as i32) .mapv(|v| DropCount::new(v, &drops)) - .into_shape_with_order((m, n)).unwrap(); + .into_shape_with_order((m, n)) + .unwrap(); a.slice_collapse(s![i..i2, j..j2]); if invert < a.ndim() { a.invert_axis(Axis(invert)); @@ -973,26 +1019,37 @@ fn test_into_iter_sliced() { /// /// Compares equal by its "represented value". #[derive(Clone, Debug)] -struct DropCount<'a> { +struct DropCount<'a> +{ value: i32, my_drops: usize, - drops: &'a Cell + drops: &'a Cell, } -impl PartialEq for DropCount<'_> { - fn eq(&self, other: &Self) -> bool { +impl PartialEq for DropCount<'_> +{ + fn eq(&self, other: &Self) -> bool + { self.value == other.value } } -impl<'a> DropCount<'a> { - fn new(value: i32, drops: &'a Cell) -> Self { - DropCount { value, my_drops: 0, drops } +impl<'a> DropCount<'a> +{ + fn new(value: i32, drops: &'a Cell) -> Self + { + DropCount { + value, + my_drops: 0, + drops, + } } } -impl Drop for DropCount<'_> { - fn drop(&mut self) { +impl Drop for DropCount<'_> +{ + fn drop(&mut self) + { assert_eq!(self.my_drops, 0); self.my_drops += 1; self.drops.set(self.drops.get() + 1); diff --git a/tests/ix0.rs b/tests/ix0.rs index c8c6c73aa..f1038556a 100644 --- a/tests/ix0.rs +++ b/tests/ix0.rs @@ -1,8 +1,5 @@ #![allow( - clippy::many_single_char_names, - clippy::deref_addrof, - clippy::unreadable_literal, - clippy::many_single_char_names, + clippy::many_single_char_names, clippy::deref_addrof, clippy::unreadable_literal, clippy::many_single_char_names, clippy::float_cmp )] @@ -11,7 +8,8 @@ use ndarray::Ix0; use ndarray::ShapeBuilder; #[test] -fn test_ix0() { +fn test_ix0() +{ let mut a = Array::zeros(Ix0()); assert_eq!(a[()], 0.); a[()] = 1.; @@ -30,7 +28,8 @@ fn test_ix0() { } #[test] -fn test_ix0_add() { +fn test_ix0_add() +{ let mut a = Array::zeros(Ix0()); a += 1.; assert_eq!(a[()], 1.); @@ -39,7 +38,8 @@ fn test_ix0_add() { } #[test] -fn test_ix0_add_add() { +fn test_ix0_add_add() +{ let mut a = Array::zeros(Ix0()); a += 1.; let mut b = Array::zeros(Ix0()); @@ -49,7 +49,8 @@ fn test_ix0_add_add() { } #[test] -fn test_ix0_add_broad() { +fn test_ix0_add_broad() +{ let mut b = Array::from(vec![5., 6.]); let mut a = Array::zeros(Ix0()); a += 1.; diff --git a/tests/ixdyn.rs b/tests/ixdyn.rs index 5b7ef9327..05f123ba1 100644 --- a/tests/ixdyn.rs +++ b/tests/ixdyn.rs @@ -1,19 +1,17 @@ #![allow( - clippy::many_single_char_names, - clippy::deref_addrof, - clippy::unreadable_literal, - clippy::many_single_char_names, + clippy::many_single_char_names, clippy::deref_addrof, clippy::unreadable_literal, clippy::many_single_char_names, clippy::float_cmp )] use ndarray::Array; use ndarray::IntoDimension; -use ndarray::ShapeBuilder; use ndarray::Ix3; use ndarray::Order; +use ndarray::ShapeBuilder; #[test] -fn test_ixdyn() { +fn test_ixdyn() +{ // check that we can use fixed size arrays for indexing let mut a = Array::zeros(vec![2, 3, 4]); a[[1, 1, 1]] = 1.; @@ -22,7 +20,8 @@ fn test_ixdyn() { #[should_panic] #[test] -fn test_ixdyn_wrong_dim() { +fn test_ixdyn_wrong_dim() +{ // check that we can use but it panics at runtime, if number of axes is wrong let mut a = Array::zeros(vec![2, 3, 4]); a[[1, 1, 1]] = 1.; @@ -31,7 +30,8 @@ fn test_ixdyn_wrong_dim() { } #[test] -fn test_ixdyn_out_of_bounds() { +fn test_ixdyn_out_of_bounds() +{ // check that we are out of bounds let a = 
Array::::zeros(vec![2, 3, 4]); let res = a.get([0, 3, 0]); @@ -39,7 +39,8 @@ fn test_ixdyn_out_of_bounds() { } #[test] -fn test_ixdyn_iterate() { +fn test_ixdyn_iterate() +{ for &order in &[Order::C, Order::F] { let mut a = Array::zeros((2, 3, 4).set_f(order.is_column_major())); let dim = a.shape().to_vec(); @@ -59,7 +60,8 @@ fn test_ixdyn_iterate() { } #[test] -fn test_ixdyn_index_iterate() { +fn test_ixdyn_index_iterate() +{ for &order in &[Order::C, Order::F] { let mut a = Array::zeros((2, 3, 4).set_f(order.is_column_major())); let dim = a.shape().to_vec(); @@ -78,7 +80,8 @@ fn test_ixdyn_index_iterate() { } #[test] -fn test_ixdyn_uget() { +fn test_ixdyn_uget() +{ // check that we are out of bounds let mut a = Array::::zeros(vec![2, 3, 4]); @@ -107,7 +110,8 @@ fn test_ixdyn_uget() { } #[test] -fn test_0() { +fn test_0() +{ let mut a = Array::zeros(vec![]); let z = vec![].into_dimension(); assert_eq!(a[z.clone()], 0.); @@ -127,7 +131,8 @@ fn test_0() { } #[test] -fn test_0_add() { +fn test_0_add() +{ let mut a = Array::zeros(vec![]); a += 1.; assert_eq!(a[[]], 1.); @@ -136,7 +141,8 @@ fn test_0_add() { } #[test] -fn test_0_add_add() { +fn test_0_add_add() +{ let mut a = Array::zeros(vec![]); a += 1.; let mut b = Array::zeros(vec![]); @@ -146,7 +152,8 @@ fn test_0_add_add() { } #[test] -fn test_0_add_broad() { +fn test_0_add_broad() +{ let mut b = Array::from(vec![5., 6.]); let mut a = Array::zeros(vec![]); a += 1.; @@ -157,10 +164,13 @@ fn test_0_add_broad() { #[test] #[cfg(feature = "std")] -fn test_into_dimension() { +fn test_into_dimension() +{ use ndarray::{Ix0, Ix1, Ix2, IxDyn}; - let a = Array::linspace(0., 41., 6 * 7).into_shape_with_order((6, 7)).unwrap(); + let a = Array::linspace(0., 41., 6 * 7) + .into_shape_with_order((6, 7)) + .unwrap(); let a2 = a.clone().into_shape_with_order(IxDyn(&[6, 7])).unwrap(); let b = a2.clone().into_dimensionality::().unwrap(); assert_eq!(a, b); diff --git a/tests/numeric.rs b/tests/numeric.rs index e08979d29..4d70d4502 100644 --- a/tests/numeric.rs +++ b/tests/numeric.rs @@ -1,8 +1,5 @@ #![allow( - clippy::many_single_char_names, - clippy::deref_addrof, - clippy::unreadable_literal, - clippy::many_single_char_names, + clippy::many_single_char_names, clippy::deref_addrof, clippy::unreadable_literal, clippy::many_single_char_names, clippy::float_cmp )] @@ -11,19 +8,22 @@ use ndarray::{arr0, arr1, arr2, array, aview1, Array, Array1, Array2, Array3, Ax use std::f64; #[test] -fn test_mean_with_nan_values() { +fn test_mean_with_nan_values() +{ let a = array![f64::NAN, 1.]; assert!(a.mean().unwrap().is_nan()); } #[test] -fn test_mean_with_empty_array_of_floats() { +fn test_mean_with_empty_array_of_floats() +{ let a: Array1 = array![]; assert!(a.mean().is_none()); } #[test] -fn test_mean_with_array_of_floats() { +fn test_mean_with_array_of_floats() +{ let a: Array1 = array![ 0.99889651, 0.0150731, 0.28492482, 0.83819218, 0.48413156, 0.80710412, 0.41762936, 0.22879429, 0.43997224, 0.23831807, 0.02416466, 0.6269962, 0.47420614, 0.56275487, @@ -39,7 +39,8 @@ fn test_mean_with_array_of_floats() { } #[test] -fn sum_mean() { +fn sum_mean() +{ let a: Array2 = arr2(&[[1., 2.], [3., 4.]]); assert_eq!(a.sum_axis(Axis(0)), arr1(&[4., 6.])); assert_eq!(a.sum_axis(Axis(1)), arr1(&[3., 7.])); @@ -51,7 +52,8 @@ fn sum_mean() { } #[test] -fn sum_mean_empty() { +fn sum_mean_empty() +{ assert_eq!(Array3::::ones((2, 0, 3)).sum(), 0.); assert_eq!(Array1::::ones(0).sum_axis(Axis(0)), arr0(0.)); assert_eq!( @@ -66,7 +68,8 @@ fn sum_mean_empty() { #[test] #[cfg(feature = 
"std")] -fn var() { +fn var() +{ let a = array![1., -4.32, 1.14, 0.32]; assert_abs_diff_eq!(a.var(0.), 5.049875, epsilon = 1e-8); } @@ -74,7 +77,8 @@ fn var() { #[test] #[cfg(feature = "std")] #[should_panic] -fn var_negative_ddof() { +fn var_negative_ddof() +{ let a = array![1., 2., 3.]; a.var(-1.); } @@ -82,14 +86,16 @@ fn var_negative_ddof() { #[test] #[cfg(feature = "std")] #[should_panic] -fn var_too_large_ddof() { +fn var_too_large_ddof() +{ let a = array![1., 2., 3.]; a.var(4.); } #[test] #[cfg(feature = "std")] -fn var_nan_ddof() { +fn var_nan_ddof() +{ let a = Array2::::zeros((2, 3)); let v = a.var(::std::f64::NAN); assert!(v.is_nan()); @@ -97,14 +103,16 @@ fn var_nan_ddof() { #[test] #[cfg(feature = "std")] -fn var_empty_arr() { +fn var_empty_arr() +{ let a: Array1 = array![]; assert!(a.var(0.0).is_nan()); } #[test] #[cfg(feature = "std")] -fn std() { +fn std() +{ let a = array![1., -4.32, 1.14, 0.32]; assert_abs_diff_eq!(a.std(0.), 2.24719, epsilon = 1e-5); } @@ -112,7 +120,8 @@ fn std() { #[test] #[cfg(feature = "std")] #[should_panic] -fn std_negative_ddof() { +fn std_negative_ddof() +{ let a = array![1., 2., 3.]; a.std(-1.); } @@ -120,14 +129,16 @@ fn std_negative_ddof() { #[test] #[cfg(feature = "std")] #[should_panic] -fn std_too_large_ddof() { +fn std_too_large_ddof() +{ let a = array![1., 2., 3.]; a.std(4.); } #[test] #[cfg(feature = "std")] -fn std_nan_ddof() { +fn std_nan_ddof() +{ let a = Array2::::zeros((2, 3)); let v = a.std(::std::f64::NAN); assert!(v.is_nan()); @@ -135,14 +146,16 @@ fn std_nan_ddof() { #[test] #[cfg(feature = "std")] -fn std_empty_arr() { +fn std_empty_arr() +{ let a: Array1 = array![]; assert!(a.std(0.0).is_nan()); } #[test] #[cfg(feature = "approx")] -fn var_axis() { +fn var_axis() +{ use ndarray::{aview0, aview2}; let a = array![ @@ -200,7 +213,8 @@ fn var_axis() { #[test] #[cfg(feature = "approx")] -fn std_axis() { +fn std_axis() +{ use ndarray::aview2; let a = array![ @@ -260,7 +274,8 @@ fn std_axis() { #[test] #[should_panic] #[cfg(feature = "std")] -fn var_axis_negative_ddof() { +fn var_axis_negative_ddof() +{ let a = array![1., 2., 3.]; a.var_axis(Axis(0), -1.); } @@ -268,14 +283,16 @@ fn var_axis_negative_ddof() { #[test] #[should_panic] #[cfg(feature = "std")] -fn var_axis_too_large_ddof() { +fn var_axis_too_large_ddof() +{ let a = array![1., 2., 3.]; a.var_axis(Axis(0), 4.); } #[test] #[cfg(feature = "std")] -fn var_axis_nan_ddof() { +fn var_axis_nan_ddof() +{ let a = Array2::::zeros((2, 3)); let v = a.var_axis(Axis(1), ::std::f64::NAN); assert_eq!(v.shape(), &[2]); @@ -284,7 +301,8 @@ fn var_axis_nan_ddof() { #[test] #[cfg(feature = "std")] -fn var_axis_empty_axis() { +fn var_axis_empty_axis() +{ let a = Array2::::zeros((2, 0)); let v = a.var_axis(Axis(1), 0.); assert_eq!(v.shape(), &[2]); @@ -294,14 +312,16 @@ fn var_axis_empty_axis() { #[test] #[should_panic] #[cfg(feature = "std")] -fn std_axis_bad_dof() { +fn std_axis_bad_dof() +{ let a = array![1., 2., 3.]; a.std_axis(Axis(0), 4.); } #[test] #[cfg(feature = "std")] -fn std_axis_empty_axis() { +fn std_axis_empty_axis() +{ let a = Array2::::zeros((2, 0)); let v = a.std_axis(Axis(1), 0.); assert_eq!(v.shape(), &[2]); diff --git a/tests/oper.rs b/tests/oper.rs index 12e822cb7..294a762c6 100644 --- a/tests/oper.rs +++ b/tests/oper.rs @@ -1,8 +1,5 @@ #![allow( - clippy::many_single_char_names, - clippy::deref_addrof, - clippy::unreadable_literal, - clippy::many_single_char_names + clippy::many_single_char_names, clippy::deref_addrof, clippy::unreadable_literal, 
clippy::many_single_char_names )] #![cfg(feature = "std")] use ndarray::linalg::general_mat_mul; @@ -16,7 +13,8 @@ use num_traits::Zero; use approx::assert_abs_diff_eq; use defmac::defmac; -fn test_oper(op: &str, a: &[f32], b: &[f32], c: &[f32]) { +fn test_oper(op: &str, a: &[f32], b: &[f32], c: &[f32]) +{ let aa = CowArray::from(arr1(a)); let bb = CowArray::from(arr1(b)); let cc = CowArray::from(arr1(c)); @@ -33,10 +31,8 @@ fn test_oper(op: &str, a: &[f32], b: &[f32], c: &[f32]) { test_oper_arr::(op, aa.clone(), bb.clone(), cc.clone()); } - fn test_oper_arr(op: &str, mut aa: CowArray, bb: CowArray, cc: CowArray) -where - D: Dimension, +where D: Dimension { match op { "+" => { @@ -73,47 +69,19 @@ where } #[test] -fn operations() { - test_oper( - "+", - &[1.0, 2.0, 3.0, 4.0], - &[0.0, 1.0, 2.0, 3.0], - &[1.0, 3.0, 5.0, 7.0], - ); - test_oper( - "-", - &[1.0, 2.0, 3.0, 4.0], - &[0.0, 1.0, 2.0, 3.0], - &[1.0, 1.0, 1.0, 1.0], - ); - test_oper( - "*", - &[1.0, 2.0, 3.0, 4.0], - &[0.0, 1.0, 2.0, 3.0], - &[0.0, 2.0, 6.0, 12.0], - ); - test_oper( - "/", - &[1.0, 2.0, 3.0, 4.0], - &[1.0, 1.0, 2.0, 3.0], - &[1.0, 2.0, 3.0 / 2.0, 4.0 / 3.0], - ); - test_oper( - "%", - &[1.0, 2.0, 3.0, 4.0], - &[1.0, 1.0, 2.0, 3.0], - &[0.0, 0.0, 1.0, 1.0], - ); - test_oper( - "neg", - &[1.0, 2.0, 3.0, 4.0], - &[1.0, 1.0, 2.0, 3.0], - &[-1.0, -2.0, -3.0, -4.0], - ); +fn operations() +{ + test_oper("+", &[1.0, 2.0, 3.0, 4.0], &[0.0, 1.0, 2.0, 3.0], &[1.0, 3.0, 5.0, 7.0]); + test_oper("-", &[1.0, 2.0, 3.0, 4.0], &[0.0, 1.0, 2.0, 3.0], &[1.0, 1.0, 1.0, 1.0]); + test_oper("*", &[1.0, 2.0, 3.0, 4.0], &[0.0, 1.0, 2.0, 3.0], &[0.0, 2.0, 6.0, 12.0]); + test_oper("/", &[1.0, 2.0, 3.0, 4.0], &[1.0, 1.0, 2.0, 3.0], &[1.0, 2.0, 3.0 / 2.0, 4.0 / 3.0]); + test_oper("%", &[1.0, 2.0, 3.0, 4.0], &[1.0, 1.0, 2.0, 3.0], &[0.0, 0.0, 1.0, 1.0]); + test_oper("neg", &[1.0, 2.0, 3.0, 4.0], &[1.0, 1.0, 2.0, 3.0], &[-1.0, -2.0, -3.0, -4.0]); } #[test] -fn scalar_operations() { +fn scalar_operations() +{ let a = arr0::(1.); let b = rcarr1::(&[1., 1.]); let c = rcarr2(&[[1., 1.], [1., 1.]]); @@ -159,7 +127,8 @@ where } #[test] -fn dot_product() { +fn dot_product() +{ let a = Array::range(0., 69., 1.); let b = &a * 2. - 7.; let dot = 197846.; @@ -197,7 +166,8 @@ fn dot_product() { // test that we can dot product with a broadcast array #[test] -fn dot_product_0() { +fn dot_product_0() +{ let a = Array::range(0., 69., 1.); let x = 1.5; let b = aview0(&x); @@ -217,7 +187,8 @@ fn dot_product_0() { } #[test] -fn dot_product_neg_stride() { +fn dot_product_neg_stride() +{ // test that we can dot with negative stride let a = Array::range(0., 69., 1.); let b = &a * 2. 
- 7.; @@ -236,8 +207,11 @@ fn dot_product_neg_stride() { } #[test] -fn fold_and_sum() { - let a = Array::linspace(0., 127., 128).into_shape_with_order((8, 16)).unwrap(); +fn fold_and_sum() +{ + let a = Array::linspace(0., 127., 128) + .into_shape_with_order((8, 16)) + .unwrap(); assert_abs_diff_eq!(a.fold(0., |acc, &x| acc + x), a.sum(), epsilon = 1e-5); // test different strides @@ -275,8 +249,11 @@ fn fold_and_sum() { } #[test] -fn product() { - let a = Array::linspace(0.5, 2., 128).into_shape_with_order((8, 16)).unwrap(); +fn product() +{ + let a = Array::linspace(0.5, 2., 128) + .into_shape_with_order((8, 16)) + .unwrap(); assert_abs_diff_eq!(a.fold(1., |acc, &x| acc * x), a.product(), epsilon = 1e-5); // test different strides @@ -294,24 +271,28 @@ fn product() { } } -fn range_mat(m: Ix, n: Ix) -> Array2 { +fn range_mat(m: Ix, n: Ix) -> Array2 +{ Array::linspace(0., (m * n) as f32 - 1., m * n) .into_shape_with_order((m, n)) .unwrap() } -fn range_mat64(m: Ix, n: Ix) -> Array2 { +fn range_mat64(m: Ix, n: Ix) -> Array2 +{ Array::linspace(0., (m * n) as f64 - 1., m * n) .into_shape_with_order((m, n)) .unwrap() } #[cfg(feature = "approx")] -fn range1_mat64(m: Ix) -> Array1 { +fn range1_mat64(m: Ix) -> Array1 +{ Array::linspace(0., m as f64 - 1., m) } -fn range_i32(m: Ix, n: Ix) -> Array2 { +fn range_i32(m: Ix, n: Ix) -> Array2 +{ Array::from_iter(0..(m * n) as i32) .into_shape_with_order((m, n)) .unwrap() @@ -336,9 +317,7 @@ where let mut j = 0; for rr in &mut res_elems { unsafe { - *rr = (0..k).fold(A::zero(), move |s, x| { - s + *lhs.uget((i, x)) * *rhs.uget((x, j)) - }); + *rr = (0..k).fold(A::zero(), move |s, x| s + *lhs.uget((i, x)) * *rhs.uget((x, j))); } j += 1; if j == n { @@ -350,7 +329,8 @@ where } #[test] -fn mat_mul() { +fn mat_mul() +{ let (m, n, k) = (8, 8, 8); let a = range_mat(m, n); let b = range_mat(n, k); @@ -412,7 +392,8 @@ fn mat_mul() { // Check that matrix multiplication of contiguous matrices returns a // matrix with the same order #[test] -fn mat_mul_order() { +fn mat_mul_order() +{ let (m, n, k) = (8, 8, 8); let a = range_mat(m, n); let b = range_mat(n, k); @@ -431,7 +412,8 @@ fn mat_mul_order() { // test matrix multiplication shape mismatch #[test] #[should_panic] -fn mat_mul_shape_mismatch() { +fn mat_mul_shape_mismatch() +{ let (m, k, k2, n) = (8, 8, 9, 8); let a = range_mat(m, k); let b = range_mat(k2, n); @@ -441,7 +423,8 @@ fn mat_mul_shape_mismatch() { // test matrix multiplication shape mismatch #[test] #[should_panic] -fn mat_mul_shape_mismatch_2() { +fn mat_mul_shape_mismatch_2() +{ let (m, k, k2, n) = (8, 8, 8, 8); let a = range_mat(m, k); let b = range_mat(k2, n); @@ -452,7 +435,8 @@ fn mat_mul_shape_mismatch_2() { // Check that matrix multiplication // supports broadcast arrays. 
#[test] -fn mat_mul_broadcast() { +fn mat_mul_broadcast() +{ let (m, n, k) = (16, 16, 16); let a = range_mat(m, n); let x1 = 1.; @@ -471,7 +455,8 @@ fn mat_mul_broadcast() { // Check that matrix multiplication supports reversed axes #[test] -fn mat_mul_rev() { +fn mat_mul_rev() +{ let (m, n, k) = (16, 16, 16); let a = range_mat(m, n); let b = range_mat(n, k); @@ -487,7 +472,8 @@ fn mat_mul_rev() { // Check that matrix multiplication supports arrays with zero rows or columns #[test] -fn mat_mut_zero_len() { +fn mat_mut_zero_len() +{ defmac!(mat_mul_zero_len range_mat_fn => { for n in 0..4 { for m in 0..4 { @@ -508,7 +494,8 @@ fn mat_mut_zero_len() { } #[test] -fn scaled_add() { +fn scaled_add() +{ let a = range_mat(16, 15); let mut b = range_mat(16, 15); b.mapv_inplace(f32::exp); @@ -523,7 +510,8 @@ fn scaled_add() { #[cfg(feature = "approx")] #[test] -fn scaled_add_2() { +fn scaled_add_2() +{ let beta = -2.3; let sizes = vec![ (4, 4, 1, 4), @@ -560,7 +548,8 @@ fn scaled_add_2() { #[cfg(feature = "approx")] #[test] -fn scaled_add_3() { +fn scaled_add_3() +{ use approx::assert_relative_eq; use ndarray::{Slice, SliceInfo, SliceInfoElem}; use std::convert::TryFrom; @@ -611,7 +600,8 @@ fn scaled_add_3() { #[cfg(feature = "approx")] #[test] -fn gen_mat_mul() { +fn gen_mat_mul() +{ let alpha = -2.3; let beta = 3.14; let sizes = vec![ @@ -653,7 +643,8 @@ fn gen_mat_mul() { // Test y = A x where A is f-order #[cfg(feature = "approx")] #[test] -fn gemm_64_1_f() { +fn gemm_64_1_f() +{ let a = range_mat64(64, 64).reversed_axes(); let (m, n) = a.dim(); // m x n times n x 1 == m x 1 @@ -665,7 +656,8 @@ fn gemm_64_1_f() { } #[test] -fn gen_mat_mul_i32() { +fn gen_mat_mul_i32() +{ let alpha = -1; let beta = 2; let sizes = if cfg!(miri) { @@ -696,24 +688,27 @@ fn gen_mat_mul_i32() { #[cfg(feature = "approx")] #[test] -fn gen_mat_vec_mul() { +fn gen_mat_vec_mul() +{ use approx::assert_relative_eq; use ndarray::linalg::general_mat_vec_mul; // simple, slow, correct (hopefully) mat mul - fn reference_mat_vec_mul( - lhs: &ArrayBase, - rhs: &ArrayBase, - ) -> Array1 + fn reference_mat_vec_mul(lhs: &ArrayBase, rhs: &ArrayBase) -> Array1 where A: LinalgScalar, S: Data, S2: Data, { let ((m, _), k) = (lhs.dim(), rhs.dim()); - reference_mat_mul(lhs, &rhs.as_standard_layout().into_shape_with_order((k, 1)).unwrap()) - .into_shape_with_order(m) - .unwrap() + reference_mat_mul( + lhs, + &rhs.as_standard_layout() + .into_shape_with_order((k, 1)) + .unwrap(), + ) + .into_shape_with_order(m) + .unwrap() } let alpha = -2.3; @@ -762,23 +757,26 @@ fn gen_mat_vec_mul() { #[cfg(feature = "approx")] #[test] -fn vec_mat_mul() { +fn vec_mat_mul() +{ use approx::assert_relative_eq; // simple, slow, correct (hopefully) mat mul - fn reference_vec_mat_mul( - lhs: &ArrayBase, - rhs: &ArrayBase, - ) -> Array1 + fn reference_vec_mat_mul(lhs: &ArrayBase, rhs: &ArrayBase) -> Array1 where A: LinalgScalar, S: Data, S2: Data, { let (m, (_, n)) = (lhs.dim(), rhs.dim()); - reference_mat_mul(&lhs.as_standard_layout().into_shape_with_order((1, m)).unwrap(), rhs) - .into_shape_with_order(n) - .unwrap() + reference_mat_mul( + &lhs.as_standard_layout() + .into_shape_with_order((1, m)) + .unwrap(), + rhs, + ) + .into_shape_with_order(n) + .unwrap() } let sizes = vec![ @@ -823,7 +821,8 @@ fn vec_mat_mul() { } #[test] -fn kron_square_f64() { +fn kron_square_f64() +{ let a = arr2(&[[1.0, 0.0], [0.0, 1.0]]); let b = arr2(&[[0.0, 1.0], [1.0, 0.0]]); @@ -849,7 +848,8 @@ fn kron_square_f64() { } #[test] -fn kron_square_i64() { +fn kron_square_i64() +{ let a 
= arr2(&[[1, 0], [0, 1]]); let b = arr2(&[[0, 1], [1, 0]]); @@ -865,7 +865,8 @@ fn kron_square_i64() { } #[test] -fn kron_i64() { +fn kron_i64() +{ let a = arr2(&[[1, 0]]); let b = arr2(&[[0, 1], [1, 0]]); let r = arr2(&[[0, 1, 0, 0], [1, 0, 0, 0]]); @@ -873,13 +874,6 @@ fn kron_i64() { let a = arr2(&[[1, 0], [0, 0], [0, 1]]); let b = arr2(&[[0, 1], [1, 0]]); - let r = arr2(&[ - [0, 1, 0, 0], - [1, 0, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 1], - [0, 0, 1, 0], - ]); + let r = arr2(&[[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]]); assert_eq!(kron(&a, &b), r); } diff --git a/tests/par_azip.rs b/tests/par_azip.rs index e5dc02c4e..418c21ef8 100644 --- a/tests/par_azip.rs +++ b/tests/par_azip.rs @@ -7,7 +7,8 @@ use ndarray::prelude::*; use std::sync::atomic::{AtomicUsize, Ordering}; #[test] -fn test_par_azip1() { +fn test_par_azip1() +{ let mut a = Array::zeros(62); let b = Array::from_elem(62, 42); par_azip!((a in &mut a) { *a = 42 }); @@ -15,7 +16,8 @@ fn test_par_azip1() { } #[test] -fn test_par_azip2() { +fn test_par_azip2() +{ let mut a = Array::zeros((5, 7)); let b = Array::from_shape_fn(a.dim(), |(i, j)| 1. / (i + 2 * j) as f32); par_azip!((a in &mut a, &b in &b, ) *a = b ); @@ -24,7 +26,8 @@ fn test_par_azip2() { #[test] #[cfg(feature = "approx")] -fn test_par_azip3() { +fn test_par_azip3() +{ use approx::assert_abs_diff_eq; let mut a = [0.; 32]; @@ -44,7 +47,8 @@ fn test_par_azip3() { #[should_panic] #[test] -fn test_zip_dim_mismatch_1() { +fn test_zip_dim_mismatch_1() +{ let mut a = Array::zeros((5, 7)); let mut d = a.raw_dim(); d[0] += 1; @@ -53,7 +57,8 @@ fn test_zip_dim_mismatch_1() { } #[test] -fn test_indices_1() { +fn test_indices_1() +{ let mut a1 = Array::default(12); for (i, elt) in a1.indexed_iter_mut() { *elt = i; diff --git a/tests/par_rayon.rs b/tests/par_rayon.rs index 40670c6bf..13669763f 100644 --- a/tests/par_rayon.rs +++ b/tests/par_rayon.rs @@ -9,7 +9,8 @@ const CHUNK_SIZE: usize = 100; const N_CHUNKS: usize = (M + CHUNK_SIZE - 1) / CHUNK_SIZE; #[test] -fn test_axis_iter() { +fn test_axis_iter() +{ let mut a = Array2::::zeros((M, N)); for (i, mut v) in a.axis_iter_mut(Axis(0)).enumerate() { v.fill(i as _); @@ -22,7 +23,8 @@ fn test_axis_iter() { #[test] #[cfg(feature = "approx")] -fn test_axis_iter_mut() { +fn test_axis_iter_mut() +{ use approx::assert_abs_diff_eq; let mut a = Array::linspace(0., 1.0f64, M * N) .into_shape_with_order((M, N)) @@ -36,7 +38,8 @@ fn test_axis_iter_mut() { } #[test] -fn test_regular_iter() { +fn test_regular_iter() +{ let mut a = Array2::::zeros((M, N)); for (i, mut v) in a.axis_iter_mut(Axis(0)).enumerate() { v.fill(i as _); @@ -47,7 +50,8 @@ fn test_regular_iter() { } #[test] -fn test_regular_iter_collect() { +fn test_regular_iter_collect() +{ let mut a = Array2::::zeros((M, N)); for (i, mut v) in a.axis_iter_mut(Axis(0)).enumerate() { v.fill(i as _); @@ -57,7 +61,8 @@ fn test_regular_iter_collect() { } #[test] -fn test_axis_chunks_iter() { +fn test_axis_chunks_iter() +{ let mut a = Array2::::zeros((M, N)); for (i, mut v) in a.axis_chunks_iter_mut(Axis(0), CHUNK_SIZE).enumerate() { v.fill(i as _); @@ -74,7 +79,8 @@ fn test_axis_chunks_iter() { #[test] #[cfg(feature = "approx")] -fn test_axis_chunks_iter_mut() { +fn test_axis_chunks_iter_mut() +{ use approx::assert_abs_diff_eq; let mut a = Array::linspace(0., 1.0f64, M * N) .into_shape_with_order((M, N)) diff --git a/tests/par_zip.rs b/tests/par_zip.rs index ee929f36e..9f10d9fd5 100644 --- a/tests/par_zip.rs +++ b/tests/par_zip.rs @@ 
-8,14 +8,16 @@ const M: usize = 1024 * 10; const N: usize = 100; #[test] -fn test_zip_1() { +fn test_zip_1() +{ let mut a = Array2::::zeros((M, N)); Zip::from(&mut a).par_for_each(|x| *x = x.exp()); } #[test] -fn test_zip_index_1() { +fn test_zip_index_1() +{ let mut a = Array2::default((10, 10)); Zip::indexed(&mut a).par_for_each(|i, x| { @@ -28,7 +30,8 @@ fn test_zip_index_1() { } #[test] -fn test_zip_index_2() { +fn test_zip_index_2() +{ let mut a = Array2::default((M, N)); Zip::indexed(&mut a).par_for_each(|i, x| { @@ -41,7 +44,8 @@ fn test_zip_index_2() { } #[test] -fn test_zip_index_3() { +fn test_zip_index_3() +{ let mut a = Array::default((1, 2, 1, 2, 3)); Zip::indexed(&mut a).par_for_each(|i, x| { @@ -54,14 +58,17 @@ fn test_zip_index_3() { } #[test] -fn test_zip_index_4() { +fn test_zip_index_4() +{ let mut a = Array2::zeros((M, N)); let mut b = Array2::zeros((M, N)); - Zip::indexed(&mut a).and(&mut b).par_for_each(|(i, j), x, y| { - *x = i; - *y = j; - }); + Zip::indexed(&mut a) + .and(&mut b) + .par_for_each(|(i, j), x, y| { + *x = i; + *y = j; + }); for ((i, _), elt) in a.indexed_iter() { assert_eq!(*elt, i); @@ -73,7 +80,8 @@ fn test_zip_index_4() { #[test] #[cfg(feature = "approx")] -fn test_zip_collect() { +fn test_zip_collect() +{ use approx::assert_abs_diff_eq; // test Zip::map_collect and that it preserves c/f layout. @@ -97,12 +105,12 @@ fn test_zip_collect() { assert_abs_diff_eq!(a, &b + &c, epsilon = 1e-6); assert_eq!(a.strides(), b.strides()); } - } #[test] #[cfg(feature = "approx")] -fn test_zip_small_collect() { +fn test_zip_small_collect() +{ use approx::assert_abs_diff_eq; for m in 0..32 { @@ -126,17 +134,19 @@ fn test_zip_small_collect() { } } - #[test] #[cfg(feature = "approx")] -fn test_zip_assign_into() { +fn test_zip_assign_into() +{ use approx::assert_abs_diff_eq; let mut a = Array::::zeros((M, N)); let b = Array::from_shape_fn((M, N), |(i, j)| 1. 
/ (i + 2 * j + 1) as f32); let c = Array::from_shape_fn((M, N), |(i, j)| f32::ln((1 + i + j) as f32)); - Zip::from(&b).and(&c).par_map_assign_into(&mut a, |x, y| x + y); + Zip::from(&b) + .and(&c) + .par_map_assign_into(&mut a, |x, y| x + y); assert_abs_diff_eq!(a, &b + &c, epsilon = 1e-6); } diff --git a/tests/raw_views.rs b/tests/raw_views.rs index bb39547e8..929e969d7 100644 --- a/tests/raw_views.rs +++ b/tests/raw_views.rs @@ -4,7 +4,8 @@ use ndarray::Zip; use std::cell::Cell; #[test] -fn raw_view_cast_cell() { +fn raw_view_cast_cell() +{ // Test .cast() by creating an ArrayView> let mut a = Array::from_shape_fn((10, 5), |(i, j)| (i * j) as f32); @@ -20,7 +21,8 @@ fn raw_view_cast_cell() { } #[test] -fn raw_view_cast_reinterpret() { +fn raw_view_cast_reinterpret() +{ // Test .cast() by reinterpreting u16 as [u8; 2] let a = Array::from_shape_fn((5, 5).f(), |(i, j)| (i as u16) << 8 | j as u16); let answer = a.mapv(u16::to_ne_bytes); @@ -31,7 +33,8 @@ fn raw_view_cast_reinterpret() { } #[test] -fn raw_view_cast_zst() { +fn raw_view_cast_zst() +{ struct Zst; let a = Array::<(), _>::default((250, 250)); @@ -42,14 +45,16 @@ fn raw_view_cast_zst() { #[test] #[should_panic] -fn raw_view_invalid_size_cast() { +fn raw_view_invalid_size_cast() +{ let data = [0i32; 16]; ArrayView::from(&data[..]).raw_view().cast::(); } #[test] #[should_panic] -fn raw_view_mut_invalid_size_cast() { +fn raw_view_mut_invalid_size_cast() +{ let mut data = [0i32; 16]; ArrayViewMut::from(&mut data[..]) .raw_view_mut() @@ -57,7 +62,8 @@ fn raw_view_mut_invalid_size_cast() { } #[test] -fn raw_view_misaligned() { +fn raw_view_misaligned() +{ let data: [u16; 2] = [0x0011, 0x2233]; let ptr: *const u16 = data.as_ptr(); unsafe { @@ -69,8 +75,10 @@ fn raw_view_misaligned() { #[test] #[cfg(debug_assertions)] #[should_panic = "The pointer must be aligned."] -fn raw_view_deref_into_view_misaligned() { - fn misaligned_deref(data: &[u16; 2]) -> ArrayView1<'_, u16> { +fn raw_view_deref_into_view_misaligned() +{ + fn misaligned_deref(data: &[u16; 2]) -> ArrayView1<'_, u16> + { let ptr: *const u16 = data.as_ptr(); unsafe { let misaligned_ptr = (ptr as *const u8).add(1) as *const u16; @@ -85,8 +93,10 @@ fn raw_view_deref_into_view_misaligned() { #[test] #[cfg(debug_assertions)] #[should_panic = "Unsupported"] -fn raw_view_negative_strides() { - fn misaligned_deref(data: &[u16; 2]) -> ArrayView1<'_, u16> { +fn raw_view_negative_strides() +{ + fn misaligned_deref(data: &[u16; 2]) -> ArrayView1<'_, u16> + { let ptr: *const u16 = data.as_ptr(); unsafe { let raw_view = RawArrayView::from_shape_ptr(1.strides((-1isize) as usize), ptr); diff --git a/tests/reshape.rs b/tests/reshape.rs index 24e7d01f8..a13a5c05f 100644 --- a/tests/reshape.rs +++ b/tests/reshape.rs @@ -5,7 +5,8 @@ use itertools::enumerate; use ndarray::Order; #[test] -fn reshape() { +fn reshape() +{ let data = [1, 2, 3, 4, 5, 6, 7, 8]; let v = aview1(&data); let u = v.into_shape_with_order((3, 3)); @@ -21,7 +22,8 @@ fn reshape() { #[test] #[should_panic(expected = "IncompatibleShape")] -fn reshape_error1() { +fn reshape_error1() +{ let data = [1, 2, 3, 4, 5, 6, 7, 8]; let v = aview1(&data); let _u = v.into_shape_with_order((2, 5)).unwrap(); @@ -29,7 +31,8 @@ fn reshape_error1() { #[test] #[should_panic(expected = "IncompatibleLayout")] -fn reshape_error2() { +fn reshape_error2() +{ let data = [1, 2, 3, 4, 5, 6, 7, 8]; let v = aview1(&data); let mut u = v.into_shape_with_order((2, 2, 2)).unwrap(); @@ -38,7 +41,8 @@ fn reshape_error2() { } #[test] -fn reshape_f() { +fn reshape_f() 
+{ let mut u = Array::zeros((3, 4).f()); for (i, elt) in enumerate(u.as_slice_memory_order_mut().unwrap()) { *elt = i as i32; @@ -62,9 +66,9 @@ fn reshape_f() { assert_eq!(s, aview2(&[[0, 4, 8], [1, 5, 9], [2, 6, 10], [3, 7, 11]])); } - #[test] -fn to_shape_easy() { +fn to_shape_easy() +{ // 1D -> C -> C let data = [1, 2, 3, 4, 5, 6, 7, 8]; let v = aview1(&data); @@ -103,7 +107,8 @@ fn to_shape_easy() { } #[test] -fn to_shape_copy() { +fn to_shape_copy() +{ // 1D -> C -> F let v = ArrayView::from(&[1, 2, 3, 4, 5, 6, 7, 8]); let u = v.to_shape(((4, 2), Order::RowMajor)).unwrap(); @@ -126,7 +131,8 @@ fn to_shape_copy() { } #[test] -fn to_shape_add_axis() { +fn to_shape_add_axis() +{ // 1D -> C -> C let data = [1, 2, 3, 4, 5, 6, 7, 8]; let v = aview1(&data); @@ -136,9 +142,9 @@ fn to_shape_add_axis() { assert!(u.to_shape(((1, 4, 2), Order::ColumnMajor)).unwrap().is_view()); } - #[test] -fn to_shape_copy_stride() { +fn to_shape_copy_stride() +{ let v = array![[1, 2, 3, 4], [5, 6, 7, 8]]; let vs = v.slice(s![.., ..3]); let lin1 = vs.to_shape(6).unwrap(); @@ -150,9 +156,9 @@ fn to_shape_copy_stride() { assert!(lin2.is_owned()); } - #[test] -fn to_shape_zero_len() { +fn to_shape_zero_len() +{ let v = array![[1, 2, 3, 4], [5, 6, 7, 8]]; let vs = v.slice(s![.., ..0]); let lin1 = vs.to_shape(0).unwrap(); @@ -162,7 +168,8 @@ fn to_shape_zero_len() { #[test] #[should_panic(expected = "IncompatibleShape")] -fn to_shape_error1() { +fn to_shape_error1() +{ let data = [1, 2, 3, 4, 5, 6, 7, 8]; let v = aview1(&data); let _u = v.to_shape((2, 5)).unwrap(); @@ -170,7 +177,8 @@ fn to_shape_error1() { #[test] #[should_panic(expected = "IncompatibleShape")] -fn to_shape_error2() { +fn to_shape_error2() +{ // overflow let data = [3, 4, 5, 6, 7, 8]; let v = aview1(&data); @@ -178,7 +186,8 @@ fn to_shape_error2() { } #[test] -fn to_shape_discontig() { +fn to_shape_discontig() +{ for &create_order in &[Order::C, Order::F] { let a = Array::from_iter(0..64); let mut a1 = a.to_shape(((4, 4, 4), create_order)).unwrap(); @@ -205,7 +214,8 @@ fn to_shape_discontig() { } #[test] -fn to_shape_broadcast() { +fn to_shape_broadcast() +{ for &create_order in &[Order::C, Order::F] { let a = Array::from_iter(0..64); let mut a1 = a.to_shape(((4, 4, 4), create_order)).unwrap(); @@ -231,9 +241,9 @@ fn to_shape_broadcast() { } } - #[test] -fn into_shape_with_order() { +fn into_shape_with_order() +{ // 1D -> C -> C let data = [1, 2, 3, 4, 5, 6, 7, 8]; let v = aview1(&data); @@ -264,13 +274,16 @@ fn into_shape_with_order() { assert_eq!(u.shape(), &[2, 2, 2]); assert_eq!(u, array![[[1, 5], [3, 7]], [[2, 6], [4, 8]]]); - let s = u.into_shape_with_order(((4, 2), Order::ColumnMajor)).unwrap(); + let s = u + .into_shape_with_order(((4, 2), Order::ColumnMajor)) + .unwrap(); assert_eq!(s.shape(), &[4, 2]); assert_eq!(s, array![[1, 5], [2, 6], [3, 7], [4, 8]]); } #[test] -fn into_shape_clone() { +fn into_shape_clone() +{ // 1D -> C -> C { let data = [1, 2, 3, 4, 5, 6, 7, 8]; diff --git a/tests/s.rs b/tests/s.rs index dbc68184a..edb3f071a 100644 --- a/tests/s.rs +++ b/tests/s.rs @@ -1,14 +1,12 @@ #![allow( - clippy::many_single_char_names, - clippy::deref_addrof, - clippy::unreadable_literal, - clippy::many_single_char_names + clippy::many_single_char_names, clippy::deref_addrof, clippy::unreadable_literal, clippy::many_single_char_names )] use ndarray::{s, Array}; #[test] -fn test_s() { +fn test_s() +{ let a = Array::::zeros((3, 4)); let vi = a.slice(s![1.., ..;2]); assert_eq!(vi.shape(), &[2, 2]); diff --git a/tests/stacking.rs 
b/tests/stacking.rs index 0c4e79c79..bdfe478b4 100644 --- a/tests/stacking.rs +++ b/tests/stacking.rs @@ -1,7 +1,8 @@ use ndarray::{arr2, arr3, aview1, aview2, concatenate, stack, Array2, Axis, ErrorKind, Ix1}; #[test] -fn concatenating() { +fn concatenating() +{ let a = arr2(&[[2., 2.], [3., 3.]]); let b = ndarray::concatenate(Axis(0), &[a.view(), a.view()]).unwrap(); assert_eq!(b, arr2(&[[2., 2.], [3., 3.], [2., 2.], [3., 3.]])); @@ -33,7 +34,8 @@ fn concatenating() { } #[test] -fn stacking() { +fn stacking() +{ let a = arr2(&[[2., 2.], [3., 3.]]); let b = ndarray::stack(Axis(0), &[a.view(), a.view()]).unwrap(); assert_eq!(b, arr3(&[[[2., 2.], [3., 3.]], [[2., 2.], [3., 3.]]])); diff --git a/tests/views.rs b/tests/views.rs index ecef72fe8..02970b1b7 100644 --- a/tests/views.rs +++ b/tests/views.rs @@ -2,7 +2,8 @@ use ndarray::prelude::*; use ndarray::Zip; #[test] -fn cell_view() { +fn cell_view() +{ let mut a = Array::from_shape_fn((10, 5), |(i, j)| (i * j) as f32); let answer = &a + 1.; diff --git a/tests/windows.rs b/tests/windows.rs index c24a47ef9..d8d5b699e 100644 --- a/tests/windows.rs +++ b/tests/windows.rs @@ -1,8 +1,5 @@ #![allow( - clippy::many_single_char_names, - clippy::deref_addrof, - clippy::unreadable_literal, - clippy::many_single_char_names + clippy::many_single_char_names, clippy::deref_addrof, clippy::unreadable_literal, clippy::many_single_char_names )] use ndarray::prelude::*; @@ -25,26 +22,31 @@ use ndarray::{arr3, Zip}; /// Test that verifies the `Windows` iterator panics on window sizes equal to zero. #[test] #[should_panic] -fn windows_iterator_zero_size() { - let a = Array::from_iter(10..37).into_shape_with_order((3, 3, 3)).unwrap(); +fn windows_iterator_zero_size() +{ + let a = Array::from_iter(10..37) + .into_shape_with_order((3, 3, 3)) + .unwrap(); a.windows(Dim((0, 0, 0))); } /// Test that verifies that no windows are yielded on oversized window sizes. #[test] -fn windows_iterator_oversized() { - let a = Array::from_iter(10..37).into_shape_with_order((3, 3, 3)).unwrap(); +fn windows_iterator_oversized() +{ + let a = Array::from_iter(10..37) + .into_shape_with_order((3, 3, 3)) + .unwrap(); let mut iter = a.windows((4, 3, 2)).into_iter(); // (4,3,2) doesn't fit into (3,3,3) => oversized! assert_eq!(iter.next(), None); } /// Simple test for iterating 1d-arrays via `Windows`. #[test] -fn windows_iterator_1d() { +fn windows_iterator_1d() +{ let a = Array::from_iter(10..20).into_shape_with_order(10).unwrap(); - itertools::assert_equal( - a.windows(Dim(4)), - vec![ + itertools::assert_equal(a.windows(Dim(4)), vec![ arr1(&[10, 11, 12, 13]), arr1(&[11, 12, 13, 14]), arr1(&[12, 13, 14, 15]), @@ -52,17 +54,17 @@ fn windows_iterator_1d() { arr1(&[14, 15, 16, 17]), arr1(&[15, 16, 17, 18]), arr1(&[16, 17, 18, 19]), - ], - ); + ]); } /// Simple test for iterating 2d-arrays via `Windows`. #[test] -fn windows_iterator_2d() { - let a = Array::from_iter(10..30).into_shape_with_order((5, 4)).unwrap(); - itertools::assert_equal( - a.windows(Dim((3, 2))), - vec![ +fn windows_iterator_2d() +{ + let a = Array::from_iter(10..30) + .into_shape_with_order((5, 4)) + .unwrap(); + itertools::assert_equal(a.windows(Dim((3, 2))), vec![ arr2(&[[10, 11], [14, 15], [18, 19]]), arr2(&[[11, 12], [15, 16], [19, 20]]), arr2(&[[12, 13], [16, 17], [20, 21]]), @@ -72,17 +74,17 @@ fn windows_iterator_2d() { arr2(&[[18, 19], [22, 23], [26, 27]]), arr2(&[[19, 20], [23, 24], [27, 28]]), arr2(&[[20, 21], [24, 25], [28, 29]]), - ], - ); + ]); } /// Simple test for iterating 3d-arrays via `Windows`. 
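All of the `Windows` tests in this file rest on the same count rule: a window of size w along an axis of length n yields n - w + 1 positions, and a stride of s thins that out to (n - w) / s + 1 windows. A minimal sketch of that arithmetic, assuming only the `windows` and `windows_with_stride` producers exercised in these tests (the length-10 input is illustrative, not taken from the patch):

// Sketch of the window-count rule; the concrete sizes are illustrative only.
use ndarray::prelude::*;

fn main()
{
    let a = Array::from_iter(0..10).into_shape_with_order(10).unwrap();
    // Window 4 over length 10: 10 - 4 + 1 = 7 windows.
    assert_eq!(a.windows(Dim(4)).into_iter().count(), 7);
    // Adding stride 2: (10 - 4) / 2 + 1 = 4 windows.
    assert_eq!(a.windows_with_stride(4, 2).into_iter().count(), 4);
}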
#[test] -fn windows_iterator_3d() { - let a = Array::from_iter(10..37).into_shape_with_order((3, 3, 3)).unwrap(); - itertools::assert_equal( - a.windows(Dim((2, 2, 2))), - vec![ +fn windows_iterator_3d() +{ + let a = Array::from_iter(10..37) + .into_shape_with_order((3, 3, 3)) + .unwrap(); + itertools::assert_equal(a.windows(Dim((2, 2, 2))), vec![ arr3(&[[[10, 11], [13, 14]], [[19, 20], [22, 23]]]), arr3(&[[[11, 12], [14, 15]], [[20, 21], [23, 24]]]), arr3(&[[[13, 14], [16, 17]], [[22, 23], [25, 26]]]), @@ -91,68 +93,69 @@ fn windows_iterator_3d() { arr3(&[[[20, 21], [23, 24]], [[29, 30], [32, 33]]]), arr3(&[[[22, 23], [25, 26]], [[31, 32], [34, 35]]]), arr3(&[[[23, 24], [26, 27]], [[32, 33], [35, 36]]]), - ], - ); + ]); } /// Test that verifies the `Windows` iterator panics when stride has an axis equal to zero. #[test] #[should_panic] -fn windows_iterator_stride_axis_zero() { - let a = Array::from_iter(10..37).into_shape_with_order((3, 3, 3)).unwrap(); +fn windows_iterator_stride_axis_zero() +{ + let a = Array::from_iter(10..37) + .into_shape_with_order((3, 3, 3)) + .unwrap(); a.windows_with_stride((2, 2, 2), (0, 2, 2)); } /// Test that verifies that only first window is yielded when stride is oversized on every axis. #[test] -fn windows_iterator_only_one_valid_window_for_oversized_stride() { - let a = Array::from_iter(10..135).into_shape_with_order((5, 5, 5)).unwrap(); +fn windows_iterator_only_one_valid_window_for_oversized_stride() +{ + let a = Array::from_iter(10..135) + .into_shape_with_order((5, 5, 5)) + .unwrap(); let mut iter = a.windows_with_stride((2, 2, 2), (8, 8, 8)).into_iter(); // (4,3,2) doesn't fit into (3,3,3) => oversized! - itertools::assert_equal( - iter.next(), - Some(arr3(&[[[10, 11], [15, 16]], [[35, 36], [40, 41]]])), - ); + itertools::assert_equal(iter.next(), Some(arr3(&[[[10, 11], [15, 16]], [[35, 36], [40, 41]]]))); } /// Simple test for iterating 1d-arrays via `Windows` with stride. #[test] -fn windows_iterator_1d_with_stride() { +fn windows_iterator_1d_with_stride() +{ let a = Array::from_iter(10..20).into_shape_with_order(10).unwrap(); - itertools::assert_equal( - a.windows_with_stride(4, 2), - vec![ + itertools::assert_equal(a.windows_with_stride(4, 2), vec![ arr1(&[10, 11, 12, 13]), arr1(&[12, 13, 14, 15]), arr1(&[14, 15, 16, 17]), arr1(&[16, 17, 18, 19]), - ], - ); + ]); } /// Simple test for iterating 2d-arrays via `Windows` with stride. #[test] -fn windows_iterator_2d_with_stride() { - let a = Array::from_iter(10..30).into_shape_with_order((5, 4)).unwrap(); - itertools::assert_equal( - a.windows_with_stride((3, 2), (2, 1)), - vec![ +fn windows_iterator_2d_with_stride() +{ + let a = Array::from_iter(10..30) + .into_shape_with_order((5, 4)) + .unwrap(); + itertools::assert_equal(a.windows_with_stride((3, 2), (2, 1)), vec![ arr2(&[[10, 11], [14, 15], [18, 19]]), arr2(&[[11, 12], [15, 16], [19, 20]]), arr2(&[[12, 13], [16, 17], [20, 21]]), arr2(&[[18, 19], [22, 23], [26, 27]]), arr2(&[[19, 20], [23, 24], [27, 28]]), arr2(&[[20, 21], [24, 25], [28, 29]]), - ], - ); + ]); } /// Simple test for iterating 3d-arrays via `Windows` with stride. 
#[test] -fn windows_iterator_3d_with_stride() { - let a = Array::from_iter(10..74).into_shape_with_order((4, 4, 4)).unwrap(); - itertools::assert_equal( - a.windows_with_stride((2, 2, 2), (2, 2, 2)), - vec![ +fn windows_iterator_3d_with_stride() +{ + let a = Array::from_iter(10..74) + .into_shape_with_order((4, 4, 4)) + .unwrap(); + itertools::assert_equal(a.windows_with_stride((2, 2, 2), (2, 2, 2)), vec![ arr3(&[[[10, 11], [14, 15]], [[26, 27], [30, 31]]]), arr3(&[[[12, 13], [16, 17]], [[28, 29], [32, 33]]]), arr3(&[[[18, 19], [22, 23]], [[34, 35], [38, 39]]]), @@ -161,13 +164,15 @@ fn windows_iterator_3d_with_stride() { arr3(&[[[44, 45], [48, 49]], [[60, 61], [64, 65]]]), arr3(&[[[50, 51], [54, 55]], [[66, 67], [70, 71]]]), arr3(&[[[52, 53], [56, 57]], [[68, 69], [72, 73]]]), - ], - ); + ]); } #[test] -fn test_window_zip() { - let a = Array::from_iter(0..64).into_shape_with_order((4, 4, 4)).unwrap(); +fn test_window_zip() +{ + let a = Array::from_iter(0..64) + .into_shape_with_order((4, 4, 4)) + .unwrap(); for x in 1..4 { for y in 1..4 { @@ -189,69 +194,77 @@ fn test_window_zip() { /// Test verifies that non existent Axis results in panic #[test] #[should_panic] -fn axis_windows_outofbound() { - let a = Array::from_iter(10..37).into_shape_with_order((3, 3, 3)).unwrap(); +fn axis_windows_outofbound() +{ + let a = Array::from_iter(10..37) + .into_shape_with_order((3, 3, 3)) + .unwrap(); a.axis_windows(Axis(4), 2); } /// Test verifies that zero sizes results in panic #[test] #[should_panic] -fn axis_windows_zero_size() { - let a = Array::from_iter(10..37).into_shape_with_order((3, 3, 3)).unwrap(); +fn axis_windows_zero_size() +{ + let a = Array::from_iter(10..37) + .into_shape_with_order((3, 3, 3)) + .unwrap(); a.axis_windows(Axis(0), 0); } /// Test verifies that over sized windows yield nothing #[test] -fn axis_windows_oversized() { - let a = Array::from_iter(10..37).into_shape_with_order((3, 3, 3)).unwrap(); +fn axis_windows_oversized() +{ + let a = Array::from_iter(10..37) + .into_shape_with_order((3, 3, 3)) + .unwrap(); let mut iter = a.axis_windows(Axis(2), 4).into_iter(); assert_eq!(iter.next(), None); } /// Simple test for iterating 1d-arrays via `Axis Windows`. #[test] -fn test_axis_windows_1d() { +fn test_axis_windows_1d() +{ let a = Array::from_iter(10..20).into_shape_with_order(10).unwrap(); - itertools::assert_equal( - a.axis_windows(Axis(0), 5), - vec![ + itertools::assert_equal(a.axis_windows(Axis(0), 5), vec![ arr1(&[10, 11, 12, 13, 14]), arr1(&[11, 12, 13, 14, 15]), arr1(&[12, 13, 14, 15, 16]), arr1(&[13, 14, 15, 16, 17]), arr1(&[14, 15, 16, 17, 18]), arr1(&[15, 16, 17, 18, 19]), - ], - ); + ]); } /// Simple test for iterating 2d-arrays via `Axis Windows`. #[test] -fn test_axis_windows_2d() { - let a = Array::from_iter(10..30).into_shape_with_order((5, 4)).unwrap(); +fn test_axis_windows_2d() +{ + let a = Array::from_iter(10..30) + .into_shape_with_order((5, 4)) + .unwrap(); - itertools::assert_equal( - a.axis_windows(Axis(0), 2), - vec![ + itertools::assert_equal(a.axis_windows(Axis(0), 2), vec![ arr2(&[[10, 11, 12, 13], [14, 15, 16, 17]]), arr2(&[[14, 15, 16, 17], [18, 19, 20, 21]]), arr2(&[[18, 19, 20, 21], [22, 23, 24, 25]]), arr2(&[[22, 23, 24, 25], [26, 27, 28, 29]]), - ], - ); + ]); } /// Simple test for iterating 3d-arrays via `Axis Windows`. 
#[test] -fn test_axis_windows_3d() { - let a = Array::from_iter(0..27).into_shape_with_order((3, 3, 3)).unwrap(); +fn test_axis_windows_3d() +{ + let a = Array::from_iter(0..27) + .into_shape_with_order((3, 3, 3)) + .unwrap(); - itertools::assert_equal( - a.axis_windows(Axis(1), 2), - vec![ + itertools::assert_equal(a.axis_windows(Axis(1), 2), vec![ arr3(&[ [[0, 1, 2], [3, 4, 5]], [[9, 10, 11], [12, 13, 14]], @@ -262,71 +275,61 @@ fn test_axis_windows_3d() { [[12, 13, 14], [15, 16, 17]], [[21, 22, 23], [24, 25, 26]], ]), - ], - ); + ]); } - #[test] -fn test_window_neg_stride() { - let array = Array::from_iter(1..10).into_shape_with_order((3, 3)).unwrap(); +fn test_window_neg_stride() +{ + let array = Array::from_iter(1..10) + .into_shape_with_order((3, 3)) + .unwrap(); // window neg/pos stride combinations - + // Make a 2 x 2 array of the windows of the 3 x 3 array // and compute test answers from here let mut answer = Array::from_iter(array.windows((2, 2)).into_iter().map(|a| a.to_owned())) - .into_shape_with_order((2, 2)).unwrap(); + .into_shape_with_order((2, 2)) + .unwrap(); answer.invert_axis(Axis(1)); answer.map_inplace(|a| a.invert_axis(Axis(1))); - itertools::assert_equal( - array.slice(s![.., ..;-1]).windows((2, 2)), - answer.iter() - ); + itertools::assert_equal(array.slice(s![.., ..;-1]).windows((2, 2)), answer.iter()); answer.invert_axis(Axis(0)); answer.map_inplace(|a| a.invert_axis(Axis(0))); - itertools::assert_equal( - array.slice(s![..;-1, ..;-1]).windows((2, 2)), - answer.iter() - ); + itertools::assert_equal(array.slice(s![..;-1, ..;-1]).windows((2, 2)), answer.iter()); answer.invert_axis(Axis(1)); answer.map_inplace(|a| a.invert_axis(Axis(1))); - itertools::assert_equal( - array.slice(s![..;-1, ..]).windows((2, 2)), - answer.iter() - ); + itertools::assert_equal(array.slice(s![..;-1, ..]).windows((2, 2)), answer.iter()); } #[test] -fn test_windows_with_stride_on_inverted_axis() { - let mut array = Array::from_iter(1..17).into_shape_with_order((4, 4)).unwrap(); - +fn test_windows_with_stride_on_inverted_axis() +{ + let mut array = Array::from_iter(1..17) + .into_shape_with_order((4, 4)) + .unwrap(); + // inverting axis results in negative stride array.invert_axis(Axis(0)); - itertools::assert_equal( - array.windows_with_stride((2, 2), (2,2)), - vec![ + itertools::assert_equal(array.windows_with_stride((2, 2), (2, 2)), vec![ arr2(&[[13, 14], [9, 10]]), arr2(&[[15, 16], [11, 12]]), arr2(&[[5, 6], [1, 2]]), arr2(&[[7, 8], [3, 4]]), - ], - ); + ]); array.invert_axis(Axis(1)); - itertools::assert_equal( - array.windows_with_stride((2, 2), (2,2)), - vec![ + itertools::assert_equal(array.windows_with_stride((2, 2), (2, 2)), vec![ arr2(&[[16, 15], [12, 11]]), arr2(&[[14, 13], [10, 9]]), arr2(&[[8, 7], [4, 3]]), arr2(&[[6, 5], [2, 1]]), - ], - ); -} \ No newline at end of file + ]); +} diff --git a/tests/zst.rs b/tests/zst.rs index c3c779d2c..f5f2c8e32 100644 --- a/tests/zst.rs +++ b/tests/zst.rs @@ -2,7 +2,8 @@ use ndarray::arr2; use ndarray::ArcArray; #[test] -fn test_swap() { +fn test_swap() +{ let mut a = arr2(&[[(); 3]; 3]); let b = a.clone(); @@ -16,7 +17,8 @@ fn test_swap() { } #[test] -fn test() { +fn test() +{ let c = ArcArray::<(), _>::default((3, 4)); let mut d = c.clone(); for _ in d.iter_mut() {} diff --git a/xtest-blas/src/lib.rs b/xtest-blas/src/lib.rs index 857b91a3b..fc031eedb 100644 --- a/xtest-blas/src/lib.rs +++ b/xtest-blas/src/lib.rs @@ -1,6 +1,4 @@ - #[cfg(not(feature = "blas-src"))] compile_error!("Missing backend: could not compile. 
Help: For this testing crate, select one of the blas backend features, for example \ openblas-system"); - diff --git a/xtest-blas/tests/oper.rs b/xtest-blas/tests/oper.rs index 5f11893df..3ed81915e 100644 --- a/xtest-blas/tests/oper.rs +++ b/xtest-blas/tests/oper.rs @@ -17,7 +17,8 @@ use num_complex::Complex32; use num_complex::Complex64; #[test] -fn mat_vec_product_1d() { +fn mat_vec_product_1d() +{ let a = arr2(&[[1.], [2.]]); let b = arr1(&[1., 2.]); let ans = arr1(&[5.]); @@ -25,7 +26,8 @@ fn mat_vec_product_1d() { } #[test] -fn mat_vec_product_1d_broadcast() { +fn mat_vec_product_1d_broadcast() +{ let a = arr2(&[[1.], [2.], [3.]]); let b = arr1(&[1.]); let b = b.broadcast(3).unwrap(); @@ -34,7 +36,8 @@ fn mat_vec_product_1d_broadcast() { } #[test] -fn mat_vec_product_1d_inverted_axis() { +fn mat_vec_product_1d_inverted_axis() +{ let a = arr2(&[[1.], [2.], [3.]]); let mut b = arr1(&[1., 2., 3.]); b.invert_axis(Axis(0)); @@ -43,37 +46,43 @@ fn mat_vec_product_1d_inverted_axis() { assert_eq!(a.t().dot(&b), ans); } -fn range_mat(m: Ix, n: Ix) -> Array2 { +fn range_mat(m: Ix, n: Ix) -> Array2 +{ Array::linspace(0., (m * n) as f32 - 1., m * n) .into_shape_with_order((m, n)) .unwrap() } -fn range_mat64(m: Ix, n: Ix) -> Array2 { +fn range_mat64(m: Ix, n: Ix) -> Array2 +{ Array::linspace(0., (m * n) as f64 - 1., m * n) .into_shape_with_order((m, n)) .unwrap() } -fn range_mat_complex(m: Ix, n: Ix) -> Array2 { +fn range_mat_complex(m: Ix, n: Ix) -> Array2 +{ Array::linspace(0., (m * n) as f32 - 1., m * n) .into_shape_with_order((m, n)) .unwrap() .map(|&f| Complex32::new(f, 0.)) } -fn range_mat_complex64(m: Ix, n: Ix) -> Array2 { +fn range_mat_complex64(m: Ix, n: Ix) -> Array2 +{ Array::linspace(0., (m * n) as f64 - 1., m * n) .into_shape_with_order((m, n)) .unwrap() .map(|&f| Complex64::new(f, 0.)) } -fn range1_mat64(m: Ix) -> Array1 { +fn range1_mat64(m: Ix) -> Array1 +{ Array::linspace(0., m as f64 - 1., m) } -fn range_i32(m: Ix, n: Ix) -> Array2 { +fn range_i32(m: Ix, n: Ix) -> Array2 +{ Array::from_iter(0..(m * n) as i32) .into_shape_with_order((m, n)) .unwrap() @@ -98,9 +107,7 @@ where let mut j = 0; for rr in &mut res_elems { unsafe { - *rr = (0..k).fold(A::zero(), move |s, x| { - s + *lhs.uget((i, x)) * *rhs.uget((x, j)) - }); + *rr = (0..k).fold(A::zero(), move |s, x| s + *lhs.uget((i, x)) * *rhs.uget((x, j))); } j += 1; if j == n { @@ -119,9 +126,14 @@ where S2: Data, { let ((m, _), k) = (lhs.dim(), rhs.dim()); - reference_mat_mul(lhs, &rhs.as_standard_layout().into_shape_with_order((k, 1)).unwrap()) - .into_shape_with_order(m) - .unwrap() + reference_mat_mul( + lhs, + &rhs.as_standard_layout() + .into_shape_with_order((k, 1)) + .unwrap(), + ) + .into_shape_with_order(m) + .unwrap() } // simple, slow, correct (hopefully) mat mul @@ -132,15 +144,21 @@ where S2: Data, { let (m, (_, n)) = (lhs.dim(), rhs.dim()); - reference_mat_mul(&lhs.as_standard_layout().into_shape_with_order((1, m)).unwrap(), rhs) - .into_shape_with_order(n) - .unwrap() + reference_mat_mul( + &lhs.as_standard_layout() + .into_shape_with_order((1, m)) + .unwrap(), + rhs, + ) + .into_shape_with_order(n) + .unwrap() } // Check that matrix multiplication of contiguous matrices returns a // matrix with the same order #[test] -fn mat_mul_order() { +fn mat_mul_order() +{ let (m, n, k) = (50, 50, 50); let a = range_mat(m, n); let b = range_mat(n, k); @@ -159,7 +177,8 @@ fn mat_mul_order() { // Check that matrix multiplication // supports broadcast arrays. 
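The gemm tests that follow all check one contract against the slow reference multiplication defined above: general_mat_mul updates C in place as C = alpha * A * B + beta * C. A minimal sketch of that contract, assuming ndarray's `general_mat_mul` and `dot`, with illustrative 2x2 values not taken from the patch:

// Sketch of the gemm contract C = alpha * A * B + beta * C; values are illustrative.
use ndarray::linalg::general_mat_mul;
use ndarray::{arr2, Array2};

fn main()
{
    let a = arr2(&[[1., 2.], [3., 4.]]);
    let b = arr2(&[[5., 6.], [7., 8.]]);
    let mut c = Array2::<f64>::ones((2, 2));
    // Reference value computed the slow way, then compared against the in-place update.
    let expected = a.dot(&b) * 2. + &c * 3.;
    general_mat_mul(2., &a, &b, 3., &mut c);
    assert_eq!(c, expected);
}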
#[test] -fn mat_mul_broadcast() { +fn mat_mul_broadcast() +{ let (m, n, k) = (16, 16, 16); let a = range_mat(m, n); let x1 = 1.; @@ -178,7 +197,8 @@ fn mat_mul_broadcast() { // Check that matrix multiplication supports reversed axes #[test] -fn mat_mul_rev() { +fn mat_mul_rev() +{ let (m, n, k) = (16, 16, 16); let a = range_mat(m, n); let b = range_mat(n, k); @@ -194,7 +214,8 @@ fn mat_mul_rev() { // Check that matrix multiplication supports arrays with zero rows or columns #[test] -fn mat_mut_zero_len() { +fn mat_mut_zero_len() +{ defmac!(mat_mul_zero_len range_mat_fn => { for n in 0..4 { for m in 0..4 { @@ -215,7 +236,8 @@ fn mat_mut_zero_len() { } #[test] -fn gen_mat_mul() { +fn gen_mat_mul() +{ let alpha = -2.3; let beta = 3.14; let sizes = vec![ @@ -256,7 +278,8 @@ fn gen_mat_mul() { // Test y = A x where A is f-order #[test] -fn gemm_64_1_f() { +fn gemm_64_1_f() +{ let a = range_mat64(64, 64).reversed_axes(); let (m, n) = a.dim(); // m x n times n x 1 == m x 1 @@ -268,20 +291,15 @@ fn gemm_64_1_f() { } #[test] -fn gemm_c64_1_f() { +fn gemm_c64_1_f() +{ let a = range_mat_complex64(64, 64).reversed_axes(); let (m, n) = a.dim(); // m x n times n x 1 == m x 1 let x = range_mat_complex64(n, 1); let mut y = range_mat_complex64(m, 1); let answer = reference_mat_mul(&a, &x) + &y; - general_mat_mul( - Complex64::new(1.0, 0.), - &a, - &x, - Complex64::new(1.0, 0.), - &mut y, - ); + general_mat_mul(Complex64::new(1.0, 0.), &a, &x, Complex64::new(1.0, 0.), &mut y); assert_relative_eq!( y.mapv(|i| i.norm_sqr()), answer.mapv(|i| i.norm_sqr()), @@ -291,20 +309,15 @@ fn gemm_c64_1_f() { } #[test] -fn gemm_c32_1_f() { +fn gemm_c32_1_f() +{ let a = range_mat_complex(64, 64).reversed_axes(); let (m, n) = a.dim(); // m x n times n x 1 == m x 1 let x = range_mat_complex(n, 1); let mut y = range_mat_complex(m, 1); let answer = reference_mat_mul(&a, &x) + &y; - general_mat_mul( - Complex32::new(1.0, 0.), - &a, - &x, - Complex32::new(1.0, 0.), - &mut y, - ); + general_mat_mul(Complex32::new(1.0, 0.), &a, &x, Complex32::new(1.0, 0.), &mut y); assert_relative_eq!( y.mapv(|i| i.norm_sqr()), answer.mapv(|i| i.norm_sqr()), @@ -314,22 +327,17 @@ fn gemm_c32_1_f() { } #[test] -fn gemm_c64_actually_complex() { - let mut a = range_mat_complex64(4,4); +fn gemm_c64_actually_complex() +{ + let mut a = range_mat_complex64(4, 4); a = a.map(|&i| if i.re > 8. { i.conj() } else { i }); - let mut b = range_mat_complex64(4,6); - b = b.map(|&i| if i.re > 4. { i.conj() } else {i}); - let mut y = range_mat_complex64(4,6); + let mut b = range_mat_complex64(4, 6); + b = b.map(|&i| if i.re > 4. 
{ i.conj() } else { i }); + let mut y = range_mat_complex64(4, 6); let alpha = Complex64::new(0., 1.0); let beta = Complex64::new(1.0, 1.0); let answer = alpha * reference_mat_mul(&a, &b) + beta * &y; - general_mat_mul( - alpha.clone(), - &a, - &b, - beta.clone(), - &mut y, - ); + general_mat_mul(alpha.clone(), &a, &b, beta.clone(), &mut y); assert_relative_eq!( y.mapv(|i| i.norm_sqr()), answer.mapv(|i| i.norm_sqr()), @@ -339,7 +347,8 @@ fn gemm_c64_actually_complex() { } #[test] -fn gen_mat_vec_mul() { +fn gen_mat_vec_mul() +{ let alpha = -2.3; let beta = 3.14; let sizes = vec![ @@ -385,7 +394,8 @@ fn gen_mat_vec_mul() { } #[test] -fn vec_mat_mul() { +fn vec_mat_mul() +{ let sizes = vec![ (4, 4), (8, 8), diff --git a/xtest-numeric/src/lib.rs b/xtest-numeric/src/lib.rs index ba1c3b0d9..79ffc274e 100644 --- a/xtest-numeric/src/lib.rs +++ b/xtest-numeric/src/lib.rs @@ -1,3 +1,2 @@ #[cfg(feature = "test_blas")] extern crate blas_src; - diff --git a/xtest-numeric/tests/accuracy.rs b/xtest-numeric/tests/accuracy.rs index 679267096..e98fb3c4d 100644 --- a/xtest-numeric/tests/accuracy.rs +++ b/xtest-numeric/tests/accuracy.rs @@ -1,32 +1,29 @@ extern crate approx; -extern crate rand_distr; extern crate ndarray; extern crate ndarray_rand; extern crate rand; +extern crate rand_distr; extern crate numeric_tests; use std::fmt; use ndarray_rand::RandomExt; -use rand::{Rng, SeedableRng}; use rand::rngs::SmallRng; +use rand::{Rng, SeedableRng}; -use ndarray::prelude::*; -use ndarray::{ - Data, - LinalgScalar, -}; use ndarray::linalg::general_mat_mul; +use ndarray::prelude::*; +use ndarray::{Data, LinalgScalar}; -use rand_distr::{Normal, StandardNormal, Distribution}; -use num_traits::{Float, AsPrimitive}; use num_complex::Complex; +use num_traits::{AsPrimitive, Float}; +use rand_distr::{Distribution, Normal, StandardNormal}; use approx::{assert_abs_diff_eq, assert_relative_eq}; fn kahan_sum(iter: impl Iterator) -> A - where A: LinalgScalar +where A: LinalgScalar { let mut sum = A::zero(); let mut compensation = A::zero(); @@ -42,11 +39,11 @@ fn kahan_sum(iter: impl Iterator) -> A } // simple, slow, correct (hopefully) mat mul -fn reference_mat_mul(lhs: &ArrayBase, rhs: &ArrayBase) - -> Array - where A: LinalgScalar, - S: Data, - S2: Data, +fn reference_mat_mul(lhs: &ArrayBase, rhs: &ArrayBase) -> Array +where + A: LinalgScalar, + S: Data, + S2: Data, { let ((m, k), (_, n)) = (lhs.dim(), rhs.dim()); let mut res_elems = Array::zeros(m * n); @@ -69,23 +66,26 @@ fn reference_mat_mul(lhs: &ArrayBase, rhs: &ArrayBase } fn gen(d: D, rng: &mut SmallRng) -> Array - where D: Dimension, - A: Float, - StandardNormal: Distribution, +where + D: Dimension, + A: Float, + StandardNormal: Distribution, { Array::random_using(d, Normal::new(A::zero(), A::one()).unwrap(), rng) } fn gen_complex(d: D, rng: &mut SmallRng) -> Array, D> - where D: Dimension, - A: Float, - StandardNormal: Distribution, +where + D: Dimension, + A: Float, + StandardNormal: Distribution, { gen(d.clone(), rng).mapv(Complex::from) + gen(d, rng).mapv(|x| Complex::new(A::zero(), x)) } #[test] -fn accurate_eye_f32() { +fn accurate_eye_f32() +{ let rng = &mut SmallRng::from_entropy(); for i in 0..20 { let eye = Array::eye(i); @@ -112,7 +112,8 @@ fn accurate_eye_f32() { } #[test] -fn accurate_eye_f64() { +fn accurate_eye_f64() +{ let rng = &mut SmallRng::from_entropy(); let abs_tol = 1e-15; for i in 0..20 { @@ -140,32 +141,36 @@ fn accurate_eye_f64() { } #[test] -fn accurate_mul_f32_dot() { +fn accurate_mul_f32_dot() +{ accurate_mul_float_general::(1e-5, 
false); } #[test] -fn accurate_mul_f32_general() { +fn accurate_mul_f32_general() +{ accurate_mul_float_general::(1e-5, true); } #[test] -fn accurate_mul_f64_dot() { +fn accurate_mul_f64_dot() +{ accurate_mul_float_general::(1e-14, false); } #[test] -fn accurate_mul_f64_general() { +fn accurate_mul_f64_general() +{ accurate_mul_float_general::(1e-14, true); } /// Generate random sized matrices using the given generator function. /// Compute gemm using either .dot() (if use_general is false) otherwise general_mat_mul. /// Return tuple of actual result matrix and reference matrix, which should be equal. -fn random_matrix_mul(rng: &mut SmallRng, use_stride: bool, use_general: bool, - generator: fn(Ix2, &mut SmallRng) -> Array2) - -> (Array2, Array2) - where A: LinalgScalar, +fn random_matrix_mul( + rng: &mut SmallRng, use_stride: bool, use_general: bool, generator: fn(Ix2, &mut SmallRng) -> Array2, +) -> (Array2, Array2) +where A: LinalgScalar { let m = rng.gen_range(15..512); let k = rng.gen_range(15..512); @@ -180,13 +185,9 @@ fn random_matrix_mul(rng: &mut SmallRng, use_stride: bool, use_general: bool, let b = b.t(); let (a, b, mut c) = if use_stride { - (a.slice(s![..;2, ..;2]), - b.slice(s![..;2, ..;2]), - c.map(|c_| c_.slice_move(s![..;2, ..;2]))) + (a.slice(s![..;2, ..;2]), b.slice(s![..;2, ..;2]), c.map(|c_| c_.slice_move(s![..;2, ..;2]))) } else { - (a.view(), - b, - c) + (a.view(), b, c) }; println!("Testing size {} by {} by {}", a.shape()[0], a.shape()[1], b.shape()[1]); @@ -202,9 +203,10 @@ fn random_matrix_mul(rng: &mut SmallRng, use_stride: bool, use_general: bool, } fn accurate_mul_float_general(limit: f64, use_general: bool) - where A: Float + Copy + 'static + AsPrimitive, - StandardNormal: Distribution, - A: fmt::Debug, +where + A: Float + Copy + 'static + AsPrimitive, + StandardNormal: Distribution, + A: fmt::Debug, { // pick a few random sizes let mut rng = SmallRng::from_entropy(); @@ -221,19 +223,22 @@ fn accurate_mul_float_general(limit: f64, use_general: bool) } #[test] -fn accurate_mul_complex32() { +fn accurate_mul_complex32() +{ accurate_mul_complex_general::(1e-5); } #[test] -fn accurate_mul_complex64() { +fn accurate_mul_complex64() +{ accurate_mul_complex_general::(1e-14); } fn accurate_mul_complex_general(limit: f64) - where A: Float + Copy + 'static + AsPrimitive, - StandardNormal: Distribution, - A: fmt::Debug, +where + A: Float + Copy + 'static + AsPrimitive, + StandardNormal: Distribution, + A: fmt::Debug, { // pick a few random sizes let mut rng = SmallRng::from_entropy(); @@ -251,7 +256,8 @@ fn accurate_mul_complex_general(limit: f64) } #[test] -fn accurate_mul_with_column_f64() { +fn accurate_mul_with_column_f64() +{ // pick a few random sizes let rng = &mut SmallRng::from_entropy(); for i in 0..10 { @@ -264,8 +270,8 @@ fn accurate_mul_with_column_f64() { // pick dense square or broadcasted to square matrix match i { - 0 ..= 3 => b_sq = b_owner.view(), - 4 ..= 7 => { + 0..=3 => b_sq = b_owner.view(), + 4..=7 => { b_row_col = b_owner.column(0); b_sq = b_row_col.broadcast((k, k)).unwrap(); } diff --git a/xtest-serialization/tests/serialize.rs b/xtest-serialization/tests/serialize.rs index cea84dd7f..95e93e4fb 100644 --- a/xtest-serialization/tests/serialize.rs +++ b/xtest-serialization/tests/serialize.rs @@ -12,7 +12,8 @@ extern crate ron; use ndarray::{arr0, arr1, arr2, s, ArcArray, ArcArray2, ArrayD, IxDyn}; #[test] -fn serial_many_dim_serde() { +fn serial_many_dim_serde() +{ { let a = arr0::(2.72); let serial = serde_json::to_string(&a).unwrap(); @@ -45,7 
+46,9 @@ fn serial_many_dim_serde() { { // Test a sliced array. - let mut a = ArcArray::linspace(0., 31., 32).into_shape_with_order((2, 2, 2, 4)).unwrap(); + let mut a = ArcArray::linspace(0., 31., 32) + .into_shape_with_order((2, 2, 2, 4)) + .unwrap(); a.slice_collapse(s![..;-1, .., .., ..2]); let serial = serde_json::to_string(&a).unwrap(); println!("Encode {:?} => {:?}", a, serial); @@ -56,7 +59,8 @@ fn serial_many_dim_serde() { } #[test] -fn serial_ixdyn_serde() { +fn serial_ixdyn_serde() +{ { let a = arr0::(2.72).into_dyn(); let serial = serde_json::to_string(&a).unwrap(); @@ -95,7 +99,8 @@ fn serial_ixdyn_serde() { } #[test] -fn serial_wrong_count_serde() { +fn serial_wrong_count_serde() +{ // one element too few let text = r##"{"v":1,"dim":[2,3],"data":[3,1,2.2,3.1,4]}"##; let arr = serde_json::from_str::>(text); @@ -110,7 +115,8 @@ fn serial_wrong_count_serde() { } #[test] -fn serial_many_dim_serde_msgpack() { +fn serial_many_dim_serde_msgpack() +{ { let a = arr0::(2.72); @@ -155,7 +161,9 @@ fn serial_many_dim_serde_msgpack() { { // Test a sliced array. - let mut a = ArcArray::linspace(0., 31., 32).into_shape_with_order((2, 2, 2, 4)).unwrap(); + let mut a = ArcArray::linspace(0., 31., 32) + .into_shape_with_order((2, 2, 2, 4)) + .unwrap(); a.slice_collapse(s![..;-1, .., .., ..2]); let mut buf = Vec::new(); @@ -172,7 +180,8 @@ fn serial_many_dim_serde_msgpack() { #[test] #[cfg(feature = "ron")] -fn serial_many_dim_ron() { +fn serial_many_dim_ron() +{ use ron::de::from_str as ron_deserialize; use ron::ser::to_string as ron_serialize; @@ -208,7 +217,9 @@ fn serial_many_dim_ron() { { // Test a sliced array. - let mut a = ArcArray::linspace(0., 31., 32).into_shape_with_order((2, 2, 2, 4)).unwrap(); + let mut a = ArcArray::linspace(0., 31., 32) + .into_shape_with_order((2, 2, 2, 4)) + .unwrap(); a.slice_collapse(s![..;-1, .., .., ..2]); let a_s = ron_serialize(&a).unwrap();
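Every case in these serialization tests follows the same round trip: encode the array, decode it again, and compare with the original. A minimal standalone sketch of that pattern for the JSON case, assuming ndarray is built with its `serde` feature and a serde_json dependency (the 2x2 array is illustrative, not taken from the patch):

// Sketch of the serialize/deserialize round trip used throughout these tests.
use ndarray::{arr2, Array2};

fn main()
{
    let a = arr2(&[[1.0_f64, 2.0], [3.0, 4.0]]);
    let serial = serde_json::to_string(&a).unwrap();
    println!("Encode {:?} => {:?}", a, serial);
    let restored: Array2<f64> = serde_json::from_str(&serial).unwrap();
    assert_eq!(a, restored);
}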