diff --git a/Cargo.toml b/Cargo.toml
index e5acbd20224a..623726504c60 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -192,6 +192,7 @@ or_fun_call = "warn"
 unnecessary_lazy_evaluations = "warn"
 uninlined_format_args = "warn"
 inefficient_to_string = "warn"
+needless_pass_by_value = "warn"
 
 [workspace.lints.rust]
 unexpected_cfgs = { level = "warn", check-cfg = [
diff --git a/datafusion-examples/examples/custom_data_source/custom_datasource.rs b/datafusion-examples/examples/custom_data_source/custom_datasource.rs
index 72c05c1a231e..ebb6c7a822e8 100644
--- a/datafusion-examples/examples/custom_data_source/custom_datasource.rs
+++ b/datafusion-examples/examples/custom_data_source/custom_datasource.rs
@@ -196,6 +196,7 @@ struct CustomExec {
 }
 
 impl CustomExec {
+    #[expect(clippy::needless_pass_by_value)]
     fn new(
         projections: Option<&Vec<usize>>,
         schema: SchemaRef,
diff --git a/datafusion-examples/examples/execution_monitoring/memory_pool_execution_plan.rs b/datafusion-examples/examples/execution_monitoring/memory_pool_execution_plan.rs
index 896d244fbd75..d17927124666 100644
--- a/datafusion-examples/examples/execution_monitoring/memory_pool_execution_plan.rs
+++ b/datafusion-examples/examples/execution_monitoring/memory_pool_execution_plan.rs
@@ -142,6 +142,7 @@ impl ExternalBatchBufferer {
         }
     }
 
+    #[expect(clippy::needless_pass_by_value)]
     fn add_batch(&mut self, batch_data: Vec) -> Result<()> {
         let additional_memory = batch_data.len();
diff --git a/datafusion-examples/examples/flight/server.rs b/datafusion-examples/examples/flight/server.rs
index e4c05a13e8eb..7f348e7d5941 100644
--- a/datafusion-examples/examples/flight/server.rs
+++ b/datafusion-examples/examples/flight/server.rs
@@ -189,6 +189,7 @@ impl FlightService for FlightServiceImpl {
     }
 }
 
+#[expect(clippy::needless_pass_by_value)]
 fn to_tonic_err(e: datafusion::error::DataFusionError) -> Status {
     Status::internal(format!("{e:?}"))
 }
diff --git a/datafusion-examples/examples/query_planning/pruning.rs b/datafusion-examples/examples/query_planning/pruning.rs
index d7562139c2b6..33f3f8428a77 100644
--- a/datafusion-examples/examples/query_planning/pruning.rs
+++ b/datafusion-examples/examples/query_planning/pruning.rs
@@ -190,6 +190,7 @@ impl PruningStatistics for MyCatalog {
     }
 }
 
+#[expect(clippy::needless_pass_by_value)]
 fn create_pruning_predicate(expr: Expr, schema: &SchemaRef) -> PruningPredicate {
     let df_schema = DFSchema::try_from(Arc::clone(schema)).unwrap();
     let props = ExecutionProps::new();
diff --git a/datafusion/core/benches/aggregate_query_sql.rs b/datafusion/core/benches/aggregate_query_sql.rs
index 87aeed49337e..4aa667504e45 100644
--- a/datafusion/core/benches/aggregate_query_sql.rs
+++ b/datafusion/core/benches/aggregate_query_sql.rs
@@ -31,6 +31,7 @@ use std::hint::black_box;
 use std::sync::Arc;
 use tokio::runtime::Runtime;
 
+#[expect(clippy::needless_pass_by_value)]
 fn query(ctx: Arc<Mutex<SessionContext>>, rt: &Runtime, sql: &str) {
     let df = rt.block_on(ctx.lock().sql(sql)).unwrap();
     black_box(rt.block_on(df.collect()).unwrap());
diff --git a/datafusion/core/benches/csv_load.rs b/datafusion/core/benches/csv_load.rs
index de0f0d825057..228457947fd5 100644
--- a/datafusion/core/benches/csv_load.rs
+++ b/datafusion/core/benches/csv_load.rs
@@ -34,6 +34,7 @@ use std::time::Duration;
 use test_utils::AccessLogGenerator;
 use tokio::runtime::Runtime;
 
+#[expect(clippy::needless_pass_by_value)]
 fn load_csv(
     ctx: Arc<Mutex<SessionContext>>,
     rt: &Runtime,
diff --git a/datafusion/core/benches/data_utils/mod.rs b/datafusion/core/benches/data_utils/mod.rs
index fffe2e2d1752..ff2a4e247494 100644
--- a/datafusion/core/benches/data_utils/mod.rs
+++ b/datafusion/core/benches/data_utils/mod.rs
@@ -139,6 +139,7 @@ fn create_record_batch(
 
 /// Create record batches of `partitions_len` partitions and `batch_size` for each batch,
 /// with a total number of `array_len` records
+#[expect(clippy::needless_pass_by_value)]
 pub fn create_record_batches(
     schema: SchemaRef,
     array_len: usize,
diff --git a/datafusion/core/benches/dataframe.rs b/datafusion/core/benches/dataframe.rs
index 00fa85918347..726187ab5e92 100644
--- a/datafusion/core/benches/dataframe.rs
+++ b/datafusion/core/benches/dataframe.rs
@@ -45,6 +45,7 @@ fn create_context(field_count: u32) -> datafusion_common::Result<...>
+#[expect(clippy::needless_pass_by_value)]
 fn ...(ctx: ..., rt: &Runtime) {
     black_box(rt.block_on(async {
         let mut data_frame = ctx.table("t").await.unwrap();
diff --git a/datafusion/core/benches/distinct_query_sql.rs b/datafusion/core/benches/distinct_query_sql.rs
index d05e8b13b2af..1eb1524df8b6 100644
--- a/datafusion/core/benches/distinct_query_sql.rs
+++ b/datafusion/core/benches/distinct_query_sql.rs
@@ -34,6 +34,7 @@ use std::hint::black_box;
 use std::{sync::Arc, time::Duration};
 use tokio::runtime::Runtime;
 
+#[expect(clippy::needless_pass_by_value)]
 fn query(ctx: Arc<Mutex<SessionContext>>, rt: &Runtime, sql: &str) {
     let df = rt.block_on(ctx.lock().sql(sql)).unwrap();
     black_box(rt.block_on(df.collect()).unwrap());
@@ -124,6 +125,7 @@ async fn distinct_with_limit(
     Ok(())
 }
 
+#[expect(clippy::needless_pass_by_value)]
 fn run(rt: &Runtime, plan: Arc<dyn ExecutionPlan>, ctx: Arc<TaskContext>) {
     black_box(rt.block_on(distinct_with_limit(plan.clone(), ctx.clone()))).unwrap();
 }
diff --git a/datafusion/core/benches/math_query_sql.rs b/datafusion/core/benches/math_query_sql.rs
index 76824850c114..4d1d4abb6783 100644
--- a/datafusion/core/benches/math_query_sql.rs
+++ b/datafusion/core/benches/math_query_sql.rs
@@ -36,6 +36,7 @@ use datafusion::datasource::MemTable;
 use datafusion::error::Result;
 use datafusion::execution::context::SessionContext;
 
+#[expect(clippy::needless_pass_by_value)]
 fn query(ctx: Arc<Mutex<SessionContext>>, rt: &Runtime, sql: &str) {
     // execute the query
     let df = rt.block_on(ctx.lock().sql(sql)).unwrap();
diff --git a/datafusion/core/benches/physical_plan.rs b/datafusion/core/benches/physical_plan.rs
index e4838572f60f..782c29a8096f 100644
--- a/datafusion/core/benches/physical_plan.rs
+++ b/datafusion/core/benches/physical_plan.rs
@@ -40,6 +40,7 @@ use datafusion_physical_expr_common::sort_expr::LexOrdering;
 
 // Initialize the operator using the provided record batches and the sort key
 // as inputs. All record batches must have the same schema.
+#[expect(clippy::needless_pass_by_value)]
 fn sort_preserving_merge_operator(
     session_ctx: Arc<SessionContext>,
     rt: &Runtime,
diff --git a/datafusion/core/benches/sort_limit_query_sql.rs b/datafusion/core/benches/sort_limit_query_sql.rs
index e535a018161f..7c8e5d730d99 100644
--- a/datafusion/core/benches/sort_limit_query_sql.rs
+++ b/datafusion/core/benches/sort_limit_query_sql.rs
@@ -37,6 +37,7 @@ use datafusion::execution::context::SessionContext;
 
 use tokio::runtime::Runtime;
 
+#[expect(clippy::needless_pass_by_value)]
 fn query(ctx: Arc<Mutex<SessionContext>>, rt: &Runtime, sql: &str) {
     // execute the query
     let df = rt.block_on(ctx.lock().sql(sql)).unwrap();
diff --git a/datafusion/core/benches/sql_planner.rs b/datafusion/core/benches/sql_planner.rs
index 25129a354ee8..027e925d4f4a 100644
--- a/datafusion/core/benches/sql_planner.rs
+++ b/datafusion/core/benches/sql_planner.rs
@@ -93,6 +93,7 @@ fn create_context() -> SessionContext {
 
 /// Register the table definitions as a MemTable with the context and return the
 /// context
+#[expect(clippy::needless_pass_by_value)]
 fn register_defs(ctx: SessionContext, defs: Vec<TableDef>) -> SessionContext {
     defs.iter().for_each(|TableDef { name, schema }| {
         ctx.register_table(
diff --git a/datafusion/core/benches/topk_aggregate.rs b/datafusion/core/benches/topk_aggregate.rs
index 7971293c9ce2..16e044416761 100644
--- a/datafusion/core/benches/topk_aggregate.rs
+++ b/datafusion/core/benches/topk_aggregate.rs
@@ -58,6 +58,7 @@ async fn create_context(
     Ok((physical_plan, ctx.task_ctx()))
 }
 
+#[expect(clippy::needless_pass_by_value)]
 fn run(rt: &Runtime, plan: Arc<dyn ExecutionPlan>, ctx: Arc<TaskContext>, asc: bool) {
     black_box(rt.block_on(async { aggregate(plan.clone(), ctx.clone(), asc).await }))
         .unwrap();
diff --git a/datafusion/core/benches/window_query_sql.rs b/datafusion/core/benches/window_query_sql.rs
index 6d83959f7eb3..e4643567a0f0 100644
--- a/datafusion/core/benches/window_query_sql.rs
+++ b/datafusion/core/benches/window_query_sql.rs
@@ -31,6 +31,7 @@ use std::hint::black_box;
 use std::sync::Arc;
 use tokio::runtime::Runtime;
 
+#[expect(clippy::needless_pass_by_value)]
 fn query(ctx: Arc<Mutex<SessionContext>>, rt: &Runtime, sql: &str) {
     let df = rt.block_on(ctx.lock().sql(sql)).unwrap();
     black_box(rt.block_on(df.collect()).unwrap());
diff --git a/datafusion/core/src/bin/print_functions_docs.rs b/datafusion/core/src/bin/print_functions_docs.rs
index 63387c023b11..a9e6d4a30e0e 100644
--- a/datafusion/core/src/bin/print_functions_docs.rs
+++ b/datafusion/core/src/bin/print_functions_docs.rs
@@ -108,6 +108,7 @@ fn save_doc_code_text(documentation: &Documentation, name: &str) {
     file.write_all(attr_text.as_bytes()).unwrap();
 }
 
+#[expect(clippy::needless_pass_by_value)]
 fn print_docs(
     providers: Vec>,
     doc_sections: Vec,
diff --git a/datafusion/core/tests/custom_sources_cases/mod.rs b/datafusion/core/tests/custom_sources_cases/mod.rs
index cbdc4a448ea4..44da7d4e62e5 100644
--- a/datafusion/core/tests/custom_sources_cases/mod.rs
+++ b/datafusion/core/tests/custom_sources_cases/mod.rs
@@ -316,6 +316,7 @@ async fn optimizers_catch_all_statistics() {
     assert_eq!(format!("{:?}", actual[0]), format!("{expected:?}"));
 }
 
+#[expect(clippy::needless_pass_by_value)]
 fn contains_place_holder_exec(plan: Arc<dyn ExecutionPlan>) -> bool {
     if plan.as_any().is::<PlaceholderRowExec>() {
         true
diff --git a/datafusion/core/tests/expr_api/mod.rs b/datafusion/core/tests/expr_api/mod.rs
index 84e644480a4f..ad2b86684459 100644
--- a/datafusion/core/tests/expr_api/mod.rs
+++ b/datafusion/core/tests/expr_api/mod.rs
@@ -36,6 +36,7 @@ use datafusion_optimizer::simplify_expressions::ExprSimplifier;
 use std::sync::{Arc, LazyLock};
 
 mod parse_sql_expr;
+#[expect(clippy::needless_pass_by_value)]
 mod simplification;
 
 #[test]
@@ -384,6 +385,7 @@ async fn evaluate_agg_test(expr: Expr, expected_lines: Vec<&str>) {
 
 /// Converts the `Expr` to a `PhysicalExpr`, evaluates it against the provided
 /// `RecordBatch` and compares the result to the expected result.
+#[expect(clippy::needless_pass_by_value)]
 fn evaluate_expr_test(expr: Expr, expected_lines: Vec<&str>) {
     let batch = &TEST_BATCH;
     let df_schema = DFSchema::try_from(batch.schema()).unwrap();
diff --git a/datafusion/core/tests/fuzz_cases/mod.rs b/datafusion/core/tests/fuzz_cases/mod.rs
index 9e2fd170f7f0..edb53df382c6 100644
--- a/datafusion/core/tests/fuzz_cases/mod.rs
+++ b/datafusion/core/tests/fuzz_cases/mod.rs
@@ -15,20 +15,26 @@
 // specific language governing permissions and limitations
 // under the License.
 
+#[expect(clippy::needless_pass_by_value)]
 mod aggregate_fuzz;
 mod distinct_count_string_fuzz;
+#[expect(clippy::needless_pass_by_value)]
 mod join_fuzz;
 mod merge_fuzz;
+#[expect(clippy::needless_pass_by_value)]
 mod sort_fuzz;
+#[expect(clippy::needless_pass_by_value)]
 mod sort_query_fuzz;
 mod topk_filter_pushdown;
 
 mod aggregation_fuzzer;
+#[expect(clippy::needless_pass_by_value)]
 mod equivalence;
 mod pruning;
 
 mod limit_fuzz;
+#[expect(clippy::needless_pass_by_value)]
 mod sort_preserving_repartition_fuzz;
 mod window_fuzz;
diff --git a/datafusion/core/tests/parquet/encryption.rs b/datafusion/core/tests/parquet/encryption.rs
index 09b93f06ce85..82d8e61d9a2e 100644
--- a/datafusion/core/tests/parquet/encryption.rs
+++ b/datafusion/core/tests/parquet/encryption.rs
@@ -54,6 +54,7 @@ async fn read_parquet_test_data<'a, T: Into>(
         .unwrap()
 }
 
+#[expect(clippy::needless_pass_by_value)]
 pub fn write_batches(
     path: PathBuf,
     props: WriterProperties,
diff --git a/datafusion/core/tests/parquet/mod.rs b/datafusion/core/tests/parquet/mod.rs
index 097600e45ead..aa2a687d2980 100644
--- a/datafusion/core/tests/parquet/mod.rs
+++ b/datafusion/core/tests/parquet/mod.rs
@@ -652,6 +652,7 @@ fn make_date_batch(offset: Duration) -> RecordBatch {
 /// of the column. It is *not* a table named service.name
 ///
 /// name | service.name
+#[expect(clippy::needless_pass_by_value)]
 fn make_bytearray_batch(
     name: &str,
     string_values: Vec<&str>,
@@ -707,6 +708,7 @@ fn make_bytearray_batch(
 /// of the column. It is *not* a table named service.name
 ///
 /// name | service.name
+#[expect(clippy::needless_pass_by_value)]
 fn make_names_batch(name: &str, service_name_values: Vec<&str>) -> RecordBatch {
     let num_rows = service_name_values.len();
     let name: StringArray = std::iter::repeat_n(Some(name), num_rows).collect();
@@ -791,6 +793,7 @@ fn make_utf8_batch(value: Vec>) -> RecordBatch {
         .unwrap()
 }
 
+#[expect(clippy::needless_pass_by_value)]
 fn make_dictionary_batch(strings: Vec<&str>, integers: Vec) -> RecordBatch {
     let keys = Int32Array::from_iter(0..strings.len() as i32);
     let small_keys = Int16Array::from_iter(0..strings.len() as i16);
@@ -839,6 +842,7 @@ fn make_dictionary_batch(strings: Vec<&str>, integers: Vec) -> RecordBatch
         .unwrap()
 }
 
+#[expect(clippy::needless_pass_by_value)]
 fn create_data_batch(scenario: Scenario) -> Vec {
     match scenario {
         Scenario::Timestamps => {
diff --git a/datafusion/core/tests/physical_optimizer/mod.rs b/datafusion/core/tests/physical_optimizer/mod.rs
index 936c02eb2a02..fe9db1975d27 100644
--- a/datafusion/core/tests/physical_optimizer/mod.rs
+++ b/datafusion/core/tests/physical_optimizer/mod.rs
@@ -17,18 +17,23 @@
 //! Physical Optimizer integration tests
 
+#[expect(clippy::needless_pass_by_value)]
 mod aggregate_statistics;
 mod combine_partial_final_agg;
+#[expect(clippy::needless_pass_by_value)]
 mod enforce_distribution;
 mod enforce_sorting;
 mod enforce_sorting_monotonicity;
+#[expect(clippy::needless_pass_by_value)]
 mod filter_pushdown;
 mod join_selection;
+#[expect(clippy::needless_pass_by_value)]
 mod limit_pushdown;
 mod limited_distinct_aggregation;
 mod partition_statistics;
 mod projection_pushdown;
 mod replace_with_order_preserving_variants;
 mod sanity_checker;
+#[expect(clippy::needless_pass_by_value)]
 mod test_utils;
 mod window_optimize;
diff --git a/datafusion/core/tests/user_defined/user_defined_aggregates.rs b/datafusion/core/tests/user_defined/user_defined_aggregates.rs
index 62e8ab18b9be..28a78feed3af 100644
--- a/datafusion/core/tests/user_defined/user_defined_aggregates.rs
+++ b/datafusion/core/tests/user_defined/user_defined_aggregates.rs
@@ -569,6 +569,7 @@ impl TimeSum {
         Self { sum: 0, test_state }
     }
 
+    #[expect(clippy::needless_pass_by_value)]
     fn register(ctx: &mut SessionContext, test_state: Arc, name: &str) {
         let timestamp_type = DataType::Timestamp(TimeUnit::Nanosecond, None);
         let input_type = vec![timestamp_type.clone()];
diff --git a/datafusion/functions-aggregate/benches/array_agg.rs b/datafusion/functions-aggregate/benches/array_agg.rs
index 83b0c4a4c659..ac1b44c4e84d 100644
--- a/datafusion/functions-aggregate/benches/array_agg.rs
+++ b/datafusion/functions-aggregate/benches/array_agg.rs
@@ -38,6 +38,7 @@ pub fn seedable_rng() -> StdRng {
     StdRng::seed_from_u64(42)
 }
 
+#[expect(clippy::needless_pass_by_value)]
 fn merge_batch_bench(c: &mut Criterion, name: &str, values: ArrayRef) {
     let list_item_data_type = values.as_list::().values().data_type().clone();
     c.bench_function(name, |b| {
diff --git a/datafusion/functions-aggregate/benches/count.rs b/datafusion/functions-aggregate/benches/count.rs
index 53484652fd25..5299f6c70baf 100644
--- a/datafusion/functions-aggregate/benches/count.rs
+++ b/datafusion/functions-aggregate/benches/count.rs
@@ -76,6 +76,7 @@ fn prepare_accumulator() -> Box {
     count_fn.accumulator(accumulator_args).unwrap()
 }
 
+#[expect(clippy::needless_pass_by_value)]
 fn convert_to_state_bench(
     c: &mut Criterion,
     name: &str,
diff --git a/datafusion/functions-aggregate/benches/sum.rs b/datafusion/functions-aggregate/benches/sum.rs
index d85f0686224b..48f2f4d835f5 100644
--- a/datafusion/functions-aggregate/benches/sum.rs
+++ b/datafusion/functions-aggregate/benches/sum.rs
@@ -47,6 +47,7 @@ fn prepare_accumulator(data_type: &DataType) -> Box {
     sum_fn.create_groups_accumulator(accumulator_args).unwrap()
 }
 
+#[expect(clippy::needless_pass_by_value)]
 fn convert_to_state_bench(
     c: &mut Criterion,
     name: &str,
diff --git a/datafusion/functions/benches/pad.rs b/datafusion/functions/benches/pad.rs
index f92a69bbf4f9..bdd1d3c3679f 100644
--- a/datafusion/functions/benches/pad.rs
+++ b/datafusion/functions/benches/pad.rs
@@ -98,6 +98,7 @@ fn create_args(
     }
 }
 
+#[expect(clippy::needless_pass_by_value)]
 fn invoke_pad_with_args(
     args: Vec,
     number_rows: usize,
diff --git a/datafusion/functions/benches/substr.rs b/datafusion/functions/benches/substr.rs
index 771413458c1f..542c01346535 100644
--- a/datafusion/functions/benches/substr.rs
+++ b/datafusion/functions/benches/substr.rs
@@ -99,6 +99,7 @@ fn create_args_with_count(
     }
 }
 
+#[expect(clippy::needless_pass_by_value)]
 fn invoke_substr_with_args(
     args: Vec,
     number_rows: usize,
diff --git a/datafusion/physical-expr/benches/binary_op.rs b/datafusion/physical-expr/benches/binary_op.rs
index 9bffd79dc00f..06a14e2fbc59 100644
--- a/datafusion/physical-expr/benches/binary_op.rs
+++ b/datafusion/physical-expr/benches/binary_op.rs
@@ -286,6 +286,7 @@ fn generate_test_strings(num_rows: usize) -> (Vec, Vec) {
 /// Creates record batches with boolean arrays that test different short-circuit scenarios.
 /// When TEST_ALL_FALSE = true: creates data for AND operator benchmarks (needs early false exit)
 /// When TEST_ALL_FALSE = false: creates data for OR operator benchmarks (needs early true exit)
+#[expect(clippy::needless_pass_by_value)]
 fn create_record_batch(
     schema: Arc,
     b_values: &[String],
diff --git a/datafusion/physical-expr/benches/case_when.rs b/datafusion/physical-expr/benches/case_when.rs
index 9ed6b58da7f7..a05b4a968660 100644
--- a/datafusion/physical-expr/benches/case_when.rs
+++ b/datafusion/physical-expr/benches/case_when.rs
@@ -293,7 +293,7 @@ fn create_random_string_generator(
 /// `null_percentage` is the percentage of null values
 /// The rest of the values will be outside the specified range
 fn generate_values_for_lookup(
-    options: Options,
+    options: &Options,
     generate_other_value: impl Fn(&mut StdRng, &[T]) -> T,
 ) -> A
 where
@@ -416,7 +416,7 @@ fn benchmark_lookup_table_case_when(c: &mut Criterion, batch_size: usize) {
             &input,
             |b, input| {
                 let array: Int32Array = generate_values_for_lookup(
-                    Options:: {
+                    &Options:: {
                         number_of_rows: batch_size,
                         range_of_values: when_thens_primitive_to_string
                             .iter()
@@ -469,7 +469,7 @@ fn benchmark_lookup_table_case_when(c: &mut Criterion, batch_size: usize) {
             &input,
             |b, input| {
                 let array: StringArray = generate_values_for_lookup(
-                    Options:: {
+                    &Options:: {
                         number_of_rows: batch_size,
                         range_of_values: when_thens_string_to_primitive
                             .iter()
diff --git a/datafusion/physical-plan/benches/aggregate_vectorized.rs b/datafusion/physical-plan/benches/aggregate_vectorized.rs
index 3c1899406c98..66e7a28a28b4 100644
--- a/datafusion/physical-plan/benches/aggregate_vectorized.rs
+++ b/datafusion/physical-plan/benches/aggregate_vectorized.rs
@@ -271,6 +271,7 @@ fn bench_single_primitive(
 }
 
 /// Test `vectorized_equal_to` with different number of true in the initial results
+#[expect(clippy::needless_pass_by_value)]
 fn vectorized_equal_to(
     group: &mut BenchmarkGroup,
     mut builder: GroupColumnBuilder,
diff --git a/datafusion/physical-plan/benches/spill_io.rs b/datafusion/physical-plan/benches/spill_io.rs
index 40c8f7634c8c..5123b91a6ab9 100644
--- a/datafusion/physical-plan/benches/spill_io.rs
+++ b/datafusion/physical-plan/benches/spill_io.rs
@@ -490,6 +490,7 @@ fn bench_spill_compression(c: &mut Criterion) {
     group.finish();
 }
 
+#[expect(clippy::needless_pass_by_value)]
 fn benchmark_spill_batches_for_all_codec(
     group: &mut BenchmarkGroup<'_, WallTime>,
     batch_label: &str,
diff --git a/datafusion/sql/tests/sql_integration.rs b/datafusion/sql/tests/sql_integration.rs
index b69352159a96..2d62fd9c0c23 100644
--- a/datafusion/sql/tests/sql_integration.rs
+++ b/datafusion/sql/tests/sql_integration.rs
@@ -15,6 +15,10 @@
 // specific language governing permissions and limitations
 // under the License.
 
+// This lint violation is acceptable for tests, so suppress for now
+// Issue:
+#![expect(clippy::needless_pass_by_value)]
+
 use std::any::Any;
 use std::hash::Hash;
 #[cfg(test)]
diff --git a/datafusion/sqllogictest/bin/sqllogictests.rs b/datafusion/sqllogictest/bin/sqllogictests.rs
index ec705fc9ba06..0f9f6c83dcf0 100644
--- a/datafusion/sqllogictest/bin/sqllogictests.rs
+++ b/datafusion/sqllogictest/bin/sqllogictests.rs
@@ -407,6 +407,7 @@ async fn run_file_in_runner>(
     Ok(())
 }
 
+#[expect(clippy::needless_pass_by_value)]
 fn get_record_count(path: &PathBuf, label: String) -> u64 {
     let records: Vec::ColumnType>> = parse_file(path).unwrap();
diff --git a/datafusion/sqllogictest/src/engines/conversion.rs b/datafusion/sqllogictest/src/engines/conversion.rs
index de3acbee93b1..984845b584bf 100644
--- a/datafusion/sqllogictest/src/engines/conversion.rs
+++ b/datafusion/sqllogictest/src/engines/conversion.rs
@@ -122,6 +122,7 @@ pub(crate) fn decimal_to_str(value: Decimal) -> String {
 /// Converts a `BigDecimal` to its plain string representation, optionally rounding to a specified number of decimal places.
 ///
 /// If `round_digits` is `None`, the value is rounded to 12 decimal places by default.
+#[expect(clippy::needless_pass_by_value)]
 pub(crate) fn big_decimal_to_str(value: BigDecimal, round_digits: Option) -> String {
     // Round the value to limit the number of decimal places
     let value = value.round(round_digits.unwrap_or(12)).normalized();
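For context, the minimal sketch below (hypothetical code, not taken from the DataFusion sources) shows the kind of signature `clippy::needless_pass_by_value` flags once the Cargo.toml hunk enables it workspace-wide, and how the `#[expect(...)]` attribute used throughout this diff records that a by-value parameter is intentional. Unlike `#[allow]`, `#[expect]` (stable since Rust 1.81, checked only under `cargo clippy` for clippy lints) also warns if the expected lint stops firing, so the suppressions stay honest as the code changes.

```rust
// Hypothetical example, not part of this diff: `names` is taken by value but
// only iterated, which is exactly what `clippy::needless_pass_by_value` flags
// (it would suggest `&[String]` instead). The `expect` attribute suppresses
// the warning and is itself reported as unfulfilled if the lint no longer fires.
#[expect(clippy::needless_pass_by_value)]
fn count_long_names(names: Vec<String>, min_len: usize) -> usize {
    names.iter().filter(|name| name.len() >= min_len).count()
}

fn main() {
    let names = vec!["alpha".to_string(), "ab".to_string(), "gamma".to_string()];
    // "alpha" and "gamma" are at least 3 characters long.
    assert_eq!(count_long_names(names, 3), 2);
}
```

The benchmark, example, and test helpers touched above keep their owned-parameter signatures and acknowledge the lint with `#[expect]` (or a module- or crate-level expectation) rather than being refactored, which is what the repeated added lines in this diff accomplish.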