diff --git a/datafusion/core/src/datasource/file_format/json.rs b/datafusion/core/src/datasource/file_format/json.rs index a66edab888bf..21cd22f0701a 100644 --- a/datafusion/core/src/datasource/file_format/json.rs +++ b/datafusion/core/src/datasource/file_format/json.rs @@ -256,7 +256,7 @@ mod tests { projection: Option<Vec<usize>>, limit: Option<usize>, ) -> Result<Arc<dyn ExecutionPlan>> { - let filename = "tests/jsons/2.json"; + let filename = "tests/data/2.json"; let format = JsonFormat::default(); scan_format(state, &format, ".", filename, projection, limit).await } @@ -266,7 +266,7 @@ mod tests { let session = SessionContext::new(); let ctx = session.state(); let store = Arc::new(LocalFileSystem::new()) as _; - let filename = "tests/jsons/schema_infer_limit.json"; + let filename = "tests/data/schema_infer_limit.json"; let format = JsonFormat::default().with_schema_infer_max_rec(Some(3)); let file_schema = format diff --git a/datafusion/core/src/physical_plan/file_format/csv.rs b/datafusion/core/src/physical_plan/file_format/csv.rs index e2d2bb8ef7bf..e7633807e069 100644 --- a/datafusion/core/src/physical_plan/file_format/csv.rs +++ b/datafusion/core/src/physical_plan/file_format/csv.rs @@ -785,7 +785,7 @@ mod tests { let options = CsvReadOptions::default() .schema_infer_max_records(2) .has_header(true); - let df = ctx.read_csv("tests/csv/corrupt.csv", options).await?; + let df = ctx.read_csv("tests/data/corrupt.csv", options).await?; let tmp_dir = TempDir::new()?; let out_dir = tmp_dir.as_ref().to_str().unwrap().to_string() + "/out"; let e = df diff --git a/datafusion/core/src/physical_plan/file_format/json.rs b/datafusion/core/src/physical_plan/file_format/json.rs index d122fd78b4b2..bb686b55a03c 100644 --- a/datafusion/core/src/physical_plan/file_format/json.rs +++ b/datafusion/core/src/physical_plan/file_format/json.rs @@ -309,7 +309,7 @@ mod tests { use super::*; - const TEST_DATA_BASE: &str = "tests/jsons"; + const TEST_DATA_BASE: &str = "tests/data"; async fn prepare_store( state: 
&SessionState, @@ -707,7 +707,7 @@ mod tests { let options = CsvReadOptions::default() .schema_infer_max_records(2) .has_header(true); - let df = ctx.read_csv("tests/csv/corrupt.csv", options).await?; + let df = ctx.read_csv("tests/data/corrupt.csv", options).await?; let tmp_dir = TempDir::new()?; let out_dir = tmp_dir.as_ref().to_str().unwrap().to_string() + "/out"; let e = df diff --git a/datafusion/core/src/physical_plan/file_format/parquet.rs b/datafusion/core/src/physical_plan/file_format/parquet.rs index 0afb819d43ef..641be002b6ad 100644 --- a/datafusion/core/src/physical_plan/file_format/parquet.rs +++ b/datafusion/core/src/physical_plan/file_format/parquet.rs @@ -940,7 +940,7 @@ mod tests { let options = CsvReadOptions::default() .schema_infer_max_records(2) .has_header(true); - let df = ctx.read_csv("tests/csv/corrupt.csv", options).await?; + let df = ctx.read_csv("tests/data/corrupt.csv", options).await?; let tmp_dir = TempDir::new()?; let out_dir = tmp_dir.as_ref().to_str().unwrap().to_string() + "/out"; let e = df diff --git a/datafusion/core/tests/jsons/1.json b/datafusion/core/tests/data/1.json similarity index 100% rename from datafusion/core/tests/jsons/1.json rename to datafusion/core/tests/data/1.json diff --git a/datafusion/core/tests/jsons/2.json b/datafusion/core/tests/data/2.json similarity index 100% rename from datafusion/core/tests/jsons/2.json rename to datafusion/core/tests/data/2.json diff --git a/datafusion/core/tests/jsons/3.json b/datafusion/core/tests/data/3.json similarity index 100% rename from datafusion/core/tests/jsons/3.json rename to datafusion/core/tests/data/3.json diff --git a/datafusion/core/tests/csv/corrupt.csv b/datafusion/core/tests/data/corrupt.csv similarity index 100% rename from datafusion/core/tests/csv/corrupt.csv rename to datafusion/core/tests/data/corrupt.csv diff --git a/datafusion/core/tests/parquet/data/repeat_much.snappy.parquet b/datafusion/core/tests/data/repeat_much.snappy.parquet similarity index 100% 
rename from datafusion/core/tests/parquet/data/repeat_much.snappy.parquet rename to datafusion/core/tests/data/repeat_much.snappy.parquet diff --git a/datafusion/core/tests/jsons/schema_infer_limit.json b/datafusion/core/tests/data/schema_infer_limit.json similarity index 100% rename from datafusion/core/tests/jsons/schema_infer_limit.json rename to datafusion/core/tests/data/schema_infer_limit.json diff --git a/datafusion/core/tests/parquet/data/test_binary.parquet b/datafusion/core/tests/data/test_binary.parquet similarity index 100% rename from datafusion/core/tests/parquet/data/test_binary.parquet rename to datafusion/core/tests/data/test_binary.parquet diff --git a/datafusion/core/tests/parquet/data/timestamp_with_tz.parquet b/datafusion/core/tests/data/timestamp_with_tz.parquet similarity index 100% rename from datafusion/core/tests/parquet/data/timestamp_with_tz.parquet rename to datafusion/core/tests/data/timestamp_with_tz.parquet diff --git a/datafusion/core/tests/sql/json.rs b/datafusion/core/tests/sql/json.rs index 10fcdfda20de..8608305f152d 100644 --- a/datafusion/core/tests/sql/json.rs +++ b/datafusion/core/tests/sql/json.rs @@ -17,7 +17,7 @@ use super::*; -const TEST_DATA_BASE: &str = "tests/jsons"; +const TEST_DATA_BASE: &str = "tests/data"; #[tokio::test] async fn json_query() { @@ -92,7 +92,7 @@ async fn json_explain() { \n CoalescePartitionsExec\ \n AggregateExec: mode=Partial, gby=[], aggr=[COUNT(UInt8(1))]\ \n RepartitionExec: partitioning=RoundRobinBatch(NUM_CORES), input_partitions=1\ - \n JsonExec: file_groups={1 group: [[WORKING_DIR/tests/jsons/2.json]]}, projection=[a]\n", + \n JsonExec: file_groups={1 group: [[WORKING_DIR/tests/data/2.json]]}, projection=[a]\n", ], ]; assert_eq!(expected, actual); diff --git a/datafusion/core/tests/sql/order.rs b/datafusion/core/tests/sql/order.rs index a85fee776dd6..fa6ac612440c 100644 --- a/datafusion/core/tests/sql/order.rs +++ b/datafusion/core/tests/sql/order.rs @@ -25,7 +25,7 @@ use 
test_utils::{batches_to_vec, partitions_to_sorted_vec}; #[tokio::test] async fn sort_with_lots_of_repetition_values() -> Result<()> { let ctx = SessionContext::new(); - let filename = "tests/parquet/data/repeat_much.snappy.parquet"; + let filename = "tests/data/repeat_much.snappy.parquet"; ctx.register_parquet("rep", filename, ParquetReadOptions::default()) .await?; diff --git a/datafusion/core/tests/sql/parquet.rs b/datafusion/core/tests/sql/parquet.rs index d2998209a22c..532a0414f9c2 100644 --- a/datafusion/core/tests/sql/parquet.rs +++ b/datafusion/core/tests/sql/parquet.rs @@ -151,7 +151,7 @@ async fn fixed_size_binary_columns() { let ctx = SessionContext::new(); ctx.register_parquet( "t0", - "tests/parquet/data/test_binary.parquet", + "tests/data/test_binary.parquet", ParquetReadOptions::default(), ) .await @@ -170,7 +170,7 @@ async fn window_fn_timestamp_tz() { let ctx = SessionContext::new(); ctx.register_parquet( "t0", - "tests/parquet/data/timestamp_with_tz.parquet", + "tests/data/timestamp_with_tz.parquet", ParquetReadOptions::default(), ) .await