diff --git a/crates/integration_tests/testdata/spark/Dockerfile b/crates/integration_tests/testdata/spark/Dockerfile
index 420edb2318..339051bfc1 100644
--- a/crates/integration_tests/testdata/spark/Dockerfile
+++ b/crates/integration_tests/testdata/spark/Dockerfile
@@ -29,7 +29,7 @@ WORKDIR ${SPARK_HOME}
 
 ENV SPARK_VERSION=3.5.6
 ENV ICEBERG_SPARK_RUNTIME_VERSION=3.5_2.12
-ENV ICEBERG_VERSION=1.6.0
+ENV ICEBERG_VERSION=1.10.0
 
 RUN curl --retry 5 -s -C - https://dlcdn.apache.org/spark/spark-${SPARK_VERSION}/spark-${SPARK_VERSION}-bin-hadoop3.tgz -o spark-${SPARK_VERSION}-bin-hadoop3.tgz \
     && tar xzf spark-${SPARK_VERSION}-bin-hadoop3.tgz --directory /opt/spark --strip-components 1 \
diff --git a/crates/integration_tests/tests/shared_tests/read_positional_deletes.rs b/crates/integration_tests/tests/shared_tests/read_positional_deletes.rs
index 565f8ba427..7641810073 100644
--- a/crates/integration_tests/tests/shared_tests/read_positional_deletes.rs
+++ b/crates/integration_tests/tests/shared_tests/read_positional_deletes.rs
@@ -53,7 +53,7 @@ async fn test_read_table_with_positional_deletes() {
 
     // Scan plan phase should include delete files in file plan
     // when with_delete_file_processing_enabled == true
-    assert_eq!(plan[0].deletes.len(), 2);
+    assert_eq!(plan[0].deletes.len(), 1);
 
     // we should see two rows deleted, returning 10 rows instead of 12
     let batch_stream = scan.to_arrow().await.unwrap();
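
Note (not part of the patch): a minimal sketch of how the row-count check that follows the updated assertion could look, assuming the test collects Arrow batches from the same `scan` and sums their rows. Only `scan.to_arrow()`, `plan[0].deletes.len()`, and the 10-of-12 row expectation come from the hunk above; the `iceberg::scan::TableScan` import path, the helper name `assert_scan_row_count`, and the `futures::TryStreamExt` collection are assumptions, not the test's actual code.

use futures::TryStreamExt;
use iceberg::scan::TableScan;

// Hedged sketch: count the rows a scan actually returns, so the test can
// assert that the two positionally deleted rows are gone (10 of 12 remain)
// even though Iceberg 1.10 now writes a single delete file instead of two.
async fn assert_scan_row_count(scan: &TableScan, expected: usize) {
    // Stream Arrow record batches from the scan and collect them.
    let batch_stream = scan.to_arrow().await.unwrap();
    let batches: Vec<_> = batch_stream.try_collect().await.unwrap();

    // Sum rows across all batches and compare against the expectation.
    let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum();
    assert_eq!(total_rows, expected);
}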