diff --git a/sql/core/src/test/scala/org/apache/spark/sql/VariantSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/VariantSuite.scala
index 16be9558409c..6510a165e258 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/VariantSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/VariantSuite.scala
@@ -820,7 +820,9 @@ class VariantSuite extends QueryTest with SharedSparkSession with ExpressionEval
     // The initial size of the buffer backing a cached dataframe column is 128KB.
     // See `ColumnBuilder`.
     val numKeys = 128 * 1024
-    val keyIterator = (0 until numKeys).iterator
+    // We start in long range because the shredded writer writes int64 by default which wouldn't
+    // match narrower binaries.
+    val keyIterator = (Int.MaxValue + 1L until Int.MaxValue + 1L + numKeys).iterator
     val entries = Array.fill(numKeys)(s"""\"${keyIterator.next()}\": \"test\"""")
     val jsonStr = s"{${entries.mkString(", ")}}"
     val query = s"""select named_struct(
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetVariantShreddingSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetVariantShreddingSuite.scala
index 1f06ddb29bd4..1cc6d3afbee5 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetVariantShreddingSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetVariantShreddingSuite.scala
@@ -379,7 +379,8 @@ class ParquetVariantShreddingSuite extends QueryTest with ParquetTest with Share
       "struct>>"
     withSQLConf(SQLConf.VARIANT_WRITE_SHREDDING_ENABLED.key -> true.toString,
       SQLConf.VARIANT_ALLOW_READING_SHREDDED.key -> true.toString,
-      SQLConf.VARIANT_FORCE_SHREDDING_SCHEMA_FOR_TEST.key -> schema) {
+      SQLConf.VARIANT_FORCE_SHREDDING_SCHEMA_FOR_TEST.key -> schema,
+      SQLConf.PARQUET_IGNORE_VARIANT_ANNOTATION.key -> true.toString) {
       df.write.mode("overwrite").parquet(dir.getAbsolutePath)
       // Verify that we can read the full variant.