package org.apache.spark.sql.delta.optimize

-import org.apache.spark.sql.delta.DeletionVectorsTestUtils
+import org.apache.spark.sql.delta.{DeletionVectorsTestUtils, DeltaColumnMapping, DeltaLog}
+import org.apache.spark.sql.delta.actions.AddFile
+import org.apache.spark.sql.delta.commands.VacuumCommand.generateCandidateFileMap
import org.apache.spark.sql.delta.sources.DeltaSQLConf
import org.apache.spark.sql.delta.test.DeltaSQLCommandTest
+import org.apache.spark.sql.delta.util.DeltaFileOperations
import io.delta.tables.DeltaTable
+import org.apache.hadoop.fs.{FileStatus, Path}
+import org.apache.parquet.hadoop.Footer

import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.test.SharedSparkSession
+import org.apache.spark.util.SerializableConfiguration

class DeltaReorgSuite extends QueryTest
  with SharedSparkSession
@@ -150,4 +156,147 @@ class DeltaReorgSuite extends QueryTest
    // Because each deleted file has a DV associated with it that gets rewritten as part of PURGE
    assert(opMetrics("numDeletionVectorsRemoved").toLong === numFilesRemoved)
  }
+
+  /**
+   * Get all parquet footers for the input `files`, used only for testing.
+   *
+   * @param files the sequence of `AddFile` whose data file paths are used to read
+   *              the parquet footers.
+   * @param log the delta log used to get the Hadoop configuration and the data path.
+   * @return the corresponding parquet footers, one for each input `AddFile`.
+   */
+  private def getParquetFooters(
+      files: Seq[AddFile],
+      log: DeltaLog): Seq[Footer] = {
+    val serializedConf = new SerializableConfiguration(log.newDeltaHadoopConf())
+    val dataPath = new Path(log.dataPath.toString)
+    val nameToAddFileMap = generateCandidateFileMap(dataPath, files)
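+    // Build a minimal FileStatus per data file: only the path, length and modification time
+    // matter for reading footers; replication and block size are placeholder values.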
+    val fileStatuses = nameToAddFileMap.map { case (absPath, addFile) =>
+      new FileStatus(
+        /* length */ addFile.size,
+        /* isDir */ false,
+        /* blockReplication */ 0,
+        /* blockSize */ 1,
+        /* modificationTime */ addFile.modificationTime,
+        new Path(absPath)
+      )
+    }
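+    // Read all footers in parallel using the table's Hadoop configuration.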
+    DeltaFileOperations.readParquetFootersInParallel(
+      serializedConf.value,
+      fileStatuses.toList,
+      ignoreCorruptFiles = false
+    )
+  }
+
+  test("Purge dropped columns of a table without DV") {
+    val targetDf = spark.range(0, 100, 1, numPartitions = 5)
+      .withColumn("id_dropped", col("id") % 4)
+      .toDF()
+    withTempDeltaTable(targetDf) { (_, log) =>
+      val path = log.dataPath.toString
+
+      val (addFiles1, _) = getFileActionsInLastVersion(log)
+      assert(addFiles1.size === 5)
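+      // Before the column is dropped, each of the 5 files physically contains `id` and `id_dropped`.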
+      val footers1 = getParquetFooters(addFiles1, log)
+      footers1.foreach { footer =>
+        val fields = footer.getParquetMetadata.getFileMetaData.getSchema.getFields
+        assert(fields.size == 2)
+        assert(fields.toArray.map { _.toString }.contains("optional int64 id_dropped"))
+      }
+
+      // Enable column mapping first, since DROP COLUMN requires it.
+      sql(
+        s"""
+           | ALTER TABLE delta.`$path`
+           | SET TBLPROPERTIES (
+           |   'delta.columnMapping.mode' = 'name'
+           | )
+           |""".stripMargin
+      )
+      // Drop the extra column via ALTER TABLE, then run REORG PURGE.
+      sql(
+        s"""
+           | ALTER TABLE delta.`$path`
+           | DROP COLUMN id_dropped
+           |""".stripMargin
+      )
+      executePurge(path)
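+      // After PURGE, the data files are rewritten and the dropped column should no longer
+      // appear in the parquet footers.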
+
+      val (addFiles2, _) = getFileActionsInLastVersion(log)
+      assert(addFiles2.size === 1)
+      val footers2 = getParquetFooters(addFiles2, log)
+      footers2.foreach { footer =>
+        val fields = footer.getParquetMetadata.getFileMetaData.getSchema.getFields
+        assert(fields.size == 1)
+        assert(!fields.toArray.map { _.toString }.contains("optional int64 id_dropped"))
+      }
+    }
+  }
+
+  test("Columns being renamed should not be purged") {
+    val targetDf = spark.range(0, 100, 1, numPartitions = 5)
+      .withColumn("id_before_rename", col("id") % 4)
+      .withColumn("id_dropped", col("id") % 5)
+      .toDF()
+    withTempDeltaTable(targetDf) { (_, log) =>
+      val path = log.dataPath.toString
+
+      val (addFiles1, _) = getFileActionsInLastVersion(log)
+      assert(addFiles1.size === 5)
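+      // Initially each file contains `id`, `id_before_rename` and `id_dropped`.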
+      val footers1 = getParquetFooters(addFiles1, log)
+      footers1.foreach { footer =>
+        val fields = footer.getParquetMetadata.getFileMetaData.getSchema.getFields
+        assert(fields.size == 3)
+        assert(fields.toArray.map { _.toString }.contains("optional int64 id_dropped"))
+        assert(fields.toArray.map { _.toString }.contains("optional int64 id_before_rename"))
+      }
+
+      // Enable column mapping first, since DROP COLUMN and RENAME COLUMN require it.
+      sql(
+        s"""
+           | ALTER TABLE delta.`$path`
+           | SET TBLPROPERTIES (
+           |   'delta.columnMapping.mode' = 'name'
+           | )
+           |""".stripMargin
+      )
+      // Drop `id_dropped` and rename `id_before_rename` via ALTER TABLE, then run REORG PURGE.
+      // This should remove `id_dropped` from the parquet files but keep the data of the renamed
+      // column (logically `id_after_rename`, physically still `id_before_rename`).
+      sql(
+        s"""
+           | ALTER TABLE delta.`$path`
+           | DROP COLUMN id_dropped
+           |""".stripMargin
+      )
+      sql(
+        s"""
+           | ALTER TABLE delta.`$path`
+           | RENAME COLUMN id_before_rename TO id_after_rename
+           |""".stripMargin
+      )
+      executePurge(path)
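+      // After PURGE, `id_dropped` should be gone from the data files while the renamed
+      // column's data is kept.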
+
+      val tableSchema = log.update().schema
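+      // Map the logical schema to the physical column names recorded by column mapping;
+      // after RENAME the physical name should still be `id_before_rename`.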
+      val tablePhysicalSchema = DeltaColumnMapping.renameColumns(tableSchema)
+      val beforeRenameColStr = "StructField(id_before_rename,LongType,true)"
+      val afterRenameColStr = "StructField(id_after_rename,LongType,true)"
+      assert(tableSchema.fields.length == 2 &&
+        tableSchema.map { _.toString }.contains(afterRenameColStr))
+      assert(tablePhysicalSchema.fields.length == 2 &&
+        tablePhysicalSchema.map { _.toString }.contains(beforeRenameColStr))
+
+      val (addFiles2, _) = getFileActionsInLastVersion(log)
+      assert(addFiles2.size === 1)
+      val footers2 = getParquetFooters(addFiles2, log)
+      footers2.foreach { footer =>
+        val fields = footer.getParquetMetadata.getFileMetaData.getSchema.getFields
+        assert(fields.size == 2)
+        assert(!fields.toArray.map { _.toString }.contains("optional int64 id_dropped = 3"))
+        // Note that the column's physical name in the parquet files is not changed by RENAME.
+        assert(fields.toArray.map { _.toString }.contains("optional int64 id_before_rename = 2"))
+      }
+    }
+  }
}