Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

force overwrite metadata in table properties #610

Open
wants to merge 4 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
4 changes: 4 additions & 0 deletions spark/src/main/scala/ai/chronon/spark/Driver.scala
Original file line number Diff line number Diff line change
Expand Up @@ -213,6 +213,10 @@ object Driver {
opt[String](required = false,
descr =
"Start date to compute join backfill, this start date will override start partition in conf.")
val forceOverwriteMetadata: ScallopOption[Boolean] =
opt[Boolean](required = false,
default = Some(false),
descr = "Force overwrite metadata in the table properties if it already exists.")
lazy val joinConf: api.Join = parseConf[api.Join](confPath())
override def subcommandName() = s"join_${joinConf.metaData.name}"
}
Expand Down
7 changes: 3 additions & 4 deletions spark/src/main/scala/ai/chronon/spark/JoinBase.scala
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,8 @@ abstract class JoinBase(joinConf: api.Join,
tableUtils: TableUtils,
skipFirstHole: Boolean,
mutationScan: Boolean = true,
showDf: Boolean = false) {
showDf: Boolean = false,
forceOverwriteMetadata: Boolean = false) {
assert(Option(joinConf.metaData.outputNamespace).nonEmpty, s"output namespace could not be empty or null")
val metrics: Metrics.Context = Metrics.Context(Metrics.Environment.JoinOffline, joinConf)
private val outputTable = joinConf.metaData.outputTable
Expand Down Expand Up @@ -300,9 +301,7 @@ abstract class JoinBase(joinConf: api.Join,
}

// First run command to archive tables that have changed semantically since the last run
val archivedAtTs = Instant.now()
tablesToRecompute(joinConf, outputTable, tableUtils).foreach(
tableUtils.archiveOrDropTableIfExists(_, Some(archivedAtTs)))
tablesToRecompute(joinConf, outputTable, tableUtils, forceOverwriteMetadata)

// detect holes and chunks to fill
// OverrideStartPartition is used to replace the start partition of the join config. This is useful when
Expand Down
19 changes: 17 additions & 2 deletions spark/src/main/scala/ai/chronon/spark/JoinUtils.scala
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.functions.{coalesce, col, udf}
import org.apache.spark.util.sketch.BloomFilter

import java.time.Instant
import scala.collection.Seq
import scala.util.ScalaJavaConversions.MapOps

Expand Down Expand Up @@ -319,15 +320,29 @@ object JoinUtils {

/** Computes which output/intermediate tables are stale because the join's semantic hash
  * no longer matches the hash recorded in the output table's properties, then handles them.
  *
  * @param joinConf the join configuration whose current semantic hash is compared
  * @param outputTable the join's output table, whose properties hold the previously recorded hash
  * @param tableUtils utility used to read table properties and run DDL statements
  * @param forceOverwriteMetadata when true, only unset the stale semantic-hash property on each
  *                               affected table (no archiving) and report nothing to recompute;
  *                               when false (default), archive/drop each stale table and return it
  * @return the stale tables that were archived, or an empty sequence in force-overwrite mode
  */
def tablesToRecompute(joinConf: ai.chronon.api.Join,
                      outputTable: String,
                      tableUtils: TableUtils,
                      forceOverwriteMetadata: Boolean = false): collection.Seq[String] = {
  val parser = new Gson()
  // Resolve the previously recorded semantic hash (if any) and diff it against the current one.
  val staleTables: collection.Seq[String] = tableUtils
    .getTableProperties(outputTable)
    .flatMap(_.get(Constants.SemanticHashKey))
    .map { previousHashJson =>
      val previousHash =
        parser.fromJson(previousHashJson, classOf[java.util.HashMap[String, String]]).toScala
      println(s"Comparing Hashes:\nNew: ${joinConf.semanticHash},\nOld: $previousHash")
      joinConf.tablesToDrop(previousHash)
    }
    .getOrElse(collection.Seq.empty)

  if (forceOverwriteMetadata) {
    // Keep the data: just strip the outdated semantic-hash property so it can be rewritten later.
    staleTables.foreach { staleTable =>
      tableUtils.sql(tableUtils.unsetTablePropertiesSql(staleTable, Constants.SemanticHashKey))
    }
    collection.Seq.empty
  } else {
    // Archive (or drop) every stale table with a single shared timestamp so they get rebuilt.
    val archiveTimestamp = Instant.now()
    staleTables.foreach(tableUtils.archiveOrDropTableIfExists(_, Some(archiveTimestamp)))
    staleTables
  }
}
}
4 changes: 4 additions & 0 deletions spark/src/main/scala/ai/chronon/spark/TableUtils.scala
Original file line number Diff line number Diff line change
Expand Up @@ -400,6 +400,10 @@ case class TableUtils(sparkSession: SparkSession) {
s"ALTER TABLE $tableName SET TBLPROPERTIES ($propertiesString)"
}

/** Builds the Spark SQL DDL statement that removes one key from a table's TBLPROPERTIES.
  *
  * @param tableName fully qualified table name to alter
  * @param propertyKey the single property key to unset (rendered single-quoted)
  * @return the `ALTER TABLE ... UNSET TBLPROPERTIES (...)` statement as a string
  */
def unsetTablePropertiesSql(tableName: String, propertyKey: String): String = {
  // NOTE(review): propertyKey is interpolated verbatim inside single quotes; callers pass
  // trusted constants (e.g. Constants.SemanticHashKey), not user input — confirm if that changes.
  "ALTER TABLE " + tableName + " UNSET TBLPROPERTIES ('" + propertyKey + "')"
}

def chunk(partitions: Set[String]): Seq[PartitionRange] = {
val sortedDates = partitions.toSeq.sorted
sortedDates.foldLeft(Seq[PartitionRange]()) { (ranges, nextDate) =>
Expand Down
9 changes: 8 additions & 1 deletion spark/src/test/scala/ai/chronon/spark/test/JoinTest.scala
Original file line number Diff line number Diff line change
Expand Up @@ -791,6 +791,12 @@ class JoinTest {
JoinUtils.tablesToRecompute(leftChangeJoinConf, leftChangeJoinConf.metaData.outputTable, tableUtils)
println(leftChangeRecompute)
assertEquals(leftChangeRecompute.size, 3)

val leftChangeNoRecompute =
JoinUtils.tablesToRecompute(leftChangeJoinConf, leftChangeJoinConf.metaData.outputTable, tableUtils, true)
println(leftChangeNoRecompute)
assertEquals(leftChangeNoRecompute.size, 0)

val partTable = s"${leftChangeJoinConf.metaData.outputTable}_user_unit_test_item_views"
assertEquals(leftChangeRecompute,
Seq(partTable, leftChangeJoinConf.metaData.bootstrapTable, leftChangeJoinConf.metaData.outputTable))
Expand All @@ -804,6 +810,7 @@ class JoinTest {
val addPartRecompute =
JoinUtils.tablesToRecompute(addPartJoinConf, addPartJoinConf.metaData.outputTable, tableUtils)
assertEquals(addPartRecompute.size, 1)
assertEquals(JoinUtils.tablesToRecompute(addPartJoinConf, addPartJoinConf.metaData.outputTable, tableUtils,true).size, 0)
assertEquals(addPartRecompute, Seq(addPartJoinConf.metaData.outputTable))
// Compute to ensure that it works and to set the stage for the next assertion
addPartJoin.computeJoin(Some(100))
Expand All @@ -815,6 +822,7 @@ class JoinTest {
val rightModRecompute =
JoinUtils.tablesToRecompute(rightModJoinConf, rightModJoinConf.metaData.outputTable, tableUtils)
assertEquals(rightModRecompute.size, 2)
assertEquals(JoinUtils.tablesToRecompute(rightModJoinConf, rightModJoinConf.metaData.outputTable, tableUtils,true).size, 0)
val rightModPartTable = s"${addPartJoinConf.metaData.outputTable}_user_2_unit_test_item_views"
assertEquals(rightModRecompute, Seq(rightModPartTable, addPartJoinConf.metaData.outputTable))
// Modify both
Expand Down Expand Up @@ -1055,7 +1063,6 @@ class JoinTest {

@Test
def testMigration(): Unit = {

// Left
val itemQueriesTable = s"$namespace.item_queries"
val ds = "2023-01-01"
Expand Down