From 0e3831cc56f2532d5fdd8500b26dfe34a62069cd Mon Sep 17 00:00:00 2001
From: uncleGen
Date: Mon, 22 Sep 2014 17:48:29 +0800
Subject: [PATCH] [SPARK-3636][CORE]: It is not friendly to interrupt a Job
 when user passes different storage levels to an RDD

---
 .../src/main/scala/org/apache/spark/rdd/RDD.scala | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/rdd/RDD.scala b/core/src/main/scala/org/apache/spark/rdd/RDD.scala
index a9b905b0d1a63..b97b429e6b255 100644
--- a/core/src/main/scala/org/apache/spark/rdd/RDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/RDD.scala
@@ -141,14 +141,15 @@ abstract class RDD[T: ClassTag](
   def persist(newLevel: StorageLevel): this.type = {
     // TODO: Handle changes of StorageLevel
     if (storageLevel != StorageLevel.NONE && newLevel != storageLevel) {
-      throw new UnsupportedOperationException(
-        "Cannot change storage level of an RDD after it was already assigned a level")
+      logError("Cannot change storage level of an RDD after it was already assigned a level")
+      this
+    } else {
+      sc.persistRDD(this)
+      // Register the RDD with the ContextCleaner for automatic GC-based cleanup
+      sc.cleaner.foreach(_.registerRDDForCleanup(this))
+      storageLevel = newLevel
+      this
     }
-    sc.persistRDD(this)
-    // Register the RDD with the ContextCleaner for automatic GC-based cleanup
-    sc.cleaner.foreach(_.registerRDDForCleanup(this))
-    storageLevel = newLevel
-    this
   }
 
   /** Persist this RDD with the default storage level (`MEMORY_ONLY`). */
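
Illustration (not part of the patch): a minimal sketch of the behavior this change targets, assuming a live SparkContext bound to `sc`. Before the change, re-persisting an already-persisted RDD at a different storage level threw an UnsupportedOperationException and interrupted the job; with the change, persist only logs the error and leaves the original level in place.

    import org.apache.spark.storage.StorageLevel

    val rdd = sc.parallelize(1 to 10).persist(StorageLevel.MEMORY_ONLY)
    // Previously: UnsupportedOperationException interrupted the job here.
    // With this patch: an error is logged and the level stays MEMORY_ONLY.
    val same = rdd.persist(StorageLevel.DISK_ONLY)
    assert(same.getStorageLevel == StorageLevel.MEMORY_ONLY)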