-
Notifications
You must be signed in to change notification settings - Fork 28.2k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[SPARK-13579][build][test-maven] Stop building the main Spark assembly. #11796
Changes from 17 commits
083235c
0d977bd
611a6fd
597e6ec
5be649d
63b1e99
b06ed8b
c682d04
3d51178
3ae4e02
08502d2
976ce2c
ef56b81
01d9f0d
2f09e07
eb828ce
ef19b3c
9275ea6
b5d4292
a419707
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -36,21 +36,20 @@ else | |
fi | ||
|
||
# Find Spark jars. | ||
# TODO: change the directory name when Spark jars move from "lib". | ||
if [ -f "${SPARK_HOME}/RELEASE" ]; then | ||
SPARK_JARS_DIR="${SPARK_HOME}/lib" | ||
SPARK_JARS_DIR="${SPARK_HOME}/jars" | ||
else | ||
SPARK_JARS_DIR="${SPARK_HOME}/assembly/target/scala-$SPARK_SCALA_VERSION" | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I think you also have to append |
||
fi | ||
|
||
if [ ! -d "$SPARK_JARS_DIR" ]; then | ||
if [ ! -d "$SPARK_JARS_DIR" ] && [ -z "$SPARK_TESTING$SPARK_SQL_TESTING" ]; then | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Is this an idiomatic way to do an There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I can add an explicit |
||
echo "Failed to find Spark jars directory ($SPARK_JARS_DIR)." 1>&2 | ||
echo "You need to build Spark before running this program." 1>&2 | ||
exit 1 | ||
else | ||
LAUNCH_CLASSPATH="$SPARK_JARS_DIR/*" | ||
fi | ||
|
||
LAUNCH_CLASSPATH="$SPARK_JARS_DIR/*" | ||
|
||
# Add the launcher build dir to the classpath if requested. | ||
if [ -n "$SPARK_PREPEND_CLASSES" ]; then | ||
LAUNCH_CLASSPATH="${SPARK_HOME}/launcher/target/scala-$SPARK_SCALA_VERSION/classes:$LAUNCH_CLASSPATH" | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -201,24 +201,29 @@ class FileAppenderSuite extends SparkFunSuite with BeforeAndAfter with Logging { | |
|
||
// Make sure only logging errors | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. What's the deal with these logging changes? Got a short summary? There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. For some reason these tests failed in one of my runs. And when they failed, they used to leave the error level at "ERROR", which basically means all tests that ran after these didn't write any logs. |
||
val logger = Logger.getRootLogger | ||
val oldLogLevel = logger.getLevel | ||
logger.setLevel(Level.ERROR) | ||
logger.addAppender(mockAppender) | ||
try { | ||
logger.addAppender(mockAppender) | ||
|
||
val testOutputStream = new PipedOutputStream() | ||
val testInputStream = new PipedInputStream(testOutputStream) | ||
val testOutputStream = new PipedOutputStream() | ||
val testInputStream = new PipedInputStream(testOutputStream) | ||
|
||
// Close the stream before appender tries to read will cause an IOException | ||
testInputStream.close() | ||
testOutputStream.close() | ||
val appender = FileAppender(testInputStream, testFile, new SparkConf) | ||
// Close the stream before appender tries to read will cause an IOException | ||
testInputStream.close() | ||
testOutputStream.close() | ||
val appender = FileAppender(testInputStream, testFile, new SparkConf) | ||
|
||
appender.awaitTermination() | ||
appender.awaitTermination() | ||
|
||
// If InputStream was closed without first stopping the appender, an exception will be logged | ||
verify(mockAppender, atLeast(1)).doAppend(loggingEventCaptor.capture) | ||
val loggingEvent = loggingEventCaptor.getValue | ||
assert(loggingEvent.getThrowableInformation !== null) | ||
assert(loggingEvent.getThrowableInformation.getThrowable.isInstanceOf[IOException]) | ||
// If InputStream was closed without first stopping the appender, an exception will be logged | ||
verify(mockAppender, atLeast(1)).doAppend(loggingEventCaptor.capture) | ||
val loggingEvent = loggingEventCaptor.getValue | ||
assert(loggingEvent.getThrowableInformation !== null) | ||
assert(loggingEvent.getThrowableInformation.getThrowable.isInstanceOf[IOException]) | ||
} finally { | ||
logger.setLevel(oldLogLevel) | ||
} | ||
} | ||
|
||
test("file appender async close stream gracefully") { | ||
|
@@ -228,30 +233,35 @@ class FileAppenderSuite extends SparkFunSuite with BeforeAndAfter with Logging { | |
|
||
// Make sure only logging errors | ||
val logger = Logger.getRootLogger | ||
val oldLogLevel = logger.getLevel | ||
logger.setLevel(Level.ERROR) | ||
logger.addAppender(mockAppender) | ||
try { | ||
logger.addAppender(mockAppender) | ||
|
||
val testOutputStream = new PipedOutputStream() | ||
val testInputStream = new PipedInputStream(testOutputStream) with LatchedInputStream | ||
val testOutputStream = new PipedOutputStream() | ||
val testInputStream = new PipedInputStream(testOutputStream) with LatchedInputStream | ||
|
||
// Close the stream before appender tries to read will cause an IOException | ||
testInputStream.close() | ||
testOutputStream.close() | ||
val appender = FileAppender(testInputStream, testFile, new SparkConf) | ||
// Close the stream before appender tries to read will cause an IOException | ||
testInputStream.close() | ||
testOutputStream.close() | ||
val appender = FileAppender(testInputStream, testFile, new SparkConf) | ||
|
||
// Stop the appender before an IOException is called during read | ||
testInputStream.latchReadStarted.await() | ||
appender.stop() | ||
testInputStream.latchReadProceed.countDown() | ||
// Stop the appender before an IOException is called during read | ||
testInputStream.latchReadStarted.await() | ||
appender.stop() | ||
testInputStream.latchReadProceed.countDown() | ||
|
||
appender.awaitTermination() | ||
appender.awaitTermination() | ||
|
||
// Make sure no IOException errors have been logged as a result of appender closing gracefully | ||
verify(mockAppender, atLeast(0)).doAppend(loggingEventCaptor.capture) | ||
import scala.collection.JavaConverters._ | ||
loggingEventCaptor.getAllValues.asScala.foreach { loggingEvent => | ||
assert(loggingEvent.getThrowableInformation === null | ||
|| !loggingEvent.getThrowableInformation.getThrowable.isInstanceOf[IOException]) | ||
// Make sure no IOException errors have been logged as a result of appender closing gracefully | ||
verify(mockAppender, atLeast(0)).doAppend(loggingEventCaptor.capture) | ||
import scala.collection.JavaConverters._ | ||
loggingEventCaptor.getAllValues.asScala.foreach { loggingEvent => | ||
assert(loggingEvent.getThrowableInformation === null | ||
|| !loggingEvent.getThrowableInformation.getThrowable.isInstanceOf[IOException]) | ||
} | ||
} finally { | ||
logger.setLevel(oldLogLevel) | ||
} | ||
} | ||
|
||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment.
The reason will be displayed to describe this comment to others. Learn more.
When and if this turns out to be a problem for user applications, we can consider a different dependency shading strategy in a followup patch. Therefore, I'm fine with this change for now.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This is a problem for the Spark Cassandra Connector. The Cassandra Java Driver requires a 16.0 or greater version of Guava. This necessarily means we need to shade now. This was on our roadmap anyway; just wanted you to be aware.