diff --git a/bin/spark-submit b/bin/spark-submit
index a297714c67da0..9e7cecedd0325 100755
--- a/bin/spark-submit
+++ b/bin/spark-submit
@@ -37,22 +37,6 @@ done
 
 DEPLOY_MODE=${DEPLOY_MODE:-"client"}
-
-# This is a hack to make DStream.pyprint work.
-# This will be removed after pyprint is moved to PythonDStream.
-# Problem is that print function is in (Scala)DStream.
-# Whenever python code is executed, we call PythonDStream which passes
-# pythonExec(which python Spark should execute). pythonExec is used to call python.
-# Since pyprint is located in DStream, Spark does not know which python should use.
-# In that case, get python path from PYSPARK_PYTHON, environmental variable.
-
-# Figure out which Python executable to use
-if [[ -z "$PYSPARK_PYTHON" ]]; then
-  PYSPARK_PYTHON="python"
-fi
-export PYSPARK_PYTHON
-
-
 if [ -n "$DRIVER_MEMORY" ] && [ $DEPLOY_MODE == "client" ]; then
   export SPARK_DRIVER_MEMORY=$DRIVER_MEMORY
 fi
 