Skip to content

Commit

Permalink
changed type to SQL_PANDAS_UDF
Browse files Browse the repository at this point in the history
  • Loading branch information
BryanCutler committed Sep 12, 2017
1 parent 91dead2 commit 4a2fec2
Show file tree
Hide file tree
Showing 4 changed files with 4 additions and 4 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ private[spark] case class ChainedPythonFunctions(funcs: Seq[PythonFunction])
/**
 * Integer codes identifying how the Python worker should evaluate a batch of
 * functions. These values are serialized to the Python side, so they must stay
 * in sync with `PythonEvalType` in python/pyspark/serializers.py.
 */
private[spark] object PythonEvalType {
  val NON_UDF = 0
  val SQL_BATCHED_UDF = 1
  // Renamed from SQL_ARROW_UDF in this commit; the wire value (2) is unchanged.
  val SQL_PANDAS_UDF = 2
}

private[spark] object PythonRunner {
Expand Down
2 changes: 1 addition & 1 deletion python/pyspark/serializers.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ class SpecialLengths(object):
class PythonEvalType(object):
    """Integer codes telling the Python worker how to evaluate UDFs.

    These values are exchanged with the JVM, so they must stay in sync with
    the Scala ``PythonEvalType`` object in PythonRDD.scala.
    """

    NON_UDF = 0
    SQL_BATCHED_UDF = 1
    # Renamed from SQL_ARROW_UDF in this commit; the wire value (2) is unchanged.
    SQL_PANDAS_UDF = 2


class Serializer(object):
Expand Down
2 changes: 1 addition & 1 deletion python/pyspark/worker.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ def read_udfs(pickleSer, infile, eval_type):

func = lambda _, it: map(mapper, it)

if eval_type == PythonEvalType.SQL_ARROW_UDF:
if eval_type == PythonEvalType.SQL_PANDAS_UDF:
ser = ArrowPandasSerializer()
else:
ser = BatchedSerializer(PickleSerializer(), 100)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -108,7 +108,7 @@ case class ArrowEvalPythonExec(udfs: Seq[PythonUDF], output: Seq[Attribute], chi

// Output iterator for results from Python.
val outputIterator = new PythonRunner(
pyFuncs, bufferSize, reuseWorker, PythonEvalType.SQL_ARROW_UDF, argOffsets).
pyFuncs, bufferSize, reuseWorker, PythonEvalType.SQL_PANDAS_UDF, argOffsets).
compute(inputIterator, context.partitionId(), context)

val joined = new JoinedRow
Expand Down

0 comments on commit 4a2fec2

Please sign in to comment.