[SPARK-46189][PS][SQL] Perform comparisons and arithmetic between same types in various Pandas aggregate functions to avoid interpreted mode errors

### What changes were proposed in this pull request?

In various Pandas aggregate functions, remove each comparison or arithmetic operation between `DoubleType` and `IntegerType` in `evaluateExpression` and replace it with a comparison or arithmetic operation between `DoubleType` and `DoubleType`.

Affected functions are `PandasStddev`, `PandasVariance`, `PandasSkewness`, `PandasKurtosis`, and `PandasCovar`.
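
For illustration, the pattern being changed looks roughly like the following (simplified from the `PandasVariance` hunk in the diff below; `n` is a `DoubleType` buffer attribute and `ddof` is a Scala `Int`):
```
// Before: `ddof` is lifted to an IntegerType literal, so both the comparison and
// the subtraction mix DoubleType with IntegerType.
If(n === ddof, divideByZeroEvalResult, m2 / (n - ddof))

// After: converting `ddof` to Double up front keeps everything in DoubleType.
If(n === ddof.toDouble, divideByZeroEvalResult, m2 / (n - ddof.toDouble))
```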

### Why are the changes needed?

These functions fail in interpreted mode. For example, `evaluateExpression` in `PandasKurtosis` compares a double to an integer:
```
If(n < 4, Literal.create(null, DoubleType) ...
```
This results in a boxed double and a boxed integer being passed to `SQLOrderingUtil.compareDoubles`, which expects two primitive doubles as arguments. The Scala runtime tries to unbox the boxed integer as a double, which fails with a `ClassCastException`.
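
A standalone sketch of the unboxing failure (plain Scala, not Spark code; the values here are just illustrative):
```
// Unboxing a value of static type Any goes through Scala's BoxesRunTime, which
// first casts to the expected box type. A boxed Integer therefore cannot be
// unboxed as a Double.
val boxedDouble: Any = java.lang.Double.valueOf(4.0)
val boxedInt: Any = java.lang.Integer.valueOf(4)

val ok: Double = boxedDouble.asInstanceOf[Double]   // fine
val boom: Double = boxedInt.asInstanceOf[Double]    // throws java.lang.ClassCastException
```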

Reproduction example:
```
spark.sql("set spark.sql.codegen.wholeStage=false")
spark.sql("set spark.sql.codegen.factoryMode=NO_CODEGEN")

import numpy as np
import pandas as pd

import pyspark.pandas as ps

pser = pd.Series([1, 2, 3, 7, 9, 8], index=np.random.rand(6), name="a")
psser = ps.from_pandas(pser)

psser.kurt()
```
See the Jira ticket (SPARK-46189) for the other reproduction cases.

This works fine in codegen mode because the integer is already unboxed and the Java runtime implicitly widens it to a double.
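
For contrast, a rough Scala analogue of the primitive path taken by generated code (just a sketch, not the generated code itself): with unboxed operands the runtime widens the int, so no cast is involved.
```
val n: Double = 5.0
val minOrder: Int = 4                // primitive int, never boxed
val tooFew: Boolean = n < minOrder   // the int is implicitly widened to a double
```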

### Does this PR introduce _any_ user-facing change?

No.

### How was this patch tested?

New unit tests.

### Was this patch authored or co-authored using generative AI tooling?

No.

Closes #44099 from bersprockets/unboxing_error.

Authored-by: Bruce Robbins <bersprockets@gmail.com>
Signed-off-by: Ruifeng Zheng <ruifengz@apache.org>
bersprockets authored and zhengruifeng committed Dec 1, 2023
1 parent bfc5ad7 commit 042d854
Showing 6 changed files with 165 additions and 14 deletions.
@@ -355,7 +355,7 @@ case class PandasStddev(

override val evaluateExpression: Expression = {
If(n === 0.0, Literal.create(null, DoubleType),
-     If(n === ddof, divideByZeroEvalResult, sqrt(m2 / (n - ddof))))
+     If(n === ddof.toDouble, divideByZeroEvalResult, sqrt(m2 / (n - ddof.toDouble))))
}

override def prettyName: String = "pandas_stddev"
@@ -377,7 +377,7 @@ case class PandasVariance(

override val evaluateExpression: Expression = {
If(n === 0.0, Literal.create(null, DoubleType),
-     If(n === ddof, divideByZeroEvalResult, m2 / (n - ddof)))
+     If(n === ddof.toDouble, divideByZeroEvalResult, m2 / (n - ddof.toDouble)))
}

override def prettyName: String = "pandas_variance"
@@ -407,8 +407,8 @@ case class PandasSkewness(child: Expression)
val _m2 = If(abs(m2) < 1e-14, Literal(0.0), m2)
val _m3 = If(abs(m3) < 1e-14, Literal(0.0), m3)

-   If(n < 3, Literal.create(null, DoubleType),
-     If(_m2 === 0.0, Literal(0.0), sqrt(n - 1) * (n / (n - 2)) * _m3 / sqrt(_m2 * _m2 * _m2)))
+   If(n < 3.0, Literal.create(null, DoubleType),
+     If(_m2 === 0.0, Literal(0.0), sqrt(n - 1.0) * (n / (n - 2.0)) * _m3 / sqrt(_m2 * _m2 * _m2)))
}

override protected def withNewChildInternal(newChild: Expression): PandasSkewness =
@@ -425,9 +425,9 @@ case class PandasKurtosis(child: Expression)
override protected def momentOrder = 4

override val evaluateExpression: Expression = {
-   val adj = ((n - 1) / (n - 2)) * ((n - 1) / (n - 3)) * 3
-   val numerator = n * (n + 1) * (n - 1) * m4
-   val denominator = (n - 2) * (n - 3) * m2 * m2
+   val adj = ((n - 1.0) / (n - 2.0)) * ((n - 1.0) / (n - 3.0)) * 3.0
+   val numerator = n * (n + 1.0) * (n - 1.0) * m4
+   val denominator = (n - 2.0) * (n - 3.0) * m2 * m2

// floating point error
//
@@ -438,7 +438,7 @@ case class PandasKurtosis(child: Expression)
val _numerator = If(abs(numerator) < 1e-14, Literal(0.0), numerator)
val _denominator = If(abs(denominator) < 1e-14, Literal(0.0), denominator)

-   If(n < 4, Literal.create(null, DoubleType),
+   If(n < 4.0, Literal.create(null, DoubleType),
If(_denominator === 0.0, Literal(0.0), _numerator / _denominator - adj))
}

@@ -157,7 +157,7 @@ case class PandasCovar(

override val evaluateExpression: Expression = {
If(n === 0.0, Literal.create(null, DoubleType),
-     If(n === ddof, divideByZeroEvalResult, ck / (n - ddof)))
+     If(n === ddof.toDouble, divideByZeroEvalResult, ck / (n - ddof.toDouble)))
}
override def prettyName: String = "pandas_covar"

@@ -0,0 +1,77 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions.aggregate

import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.AttributeReference
import org.apache.spark.sql.types.DoubleType

class CentralMomentAggSuite extends TestWithAndWithoutCodegen {
val input = AttributeReference("input", DoubleType, nullable = true)()

testBothCodegenAndInterpreted("SPARK-46189: pandas_kurtosis eval") {
val evaluator = DeclarativeAggregateEvaluator(PandasKurtosis(input), Seq(input))
val buffer = evaluator.update(
InternalRow(1.0d),
InternalRow(2.0d),
InternalRow(3.0d),
InternalRow(7.0d),
InternalRow(9.0d),
InternalRow(8.0d))
val result = evaluator.eval(buffer)
assert(result === InternalRow(-2.5772889417360285d))
}

testBothCodegenAndInterpreted("SPARK-46189: pandas_skew eval") {
val evaluator = DeclarativeAggregateEvaluator(PandasSkewness(input), Seq(input))
val buffer = evaluator.update(
InternalRow(1.0d),
InternalRow(2.0d),
InternalRow(2.0d),
InternalRow(2.0d),
InternalRow(2.0d),
InternalRow(100.0d))
val result = evaluator.eval(buffer)
assert(result === InternalRow(2.4489389171333733d))
}

testBothCodegenAndInterpreted("SPARK-46189: pandas_stddev eval") {
val evaluator = DeclarativeAggregateEvaluator(PandasStddev(input, 1), Seq(input))
val buffer = evaluator.update(
InternalRow(1.0d),
InternalRow(2.0d),
InternalRow(3.0d),
InternalRow(7.0d),
InternalRow(9.0d),
InternalRow(8.0d))
val result = evaluator.eval(buffer)
assert(result === InternalRow(3.40587727318528d))
}

testBothCodegenAndInterpreted("SPARK-46189: pandas_variance eval") {
val evaluator = DeclarativeAggregateEvaluator(PandasVariance(input, 1), Seq(input))
val buffer = evaluator.update(
InternalRow(1.0d),
InternalRow(2.0d),
InternalRow(3.0d),
InternalRow(7.0d),
InternalRow(9.0d),
InternalRow(8.0d))
val result = evaluator.eval(buffer)
assert(result === InternalRow(11.6d))
}
}
@@ -0,0 +1,39 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions.aggregate

import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.AttributeReference
import org.apache.spark.sql.types.DoubleType

class CovarianceAggSuite extends TestWithAndWithoutCodegen {
val a = AttributeReference("a", DoubleType, nullable = true)()
val b = AttributeReference("b", DoubleType, nullable = true)()

testBothCodegenAndInterpreted("SPARK-46189: pandas_covar eval") {
val evaluator = DeclarativeAggregateEvaluator(PandasCovar(a, b, 1), Seq(a, b))
val buffer = evaluator.update(
InternalRow(1.0d, 1.0d),
InternalRow(2.0d, 2.0d),
InternalRow(3.0d, 3.0d),
InternalRow(7.0d, 7.0d),
InternalRow(9.0, 9.0),
InternalRow(8.0d, 6.0))
val result = evaluator.eval(buffer)
assert(result === InternalRow(10.4d))
}
}
@@ -17,24 +17,24 @@
package org.apache.spark.sql.catalyst.expressions.aggregate

import org.apache.spark.sql.catalyst.InternalRow
- import org.apache.spark.sql.catalyst.expressions.{Attribute, JoinedRow, SafeProjection}
+ import org.apache.spark.sql.catalyst.expressions.{Attribute, JoinedRow, MutableProjection}

/**
* Evaluator for a [[DeclarativeAggregate]].
*/
case class DeclarativeAggregateEvaluator(function: DeclarativeAggregate, input: Seq[Attribute]) {

-   lazy val initializer = SafeProjection.create(function.initialValues)
+   lazy val initializer = MutableProjection.create(function.initialValues)

-   lazy val updater = SafeProjection.create(
+   lazy val updater = MutableProjection.create(
function.updateExpressions,
function.aggBufferAttributes ++ input)

-   lazy val merger = SafeProjection.create(
+   lazy val merger = MutableProjection.create(
function.mergeExpressions,
function.aggBufferAttributes ++ function.inputAggBufferAttributes)

-   lazy val evaluator = SafeProjection.create(
+   lazy val evaluator = MutableProjection.create(
function.evaluateExpression :: Nil,
function.aggBufferAttributes)

@@ -0,0 +1,35 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions.aggregate

import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.expressions.CodegenObjectFactoryMode
import org.apache.spark.sql.catalyst.plans.SQLHelper
import org.apache.spark.sql.internal.SQLConf

trait TestWithAndWithoutCodegen extends SparkFunSuite with SQLHelper {
def testBothCodegenAndInterpreted(name: String)(f: => Unit): Unit = {
val modes = Seq(CodegenObjectFactoryMode.CODEGEN_ONLY, CodegenObjectFactoryMode.NO_CODEGEN)
for (fallbackMode <- modes) {
test(s"$name with $fallbackMode") {
withSQLConf(SQLConf.CODEGEN_FACTORY_MODE.key -> fallbackMode.toString) {
f
}
}
}
}
}
