Collapse withColumns calls #259

GitHub Actions / Report test results: failed Dec 4, 2023 in 0s

35900 tests run, 766 skipped, 39 failed.

Annotations

Check failure on line 1 in python/pyspark/sql/tests/connect/streaming/test_parity_streaming.py

python/pyspark/sql/tests/connect/streaming/test_parity_streaming.py.test_stream_save_options

Partition column id not found in existing columns (value).

JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.errors.QueryCompilationErrors$.columnNotFoundInExistingColumnsError(QueryCompilationErrors.scala:2078)
	at org.apache.spark.sql.streaming.DataStreamWriter.$anonfun$normalize$3(DataStreamWriter.scala:522)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.streaming.DataStreamWriter.normalize(DataStreamWriter.scala:521)
	at org.apache.spark.sql.streaming.DataStreamWriter.$anonfun$normalizedParCols$2(DataStreamWriter.scala:510)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.streaming.DataStreamWriter.$anonfun$normalizedParCols$1(DataStreamWriter.scala:510)
	at scala.Option.map(Option.scala:242)
	at org.apache.spark.sql.streaming.DataStreamWriter.normalizedParCols(DataStreamWriter.scala:509)
	at org.apache.spark.sql.streaming.DataStreamWriter.createV1Sink(DataStreamWriter.scala:442)
	at org.apache.spark.sql.streaming.DataStreamWriter.startInternal(DataStreamWriter.scala:408)
	at org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:252)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.handleWriteStreamOperationStart(SparkConnectPlanner.scala:2992)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.process(SparkConnectPlanner.scala:2515)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.handleCommand(ExecuteThreadRunner.scala:214)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.$anonfun$executeInternal$1(ExecuteThreadRunner.scala:158)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.$anonfun$executeInternal$1$adapted(ExecuteThreadRunner.scala:132)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.executeInternal(ExecuteThreadRunner.scala:132)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.org$apache$spark$sql$connect$execution$ExecuteThreadRunner$$execute(ExecuteThreadRunner.scala:84)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner$ExecutionThread.run(ExecuteThreadRunner.scala:240)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/sql/tests/streaming/test_streaming.py", line 129, in test_stream_save_options
    df.writeStream.option("checkpointLocation", chk)
  File "/__w/spark/spark/python/pyspark/sql/connect/streaming/readwriter.py", line 618, in start
    return self._start_internal(
  File "/__w/spark/spark/python/pyspark/sql/connect/streaming/readwriter.py", line 597, in _start_internal
    (_, properties) = self._session.client.execute_command(cmd)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 940, in execute_command
    data, _, _, _, properties = self._execute_and_fetch(req, observations or {})
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1267, in _execute_and_fetch
    for response in self._execute_and_fetch_as_iterator(req, observations):
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1245, in _execute_and_fetch_as_iterator
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: Partition column id not found in existing columns (value).

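The error above suggests that, under this PR's plan rewrite, the DataFrame reaching the streaming writer exposes only `value` while the test still partitions by `id`. A minimal sketch of that failure mode, assuming a local Spark session (the source, sink, and paths below are hypothetical, not the test's own):

```python
# Hypothetical repro sketch: partitionBy() names a column that is absent from
# the writer's input schema, which raises the same AnalysisException at start().
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

# The rate source yields (timestamp, value); keeping only `value` leaves a
# schema in which `id` does not exist.
df = spark.readStream.format("rate").load().select("value")

query = (
    df.writeStream.format("parquet")
    .option("checkpointLocation", "/tmp/chk")  # hypothetical paths
    .option("path", "/tmp/out")
    .partitionBy("id")  # "Partition column id not found in existing columns (value)"
    .start()
)
```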

Check failure on line 1 in python/pyspark/ml/tests/connect/test_connect_classification.py

python/pyspark/ml/tests/connect/test_connect_classification.py.test_binary_classes_logistic_regression

'DataFrame' object has no attribute 'prediction'
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/ml/tests/connect/test_legacy_mode_classification.py", line 86, in test_binary_classes_logistic_regression
    self._check_result(result, expected_predictions, expected_probabilities)
  File "/__w/spark/spark/python/pyspark/ml/tests/connect/test_legacy_mode_classification.py", line 44, in _check_result
    np.testing.assert_array_equal(list(result_dataframe.prediction), expected_predictions)
  File "/usr/local/lib/python3.9/dist-packages/pandas/core/generic.py", line 6204, in __getattr__
    return object.__getattribute__(self, name)
AttributeError: 'DataFrame' object has no attribute 'prediction'

Check failure on line 1 in python/pyspark/ml/tests/connect/test_connect_classification.py

python/pyspark/ml/tests/connect/test_connect_classification.py.test_multi_classes_logistic_regression

'DataFrame' object has no attribute 'prediction'
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/ml/tests/connect/test_legacy_mode_classification.py", line 130, in test_multi_classes_logistic_regression
    self._check_result(result, expected_predictions, expected_probabilities)
  File "/__w/spark/spark/python/pyspark/ml/tests/connect/test_legacy_mode_classification.py", line 44, in _check_result
    np.testing.assert_array_equal(list(result_dataframe.prediction), expected_predictions)
  File "/usr/local/lib/python3.9/dist-packages/pandas/core/generic.py", line 6204, in __getattr__
    return object.__getattribute__(self, name)
AttributeError: 'DataFrame' object has no attribute 'prediction'
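
The two classification failures above share one symptom: the transform result, collected as a pandas DataFrame, no longer carries a `prediction` column, so the test's attribute-style access fails. A minimal sketch of the pandas behavior involved (hypothetical data, not the test's own):

```python
# Hypothetical illustration: attribute access on a pandas DataFrame resolves
# only existing columns; a missing column falls through to __getattribute__
# and raises AttributeError, which is what _check_result hits above.
import pandas as pd

result_dataframe = pd.DataFrame({"probability": [0.1, 0.9]})  # no `prediction`
try:
    predictions = list(result_dataframe.prediction)
except AttributeError as e:
    print(e)  # 'DataFrame' object has no attribute 'prediction'
```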

Check failure on line 1 in python/pyspark/pandas/tests/connect/computation/test_parity_any_all.py

python/pyspark/pandas/tests/connect/computation/test_parity_any_all.py.test_all

When resolving '__index_level_0__, fail to find subplan with plan_id=2 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#703 AS __index_level_0__#717, col1#704 AS col1#718, col2#705 AS col2#719, col3#706L AS col3#720L, col4#707L AS col4#721L, col5#708 AS col5#722, col6#709 AS col6#723]
   +- LocalRelation [__index_level_0__#703, col1#704, col2#705, col3#706L, col4#707L, col5#708, col6#709]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/computation/test_any_all.py", line 54, in test_all
    psdf = ps.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=2 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#703 AS __index_level_0__#717, col1#704 AS col1#718, col2#705 AS col2#719, col3#706L AS col3#720L, col4#707L AS col4#721L, col5#708 AS col5#722, col6#709 AS col6#723]
   +- LocalRelation [__index_level_0__#703, col1#704, col2#705, col3#706L, col4#707L, col5#708, col6#709]


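This failure class (test_all, test_any, and the data_type_ops parity failures below) looks different: under Spark Connect, pandas-on-Spark resolves its `__index_level_0__` column by the plan_id of the frame it was created from, and the analyzer now reports that the tagged subplan is missing, presumably because the collapsed Project rewrote it away. The triggering path comes straight from the traceback; a minimal sketch, assuming a Spark Connect session (hypothetical data; on an unpatched build this succeeds):

```python
# Hypothetical repro path following the traceback above: ps.from_pandas builds
# an InternalFrame whose index column is selected by plan_id; if that subplan
# no longer exists, schema analysis fails with
# "fail to find subplan with plan_id=N".
import pandas as pd
import pyspark.pandas as ps

pdf = pd.DataFrame({"col1": [True, False], "col2": [True, True]})
psdf = ps.from_pandas(pdf)  # AnalysisException raised here under this PR
print(psdf.all())
```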

Check failure on line 1 in python/pyspark/pandas/tests/connect/computation/test_parity_any_all.py

python/pyspark/pandas/tests/connect/computation/test_parity_any_all.py.test_any

When resolving '__index_level_0__, fail to find subplan with plan_id=13 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#1434 AS __index_level_0__#1448, col1#1435 AS col1#1449, col2#1436 AS col2#1450, col3#1437L AS col3#1451L, col4#1438L AS col4#1452L, col5#1439 AS col5#1453, col6#1440 AS col6#1454]
   +- LocalRelation [__index_level_0__#1434, col1#1435, col2#1436, col3#1437L, col4#1438L, col5#1439, col6#1440]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/computation/test_any_all.py", line 126, in test_any
    psdf = ps.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=13 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#1434 AS __index_level_0__#1448, col1#1435 AS col1#1449, col2#1436 AS col2#1450, col3#1437L AS col3#1451L, col4#1438L AS col4#1452L, col5#1439 AS col5#1453, col6#1440 AS col6#1454]
   +- LocalRelation [__index_level_0__#1434, col1#1435, col2#1436, col3#1437L, col4#1438L, col5#1439, col6#1440]



Check failure on line 1 in python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py

python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py.test_add

When resolving '__index_level_0__, fail to find subplan with plan_id=2 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#2383L AS __index_level_0__#2411L, int32#2384 AS int32#2412, int#2385L AS int#2413L, float32#2386 AS float32#2414, float#2387 AS float#2415, decimal#2388 AS decimal#2416, float_nan#2389 AS float_nan#2417, decimal_nan#2390 AS decimal_nan#2418, string#2391 AS string#2419, bool#2392 AS bool#2420, date#2393 AS date#2421, datetime#2394 AS datetime#2422, timedelta#2395 AS timedelta#2423, categorical#2396 AS categorical#2424]
   +- LocalRelation [__index_level_0__#2383L, int32#2384, int#2385L, float32#2386, float#2387, decimal#2388, float_nan#2389, decimal_nan#2390, string#2391, bool#2392, date#2393, datetime#2394, timedelta#2395, categorical#2396]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/data_type_ops/test_num_arithmetic.py", line 44, in test_add
    pdf, psdf = self.pdf, self.psdf
  File "/__w/spark/spark/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py", line 31, in psdf
    return ps.from_pandas(self.pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=2 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#2383L AS __index_level_0__#2411L, int32#2384 AS int32#2412, int#2385L AS int#2413L, float32#2386 AS float32#2414, float#2387 AS float#2415, decimal#2388 AS decimal#2416, float_nan#2389 AS float_nan#2417, decimal_nan#2390 AS decimal_nan#2418, string#2391 AS string#2419, bool#2392 AS bool#2420, date#2393 AS date#2421, datetime#2394 AS datetime#2422, timedelta#2395 AS timedelta#2423, categorical#2396 AS categorical#2424]
   +- LocalRelation [__index_level_0__#2383L, int32#2384, int#2385L, float32#2386, float#2387, decimal#2388, float_nan#2389, decimal_nan#2390, string#2391, bool#2392, date#2393, datetime#2394, timedelta#2395, categorical#2396]
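
Each traceback in this report follows the same client-side path: ps.from_pandas builds an InternalFrame, whose constructor requests spark_frame.select(index_spark_columns).schema over Spark Connect, and the server-side analyzer then fails to resolve __index_level_0__ by its plan_id. A minimal sketch of that call path, for illustration only — it is not taken from the report, assumes a Spark Connect server reachable at sc://localhost:15002 (hypothetical address), and uses placeholder data:

import pandas as pd
import pyspark.pandas as ps
from pyspark.sql import SparkSession

# Assumption: a Spark Connect server is running at this address.
spark = SparkSession.builder.remote("sc://localhost:15002").getOrCreate()

# Placeholder frame; the real tests build a wider DataFrame (int32, float,
# decimal, datetime, ... columns) in pyspark.pandas.tests.data_type_ops.
pdf = pd.DataFrame({"int32": [1, 2, 3]})

# from_pandas -> DataFrame.__init__ -> InternalFrame.from_pandas ->
# InternalFrame.__init__, which calls
# spark_frame.select(index_spark_columns).schema -- the schema RPC on
# which the AnalysisException above is raised.
psdf = ps.from_pandas(pdf)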


Check failure on line 1 in python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py

github-actions / Report test results

python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py.test_floordiv

When resolving '__index_level_0__, fail to find subplan with plan_id=20 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#4822L AS __index_level_0__#4850L, int32#4823 AS int32#4851, int#4824L AS int#4852L, float32#4825 AS float32#4853, float#4826 AS float#4854, decimal#4827 AS decimal#4855, float_nan#4828 AS float_nan#4856, decimal_nan#4829 AS decimal_nan#4857, string#4830 AS string#4858, bool#4831 AS bool#4859, date#4832 AS date#4860, datetime#4833 AS datetime#4861, timedelta#4834 AS timedelta#4862, categorical#4835 AS categorical#4863]
   +- LocalRelation [__index_level_0__#4822L, int32#4823, int#4824L, float32#4825, float#4826, decimal#4827, float_nan#4828, decimal_nan#4829, string#4830, bool#4831, date#4832, datetime#4833, timedelta#4834, categorical#4835]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/data_type_ops/test_num_arithmetic.py", line 114, in test_floordiv
    pdf, psdf = self.pdf, self.psdf
  File "/__w/spark/spark/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py", line 31, in psdf
    return ps.from_pandas(self.pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=20 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#4822L AS __index_level_0__#4850L, int32#4823 AS int32#4851, int#4824L AS int#4852L, float32#4825 AS float32#4853, float#4826 AS float#4854, decimal#4827 AS decimal#4855, float_nan#4828 AS float_nan#4856, decimal_nan#4829 AS decimal_nan#4857, string#4830 AS string#4858, bool#4831 AS bool#4859, date#4832 AS date#4860, datetime#4833 AS datetime#4861, timedelta#4834 AS timedelta#4862, categorical#4835 AS categorical#4863]
   +- LocalRelation [__index_level_0__#4822L, int32#4823, int#4824L, float32#4825, float#4826, decimal#4827, float_nan#4828, decimal_nan#4829, string#4830, bool#4831, date#4832, datetime#4833, timedelta#4834, categorical#4835]


Check failure on line 1 in python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py

github-actions / Report test results

python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py.test_mod

When resolving '__index_level_0__, fail to find subplan with plan_id=38 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#7261L AS __index_level_0__#7289L, int32#7262 AS int32#7290, int#7263L AS int#7291L, float32#7264 AS float32#7292, float#7265 AS float#7293, decimal#7266 AS decimal#7294, float_nan#7267 AS float_nan#7295, decimal_nan#7268 AS decimal_nan#7296, string#7269 AS string#7297, bool#7270 AS bool#7298, date#7271 AS date#7299, datetime#7272 AS datetime#7300, timedelta#7273 AS timedelta#7301, categorical#7274 AS categorical#7302]
   +- LocalRelation [__index_level_0__#7261L, int32#7262, int#7263L, float32#7264, float#7265, decimal#7266, float_nan#7267, decimal_nan#7268, string#7269, bool#7270, date#7271, datetime#7272, timedelta#7273, categorical#7274]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/data_type_ops/test_num_arithmetic.py", line 130, in test_mod
    pdf, psdf = self.pdf, self.psdf
  File "/__w/spark/spark/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py", line 31, in psdf
    return ps.from_pandas(self.pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=38 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#7261L AS __index_level_0__#7289L, int32#7262 AS int32#7290, int#7263L AS int#7291L, float32#7264 AS float32#7292, float#7265 AS float#7293, decimal#7266 AS decimal#7294, float_nan#7267 AS float_nan#7295, decimal_nan#7268 AS decimal_nan#7296, string#7269 AS string#7297, bool#7270 AS bool#7298, date#7271 AS date#7299, datetime#7272 AS datetime#7300, timedelta#7273 AS timedelta#7301, categorical#7274 AS categorical#7302]
   +- LocalRelation [__index_level_0__#7261L, int32#7262, int#7263L, float32#7264, float#7265, decimal#7266, float_nan#7267, decimal_nan#7268, string#7269, bool#7270, date#7271, datetime#7272, timedelta#7273, categorical#7274]


Check failure on line 1 in python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py

github-actions / Report test results

python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py.test_mul

When resolving '__index_level_0__, fail to find subplan with plan_id=56 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#9700L AS __index_level_0__#9728L, int32#9701 AS int32#9729, int#9702L AS int#9730L, float32#9703 AS float32#9731, float#9704 AS float#9732, decimal#9705 AS decimal#9733, float_nan#9706 AS float_nan#9734, decimal_nan#9707 AS decimal_nan#9735, string#9708 AS string#9736, bool#9709 AS bool#9737, date#9710 AS date#9738, datetime#9711 AS datetime#9739, timedelta#9712 AS timedelta#9740, categorical#9713 AS categorical#9741]
   +- LocalRelation [__index_level_0__#9700L, int32#9701, int#9702L, float32#9703, float#9704, decimal#9705, float_nan#9706, decimal_nan#9707, string#9708, bool#9709, date#9710, datetime#9711, timedelta#9712, categorical#9713]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/data_type_ops/test_num_arithmetic.py", line 78, in test_mul
    pdf, psdf = self.pdf, self.psdf
  File "/__w/spark/spark/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py", line 31, in psdf
    return ps.from_pandas(self.pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=56 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#9700L AS __index_level_0__#9728L, int32#9701 AS int32#9729, int#9702L AS int#9730L, float32#9703 AS float32#9731, float#9704 AS float#9732, decimal#9705 AS decimal#9733, float_nan#9706 AS float_nan#9734, decimal_nan#9707 AS decimal_nan#9735, string#9708 AS string#9736, bool#9709 AS bool#9737, date#9710 AS date#9738, datetime#9711 AS datetime#9739, timedelta#9712 AS timedelta#9740, categorical#9713 AS categorical#9741]
   +- LocalRelation [__index_level_0__#9700L, int32#9701, int#9702L, float32#9703, float#9704, decimal#9705, float_nan#9706, decimal_nan#9707, string#9708, bool#9709, date#9710, datetime#9711, timedelta#9712, categorical#9713]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
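Triage note: this test_mul failure and the test_pow, test_sub, and test_truediv failures below all break in the same place. ps.from_pandas builds an InternalFrame, whose constructor requests the schema of spark_frame.select(index_spark_columns); over Spark Connect that schema call is an AnalyzePlan RPC, and the server-side analyzer then fails to find the subplan carrying the client-assigned plan_id while resolving the synthetic __index_level_0__ index column. A minimal client-side sketch of the failing path, assuming a reachable Spark Connect server (the data here is hypothetical; the real fixture lives in test_num_arithmetic.py):

    import pandas as pd
    from pyspark.sql import SparkSession
    import pyspark.pandas as ps

    # Assumption: a Spark Connect server is listening at sc://localhost.
    spark = SparkSession.builder.remote("sc://localhost").getOrCreate()

    # Any pandas DataFrame works; from_pandas adds the synthetic
    # __index_level_0__ index column seen in the error message above.
    pdf = pd.DataFrame({"int32": [1, 2, 3], "float": [0.1, 0.2, 0.3]})

    # InternalFrame.__init__ evaluates spark_frame.select(...).schema,
    # which is the AnalyzePlan RPC where the AnalysisException surfaces.
    psdf = ps.from_pandas(pdf)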

Check failure on line 1 in python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py

python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py.test_pow

When resolving '__index_level_0__, fail to find subplan with plan_id=74 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#12139L AS __index_level_0__#12167L, int32#12140 AS int32#12168, int#12141L AS int#12169L, float32#12142 AS float32#12170, float#12143 AS float#12171, decimal#12144 AS decimal#12172, float_nan#12145 AS float_nan#12173, decimal_nan#12146 AS decimal_nan#12174, string#12147 AS string#12175, bool#12148 AS bool#12176, date#12149 AS date#12177, datetime#12150 AS datetime#12178, timedelta#12151 AS timedelta#12179, categorical#12152 AS categorical#12180]
   +- LocalRelation [__index_level_0__#12139L, int32#12140, int#12141L, float32#12142, float#12143, decimal#12144, float_nan#12145, decimal_nan#12146, string#12147, bool#12148, date#12149, datetime#12150, timedelta#12151, categorical#12152]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
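The resolveUnresolvedAttributeByPlanId frames above are Spark Connect's plan_id-based column resolution: every client-side DataFrame tags its plan with a plan_id, and a column taken from that DataFrame carries the tag so the server can later locate the originating subplan. "fail to find subplan with plan_id=N" therefore means the tagged node no longer appears in the analyzed plan tree, which a rewrite that merges adjacent Project nodes (as this PR's withColumns collapsing does) could plausibly cause if the tag is not preserved. An illustrative sketch of an operation that depends on this resolution path (not the failing test itself; the session setup is assumed as above):

    from pyspark.sql import SparkSession, functions as F

    spark = SparkSession.builder.remote("sc://localhost").getOrCreate()

    df1 = spark.range(3)
    # Each withColumns call adds a Project; collapsing them must keep
    # the plan_id tags or references like df1.id become unresolvable.
    df2 = df1.withColumns({"a": F.lit(1)}).withColumns({"b": F.lit(2)})

    # Resolving df1.id requires finding df1's plan_id inside df2's tree.
    df2.select(df1.id).printSchema()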
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/data_type_ops/test_num_arithmetic.py", line 152, in test_pow
    pdf, psdf = self.pdf, self.psdf
  File "/__w/spark/spark/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py", line 31, in psdf
    return ps.from_pandas(self.pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=74 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#12139L AS __index_level_0__#12167L, int32#12140 AS int32#12168, int#12141L AS int#12169L, float32#12142 AS float32#12170, float#12143 AS float#12171, decimal#12144 AS decimal#12172, float_nan#12145 AS float_nan#12173, decimal_nan#12146 AS decimal_nan#12174, string#12147 AS string#12175, bool#12148 AS bool#12176, date#12149 AS date#12177, datetime#12150 AS datetime#12178, timedelta#12151 AS timedelta#12179, categorical#12152 AS categorical#12180]
   +- LocalRelation [__index_level_0__#12139L, int32#12140, int#12141L, float32#12142, float#12143, decimal#12144, float_nan#12145, decimal_nan#12146, string#12147, bool#12148, date#12149, datetime#12150, timedelta#12151, categorical#12152]



Check failure on line 1 in python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py

python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py.test_sub

When resolving '__index_level_0__, fail to find subplan with plan_id=92 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#14578L AS __index_level_0__#14606L, int32#14579 AS int32#14607, int#14580L AS int#14608L, float32#14581 AS float32#14609, float#14582 AS float#14610, decimal#14583 AS decimal#14611, float_nan#14584 AS float_nan#14612, decimal_nan#14585 AS decimal_nan#14613, string#14586 AS string#14614, bool#14587 AS bool#14615, date#14588 AS date#14616, datetime#14589 AS datetime#14617, timedelta#14590 AS timedelta#14618, categorical#14591 AS categorical#14619]
   +- LocalRelation [__index_level_0__#14578L, int32#14579, int#14580L, float32#14581, float#14582, decimal#14583, float_nan#14584, decimal_nan#14585, string#14586, bool#14587, date#14588, datetime#14589, timedelta#14590, categorical#14591]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/data_type_ops/test_num_arithmetic.py", line 61, in test_sub
    pdf, psdf = self.pdf, self.psdf
  File "/__w/spark/spark/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py", line 31, in psdf
    return ps.from_pandas(self.pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=92 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#14578L AS __index_level_0__#14606L, int32#14579 AS int32#14607, int#14580L AS int#14608L, float32#14581 AS float32#14609, float#14582 AS float#14610, decimal#14583 AS decimal#14611, float_nan#14584 AS float_nan#14612, decimal_nan#14585 AS decimal_nan#14613, string#14586 AS string#14614, bool#14587 AS bool#14615, date#14588 AS date#14616, datetime#14589 AS datetime#14617, timedelta#14590 AS timedelta#14618, categorical#14591 AS categorical#14619]
   +- LocalRelation [__index_level_0__#14578L, int32#14579, int#14580L, float32#14581, float#14582, decimal#14583, float_nan#14584, decimal_nan#14585, string#14586, bool#14587, date#14588, datetime#14589, timedelta#14590, categorical#14591]



Check failure on line 1 in python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py

python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py.test_truediv

When resolving '__index_level_0__, fail to find subplan with plan_id=110 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#17017L AS __index_level_0__#17045L, int32#17018 AS int32#17046, int#17019L AS int#17047L, float32#17020 AS float32#17048, float#17021 AS float#17049, decimal#17022 AS decimal#17050, float_nan#17023 AS float_nan#17051, decimal_nan#17024 AS decimal_nan#17052, string#17025 AS string#17053, bool#17026 AS bool#17054, date#17027 AS date#17055, datetime#17028 AS datetime#17056, timedelta#17029 AS timedelta#17057, categorical#17030 AS categorical#17058]
   +- LocalRelation [__index_level_0__#17017L, int32#17018, int#17019L, float32#17020, float#17021, decimal#17022, float_nan#17023, decimal_nan#17024, string#17025, bool#17026, date#17027, datetime#17028, timedelta#17029, categorical#17030]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/data_type_ops/test_num_arithmetic.py", line 98, in test_truediv
    pdf, psdf = self.pdf, self.psdf
  File "/__w/spark/spark/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py", line 31, in psdf
    return ps.from_pandas(self.pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=110 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#17017L AS __index_level_0__#17045L, int32#17018 AS int32#17046, int#17019L AS int#17047L, float32#17020 AS float32#17048, float#17021 AS float#17049, decimal#17022 AS decimal#17050, float_nan#17023 AS float_nan#17051, decimal_nan#17024 AS decimal_nan#17052, string#17025 AS string#17053, bool#17026 AS bool#17054, date#17027 AS date#17055, datetime#17028 AS datetime#17056, timedelta#17029 AS timedelta#17057, categorical#17030 AS categorical#17058]
   +- LocalRelation [__index_level_0__#17017L, int32#17018, int#17019L, float32#17020, float#17021, decimal#17022, float_nan#17023, decimal_nan#17024, string#17025, bool#17026, date#17027, datetime#17028, timedelta#17029, categorical#17030]



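Note on the failures that follow: every pandas-on-Spark parity failure in this report hits the same call path. ps.from_pandas builds an InternalFrame, whose constructor evaluates spark_frame.select(index_spark_columns).schema; that schema access makes the Spark Connect client issue its "schema" analyze RPC, and the server-side analyzer then fails to resolve '__index_level_0__ because the subplan with the recorded plan_id cannot be found. Below is a minimal sketch of the triggering path, assuming a locally running Spark Connect server; the sc://localhost:15002 endpoint and the sample frame are placeholders, and the snippet exercises the code path shown in the tracebacks rather than deterministically reproducing the plan_id mismatch.

    import pandas as pd
    import pyspark.pandas as ps
    from pyspark.sql import SparkSession

    # Placeholder endpoint; any reachable Spark Connect server works.
    spark = SparkSession.builder.remote("sc://localhost:15002").getOrCreate()

    pdf = pd.DataFrame({"x": [1.0, None, 3.0], "y": [None, 2.0, 3.0]})

    # ps.from_pandas -> DataFrame.__init__ -> InternalFrame.from_pandas
    # -> InternalFrame.__init__, which evaluates
    #     spark_frame.select(index_spark_columns).schema
    # That schema access is the client-side "schema" analyze request in
    # every traceback below; the AnalysisException is raised while the
    # server resolves '__index_level_0__ by plan_id.
    psdf = ps.from_pandas(pdf)
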
Check failure on line 1 in python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py


python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py.test_backfill

When resolving '__index_level_0__, fail to find subplan with plan_id=2 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#403L AS __index_level_0__#413L, A#404 AS A#414, B#405 AS B#415, C#406 AS C#416, D#407L AS D#417L]
   +- LocalRelation [__index_level_0__#403L, A#404, B#405, C#406, D#407L]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/computation/test_missing_data.py", line 54, in test_backfill
    psdf = ps.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=2 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#403L AS __index_level_0__#413L, A#404 AS A#414, B#405 AS B#415, C#406 AS C#416, D#407L AS D#417L]
   +- LocalRelation [__index_level_0__#403L, A#404, B#405, C#406, D#407L]



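For context on the error text itself: in Spark Connect, a Column captured from a DataFrame carries the plan_id of the plan it came from, and ResolveReferences resolves such a column by locating that subplan inside the plan under analysis (tryResolveColumnByPlanId and resolveUnresolvedAttributeByPlanId in the traces above). The sketch below illustrates the lookup on its success path, under the same placeholder Connect session as the previous sketch; the failures in this report take the error branch, where the recorded plan_id is absent from the analyzed plan.

    from pyspark.sql import SparkSession

    spark = SparkSession.builder.remote("sc://localhost:15002").getOrCreate()

    df1 = spark.range(3).withColumnRenamed("id", "n")
    df2 = df1.select((df1.n + 1).alias("n"))

    # df1.n carries df1's plan_id. When the server analyzes
    # df2.select(df1.n), the analyzer searches df2's plan for the
    # subplan with that id and resolves 'n against it; if that subplan
    # were missing, this would raise the same
    # "fail to find subplan with plan_id=..." AnalysisException.
    df2.select(df1.n).show()
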
Check failure on line 1 in python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py


python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py.test_bfill

When resolving '__index_level_0__, fail to find subplan with plan_id=11 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#706 AS __index_level_0__#714, x#707 AS x#715, y#708 AS y#716, z#709 AS z#717]
   +- LocalRelation [__index_level_0__#706, x#707, y#708, z#709]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/computation/test_missing_data.py", line 436, in test_bfill
    psdf = ps.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=11 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#706 AS __index_level_0__#714, x#707 AS x#715, y#708 AS y#716, z#709 AS z#717]
   +- LocalRelation [__index_level_0__#706, x#707, y#708, z#709]



Check failure on line 1 in python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py


python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py.test_dropna_axis_column

When resolving '__index_level_0__, fail to find subplan with plan_id=19 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#1425 AS __index_level_0__#1439, 0.217135322744903#1426 AS 0.217135322744903#1440, 0.7473089944413854#1427 AS 0.7473089944413854#1441, 0.575509337776447#1428 AS 0.575509337776447#1442, 0.6522650206329972#1429 AS 0.6522650206329972#1443, 0.36292035847776116#1430 AS 0.36292035847776116#1444, 0.40282615513358755#1431 AS 0.40282615513358755#1445]
   +- LocalRelation [__index_level_0__#1425, 0.217135322744903#1426, 0.7473089944413854#1427, 0.575509337776447#1428, 0.6522650206329972#1429, 0.36292035847776116#1430, 0.40282615513358755#1431]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/computation/test_missing_data.py", line 172, in test_dropna_axis_column
    self._test_dropna(pdf, axis=1)
  File "/__w/spark/spark/python/pyspark/pandas/tests/computation/test_missing_data.py", line 64, in _test_dropna
    psdf = ps.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=19 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#1425 AS __index_level_0__#1439, 0.217135322744903#1426 AS 0.217135322744903#1440, 0.7473089944413854#1427 AS 0.7473089944413854#1441, 0.575509337776447#1428 AS 0.575509337776447#1442, 0.6522650206329972#1429 AS 0.6522650206329972#1443, 0.36292035847776116#1430 AS 0.36292035847776116#1444, 0.40282615513358755#1431 AS 0.40282615513358755#1445]
   +- LocalRelation [__index_level_0__#1425, 0.217135322744903#1426, 0.7473089944413854#1427, 0.575509337776447#1428, 0.6522650206329972#1429, 0.36292035847776116#1430, 0.40282615513358755#1431]
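
Note: the pandas-on-Spark parity failures in this section all share one signature. ps.from_pandas builds an InternalFrame, InternalFrame.__init__ calls spark_frame.select(index_spark_columns).schema, and that schema request fails server-side with "fail to find subplan with plan_id=...". Below is a minimal repro sketch of the trigger, assuming a Spark Connect server built from this change is running; the endpoint address is a placeholder:

import pandas as pd
import pyspark.pandas as ps
from pyspark.sql import SparkSession

# Placeholder endpoint; point it at a Connect server running this build.
spark = SparkSession.builder.remote("sc://localhost").getOrCreate()

# Columns x/y/z mirror the fixtures used by test_missing_data.py.
pdf = pd.DataFrame({"x": [1.0, None], "y": [None, 2.0], "z": [3.0, 4.0]})

# from_pandas attaches a synthetic __index_level_0__ column, selects it,
# and asks for the schema; that is the step that raises the
# AnalysisException shown in these tracebacks.
psdf = ps.from_pandas(pdf)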


Check failure on line 1 in python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py

@github-actions / Report test results

python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py.test_dropna_axis_index

When resolving '__index_level_0__, fail to find subplan with plan_id=30 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#1736 AS __index_level_0__#1744, x#1737 AS x#1745, y#1738 AS y#1746, z#1739 AS z#1747]
   +- LocalRelation [__index_level_0__#1736, x#1737, y#1738, z#1739]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/computation/test_missing_data.py", line 134, in test_dropna_axis_index
    self._test_dropna(pdf, axis=0)
  File "/__w/spark/spark/python/pyspark/pandas/tests/computation/test_missing_data.py", line 64, in _test_dropna
    psdf = ps.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=30 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#1736 AS __index_level_0__#1744, x#1737 AS x#1745, y#1738 AS y#1746, z#1739 AS z#1747]
   +- LocalRelation [__index_level_0__#1736, x#1737, y#1738, z#1739]
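
Note: the exception is raised in ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId, which resolves a column captured from one DataFrame against another plan by a plan_id tag. The plan it fails on is a rename-only Project over a LocalRelation, which is the shape that adjacent withColumns/alias calls collapse into. One hedged reading: if collapsing rewrites or drops the node tagged with the referenced plan_id, the lookup has nothing to find. A sketch of the same mechanism in plain pyspark.sql, reusing the spark session from the sketch above:

from pyspark.sql import functions as F

df = spark.createDataFrame(
    [(0, 1.0, 2.0, 3.0)], ["__index_level_0__", "x", "y", "z"]
)

# A rename-only projection: the same shape as the failing
# 'Project [__index_level_0__#a AS __index_level_0__#b, ...]' above.
renamed = df.select([F.col(c).alias(c) for c in df.columns])

# df["__index_level_0__"] carries df's plan_id; the server resolves it via
# resolveUnresolvedAttributeByPlanId. This normally succeeds; it fails as
# above only when the tagged subplan is gone after plan rewriting.
renamed.select(df["__index_level_0__"]).schema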


Check failure on line 1 in python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py

@github-actions / Report test results

python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py.test_ffill

When resolving '__index_level_0__, fail to find subplan with plan_id=38 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#2035 AS __index_level_0__#2043, x#2036 AS x#2044, y#2037 AS y#2045, z#2038 AS z#2046]
   +- LocalRelation [__index_level_0__#2035, x#2036, y#2037, z#2038]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/computation/test_missing_data.py", line 411, in test_ffill
    psdf = ps.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=38 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#2035 AS __index_level_0__#2043, x#2036 AS x#2044, y#2037 AS y#2045, z#2038 AS z#2046]
   +- LocalRelation [__index_level_0__#2035, x#2036, y#2037, z#2038]
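
Note: each of these failures surfaces during analysis rather than execution: the client's .schema property sends an AnalyzePlan RPC (the _analyze frames in client/core.py in every traceback), and the AnalysisException travels back over gRPC. A hedged snippet for inspecting the server message locally, reusing ps and pdf from the first sketch:

from pyspark.errors.exceptions.connect import AnalysisException

try:
    ps.from_pandas(pdf)  # fails inside the AnalyzePlan ("schema") request
except AnalysisException as e:
    # str(e) carries the "fail to find subplan with plan_id=..." message
    # and, in this CI configuration, the JVM stacktrace as well.
    print(e)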


Check failure on line 1 in python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py

@github-actions / Report test results

python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py.test_fillna

When resolving '__index_level_0__, fail to find subplan with plan_id=46 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#2334 AS __index_level_0__#2342, x#2335 AS x#2343, y#2336 AS y#2344, z#2337 AS z#2345]
   +- LocalRelation [__index_level_0__#2334, x#2335, y#2336, z#2337]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/computation/test_missing_data.py", line 198, in test_fillna
    psdf = ps.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=46 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#2334 AS __index_level_0__#2342, x#2335 AS x#2343, y#2336 AS y#2344, z#2337 AS z#2345]
   +- LocalRelation [__index_level_0__#2334, x#2335, y#2336, z#2337]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	... (stack frames identical to the JVM stacktrace above; elided)
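
These pandas-on-Spark parity failures all fail the same way: constructing a pandas-on-Spark frame over Spark Connect issues a schema request, and the server-side analyzer cannot resolve __index_level_0__ by its plan_id. A minimal sketch of that call path, assuming a reachable Spark Connect endpoint (the sc://localhost address is an assumption; the CI tests obtain a session from their own fixture instead):

    import pandas as pd
    import pyspark.pandas as ps
    from pyspark.sql import SparkSession

    # Assumption: a Spark Connect server is running at this address.
    spark = SparkSession.builder.remote("sc://localhost").getOrCreate()

    pdf = pd.DataFrame({"x": [1, 2, 3]})
    # from_pandas builds an InternalFrame, whose __init__ evaluates
    # spark_frame.select(index_spark_columns).schema (internal.py:758).
    # That schema call is the AnalyzePlan RPC the server rejects with the
    # AnalysisException shown above.
    psdf = ps.from_pandas(pdf)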

Check failure on line 1 in python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py

github-actions / Report test results

python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py.test_pad

When resolving '__index_level_0__, fail to find subplan with plan_id=54 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#2753L AS __index_level_0__#2763L, A#2754 AS A#2764, B#2755 AS B#2765, C#2756 AS C#2766, D#2757L AS D#2767L]
   +- LocalRelation [__index_level_0__#2753L, A#2754, B#2755, C#2756, D#2757L]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/computation/test_missing_data.py", line 461, in test_pad
    psdf = ps.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=54 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#2753L AS __index_level_0__#2763L, A#2754 AS A#2764, B#2755 AS B#2765, C#2756 AS C#2766, D#2757L AS D#2767L]
   +- LocalRelation [__index_level_0__#2753L, A#2754, B#2755, C#2756, D#2757L]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	... (stack frames identical to the JVM stacktrace above; elided)
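
On the client side, the tracebacks show _handle_rpc_error rebuilding the server error via convert_exception, so the test process receives a pyspark.errors.exceptions.connect.AnalysisException carrying the JVM stacktrace reproduced above. A sketch of catching that class, reusing pdf and ps from the sketch after the first failure (hypothetical handling, not part of the test suite):

    from pyspark.errors.exceptions.connect import AnalysisException

    try:
        ps.from_pandas(pdf)
    except AnalysisException as e:
        # The message carries the server-side detail, e.g.
        # "fail to find subplan with plan_id=...".
        print(e)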

Check failure on line 1 in python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py

github-actions / Report test results

python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py.test_replace

When resolving '__index_level_0__, fail to find subplan with plan_id=63 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#2956 AS __index_level_0__#2962, name#2957 AS name#2963, weapon#2958 AS weapon#2964]
   +- LocalRelation [__index_level_0__#2956, name#2957, weapon#2958]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/computation/test_missing_data.py", line 297, in test_replace
    psdf = ps.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=63 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#2956 AS __index_level_0__#2962, name#2957 AS name#2963, weapon#2958 AS weapon#2964]
   +- LocalRelation [__index_level_0__#2956, name#2957, weapon#2958]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	... (stack frames identical to the JVM stacktrace above; elided)

Check failure on line 1 in python/pyspark/pandas/tests/connect/indexes/test_parity_datetime.py

github-actions / Report test results

python/pyspark/pandas/tests/connect/indexes/test_parity_datetime.py.test_arithmetic_op_exceptions

When resolving '__index_level_0__, fail to find subplan with plan_id=2 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#43 AS __index_level_0__#45]
   +- LocalRelation [__index_level_0__#43]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/indexes/test_datetime.py", line 196, in test_arithmetic_op_exceptions
    for psidx, pidx in self.idx_pairs:
  File "/__w/spark/spark/python/pyspark/pandas/tests/indexes/test_datetime.py", line 60, in idx_pairs
    return list(zip(self.psidxs, self.pidxs))
  File "/__w/spark/spark/python/pyspark/pandas/tests/indexes/test_datetime.py", line 56, in psidxs
    return [ps.from_pandas(pidx) for pidx in self.pidxs]
  File "/__w/spark/spark/python/pyspark/pandas/tests/indexes/test_datetime.py", line 56, in <listcomp>
    return [ps.from_pandas(pidx) for pidx in self.pidxs]
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 156, in from_pandas
    return DataFrame(pd.DataFrame(index=pobj)).index
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=2 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#43 AS __index_level_0__#45]
   +- LocalRelation [__index_level_0__#43]
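
The pyspark.pandas failures in this report all follow the call path shown in the traceback above: ps.from_pandas wraps a pandas index in a DataFrame, reading .index triggers a schema request, and the Connect server then has to resolve '__index_level_0__ by plan ID but cannot find a subplan carrying that ID. A minimal sketch of the failing call path, reconstructed from the tracebacks (the index values are illustrative, taken from test_datetime_index; an active Spark Connect session is assumed):

    import pandas as pd
    import pyspark.pandas as ps

    # Reconstructed from the traceback: from_pandas wraps the pandas index
    # in a DataFrame, and reading .index forces a schema request that
    # resolves '__index_level_0__' by plan_id on the Connect server.
    pidx = pd.DatetimeIndex(["2004-01-01", "2002-12-31", "2000-04-01"])
    psidx = ps.from_pandas(pidx)  # raises AnalysisException under Spark Connect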



Check failure on line 1 in python/pyspark/pandas/tests/connect/indexes/test_parity_datetime.py

python/pyspark/pandas/tests/connect/indexes/test_parity_datetime.py.test_ceil

When resolving '__index_level_0__, fail to find subplan with plan_id=7 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#90 AS __index_level_0__#92]
   +- LocalRelation [__index_level_0__#90]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/indexes/test_datetime.py", line 101, in test_ceil
    for psidx, pidx in self.idx_pairs:
  File "/__w/spark/spark/python/pyspark/pandas/tests/indexes/test_datetime.py", line 60, in idx_pairs
    return list(zip(self.psidxs, self.pidxs))
  File "/__w/spark/spark/python/pyspark/pandas/tests/indexes/test_datetime.py", line 56, in psidxs
    return [ps.from_pandas(pidx) for pidx in self.pidxs]
  File "/__w/spark/spark/python/pyspark/pandas/tests/indexes/test_datetime.py", line 56, in <listcomp>
    return [ps.from_pandas(pidx) for pidx in self.pidxs]
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 156, in from_pandas
    return DataFrame(pd.DataFrame(index=pobj)).index
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=7 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#90 AS __index_level_0__#92]
   +- LocalRelation [__index_level_0__#90]
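
Every test in this class fails at the same line because the shared fixtures materialize the indexes through ps.from_pandas, so the whole class fails wholesale once that call breaks. A sketch of the fixture chain, reconstructed from the traceback (the class name is illustrative; self.pidxs holds the pandas DatetimeIndex fixtures defined elsewhere in the test file):

    import pyspark.pandas as ps

    class DatetimeIndexTestsMixin:  # hypothetical name for illustration
        @property
        def psidxs(self):
            # every pandas index is converted up front, so any failure in
            # from_pandas surfaces in every test that touches idx_pairs
            return [ps.from_pandas(pidx) for pidx in self.pidxs]

        @property
        def idx_pairs(self):
            return list(zip(self.psidxs, self.pidxs))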



Check failure on line 1 in python/pyspark/pandas/tests/connect/indexes/test_parity_datetime.py

python/pyspark/pandas/tests/connect/indexes/test_parity_datetime.py.test_datetime_index

When resolving '__index_level_0__, fail to find subplan with plan_id=12 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#137 AS __index_level_0__#139]
   +- LocalRelation [__index_level_0__#137]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/indexes/test_datetime.py", line 72, in test_datetime_index
    ps.DatetimeIndex(["2004-01-01", "2002-12-31", "2000-04-01"]).all()
  File "/__w/spark/spark/python/pyspark/pandas/indexes/datetimes.py", line 155, in __new__
    return cast(DatetimeIndex, ps.from_pandas(pd.DatetimeIndex(**kwargs)))
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 156, in from_pandas
    return DataFrame(pd.DataFrame(index=pobj)).index
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=12 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#137 AS __index_level_0__#139]
   +- LocalRelation [__index_level_0__#137]
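
The same error class can be triggered in isolation, independent of pyspark.pandas: a Column captured from one DataFrame carries that DataFrame's plan_id, and resolving it against a plan that does not contain that ID raises the same "fail to find subplan with plan_id" AnalysisException. A hypothetical standalone sketch (the connection URL and DataFrames are placeholders, not taken from these tests):

    from pyspark.sql import SparkSession

    # Assumes a running Spark Connect server; the URL is a placeholder.
    spark = SparkSession.builder.remote("sc://localhost:15002").getOrCreate()

    df_a = spark.range(3)
    df_b = spark.range(3)
    # df_a.id carries df_a's plan_id; df_b's plan does not contain it, so the
    # analyzer raises: "When resolving 'id, fail to find subplan with plan_id=..."
    df_b.select(df_a.id).collect()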



Check failure on line 1 in python/pyspark/pandas/tests/connect/indexes/test_parity_datetime.py

python/pyspark/pandas/tests/connect/indexes/test_parity_datetime.py.test_day_name

When resolving '__index_level_0__, fail to find subplan with plan_id=17 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#184 AS __index_level_0__#186]
   +- LocalRelation [__index_level_0__#184]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/indexes/test_datetime.py", line 122, in test_day_name
    for psidx, pidx in self.idx_pairs:
  File "/__w/spark/spark/python/pyspark/pandas/tests/indexes/test_datetime.py", line 60, in idx_pairs
    return list(zip(self.psidxs, self.pidxs))
  File "/__w/spark/spark/python/pyspark/pandas/tests/indexes/test_datetime.py", line 56, in psidxs
    return [ps.from_pandas(pidx) for pidx in self.pidxs]
  File "/__w/spark/spark/python/pyspark/pandas/tests/indexes/test_datetime.py", line 56, in <listcomp>
    return [ps.from_pandas(pidx) for pidx in self.pidxs]
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 156, in from_pandas
    return DataFrame(pd.DataFrame(index=pobj)).index
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=17 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#184 AS __index_level_0__#186]
   +- LocalRelation [__index_level_0__#184]


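The test_day_name traceback above and the test_floor report that follows fail on the identical ps.from_pandas path, so a single reproduction covers both. A minimal sketch, assuming a reachable Spark Connect server; the sc://localhost URL and the index values are illustrative, not taken from the test:

    # Hypothetical minimal repro of the AnalysisException above; assumes a
    # running Spark Connect server. URL and data are illustrative only.
    import pandas as pd
    import pyspark.pandas as ps
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.remote("sc://localhost").getOrCreate()

    # A pandas DatetimeIndex, as in the failing parametrized tests.
    pidx = pd.date_range("2021-01-01", periods=3, freq="D")

    # from_pandas wraps the index in a pandas DataFrame, converts it, and
    # selects the internal '__index_level_0__' column back while building
    # the InternalFrame; on the failing builds that select cannot resolve
    # the column by plan_id and raises the AnalysisException shown above.
    psidx = ps.from_pandas(pidx)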

Check failure on line 1 in python/pyspark/pandas/tests/connect/indexes/test_parity_datetime.py


python/pyspark/pandas/tests/connect/indexes/test_parity_datetime.py.test_floor

When resolving '__index_level_0__, fail to find subplan with plan_id=22 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#231 AS __index_level_0__#233]
   +- LocalRelation [__index_level_0__#231]


JVM stacktrace:
org.apache.spark.sql.AnalysisException
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$3(ColumnResolutionHelper.scala:516)
	at scala.Option.getOrElse(Option.scala:201)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.$anonfun$resolveUnresolvedAttributeByPlanId$2(ColumnResolutionHelper.scala:516)
	at scala.collection.mutable.HashMap.getOrElseUpdate(HashMap.scala:469)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveUnresolvedAttributeByPlanId(ColumnResolutionHelper.scala:511)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.tryResolveColumnByPlanId(ColumnResolutionHelper.scala:494)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren(ColumnResolutionHelper.scala:451)
	at org.apache.spark.sql.catalyst.analysis.ColumnResolutionHelper.resolveExpressionByPlanChildren$(ColumnResolutionHelper.scala:446)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.resolveExpressionByPlanChildren(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.$anonfun$applyOrElse$110(Analyzer.scala:1639)
	at scala.collection.immutable.List.map(List.scala:246)
	at scala.collection.immutable.List.map(List.scala:79)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1639)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences$$anonfun$apply$16.applyOrElse(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:110)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:33)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1522)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences.apply(Analyzer.scala:1487)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
	at scala.collection.LinearSeqOps.foldLeft(LinearSeq.scala:183)
	at scala.collection.LinearSeqOps.foldLeft$(LinearSeq.scala:179)
	at scala.collection.immutable.List.foldLeft(List.scala:79)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:224)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:176)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:220)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:212)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:93)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:91)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.process(SparkConnectAnalyzeHandler.scala:59)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1(SparkConnectAnalyzeHandler.scala:43)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.$anonfun$handle$1$adapted(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:247)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:247)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:246)
	at org.apache.spark.sql.connect.service.SparkConnectAnalyzeHandler.handle(SparkConnectAnalyzeHandler.scala:42)
	at org.apache.spark.sql.connect.service.SparkConnectService.analyzePlan(SparkConnectService.scala:95)
	at org.apache.spark.connect.proto.SparkConnectServiceGrpc$MethodHandlers.invoke(SparkConnectServiceGrpc.java:907)
	at org.sparkproject.connect.grpc.io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:182)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:351)
	at org.sparkproject.connect.grpc.io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:860)
	at org.sparkproject.connect.grpc.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
	at org.sparkproject.connect.grpc.io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:133)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/indexes/test_datetime.py", line 108, in test_floor
    for psidx, pidx in self.idx_pairs:
  File "/__w/spark/spark/python/pyspark/pandas/tests/indexes/test_datetime.py", line 60, in idx_pairs
    return list(zip(self.psidxs, self.pidxs))
  File "/__w/spark/spark/python/pyspark/pandas/tests/indexes/test_datetime.py", line 56, in psidxs
    return [ps.from_pandas(pidx) for pidx in self.pidxs]
  File "/__w/spark/spark/python/pyspark/pandas/tests/indexes/test_datetime.py", line 56, in <listcomp>
    return [ps.from_pandas(pidx) for pidx in self.pidxs]
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 156, in from_pandas
    return DataFrame(pd.DataFrame(index=pobj)).index
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1528, in from_pandas
    return InternalFrame(
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 758, in __init__
    struct_fields = spark_frame.select(index_spark_columns).schema.fields
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1776, in schema
    return self._session.client.schema(query)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 912, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1498, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1566, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: When resolving '__index_level_0__, fail to find subplan with plan_id=22 in 'Project ['__index_level_0__]
+- Project [__index_level_0__#231 AS __index_level_0__#233]
   +- LocalRelation [__index_level_0__#231]


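Both reports reduce to the same resolution step: a column reference tagged with a plan_id (17 for test_day_name, 22 for test_floor) that resolveUnresolvedAttributeByPlanId can no longer find among the plan's children. An illustrative sketch of that mechanism, not taken from the tests; on a healthy build this resolves cleanly, and the names and values are hypothetical:

    # Sketch of plan_id-based column resolution under Spark Connect,
    # reusing the illustrative `spark` Connect session from the repro above.
    df = spark.createDataFrame([("2021-01-01",)], ["__index_level_0__"])
    renamed = df.select(df["__index_level_0__"].alias("__index_level_0__"))

    # df["__index_level_0__"] is tagged with df's plan_id; the analyzer
    # resolves it by searching renamed's subtree for a node with that id,
    # mirroring the Project-over-LocalRelation shape in the reports above.
    # If a rewrite collapses the node carrying the referenced plan_id out
    # of the tree, the lookup fails with "fail to find subplan with
    # plan_id=...", which is consistent with the withColumns-collapsing
    # change under test here.
    print(renamed.select(df["__index_level_0__"]).schema)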