From 0fbb05a459f62a14896aa5d863685b449ab3e442 Mon Sep 17 00:00:00 2001
From: max <42827971+moiseenkov@users.noreply.github.com>
Date: Fri, 21 Jul 2023 18:24:09 +0200
Subject: [PATCH] Fixup system test for DataprocSubmitJobOperator (SparkSQL
 job) (#32745)

---
 airflow/providers/google/cloud/operators/dataproc.py         |  4 ++++
 .../google/cloud/dataproc/example_dataproc_spark_sql.py      | 12 ++++++++----
 2 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/airflow/providers/google/cloud/operators/dataproc.py b/airflow/providers/google/cloud/operators/dataproc.py
index b4123f08e9944..0d9cf592a1cb8 100644
--- a/airflow/providers/google/cloud/operators/dataproc.py
+++ b/airflow/providers/google/cloud/operators/dataproc.py
@@ -1286,6 +1286,10 @@ def execute(self, context: Context):
 class DataprocSubmitSparkSqlJobOperator(DataprocJobBaseOperator):
     """Start a Spark SQL query Job on a Cloud DataProc cluster.
 
+    .. seealso::
+        This operator is deprecated, please use
+        :class:`~airflow.providers.google.cloud.operators.dataproc.DataprocSubmitJobOperator`:
+
     :param query: The query or reference to the query file (q extension). (templated)
     :param query_uri: The HCFS URI of the script that contains the SQL queries.
     :param variables: Map of named parameters for the query. (templated)
diff --git a/tests/system/providers/google/cloud/dataproc/example_dataproc_spark_sql.py b/tests/system/providers/google/cloud/dataproc/example_dataproc_spark_sql.py
index ff742aef33bec..3db2f881bcd5b 100644
--- a/tests/system/providers/google/cloud/dataproc/example_dataproc_spark_sql.py
+++ b/tests/system/providers/google/cloud/dataproc/example_dataproc_spark_sql.py
@@ -37,7 +37,6 @@
 
 CLUSTER_NAME = f"dataproc-spark-sql-{ENV_ID}"
 REGION = "europe-west1"
-ZONE = "europe-west1-b"
 
 # Cluster definition
 CLUSTER_CONFIG = {
@@ -53,8 +52,6 @@
     },
 }
 
-TIMEOUT = {"seconds": 1 * 24 * 60 * 60}
-
 # Jobs definitions
 # [START how_to_cloud_dataproc_sparksql_config]
 SPARK_SQL_JOB = {
@@ -92,7 +89,14 @@
         trigger_rule=TriggerRule.ALL_DONE,
     )
 
-    create_cluster >> spark_sql_task >> delete_cluster
+    (
+        # TEST SETUP
+        create_cluster
+        # TEST BODY
+        >> spark_sql_task
+        # TEST TEARDOWN
+        >> delete_cluster
+    )
 
     from tests.system.utils.watcher import watcher