"""
Example Airflow DAG for Google BigQuery service.
Uses Async version of BigQueryInsertJobOperator and BigQueryCheckOperator.
"""

import os
from datetime import datetime, timedelta
from typing import Any

from airflow import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.python import PythonOperator
from airflow.providers.google.cloud.operators.bigquery import (
BigQueryCreateEmptyDatasetOperator,
BigQueryCreateEmptyTableOperator,
BigQueryDeleteDatasetOperator,
)
from airflow.utils.state import State
from airflow.utils.trigger_rule import TriggerRule
from astronomer.providers.google.cloud.operators.bigquery import (
BigQueryCheckOperatorAsync,
BigQueryGetDataOperatorAsync,
BigQueryInsertJobOperatorAsync,
BigQueryIntervalCheckOperatorAsync,
BigQueryValueCheckOperatorAsync,
)
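
# Connection, project, and dataset settings come from environment variables so the
# same DAG can run against different GCP projects without code changes.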
PROJECT_ID = os.getenv("GCP_PROJECT_ID", "astronomer-airflow-providers")
DATASET_NAME = os.getenv("GCP_BIGQUERY_DATASET_NAME", "astro_dataset")
GCP_CONN_ID = os.getenv("GCP_CONN_ID", "google_cloud_default")
LOCATION = os.getenv("GCP_LOCATION", "us")
EXECUTION_TIMEOUT = int(os.getenv("EXECUTION_TIMEOUT", 6))
TABLE_1 = "table1"
TABLE_2 = "table2"
SCHEMA = [
{"name": "value", "type": "INTEGER", "mode": "REQUIRED"},
{"name": "name", "type": "STRING", "mode": "NULLABLE"},
{"name": "ds", "type": "STRING", "mode": "NULLABLE"},
]
DATASET = DATASET_NAME
INSERT_DATE = datetime.now().strftime("%Y-%m-%d")
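
# Seed TABLE_1 with two rows that match SCHEMA; `ds` carries today's date.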
INSERT_ROWS_QUERY = (
f"INSERT {DATASET}.{TABLE_1} VALUES "
f"(42, 'monthy python', '{INSERT_DATE}'), "
f"(42, 'fishy fish', '{INSERT_DATE}');"
)
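
# Task-level defaults; retry count and delay are tunable via environment variables.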
default_args = {
"execution_timeout": timedelta(hours=EXECUTION_TIMEOUT),
"retries": int(os.getenv("DEFAULT_TASK_RETRIES", 2)),
"retry_delay": timedelta(seconds=int(os.getenv("DEFAULT_RETRY_DELAY_SECONDS", 60))),
}


def check_dag_status(**kwargs: Any) -> None:
    """Raise an exception if any task in this DAG run failed, so the run itself is marked failed."""
for task_instance in kwargs["dag_run"].get_task_instances():
if (
task_instance.current_state() != State.SUCCESS
and task_instance.task_id != kwargs["task_instance"].task_id
):
raise Exception(f"Task {task_instance.task_id} failed. Failing this DAG run")


with DAG(
dag_id="example_async_bigquery_queries",
schedule=None,
start_date=datetime(2022, 1, 1),
catchup=False,
default_args=default_args,
tags=["example", "async", "bigquery"],
user_defined_macros={"DATASET": DATASET, "TABLE": TABLE_1},
) as dag:
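    # Set-up: create the dataset and an empty table for the tasks below to query.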
create_dataset = BigQueryCreateEmptyDatasetOperator(
task_id="create_dataset",
dataset_id=DATASET,
location=LOCATION,
gcp_conn_id=GCP_CONN_ID,
)
create_table_1 = BigQueryCreateEmptyTableOperator(
task_id="create_table_1",
dataset_id=DATASET,
table_id=TABLE_1,
schema_fields=SCHEMA,
location=LOCATION,
        gcp_conn_id=GCP_CONN_ID,
)
create_dataset >> create_table_1
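
    # Tear-down: drop the dataset and everything in it once all checks finish,
    # whether they succeeded or not.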
delete_dataset = BigQueryDeleteDatasetOperator(
task_id="delete_dataset",
dataset_id=DATASET,
delete_contents=True,
gcp_conn_id=GCP_CONN_ID,
trigger_rule="all_done",
)
# [START howto_operator_bigquery_insert_job_async]
insert_query_job = BigQueryInsertJobOperatorAsync(
task_id="insert_query_job",
configuration={
"query": {
"query": INSERT_ROWS_QUERY,
"useLegacySql": False,
}
},
location=LOCATION,
gcp_conn_id=GCP_CONN_ID,
)
# [END howto_operator_bigquery_insert_job_async]
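
    # NOTE: 'example_bigquery_query.sql' is assumed to live on the DAG's template
    # search path and to contain a SELECT that uses the DATASET/TABLE macros
    # registered via user_defined_macros above.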
# [START howto_operator_bigquery_select_job_async]
select_query_job = BigQueryInsertJobOperatorAsync(
task_id="select_query_job",
configuration={
"query": {
"query": "{% include 'example_bigquery_query.sql' %}",
"useLegacySql": False,
}
},
location=LOCATION,
gcp_conn_id=GCP_CONN_ID,
)
# [END howto_operator_bigquery_select_job_async]
# [START howto_operator_bigquery_value_check_async]
check_value = BigQueryValueCheckOperatorAsync(
task_id="check_value",
sql=f"SELECT COUNT(*) FROM {DATASET}.{TABLE_1}",
pass_value=2,
use_legacy_sql=False,
location=LOCATION,
gcp_conn_id=GCP_CONN_ID,
)
# [END howto_operator_bigquery_value_check_async]
# [START howto_operator_bigquery_interval_check_async]
check_interval = BigQueryIntervalCheckOperatorAsync(
task_id="check_interval",
table=f"{DATASET}.{TABLE_1}",
days_back=1,
metrics_thresholds={"COUNT(*)": 1.5},
use_legacy_sql=False,
location=LOCATION,
gcp_conn_id=GCP_CONN_ID,
)
# [END howto_operator_bigquery_interval_check_async]
# [START howto_operator_bigquery_multi_query_async]
bigquery_execute_multi_query = BigQueryInsertJobOperatorAsync(
task_id="execute_multi_query",
configuration={
"query": {
"query": [
f"SELECT * FROM {DATASET}.{TABLE_2}",
f"SELECT COUNT(*) FROM {DATASET}.{TABLE_2}",
],
"useLegacySql": False,
}
},
location=LOCATION,
gcp_conn_id=GCP_CONN_ID,
)
# [END howto_operator_bigquery_multi_query_async]
# [START howto_operator_bigquery_get_data_async]
get_data = BigQueryGetDataOperatorAsync(
task_id="get_data",
dataset_id=DATASET,
table_id=TABLE_1,
max_results=10,
selected_fields="value,name",
location=LOCATION,
gcp_conn_id=GCP_CONN_ID,
)
# [END howto_operator_bigquery_get_data_async]
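
    # get_data.output is an XComArg; rendered inside the bash command, it pulls the
    # rows that get_data pushed to XCom.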
get_data_result = BashOperator(
task_id="get_data_result",
bash_command=f"echo {get_data.output}",
trigger_rule="all_done",
)
# [START howto_operator_bigquery_check_async]
check_count = BigQueryCheckOperatorAsync(
task_id="check_count",
sql=f"SELECT COUNT(*) FROM {DATASET}.{TABLE_1}",
use_legacy_sql=False,
location=LOCATION,
gcp_conn_id=GCP_CONN_ID,
)
# [END howto_operator_bigquery_check_async]
# [START howto_operator_bigquery_execute_query_save_async]
execute_query_save = BigQueryInsertJobOperatorAsync(
task_id="execute_query_save",
configuration={
"query": {
"query": f"SELECT * FROM {DATASET}.{TABLE_1}",
"useLegacySql": False,
"destinationTable": {
"projectId": PROJECT_ID,
"datasetId": DATASET,
"tableId": TABLE_2,
},
}
},
location=LOCATION,
gcp_conn_id=GCP_CONN_ID,
)
# [END howto_operator_bigquery_execute_query_save_async]
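
    # BigQuery script that checks for a deliberately missing table and then busy-waits
    # for roughly a minute, giving the deferrable operator a long-running job to track.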
execute_long_running_query = BigQueryInsertJobOperatorAsync(
task_id="execute_long_running_query",
configuration={
"query": {
"query": f"""DECLARE success BOOL;
DECLARE size_bytes INT64;
DECLARE row_count INT64;
DECLARE DELAY_TIME DATETIME;
DECLARE WAIT STRING;
SET success = FALSE;
        SET row_count = (SELECT row_count FROM {DATASET}.__TABLES__ WHERE table_id='NON_EXISTING_TABLE');
        IF row_count > 0 THEN
            SELECT 'Table Exists!' as message, row_count as rows;
SET success = TRUE;
ELSE
SELECT 'Table does not exist' as message, row_count;
SET WAIT = 'TRUE';
SET DELAY_TIME = DATETIME_ADD(CURRENT_DATETIME,INTERVAL 1 MINUTE);
WHILE WAIT = 'TRUE' DO
IF (DELAY_TIME < CURRENT_DATETIME) THEN
SET WAIT = 'FALSE';
END IF;
END WHILE;
END IF;""",
"useLegacySql": False,
}
},
location=LOCATION,
gcp_conn_id=GCP_CONN_ID,
)
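
    # Watcher task: runs last regardless of upstream state and fails the DAG run
    # if any task did not succeed.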
    dag_final_status = PythonOperator(
        task_id="dag_final_status",
        python_callable=check_dag_status,
        trigger_rule=TriggerRule.ALL_DONE,  # Ensures this task runs even if upstream fails.
        retries=0,
    )
create_table_1 >> insert_query_job >> select_query_job >> check_count
insert_query_job >> get_data >> get_data_result
insert_query_job >> execute_query_save >> bigquery_execute_multi_query
insert_query_job >> execute_long_running_query >> check_value >> check_interval
[check_count, check_interval, bigquery_execute_multi_query, get_data_result] >> delete_dataset
[
check_count,
check_interval,
bigquery_execute_multi_query,
get_data_result,
delete_dataset,
] >> dag_final_status