diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index fab2463145..6773aef7c2 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -2,6 +2,9 @@ on:
   pull_request:
     branches:
       - main
+  push:
+    branches:
+      - main
 name: docs
 jobs:
   docs:
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 1051da0bdd..7914b72651 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -2,6 +2,9 @@ on:
   pull_request:
     branches:
       - main
+  push:
+    branches:
+      - main
 name: lint
 jobs:
   lint:
diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml
index e6a79291d0..fc9e970946 100644
--- a/.github/workflows/mypy.yml
+++ b/.github/workflows/mypy.yml
@@ -2,6 +2,9 @@ on:
   pull_request:
     branches:
       - main
+  push:
+    branches:
+      - main
 name: mypy
 jobs:
   mypy:
diff --git a/.github/workflows/unittest.yml b/.github/workflows/unittest.yml
index a7805de447..518cec6312 100644
--- a/.github/workflows/unittest.yml
+++ b/.github/workflows/unittest.yml
@@ -2,6 +2,9 @@ on:
   pull_request:
     branches:
       - main
+  push:
+    branches:
+      - main
 name: unittest
 jobs:
   unit:
diff --git a/owlbot.py b/owlbot.py
index 33dd33a84f..4a189ff0e2 100644
--- a/owlbot.py
+++ b/owlbot.py
@@ -58,8 +58,9 @@
         ".kokoro/build.sh",
         ".kokoro/continuous/common.cfg",
         ".kokoro/presubmit/common.cfg",
-        # Temporary workaround to update docs job to use python 3.10
         ".github/workflows/docs.yml",
+        ".github/workflows/lint.yml",
+        ".github/workflows/unittest.yml",
     ],
 )
diff --git a/tests/unit/session/test_io_bigquery.py b/tests/unit/session/test_io_bigquery.py
index 57ac3d88f7..41f3755f13 100644
--- a/tests/unit/session/test_io_bigquery.py
+++ b/tests/unit/session/test_io_bigquery.py
@@ -18,12 +18,15 @@
 from unittest import mock
 
 import google.cloud.bigquery as bigquery
+import google.cloud.bigquery.job
+import google.cloud.bigquery.table
 import pytest
 
 import bigframes
 from bigframes.core import log_adapter
 import bigframes.core.events
 import bigframes.pandas as bpd
+import bigframes.session._io.bigquery
 import bigframes.session._io.bigquery as io_bq
 from bigframes.testing import mocks
@@ -32,7 +35,7 @@ def mock_bq_client():
     mock_client = mock.create_autospec(bigquery.Client)
     mock_query_job = mock.create_autospec(bigquery.QueryJob)
-    mock_row_iterator = mock.create_autospec(bigquery.table.RowIterator)
+    mock_row_iterator = mock.create_autospec(google.cloud.bigquery.table.RowIterator)
 
     mock_query_job.result.return_value = mock_row_iterator
@@ -98,14 +101,12 @@ def test_create_job_configs_labels_log_adaptor_call_method_under_length_limit():
     cur_labels = {
         "source": "bigquery-dataframes-temp",
     }
-    df = bpd.DataFrame(
-        {"col1": [1, 2], "col2": [3, 4]}, session=mocks.create_bigquery_session()
-    )
-    # Test running two methods
-    df.head()
-    df.max()
-    df.columns
-    api_methods = log_adapter._api_methods
+    api_methods = [
+        "dataframe-columns",
+        "dataframe-max",
+        "dataframe-head",
+        "dataframe-__init__",
+    ]
 
     labels = io_bq.create_job_configs_labels(
         job_configs_labels=cur_labels, api_methods=api_methods
@@ -123,17 +124,13 @@ def test_create_job_configs_labels_length_limit_met_and_labels_is_none():
     log_adapter.get_and_reset_api_methods()
-    df = bpd.DataFrame(
-        {"col1": [1, 2], "col2": [3, 4]}, session=mocks.create_bigquery_session()
-    )
     # Test running methods more than the labels' length limit
-    for i in range(100):
-        df.head()
-    api_methods = log_adapter._api_methods
+    api_methods = list(["dataframe-head"] * 100)
 
-    labels = io_bq.create_job_configs_labels(
-        job_configs_labels=None, api_methods=api_methods
-    )
+    with bpd.option_context("compute.extra_query_labels", {}):
+        labels = io_bq.create_job_configs_labels(
+            job_configs_labels=None, api_methods=api_methods
+        )
 
     assert labels is not None
     assert len(labels) == log_adapter.MAX_LABELS_COUNT
     assert "dataframe-head" in labels.values()
@@ -150,17 +147,14 @@ def test_create_job_configs_labels_length_limit_met():
         value = f"test{i}"
         cur_labels[key] = value
     # If cur_labels length is 62, we can only add one label from api_methods
-    df = bpd.DataFrame(
-        {"col1": [1, 2], "col2": [3, 4]}, session=mocks.create_bigquery_session()
-    )
     # Test running two methods
-    df.head()
-    df.max()
-    api_methods = log_adapter._api_methods
+    api_methods = ["dataframe-max", "dataframe-head"]
+
+    with bpd.option_context("compute.extra_query_labels", {}):
+        labels = io_bq.create_job_configs_labels(
+            job_configs_labels=cur_labels, api_methods=api_methods
+        )
 
-    labels = io_bq.create_job_configs_labels(
-        job_configs_labels=cur_labels, api_methods=api_methods
-    )
     assert labels is not None
     assert len(labels) == 56
     assert "dataframe-max" in labels.values()
@@ -184,7 +178,7 @@ def test_add_and_trim_labels_length_limit_met():
         {"col1": [1, 2], "col2": [3, 4]}, session=mocks.create_bigquery_session()
     )
 
-    job_config = bigquery.job.QueryJobConfig()
+    job_config = google.cloud.bigquery.job.QueryJobConfig()
     job_config.labels = cur_labels
 
     df.max()
@@ -221,7 +215,7 @@ def test_start_query_with_client_labels_length_limit_met(
         {"col1": [1, 2], "col2": [3, 4]}, session=mocks.create_bigquery_session()
    )
 
-    job_config = bigquery.job.QueryJobConfig()
+    job_config = google.cloud.bigquery.job.QueryJobConfig()
     job_config.labels = cur_labels
 
     df.max()
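A minimal sketch (not part of the patch) of the refactored test pattern used above: the tests now pass a hand-built api_methods list straight to io_bq.create_job_configs_labels instead of executing DataFrame operations against a mocked session, and pin compute.extra_query_labels to an empty dict so only method-derived labels count toward the limit. The imports mirror tests/unit/session/test_io_bigquery.py.

# Sketch only: restates the behavior exercised by the updated tests.
import bigframes.pandas as bpd
import bigframes.session._io.bigquery as io_bq
from bigframes.core import log_adapter

# Build the API-method history by hand instead of running df.head() in a loop.
api_methods = ["dataframe-head"] * 100

# Disable extra query labels so only api_methods-derived labels are counted.
with bpd.option_context("compute.extra_query_labels", {}):
    labels = io_bq.create_job_configs_labels(
        job_configs_labels=None, api_methods=api_methods
    )

# The label dict is trimmed to the BigQuery label cap.
assert len(labels) == log_adapter.MAX_LABELS_COUNT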