diff --git a/dev/airflow-license b/dev/airflow-license index 2144f54b877149..0041a5cb46512b 100755 --- a/dev/airflow-license +++ b/dev/airflow-license @@ -76,8 +76,8 @@ if __name__ == "__main__": for notice in notices: notice = notice[0] - license = parse_license_file(notice[1]) - print(f"{notice[1]:<30}|{notice[2][:50]:<50}||{notice[0]:<20}||{license:<10}") + license_type = parse_license_file(notice[1]) + print(f"{notice[1]:<30}|{notice[2][:50]:<50}||{notice[0]:<20}||{license_type:<10}") file_count = len(os.listdir("../licenses")) print(f"Defined licenses: {len(notices)} Files found: {file_count}") diff --git a/dev/breeze/src/airflow_breeze/commands/developer_commands.py b/dev/breeze/src/airflow_breeze/commands/developer_commands.py index 27d046f2475fcb..dd94afdfa6bc3a 100644 --- a/dev/breeze/src/airflow_breeze/commands/developer_commands.py +++ b/dev/breeze/src/airflow_breeze/commands/developer_commands.py @@ -958,7 +958,7 @@ def down(preserve_volumes: bool, cleanup_mypy_cache: bool, project_name: str): @option_verbose @option_dry_run @click.argument("exec_args", nargs=-1, type=click.UNPROCESSED) -def exec(exec_args: tuple): +def exec_(exec_args: tuple): perform_environment_checks() container_running = find_airflow_container() if container_running: diff --git a/dev/breeze/src/airflow_breeze/commands/release_management_commands.py b/dev/breeze/src/airflow_breeze/commands/release_management_commands.py index 7dfccba03b1533..4b09523f5eaf1a 100644 --- a/dev/breeze/src/airflow_breeze/commands/release_management_commands.py +++ b/dev/breeze/src/airflow_breeze/commands/release_management_commands.py @@ -2788,9 +2788,9 @@ def _copy_selected_sources_from_tmp_directory_to_clients_python(): f"[info]Copying selected sources: {GENERATED_CLIENT_DIRECTORIES_TO_COPY} from " f"{PYTHON_CLIENT_TMP_DIR} to {PYTHON_CLIENT_DIR_PATH}[/]" ) - for dir in GENERATED_CLIENT_DIRECTORIES_TO_COPY: - source_dir = PYTHON_CLIENT_TMP_DIR / dir - target_dir = PYTHON_CLIENT_DIR_PATH / dir + for directory in GENERATED_CLIENT_DIRECTORIES_TO_COPY: + source_dir = PYTHON_CLIENT_TMP_DIR / directory + target_dir = PYTHON_CLIENT_DIR_PATH / directory get_console().print(f"[info] Copying generated sources from {source_dir} to {target_dir}[/]") shutil.rmtree(target_dir, ignore_errors=True) shutil.copytree(source_dir, target_dir) @@ -2926,9 +2926,9 @@ def prepare_python_client( get_console().print( f"[info]Copying generated client from {PYTHON_CLIENT_DIR_PATH} to {python_client_repo}[/]" ) - for dir in GENERATED_CLIENT_DIRECTORIES_TO_COPY: - source_dir = PYTHON_CLIENT_DIR_PATH / dir - target_dir = python_client_repo / dir + for directory in GENERATED_CLIENT_DIRECTORIES_TO_COPY: + source_dir = PYTHON_CLIENT_DIR_PATH / directory + target_dir = python_client_repo / directory get_console().print(f"[info] Copying {source_dir} to {target_dir}[/]") shutil.rmtree(target_dir, ignore_errors=True) shutil.copytree(source_dir, target_dir) diff --git a/dev/breeze/src/airflow_breeze/params/doc_build_params.py b/dev/breeze/src/airflow_breeze/params/doc_build_params.py index 01339dc14325b4..372637de4545ae 100644 --- a/dev/breeze/src/airflow_breeze/params/doc_build_params.py +++ b/dev/breeze/src/airflow_breeze/params/doc_build_params.py @@ -49,6 +49,6 @@ def args_doc_builder(self) -> list[str]: for filter_from_short_doc in get_long_package_names(self.short_doc_packages): doc_args.extend(["--package-filter", filter_from_short_doc]) if self.package_filter: - for filter in self.package_filter: - doc_args.extend(["--package-filter", filter]) + for filter_ 
in self.package_filter: + doc_args.extend(["--package-filter", filter_]) return doc_args diff --git a/dev/system_tests/update_issue_status.py b/dev/system_tests/update_issue_status.py index 83ba3a73e86213..1b3fdf217286c0 100755 --- a/dev/system_tests/update_issue_status.py +++ b/dev/system_tests/update_issue_status.py @@ -209,9 +209,11 @@ def update_issue_status( if not_completed_closed_issues: console.print("[yellow] Issues that are not completed and should be opened:[/]\n") for issue in not_completed_closed_issues: - all = per_issue_num_all[issue.id] + all_ = per_issue_num_all[issue.id] done = per_issue_num_done[issue.id] - console.print(f" * [[yellow]{issue.title}[/]]({issue.html_url}): {done}/{all} : {done / all:.2%}") + console.print( + f" * [[yellow]{issue.title}[/]]({issue.html_url}): {done}/{all_} : {done / all_:.2%}" + ) console.print() if completed_open_issues: console.print("[yellow] Issues that are completed and should be closed:[/]\n") @@ -221,9 +223,11 @@ def update_issue_status( if not_completed_opened_issues: console.print("[yellow] Issues that are not completed and are still opened:[/]\n") for issue in not_completed_opened_issues: - all = per_issue_num_all[issue.id] + all_ = per_issue_num_all[issue.id] done = per_issue_num_done[issue.id] - console.print(f" * [[yellow]{issue.title}[/]]({issue.html_url}): {done}/{all} : {done / all:.2%}") + console.print( + f" * [[yellow]{issue.title}[/]]({issue.html_url}): {done}/{all_} : {done / all_:.2%}" + ) console.print() if completed_closed_issues: console.print("[green] Issues that are completed and are already closed:[/]\n") diff --git a/hatch_build.py b/hatch_build.py index 7fddae5bb25454..11eb4d0b3c25c3 100644 --- a/hatch_build.py +++ b/hatch_build.py @@ -915,16 +915,16 @@ def _process_all_built_in_extras(self, version: str) -> None: :param version: "standard" or "editable" build. """ - for dict, _ in ALL_DYNAMIC_EXTRA_DICTS: - for extra, deps in dict.items(): + for d, _ in ALL_DYNAMIC_EXTRA_DICTS: + for extra, deps in d.items(): self.all_devel_extras.add(extra) self._add_devel_ci_dependencies(deps, python_exclusion="") - if dict not in [DEPRECATED_EXTRAS, DEVEL_EXTRAS, DOC_EXTRAS]: + if d not in [DEPRECATED_EXTRAS, DEVEL_EXTRAS, DOC_EXTRAS]: # do not add deprecated extras to "all" extras self.all_non_devel_extras.add(extra) if version == "standard": # for wheel builds we skip devel and doc extras - if dict not in [DEVEL_EXTRAS, DOC_EXTRAS]: + if d not in [DEVEL_EXTRAS, DOC_EXTRAS]: self.optional_dependencies[extra] = deps else: # for editable builds we add all extras diff --git a/pyproject.toml b/pyproject.toml index 6fd468aa507b26..2735e10f950e5b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -289,6 +289,7 @@ extend-select = [ "PGH004", # Use specific rule codes when using noqa "PGH005", # Invalid unittest.mock.Mock methods/attributes/properties "S101", # Checks use `assert` outside the test cases, test cases should be added into the exclusions + "A001", # Checks for variable (and function) assignments that use the same name as a builtin. "B004", # Checks for use of hasattr(x, "__call__") and replaces it with callable(x) "B006", # Checks for uses of mutable objects as function argument defaults. 
"B007", # Checks for unused variables in the loop @@ -392,6 +393,15 @@ combine-as-imports = true # https://github.com/apache/airflow/issues/39252 "airflow/providers/amazon/aws/hooks/eks.py" = ["W605"] +# Airflow Core / Providers modules which still shadowing a Python builtin variables/functions/classes +"airflow/api_connexion/endpoints/task_instance_endpoint.py" = ["A001"] +"airflow/cli/cli_config.py" = ["A001"] +"airflow/configuration.py" = ["A001"] +"airflow/models/dag.py" = ["A001"] +"airflow/providers/amazon/aws/hooks/s3.py" = ["A001"] +"airflow/providers/databricks/hooks/databricks.py" = ["A001"] +"airflow/providers/google/cloud/operators/bigquery.py" = ["A001"] + [tool.ruff.lint.flake8-tidy-imports] # Disallow all relative imports. ban-relative-imports = "all" diff --git a/scripts/ci/pre_commit/check_init_in_tests.py b/scripts/ci/pre_commit/check_init_in_tests.py index a9c6dee2379b66..61e1944c74aa65 100755 --- a/scripts/ci/pre_commit/check_init_in_tests.py +++ b/scripts/ci/pre_commit/check_init_in_tests.py @@ -41,10 +41,10 @@ if __name__ == "__main__": for dirname, sub_dirs, _ in os.walk(ROOT_DIR / "tests"): - dir = Path(dirname) + directory = Path(dirname) sub_dirs[:] = [subdir for subdir in sub_dirs if subdir not in {"__pycache__", "test_logs"}] for sub_dir in sub_dirs: - init_py_path = dir / sub_dir / "__init__.py" + init_py_path = directory / sub_dir / "__init__.py" if not init_py_path.exists(): init_py_path.touch() console.print(f"[yellow] Created {init_py_path}[/]") diff --git a/scripts/ci/pre_commit/check_providers_subpackages_all_have_init.py b/scripts/ci/pre_commit/check_providers_subpackages_all_have_init.py index 2407bffe9a580d..bf21197434030c 100755 --- a/scripts/ci/pre_commit/check_providers_subpackages_all_have_init.py +++ b/scripts/ci/pre_commit/check_providers_subpackages_all_have_init.py @@ -36,8 +36,8 @@ def check_dir_init_file(provider_files: list[str]) -> None: missing_init_dirs.append(path) if missing_init_dirs: - with open(os.path.join(ROOT_DIR, "scripts/ci/license-templates/LICENSE.txt")) as license: - license_txt = license.readlines() + with open(os.path.join(ROOT_DIR, "scripts/ci/license-templates/LICENSE.txt")) as fp: + license_txt = fp.readlines() prefixed_licensed_txt = [f"# {line}" if line != "\n" else "#\n" for line in license_txt] for missing_init_dir in missing_init_dirs: diff --git a/scripts/ci/pre_commit/www_lint.py b/scripts/ci/pre_commit/www_lint.py index 6d389b17999ab0..d4b40a85f97dfc 100755 --- a/scripts/ci/pre_commit/www_lint.py +++ b/scripts/ci/pre_commit/www_lint.py @@ -27,8 +27,8 @@ ) if __name__ == "__main__": - dir = Path("airflow") / "www" - subprocess.check_call(["yarn", "--frozen-lockfile", "--non-interactive"], cwd=dir) - subprocess.check_call(["yarn", "run", "generate-api-types"], cwd=dir) - subprocess.check_call(["yarn", "run", "format"], cwd=dir) - subprocess.check_call(["yarn", "run", "lint:fix"], cwd=dir) + www_dir = Path("airflow") / "www" + subprocess.check_call(["yarn", "--frozen-lockfile", "--non-interactive"], cwd=www_dir) + subprocess.check_call(["yarn", "run", "generate-api-types"], cwd=www_dir) + subprocess.check_call(["yarn", "run", "format"], cwd=www_dir) + subprocess.check_call(["yarn", "run", "lint:fix"], cwd=www_dir) diff --git a/tests/callbacks/test_callback_requests.py b/tests/callbacks/test_callback_requests.py index b9a00376ff0bb3..79fde6a6dea606 100644 --- a/tests/callbacks/test_callback_requests.py +++ b/tests/callbacks/test_callback_requests.py @@ -37,7 +37,7 @@ class TestCallbackRequest: 
@pytest.mark.parametrize( - "input,request_class", + "callback_request, request_class", [ (CallbackRequest(full_filepath="filepath", msg="task_failure"), CallbackRequest), ( @@ -64,8 +64,8 @@ class TestCallbackRequest: ), ], ) - def test_from_json(self, input, request_class): - if input is None: + def test_from_json(self, callback_request, request_class): + if callback_request is None: ti = TaskInstance( task=BashOperator( task_id="test", bash_command="true", dag=DAG(dag_id="id"), start_date=datetime.now() @@ -74,15 +74,15 @@ def test_from_json(self, input, request_class): state=State.RUNNING, ) - input = TaskCallbackRequest( + callback_request = TaskCallbackRequest( full_filepath="filepath", simple_task_instance=SimpleTaskInstance.from_ti(ti=ti), processor_subdir="/test_dir", is_failure_callback=True, ) - json_str = input.to_json() + json_str = callback_request.to_json() result = request_class.from_json(json_str=json_str) - assert result == input + assert result == callback_request def test_taskcallback_to_json_with_start_date_and_end_date(self, session, create_task_instance): ti = create_task_instance() @@ -90,15 +90,15 @@ def test_taskcallback_to_json_with_start_date_and_end_date(self, session, create ti.end_date = timezone.utcnow() session.merge(ti) session.flush() - input = TaskCallbackRequest( + callback_request = TaskCallbackRequest( full_filepath="filepath", simple_task_instance=SimpleTaskInstance.from_ti(ti), processor_subdir="/test_dir", is_failure_callback=True, ) - json_str = input.to_json() + json_str = callback_request.to_json() result = TaskCallbackRequest.from_json(json_str) - assert input == result + assert callback_request == result def test_simple_ti_roundtrip_exec_config_pod(self): """A callback request including a TI with an exec config with a V1Pod should safely roundtrip.""" diff --git a/tests/providers/amazon/aws/operators/test_ec2.py b/tests/providers/amazon/aws/operators/test_ec2.py index 8f8a755a84357d..029b6128b9887c 100644 --- a/tests/providers/amazon/aws/operators/test_ec2.py +++ b/tests/providers/amazon/aws/operators/test_ec2.py @@ -84,8 +84,8 @@ def test_create_multiple_instances(self): instance_ids = create_instances.execute(None) assert len(instance_ids) == 5 - for id in instance_ids: - assert ec2_hook.get_instance_state(instance_id=id) == "running" + for instance_id in instance_ids: + assert ec2_hook.get_instance_state(instance_id=instance_id) == "running" class TestEC2TerminateInstanceOperator(BaseEc2TestClass): @@ -130,15 +130,15 @@ def test_terminate_multiple_instances(self): instance_ids = create_instances.execute(None) assert len(instance_ids) == 5 - for id in instance_ids: - assert ec2_hook.get_instance_state(instance_id=id) == "running" + for instance_id in instance_ids: + assert ec2_hook.get_instance_state(instance_id=instance_id) == "running" terminate_instance = EC2TerminateInstanceOperator( task_id="test_terminate_instance", instance_ids=instance_ids ) terminate_instance.execute(None) - for id in instance_ids: - assert ec2_hook.get_instance_state(instance_id=id) == "terminated" + for instance_id in instance_ids: + assert ec2_hook.get_instance_state(instance_id=instance_id) == "terminated" class TestEC2StartInstanceOperator(BaseEc2TestClass): @@ -253,15 +253,15 @@ def test_hibernate_multiple_instances(self): instance_ids = create_instances.execute(None) assert len(instance_ids) == 5 - for id in instance_ids: - assert ec2_hook.get_instance_state(instance_id=id) == "running" + for instance_id in instance_ids: + assert 
ec2_hook.get_instance_state(instance_id=instance_id) == "running" hibernate_instance = EC2HibernateInstanceOperator( task_id="test_hibernate_instance", instance_ids=instance_ids ) hibernate_instance.execute(None) - for id in instance_ids: - assert ec2_hook.get_instance_state(instance_id=id) == "stopped" + for instance_id in instance_ids: + assert ec2_hook.get_instance_state(instance_id=instance_id) == "stopped" @mock_aws def test_cannot_hibernate_instance(self): @@ -319,8 +319,8 @@ def test_cannot_hibernate_some_instances(self): hibernate_test.execute(None) # assert instance state is running - for id in instance_ids: - assert ec2_hook.get_instance_state(instance_id=id) == "running" + for instance_id in instance_ids: + assert ec2_hook.get_instance_state(instance_id=instance_id) == "running" class TestEC2RebootInstanceOperator(BaseEc2TestClass): @@ -363,12 +363,12 @@ def test_reboot_multiple_instances(self): instance_ids = create_instances.execute(None) assert len(instance_ids) == 5 - for id in instance_ids: - assert ec2_hook.get_instance_state(instance_id=id) == "running" + for instance_id in instance_ids: + assert ec2_hook.get_instance_state(instance_id=instance_id) == "running" terminate_instance = EC2RebootInstanceOperator( task_id="test_reboot_instance", instance_ids=instance_ids ) terminate_instance.execute(None) - for id in instance_ids: - assert ec2_hook.get_instance_state(instance_id=id) == "running" + for instance_id in instance_ids: + assert ec2_hook.get_instance_state(instance_id=instance_id) == "running" diff --git a/tests/providers/amazon/aws/operators/test_ecs.py b/tests/providers/amazon/aws/operators/test_ecs.py index 554ccad51458d9..7aaabf40a59794 100644 --- a/tests/providers/amazon/aws/operators/test_ecs.py +++ b/tests/providers/amazon/aws/operators/test_ecs.py @@ -309,8 +309,8 @@ def test_execute_without_failures( assert self.ecs.arn == f"arn:aws:ecs:us-east-1:012345678910:task/{TASK_ID}" def test_task_id_parsing(self): - id = EcsRunTaskOperator._get_ecs_task_id(f"arn:aws:ecs:us-east-1:012345678910:task/{TASK_ID}") - assert id == TASK_ID + task_id = EcsRunTaskOperator._get_ecs_task_id(f"arn:aws:ecs:us-east-1:012345678910:task/{TASK_ID}") + assert task_id == TASK_ID @mock.patch.object(EcsBaseOperator, "client") def test_execute_with_failures(self, client_mock): diff --git a/tests/providers/amazon/aws/operators/test_emr_serverless.py b/tests/providers/amazon/aws/operators/test_emr_serverless.py index be42cf63ba499c..e1b22b91fe1970 100644 --- a/tests/providers/amazon/aws/operators/test_emr_serverless.py +++ b/tests/providers/amazon/aws/operators/test_emr_serverless.py @@ -100,7 +100,7 @@ def test_execute_successfully_with_wait_for_completion(self, mock_conn, mock_wai waiter_delay=0, ) - id = operator.execute(None) + result = operator.execute(None) mock_conn.create_application.assert_called_once_with( clientToken=client_request_token, @@ -117,7 +117,7 @@ def test_execute_successfully_with_wait_for_completion(self, mock_conn, mock_wai assert mock_waiter().wait.call_count == 2 mock_conn.start_application.assert_called_once_with(applicationId=application_id) - assert id == application_id + assert result == application_id mock_conn.get_application.call_count == 2 @mock.patch.object(EmrServerlessHook, "get_waiter") @@ -138,7 +138,7 @@ def test_execute_successfully_no_wait_for_completion(self, mock_conn, mock_waite config=config, ) - id = operator.execute(None) + result = operator.execute(None) mock_conn.create_application.assert_called_once_with( clientToken=client_request_token, 
@@ -149,7 +149,7 @@ def test_execute_successfully_no_wait_for_completion(self, mock_conn, mock_waite mock_conn.start_application.assert_called_once_with(applicationId=application_id) mock_waiter().wait.assert_called_once() - assert id == application_id + assert result == application_id @mock.patch.object(EmrServerlessHook, "conn") def test_failed_create_application_request(self, mock_conn): @@ -403,12 +403,12 @@ def test_job_run_app_started(self, mock_conn, mock_get_waiter): job_driver=job_driver, configuration_overrides=configuration_overrides, ) - id = operator.execute(self.mock_context) + result = operator.execute(self.mock_context) default_name = operator.name assert operator.wait_for_completion is True mock_conn.get_application.assert_called_once_with(applicationId=application_id) - assert id == job_run_id + assert result == job_run_id mock_conn.start_job_run.assert_called_once_with( clientToken=client_request_token, applicationId=application_id, @@ -475,13 +475,13 @@ def test_job_run_app_not_started(self, mock_conn, mock_get_waiter): job_driver=job_driver, configuration_overrides=configuration_overrides, ) - id = operator.execute(self.mock_context) + result = operator.execute(self.mock_context) default_name = operator.name assert operator.wait_for_completion is True mock_conn.get_application.assert_called_once_with(applicationId=application_id) assert mock_get_waiter().wait.call_count == 2 - assert id == job_run_id + assert result == job_run_id mock_conn.start_job_run.assert_called_once_with( clientToken=client_request_token, applicationId=application_id, @@ -544,12 +544,12 @@ def test_job_run_app_not_started_no_wait_for_completion(self, mock_conn, mock_ge configuration_overrides=configuration_overrides, wait_for_completion=False, ) - id = operator.execute(self.mock_context) + result = operator.execute(self.mock_context) default_name = operator.name mock_conn.get_application.assert_called_once_with(applicationId=application_id) mock_get_waiter().wait.assert_called_once() - assert id == job_run_id + assert result == job_run_id mock_conn.start_job_run.assert_called_once_with( clientToken=client_request_token, applicationId=application_id, @@ -578,8 +578,8 @@ def test_job_run_app_started_no_wait_for_completion(self, mock_conn, mock_get_wa configuration_overrides=configuration_overrides, wait_for_completion=False, ) - id = operator.execute(self.mock_context) - assert id == job_run_id + result = operator.execute(self.mock_context) + assert result == job_run_id default_name = operator.name mock_conn.start_job_run.assert_called_once_with( @@ -746,11 +746,11 @@ def test_cancel_job_run(self, mock_conn): wait_for_completion=False, ) - id = operator.execute(self.mock_context) + result = operator.execute(self.mock_context) operator.on_kill() mock_conn.cancel_job_run.assert_called_once_with( applicationId=application_id, - jobRunId=id, + jobRunId=result, ) @pytest.mark.parametrize( diff --git a/tests/providers/elasticsearch/log/elasticmock/fake_elasticsearch.py b/tests/providers/elasticsearch/log/elasticmock/fake_elasticsearch.py index 26e47cbf08f7a6..6e4e20e4af6e92 100644 --- a/tests/providers/elasticsearch/log/elasticmock/fake_elasticsearch.py +++ b/tests/providers/elasticsearch/log/elasticmock/fake_elasticsearch.py @@ -210,7 +210,7 @@ def index(self, index, doc_type, body, id=None, params=None, headers=None): self.__documents_dict[index] = [] if id is None: - id = get_random_id() + id = get_random_id() # noqa: A001 id is a part of ``Elasticsearch.index`` method version = 1 diff --git 
a/tests/providers/google/cloud/hooks/test_cloud_batch.py b/tests/providers/google/cloud/hooks/test_cloud_batch.py index de83b64fbe799c..730196a6c4f9a7 100644 --- a/tests/providers/google/cloud/hooks/test_cloud_batch.py +++ b/tests/providers/google/cloud/hooks/test_cloud_batch.py @@ -125,18 +125,18 @@ def test_list_jobs(self, mock_batch_service_client, cloud_batch_hook): number_of_jobs = 3 region = "us-central1" project_id = "test_project_id" - filter = "filter_description" + filter_ = "filter_description" page = self._mock_pager(number_of_jobs) mock_batch_service_client.return_value.list_jobs.return_value = page - jobs_list = cloud_batch_hook.list_jobs(region=region, project_id=project_id, filter=filter) + jobs_list = cloud_batch_hook.list_jobs(region=region, project_id=project_id, filter=filter_) for i in range(number_of_jobs): assert jobs_list[i].name == f"name{i}" expected_list_jobs_request: ListJobsRequest = ListJobsRequest( - parent=f"projects/{project_id}/locations/{region}", filter=filter + parent=f"projects/{project_id}/locations/{region}", filter=filter_ ) mock_batch_service_client.return_value.list_jobs.assert_called_once_with( request=expected_list_jobs_request @@ -152,13 +152,13 @@ def test_list_jobs_with_limit(self, mock_batch_service_client, cloud_batch_hook) limit = 2 region = "us-central1" project_id = "test_project_id" - filter = "filter_description" + filter_ = "filter_description" page = self._mock_pager(number_of_jobs) mock_batch_service_client.return_value.list_jobs.return_value = page jobs_list = cloud_batch_hook.list_jobs( - region=region, project_id=project_id, filter=filter, limit=limit + region=region, project_id=project_id, filter=filter_, limit=limit ) assert len(jobs_list) == limit @@ -175,13 +175,13 @@ def test_list_jobs_with_limit_zero(self, mock_batch_service_client, cloud_batch_ limit = 0 region = "us-central1" project_id = "test_project_id" - filter = "filter_description" + filter_ = "filter_description" page = self._mock_pager(number_of_jobs) mock_batch_service_client.return_value.list_jobs.return_value = page jobs_list = cloud_batch_hook.list_jobs( - region=region, project_id=project_id, filter=filter, limit=limit + region=region, project_id=project_id, filter=filter_, limit=limit ) assert len(jobs_list) == 0 @@ -196,13 +196,13 @@ def test_list_jobs_with_limit_greater_then_range(self, mock_batch_service_client limit = 5 region = "us-central1" project_id = "test_project_id" - filter = "filter_description" + filter_ = "filter_description" page = self._mock_pager(number_of_jobs) mock_batch_service_client.return_value.list_jobs.return_value = page jobs_list = cloud_batch_hook.list_jobs( - region=region, project_id=project_id, filter=filter, limit=limit + region=region, project_id=project_id, filter=filter_, limit=limit ) assert len(jobs_list) == number_of_jobs @@ -219,13 +219,13 @@ def test_list_jobs_with_limit_less_than_zero(self, mock_batch_service_client, cl limit = -1 region = "us-central1" project_id = "test_project_id" - filter = "filter_description" + filter_ = "filter_description" page = self._mock_pager(number_of_jobs) mock_batch_service_client.return_value.list_jobs.return_value = page with pytest.raises(expected_exception=AirflowException): - cloud_batch_hook.list_jobs(region=region, project_id=project_id, filter=filter, limit=limit) + cloud_batch_hook.list_jobs(region=region, project_id=project_id, filter=filter_, limit=limit) @mock.patch( "airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__", @@ -237,14 +237,14 @@ def 
test_list_tasks_with_limit(self, mock_batch_service_client, cloud_batch_hook limit = 2 region = "us-central1" project_id = "test_project_id" - filter = "filter_description" + filter_ = "filter_description" job_name = "test_job" page = self._mock_pager(number_of_tasks) mock_batch_service_client.return_value.list_tasks.return_value = page tasks_list = cloud_batch_hook.list_tasks( - region=region, project_id=project_id, job_name=job_name, filter=filter, limit=limit + region=region, project_id=project_id, job_name=job_name, filter=filter_, limit=limit ) assert len(tasks_list) == limit @@ -261,14 +261,14 @@ def test_list_tasks_with_limit_greater_then_range(self, mock_batch_service_clien limit = 5 region = "us-central1" project_id = "test_project_id" - filter = "filter_description" + filter_ = "filter_description" job_name = "test_job" page = self._mock_pager(number_of_tasks) mock_batch_service_client.return_value.list_tasks.return_value = page tasks_list = cloud_batch_hook.list_tasks( - region=region, project_id=project_id, filter=filter, job_name=job_name, limit=limit + region=region, project_id=project_id, filter=filter_, job_name=job_name, limit=limit ) assert len(tasks_list) == number_of_tasks @@ -285,7 +285,7 @@ def test_list_tasks_with_limit_less_than_zero(self, mock_batch_service_client, c limit = -1 region = "us-central1" project_id = "test_project_id" - filter = "filter_description" + filter_ = "filter_description" job_name = "test_job" page = self._mock_pager(number_of_tasks) @@ -293,7 +293,7 @@ def test_list_tasks_with_limit_less_than_zero(self, mock_batch_service_client, c with pytest.raises(expected_exception=AirflowException): cloud_batch_hook.list_tasks( - region=region, project_id=project_id, job_name=job_name, filter=filter, limit=limit + region=region, project_id=project_id, job_name=job_name, filter=filter_, limit=limit ) def _mock_job_with_status(self, status: JobStatus.State): diff --git a/tests/providers/google/cloud/operators/test_cloud_batch.py b/tests/providers/google/cloud/operators/test_cloud_batch.py index 31980aefceefba..62ee18a7a1a0b4 100644 --- a/tests/providers/google/cloud/operators/test_cloud_batch.py +++ b/tests/providers/google/cloud/operators/test_cloud_batch.py @@ -119,33 +119,33 @@ def _delete_operation_mock(self): class TestCloudBatchListJobsOperator: @mock.patch(CLOUD_BATCH_HOOK_PATH) def test_execute(self, hook_mock): - filter = "filter_description" + filter_ = "filter_description" limit = 2 operator = CloudBatchListJobsOperator( - task_id=TASK_ID, project_id=PROJECT_ID, region=REGION, filter=filter, limit=limit + task_id=TASK_ID, project_id=PROJECT_ID, region=REGION, filter=filter_, limit=limit ) operator.execute(context=mock.MagicMock()) hook_mock.return_value.list_jobs.assert_called_once_with( - region=REGION, project_id=PROJECT_ID, filter=filter, limit=limit + region=REGION, project_id=PROJECT_ID, filter=filter_, limit=limit ) @mock.patch(CLOUD_BATCH_HOOK_PATH) def test_execute_with_invalid_limit(self, hook_mock): - filter = "filter_description" + filter_ = "filter_description" limit = -1 with pytest.raises(expected_exception=AirflowException): CloudBatchListJobsOperator( - task_id=TASK_ID, project_id=PROJECT_ID, region=REGION, filter=filter, limit=limit + task_id=TASK_ID, project_id=PROJECT_ID, region=REGION, filter=filter_, limit=limit ) class TestCloudBatchListTasksOperator: @mock.patch(CLOUD_BATCH_HOOK_PATH) def test_execute(self, hook_mock): - filter = "filter_description" + filter_ = "filter_description" limit = 2 job_name = "test_job" @@ 
-154,7 +154,7 @@ def test_execute(self, hook_mock): project_id=PROJECT_ID, region=REGION, job_name=job_name, - filter=filter, + filter=filter_, limit=limit, ) @@ -163,7 +163,7 @@ def test_execute(self, hook_mock): hook_mock.return_value.list_tasks.assert_called_once_with( region=REGION, project_id=PROJECT_ID, - filter=filter, + filter=filter_, job_name=job_name, limit=limit, group_name="group0", @@ -171,7 +171,7 @@ def test_execute(self, hook_mock): @mock.patch(CLOUD_BATCH_HOOK_PATH) def test_execute_with_invalid_limit(self, hook_mock): - filter = "filter_description" + filter_ = "filter_description" limit = -1 job_name = "test_job" @@ -181,6 +181,6 @@ def test_execute_with_invalid_limit(self, hook_mock): project_id=PROJECT_ID, region=REGION, job_name=job_name, - filter=filter, + filter=filter_, limit=limit, ) diff --git a/tests/providers/google/cloud/operators/test_dataproc.py b/tests/providers/google/cloud/operators/test_dataproc.py index 7d986b50173106..544226b332cc41 100644 --- a/tests/providers/google/cloud/operators/test_dataproc.py +++ b/tests/providers/google/cloud/operators/test_dataproc.py @@ -2834,7 +2834,7 @@ class TestDataprocListBatchesOperator: def test_execute(self, mock_hook): page_token = "page_token" page_size = 42 - filter = 'batch_id=~"a-batch-id*" AND create_time>="2023-07-05T14:25:04.643818Z"' + filter_ = 'batch_id=~"a-batch-id*" AND create_time>="2023-07-05T14:25:04.643818Z"' order_by = "create_time desc" op = DataprocListBatchesOperator( @@ -2848,7 +2848,7 @@ def test_execute(self, mock_hook): retry=RETRY, timeout=TIMEOUT, metadata=METADATA, - filter=filter, + filter=filter_, order_by=order_by, ) op.execute(context=MagicMock()) @@ -2861,7 +2861,7 @@ def test_execute(self, mock_hook): retry=RETRY, timeout=TIMEOUT, metadata=METADATA, - filter=filter, + filter=filter_, order_by=order_by, ) diff --git a/tests/providers/google/cloud/operators/test_vertex_ai.py b/tests/providers/google/cloud/operators/test_vertex_ai.py index e891d65249a11a..e1187f9c3155da 100644 --- a/tests/providers/google/cloud/operators/test_vertex_ai.py +++ b/tests/providers/google/cloud/operators/test_vertex_ai.py @@ -799,7 +799,7 @@ class TestVertexAIListCustomTrainingJobOperator: def test_execute(self, mock_hook): page_token = "page_token" page_size = 42 - filter = "filter" + filter_ = "filter" read_mask = "read_mask" op = ListCustomTrainingJobOperator( @@ -810,7 +810,7 @@ def test_execute(self, mock_hook): project_id=GCP_PROJECT, page_size=page_size, page_token=page_token, - filter=filter, + filter=filter_, read_mask=read_mask, retry=RETRY, timeout=TIMEOUT, @@ -823,7 +823,7 @@ def test_execute(self, mock_hook): project_id=GCP_PROJECT, page_size=page_size, page_token=page_token, - filter=filter, + filter=filter_, read_mask=read_mask, retry=RETRY, timeout=TIMEOUT, @@ -949,7 +949,7 @@ class TestVertexAIListDatasetsOperator: def test_execute(self, mock_hook, to_dict_mock): page_token = "page_token" page_size = 42 - filter = "filter" + filter_ = "filter" read_mask = "read_mask" order_by = "order_by" @@ -959,7 +959,7 @@ def test_execute(self, mock_hook, to_dict_mock): impersonation_chain=IMPERSONATION_CHAIN, region=GCP_LOCATION, project_id=GCP_PROJECT, - filter=filter, + filter=filter_, page_size=page_size, page_token=page_token, read_mask=read_mask, @@ -973,7 +973,7 @@ def test_execute(self, mock_hook, to_dict_mock): mock_hook.return_value.list_datasets.assert_called_once_with( region=GCP_LOCATION, project_id=GCP_PROJECT, - filter=filter, + filter=filter_, page_size=page_size, page_token=page_token, 
read_mask=read_mask, @@ -1349,7 +1349,7 @@ class TestVertexAIListAutoMLTrainingJobOperator: def test_execute(self, mock_hook): page_token = "page_token" page_size = 42 - filter = "filter" + filter_ = "filter" read_mask = "read_mask" op = ListAutoMLTrainingJobOperator( @@ -1360,7 +1360,7 @@ def test_execute(self, mock_hook): project_id=GCP_PROJECT, page_size=page_size, page_token=page_token, - filter=filter, + filter=filter_, read_mask=read_mask, retry=RETRY, timeout=TIMEOUT, @@ -1373,7 +1373,7 @@ def test_execute(self, mock_hook): project_id=GCP_PROJECT, page_size=page_size, page_token=page_token, - filter=filter, + filter=filter_, read_mask=read_mask, retry=RETRY, timeout=TIMEOUT, @@ -1575,7 +1575,7 @@ class TestVertexAIListBatchPredictionJobsOperator: def test_execute(self, mock_hook): page_token = "page_token" page_size = 42 - filter = "filter" + filter_ = "filter" read_mask = "read_mask" op = ListBatchPredictionJobsOperator( @@ -1586,7 +1586,7 @@ def test_execute(self, mock_hook): project_id=GCP_PROJECT, page_size=page_size, page_token=page_token, - filter=filter, + filter=filter_, read_mask=read_mask, retry=RETRY, timeout=TIMEOUT, @@ -1599,7 +1599,7 @@ def test_execute(self, mock_hook): project_id=GCP_PROJECT, page_size=page_size, page_token=page_token, - filter=filter, + filter=filter_, read_mask=read_mask, retry=RETRY, timeout=TIMEOUT, @@ -1698,7 +1698,7 @@ class TestVertexAIListEndpointsOperator: def test_execute(self, mock_hook): page_token = "page_token" page_size = 42 - filter = "filter" + filter_ = "filter" read_mask = "read_mask" order_by = "order_by" @@ -1710,7 +1710,7 @@ def test_execute(self, mock_hook): project_id=GCP_PROJECT, page_size=page_size, page_token=page_token, - filter=filter, + filter=filter_, read_mask=read_mask, order_by=order_by, retry=RETRY, @@ -1724,7 +1724,7 @@ def test_execute(self, mock_hook): project_id=GCP_PROJECT, page_size=page_size, page_token=page_token, - filter=filter, + filter=filter_, read_mask=read_mask, order_by=order_by, retry=RETRY, @@ -1967,7 +1967,7 @@ class TestVertexAIListHyperparameterTuningJobOperator: def test_execute(self, mock_hook): page_token = "page_token" page_size = 42 - filter = "filter" + filter_ = "filter" read_mask = "read_mask" op = ListHyperparameterTuningJobOperator( @@ -1978,7 +1978,7 @@ def test_execute(self, mock_hook): project_id=GCP_PROJECT, page_size=page_size, page_token=page_token, - filter=filter, + filter=filter_, read_mask=read_mask, retry=RETRY, timeout=TIMEOUT, @@ -1991,7 +1991,7 @@ def test_execute(self, mock_hook): project_id=GCP_PROJECT, page_size=page_size, page_token=page_token, - filter=filter, + filter=filter_, read_mask=read_mask, retry=RETRY, timeout=TIMEOUT, @@ -2059,7 +2059,7 @@ class TestVertexAIListModelsOperator: def test_execute(self, mock_hook, to_dict_mock): page_token = "page_token" page_size = 42 - filter = "filter" + filter_ = "filter" read_mask = "read_mask" order_by = "order_by" @@ -2069,7 +2069,7 @@ def test_execute(self, mock_hook, to_dict_mock): impersonation_chain=IMPERSONATION_CHAIN, region=GCP_LOCATION, project_id=GCP_PROJECT, - filter=filter, + filter=filter_, page_size=page_size, page_token=page_token, read_mask=read_mask, @@ -2083,7 +2083,7 @@ def test_execute(self, mock_hook, to_dict_mock): mock_hook.return_value.list_models.assert_called_once_with( region=GCP_LOCATION, project_id=GCP_PROJECT, - filter=filter, + filter=filter_, page_size=page_size, page_token=page_token, read_mask=read_mask, @@ -2445,7 +2445,7 @@ class TestVertexAIListPipelineJobOperator: def 
test_execute(self, mock_hook): page_token = "page_token" page_size = 42 - filter = "filter" + filter_ = "filter" order_by = "order_by" op = ListPipelineJobOperator( @@ -2456,7 +2456,7 @@ def test_execute(self, mock_hook): project_id=GCP_PROJECT, page_size=page_size, page_token=page_token, - filter=filter, + filter=filter_, order_by=order_by, retry=RETRY, timeout=TIMEOUT, @@ -2469,7 +2469,7 @@ def test_execute(self, mock_hook): project_id=GCP_PROJECT, page_size=page_size, page_token=page_token, - filter=filter, + filter=filter_, order_by=order_by, retry=RETRY, timeout=TIMEOUT, diff --git a/tests/providers/microsoft/azure/triggers/test_msgraph.py b/tests/providers/microsoft/azure/triggers/test_msgraph.py index 23085563cf8f99..a9d4a1881af3d0 100644 --- a/tests/providers/microsoft/azure/triggers/test_msgraph.py +++ b/tests/providers/microsoft/azure/triggers/test_msgraph.py @@ -142,9 +142,9 @@ def test_serialize_when_bytes_then_base64_encoded(self): assert actual == content def test_serialize_when_dict_with_uuid_datatime_and_pendulum_then_json(self): - id = uuid4() + uniq_id = uuid4() response = { - "id": id, + "id": uniq_id, "creationDate": datetime(2024, 2, 5), "modificationTime": pendulum.datetime(2024, 2, 5), } @@ -154,7 +154,7 @@ def test_serialize_when_dict_with_uuid_datatime_and_pendulum_then_json(self): assert isinstance(actual, str) assert ( actual - == f'{{"id": "{id}", "creationDate": "2024-02-05T00:00:00", "modificationTime": "2024-02-05T00:00:00+00:00"}}' + == f'{{"id": "{uniq_id}", "creationDate": "2024-02-05T00:00:00", "modificationTime": "2024-02-05T00:00:00+00:00"}}' ) def test_deserialize_when_json(self): diff --git a/tests/providers/openlineage/extractors/test_python.py b/tests/providers/openlineage/extractors/test_python.py index 7d47b9ebc6f846..100a0de56dbedd 100644 --- a/tests/providers/openlineage/extractors/test_python.py +++ b/tests/providers/openlineage/extractors/test_python.py @@ -51,22 +51,24 @@ python_task_getcwd >> bash_task -def callable(): +def sample_callable(): print(10) -CODE = "def callable():\n print(10)\n" +CODE = "def sample_callable():\n print(10)\n" def test_extract_source_code(): - code = inspect.getsource(callable) + code = inspect.getsource(sample_callable) assert code == CODE @patch("airflow.providers.openlineage.conf.is_source_enabled") def test_extract_operator_code_disabled(mocked_source_enabled): mocked_source_enabled.return_value = False - operator = PythonOperator(task_id="taskid", python_callable=callable, op_args=(1, 2), op_kwargs={"a": 1}) + operator = PythonOperator( + task_id="taskid", python_callable=sample_callable, op_args=(1, 2), op_kwargs={"a": 1} + ) with warnings.catch_warnings(): warnings.simplefilter("ignore", AirflowProviderDeprecationWarning) result = PythonExtractor(operator).extract() @@ -84,7 +86,9 @@ def test_extract_operator_code_disabled(mocked_source_enabled): @patch("airflow.providers.openlineage.conf.is_source_enabled") def test_extract_operator_code_enabled(mocked_source_enabled): mocked_source_enabled.return_value = True - operator = PythonOperator(task_id="taskid", python_callable=callable, op_args=(1, 2), op_kwargs={"a": 1}) + operator = PythonOperator( + task_id="taskid", python_callable=sample_callable, op_args=(1, 2), op_kwargs={"a": 1} + ) with warnings.catch_warnings(): warnings.simplefilter("ignore", AirflowProviderDeprecationWarning) result = PythonExtractor(operator).extract() diff --git a/tests/providers/openlineage/plugins/test_utils.py b/tests/providers/openlineage/plugins/test_utils.py index 
6062e87b8da09b..a4355307fe6e67 100644 --- a/tests/providers/openlineage/plugins/test_utils.py +++ b/tests/providers/openlineage/plugins/test_utils.py @@ -88,11 +88,11 @@ def test_parse_version(): def test_safe_dict(): assert str(SafeStrDict({"a": 1})) == str({"a": 1}) - class NotImplemented: + class FakeNotImplemented: def __str__(self): raise NotImplementedError - assert str(SafeStrDict({"a": NotImplemented()})) == str({}) + assert str(SafeStrDict({"a": FakeNotImplemented()})) == str({}) def test_info_json_encodable(): diff --git a/tests/utils/test_sqlalchemy.py b/tests/utils/test_sqlalchemy.py index 1cff07079541e3..ccb0bbbc6c0db6 100644 --- a/tests/utils/test_sqlalchemy.py +++ b/tests/utils/test_sqlalchemy.py @@ -188,7 +188,7 @@ def teardown_method(self): class TestExecutorConfigType: @pytest.mark.parametrize( - "input, expected", + "input_value, expected", [ ("anything", "anything"), ( @@ -202,7 +202,7 @@ class TestExecutorConfigType: ), ], ) - def test_bind_processor(self, input, expected): + def test_bind_processor(self, input_value, expected): """ The returned bind processor should pickle the object as is, unless it is a dictionary with a pod_override node, in which case it should run it through BaseSerialization. @@ -211,11 +211,11 @@ def test_bind_processor(self, input, expected): mock_dialect = MagicMock() mock_dialect.dbapi = None process = config_type.bind_processor(mock_dialect) - assert pickle.loads(process(input)) == expected - assert pickle.loads(process(input)) == expected, "should not mutate variable" + assert pickle.loads(process(input_value)) == expected + assert pickle.loads(process(input_value)) == expected, "should not mutate variable" @pytest.mark.parametrize( - "input", + "input_value", [ pytest.param( pickle.dumps("anything"), @@ -235,7 +235,7 @@ def test_bind_processor(self, input, expected): ), ], ) - def test_result_processor(self, input): + def test_result_processor(self, input_value): """ The returned bind processor should pickle the object as is, unless it is a dictionary with a pod_override node whose value was serialized with BaseSerialization. @@ -244,8 +244,8 @@ def test_result_processor(self, input): mock_dialect = MagicMock() mock_dialect.dbapi = None process = config_type.result_processor(mock_dialect, None) - result = process(input) - expected = pickle.loads(input) + result = process(input_value) + expected = pickle.loads(input_value) pod_override = isinstance(expected, dict) and expected.get("pod_override") if pod_override and isinstance(pod_override, dict) and pod_override.get(Encoding.TYPE): # We should only deserialize a pod_override with BaseSerialization if @@ -301,7 +301,7 @@ def test_result_processor_bad_pickled_obj(self): with pytest.raises(AttributeError): test_pod.to_dict() # define what will be retrieved from db - input = pickle.dumps({"pod_override": TEST_POD}) + ser = pickle.dumps({"pod_override": TEST_POD}) # get the result processor method config_type = ExecutorConfigType() @@ -310,7 +310,7 @@ def test_result_processor_bad_pickled_obj(self): process = config_type.result_processor(mock_dialect, None) # apply the result processor - result = process(input) + result = process(ser) # show that the pickled (bad) pod is now a good pod, and same as the copy made # before making it bad
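For context, a minimal sketch (standard library only; the function name and sample values are hypothetical, not taken from the Airflow sources) of the failure mode that ruff's A001 rule, enabled in the pyproject.toml hunk above, guards against, and of the # noqa: A001 suppression used where the shadowing name is dictated by an external API (as in the elasticmock fake above):

# A001 flags assignments that reuse a builtin name. The shadowing is legal Python,
# but it silently hides the builtin for the rest of the scope.
def running_count(states):
    filter = "running"  # noqa: A001 - deliberate shadowing, kept here to show the failure mode
    try:
        # This no longer calls the builtin filter(); it calls the string bound above.
        return len(list(filter(lambda s: s == "running", states)))
    except TypeError:
        # The shadowed builtin surfaces as "'str' object is not callable",
        # far from the assignment, so fall back to an explicit comprehension.
        return sum(1 for s in states if s == filter)

if __name__ == "__main__":
    print(running_count(["running", "stopped", "running"]))  # prints 2

Renaming the local, as the hunks above do (filter_, directory, instance_id, result, and so on), removes the hazard outright; the per-file ignores added to pyproject.toml carve out the core and provider modules where such renames are still pending.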