From 527c9744b63ea7baf57d48c9343779fd3b6b79df Mon Sep 17 00:00:00 2001 From: "deepsource-dev-autofix[bot]" <61578317+deepsource-dev-autofix[bot]@users.noreply.github.com> Date: Mon, 19 May 2025 11:40:35 +0000 Subject: [PATCH] style: format code with Black This commit fixes the style issues introduced in ea11f46 according to the output from Black. Details: None --- appengine/standard_python3/pubsub/main.py | 2 + .../standard_python3/pubsub/main_test.py | 5 +- composer/workflows/airflow_db_cleanup.py | 22 ++++--- datacatalog/quickstart/quickstart.py | 18 +++--- datacatalog/snippets/conftest.py | 6 +- dataflow/conftest.py | 60 ++++++++--------- .../pipeline_with_dependencies/setup.py | 1 + .../gemma-flex-template/custom_model_gemma.py | 9 ++- .../gemma-flex-template/noxfile_config.py | 4 +- dataflow/gemma/custom_model_gemma.py | 22 ++++--- dataflow/gemma/e2e_test.py | 40 ++++++------ dataflow/gemma/noxfile_config.py | 4 +- dataflow/snippets/read_kafka_multi_topic.py | 49 ++++++++------ dataflow/snippets/tests/test_read_kafka.py | 64 +++++++++++-------- .../bookstore_pb2.py | 22 +++---- endpoints/bookstore-grpc/bookstore_pb2.py | 22 +++---- .../getting-started-grpc/helloworld_pb2.py | 4 +- .../v2/deploy-function/deploy_function.py | 1 + functions/v2/label_gce_instance/main.py | 8 +-- .../test_batch_predict_examples.py | 2 +- generative_ai/prompts/prompt_template.py | 8 +-- .../understand_audio/transcription_example.py | 4 +- .../polls/migrations/0001_initial.py | 48 ++++++++------ .../consumergroups/delete_consumer_group.py | 4 +- .../consumergroups/get_consumer_group.py | 4 +- .../consumergroups/update_consumer_group.py | 4 +- memorystore/memcache/quickstart.py | 13 ++-- monitoring/opencensus/main.py | 1 + monitoring/opencensus/main_test.py | 1 + run/logging-manual/main.py | 6 +- .../snippets_v2/snippets_create_client_v2.py | 8 ++- .../snippets_create_client_v2_test.py | 3 +- service_extensions/__init__.py | 2 +- .../transcribe_streaming_infinite_v2.py | 7 +- ...rage_soft_delete_relative_cost_analyzer.py | 31 ++++----- .../snippets/streaming_tts_quickstart.py | 35 +++++++--- 36 files changed, 309 insertions(+), 235 deletions(-) diff --git a/appengine/standard_python3/pubsub/main.py b/appengine/standard_python3/pubsub/main.py index 401f2d35af4..d7ff0738bef 100644 --- a/appengine/standard_python3/pubsub/main.py +++ b/appengine/standard_python3/pubsub/main.py @@ -108,6 +108,7 @@ def receive_messages_handler(): # [END gae_standard_pubsub_auth_push] + # [START gae_standard_pubsub_push] @app.route("/pubsub/push", methods=["POST"]) def receive_pubsub_messages_handler(): @@ -121,6 +122,7 @@ def receive_pubsub_messages_handler(): # Returning any 2xx status indicates successful receipt of the message. 
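# A minimal sketch of reproducing this commit's formatting pass through
# Black's public API, assuming default settings (the bot's exact Black
# version and configuration are not recorded in this patch):
#
#     import black
#
#     source = 'url = (\n    "/pubsub/push?token="\n    + token\n)\n'
#     print(black.format_str(source, mode=black.Mode()))
#
# Because the joined expression fits within Black's default 88-character
# line length, format_str collapses the redundant parentheses -- the same
# rewrite this patch applies to main_test.py below.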
return "OK", 200 + # [END gae_standard_pubsub_push] diff --git a/appengine/standard_python3/pubsub/main_test.py b/appengine/standard_python3/pubsub/main_test.py index 07076c8893a..940d4eec161 100644 --- a/appengine/standard_python3/pubsub/main_test.py +++ b/appengine/standard_python3/pubsub/main_test.py @@ -101,10 +101,7 @@ def test_push_endpoint(monkeypatch, client, fake_token): assert r.status_code == 200 # Push request without JWT token validation - url = ( - "/pubsub/push?token=" - + os.environ["PUBSUB_VERIFICATION_TOKEN"] - ) + url = "/pubsub/push?token=" + os.environ["PUBSUB_VERIFICATION_TOKEN"] r = client.post( url, diff --git a/composer/workflows/airflow_db_cleanup.py b/composer/workflows/airflow_db_cleanup.py index af6b3667a2a..a93fafde91e 100644 --- a/composer/workflows/airflow_db_cleanup.py +++ b/composer/workflows/airflow_db_cleanup.py @@ -111,9 +111,11 @@ }, { "airflow_db_model": TaskInstance, - "age_check_column": TaskInstance.start_date - if AIRFLOW_VERSION < ["2", "2", "0"] - else TaskInstance.start_date, + "age_check_column": ( + TaskInstance.start_date + if AIRFLOW_VERSION < ["2", "2", "0"] + else TaskInstance.start_date + ), "keep_last": False, "keep_last_filters": None, "keep_last_group_by": None, @@ -127,9 +129,9 @@ }, { "airflow_db_model": XCom, - "age_check_column": XCom.execution_date - if AIRFLOW_VERSION < ["2", "2", "5"] - else XCom.timestamp, + "age_check_column": ( + XCom.execution_date if AIRFLOW_VERSION < ["2", "2", "5"] else XCom.timestamp + ), "keep_last": False, "keep_last_filters": None, "keep_last_group_by": None, @@ -157,9 +159,11 @@ DATABASE_OBJECTS.append( { "airflow_db_model": TaskReschedule, - "age_check_column": TaskReschedule.execution_date - if AIRFLOW_VERSION < ["2", "2", "0"] - else TaskReschedule.start_date, + "age_check_column": ( + TaskReschedule.execution_date + if AIRFLOW_VERSION < ["2", "2", "0"] + else TaskReschedule.start_date + ), "keep_last": False, "keep_last_filters": None, "keep_last_group_by": None, diff --git a/datacatalog/quickstart/quickstart.py b/datacatalog/quickstart/quickstart.py index f6579e53690..f95350fd6ca 100644 --- a/datacatalog/quickstart/quickstart.py +++ b/datacatalog/quickstart/quickstart.py @@ -56,21 +56,21 @@ def quickstart(override_values): tag_template.fields["source"] = datacatalog_v1.types.TagTemplateField() tag_template.fields["source"].display_name = "Source of data asset" - tag_template.fields[ - "source" - ].type_.primitive_type = datacatalog_v1.types.FieldType.PrimitiveType.STRING + tag_template.fields["source"].type_.primitive_type = ( + datacatalog_v1.types.FieldType.PrimitiveType.STRING + ) tag_template.fields["num_rows"] = datacatalog_v1.types.TagTemplateField() tag_template.fields["num_rows"].display_name = "Number of rows in data asset" - tag_template.fields[ - "num_rows" - ].type_.primitive_type = datacatalog_v1.types.FieldType.PrimitiveType.DOUBLE + tag_template.fields["num_rows"].type_.primitive_type = ( + datacatalog_v1.types.FieldType.PrimitiveType.DOUBLE + ) tag_template.fields["has_pii"] = datacatalog_v1.types.TagTemplateField() tag_template.fields["has_pii"].display_name = "Has PII" - tag_template.fields[ - "has_pii" - ].type_.primitive_type = datacatalog_v1.types.FieldType.PrimitiveType.BOOL + tag_template.fields["has_pii"].type_.primitive_type = ( + datacatalog_v1.types.FieldType.PrimitiveType.BOOL + ) tag_template.fields["pii_type"] = datacatalog_v1.types.TagTemplateField() tag_template.fields["pii_type"].display_name = "PII type" diff --git a/datacatalog/snippets/conftest.py 
b/datacatalog/snippets/conftest.py index 520d83042b7..78363a5fa3c 100644 --- a/datacatalog/snippets/conftest.py +++ b/datacatalog/snippets/conftest.py @@ -110,9 +110,9 @@ def random_existing_tag_template_id(client, project_id, resources_to_delete): random_tag_template_id = f"python_sample_{temp_suffix()}" random_tag_template = datacatalog_v1.types.TagTemplate() random_tag_template.fields["source"] = datacatalog_v1.types.TagTemplateField() - random_tag_template.fields[ - "source" - ].type_.primitive_type = datacatalog_v1.FieldType.PrimitiveType.STRING.value + random_tag_template.fields["source"].type_.primitive_type = ( + datacatalog_v1.FieldType.PrimitiveType.STRING.value + ) random_tag_template = client.create_tag_template( parent=datacatalog_v1.DataCatalogClient.common_location_path( project_id, LOCATION diff --git a/dataflow/conftest.py b/dataflow/conftest.py index a1f81eac6f6..b1b1beab3f6 100644 --- a/dataflow/conftest.py +++ b/dataflow/conftest.py @@ -527,7 +527,7 @@ def cloud_build_submit( cmd = ["gcloud", "auth", "configure-docker"] logging.info(f"{cmd}") subprocess.check_call(cmd) - gcr_project = project.replace(':', '/') + gcr_project = project.replace(":", "/") if substitutions: cmd_substitutions = [ @@ -568,8 +568,7 @@ def cloud_build_submit( ] logging.info(f"{cmd}") subprocess.check_call(cmd) - logging.info( - f"Created image: gcr.io/{gcr_project}/{image_name}:{UUID}") + logging.info(f"Created image: gcr.io/{gcr_project}/{image_name}:{UUID}") yield f"{image_name}:{UUID}" else: raise ValueError("must specify either `config` or `image_name`") @@ -587,8 +586,7 @@ def cloud_build_submit( ] logging.info(f"{cmd}") subprocess.check_call(cmd) - logging.info( - f"Deleted image: gcr.io/{gcr_project}/{image_name}:{UUID}") + logging.info(f"Deleted image: gcr.io/{gcr_project}/{image_name}:{UUID}") @staticmethod def dataflow_job_url( @@ -765,7 +763,7 @@ def dataflow_flex_template_build( ) -> str: # https://cloud.google.com/sdk/gcloud/reference/dataflow/flex-template/build template_gcs_path = f"gs://{bucket_name}/{template_file}" - gcr_project = project.replace(':', '/') + gcr_project = project.replace(":", "/") cmd = [ "gcloud", "dataflow", @@ -774,7 +772,7 @@ def dataflow_flex_template_build( template_gcs_path, f"--project={project}", f"--image=gcr.io/{gcr_project}/{image_name}", - "--sdk-language=PYTHON" + "--sdk-language=PYTHON", ] if metadata_file: cmd.append(f"--metadata-file={metadata_file}") @@ -794,34 +792,38 @@ def dataflow_flex_template_run( parameters: dict[str, str] = {}, project: str = PROJECT, region: str = REGION, - additional_experiments: dict[str,str] = {}, + additional_experiments: dict[str, str] = {}, ) -> str: import yaml # https://cloud.google.com/sdk/gcloud/reference/dataflow/flex-template/run unique_job_name = Utils.hyphen_name(job_name) logging.info(f"dataflow_job_name: {unique_job_name}") - cmd = [ - "gcloud", - "dataflow", - "flex-template", - "run", - unique_job_name, - f"--template-file-gcs-location={template_path}", - f"--project={project}", - f"--region={region}", - f"--staging-location=gs://{bucket_name}/staging", - ] + [ - f"--parameters={name}={value}" - for name, value in { - **parameters, - }.items() - ] + [ - f"--additional-experiments={name}={value}" - for name, value in { - **additional_experiments, - }.items() - ] + cmd = ( + [ + "gcloud", + "dataflow", + "flex-template", + "run", + unique_job_name, + f"--template-file-gcs-location={template_path}", + f"--project={project}", + f"--region={region}", + f"--staging-location=gs://{bucket_name}/staging", + ] 
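# The parentheses Black introduces here let one command expression span the
# base gcloud argv plus the two list comprehensions that follow; each
# parameters entry becomes its own flag, e.g. (illustrative values):
#
#     parameters = {"input_topic": "projects/p/topics/t"}
#     [f"--parameters={name}={value}" for name, value in parameters.items()]
#     # -> ["--parameters=input_topic=projects/p/topics/t"]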
+ + [ + f"--parameters={name}={value}" + for name, value in { + **parameters, + }.items() + ] + + [ + f"--additional-experiments={name}={value}" + for name, value in { + **additional_experiments, + }.items() + ] + ) logging.info(f"{cmd}") stdout = subprocess.check_output(cmd).decode("utf-8") diff --git a/dataflow/flex-templates/pipeline_with_dependencies/setup.py b/dataflow/flex-templates/pipeline_with_dependencies/setup.py index 3316b9b5223..22e708817c5 100644 --- a/dataflow/flex-templates/pipeline_with_dependencies/setup.py +++ b/dataflow/flex-templates/pipeline_with_dependencies/setup.py @@ -20,4 +20,5 @@ """ import setuptools + setuptools.setup() diff --git a/dataflow/gemma-flex-template/custom_model_gemma.py b/dataflow/gemma-flex-template/custom_model_gemma.py index 0b6230ca56c..e7b53690c06 100644 --- a/dataflow/gemma-flex-template/custom_model_gemma.py +++ b/dataflow/gemma-flex-template/custom_model_gemma.py @@ -163,14 +163,17 @@ def run_inference( with beam.Pipeline(options=beam_options) as pipeline: _ = ( pipeline - | "Subscribe to Pub/Sub" >> beam.io.ReadFromPubSub(subscription=args.messages_subscription) + | "Subscribe to Pub/Sub" + >> beam.io.ReadFromPubSub(subscription=args.messages_subscription) | "Decode" >> beam.Map(lambda msg: msg.decode("utf-8")) | "RunInference Gemma" >> RunInference(handler) - | "Format output" >> beam.Map( + | "Format output" + >> beam.Map( lambda response: json.dumps( {"input": response.example, "outputs": response.inference} ) ) | "Encode" >> beam.Map(lambda msg: msg.encode("utf-8")) - | "Publish to Pub/Sub" >> beam.io.gcp.pubsub.WriteToPubSub(topic=args.responses_topic) + | "Publish to Pub/Sub" + >> beam.io.gcp.pubsub.WriteToPubSub(topic=args.responses_topic) ) diff --git a/dataflow/gemma-flex-template/noxfile_config.py b/dataflow/gemma-flex-template/noxfile_config.py index d483d3f479d..a3bee62803b 100644 --- a/dataflow/gemma-flex-template/noxfile_config.py +++ b/dataflow/gemma-flex-template/noxfile_config.py @@ -20,7 +20,5 @@ # The Python version used is defined by the Dockerfile and the job # submission enviornment must match. "ignored_versions": ["2.7", "3.6", "3.7", "3.8", "3.9", "3.11", "3.12"], - "envs": { - "PYTHONPATH": ".." - }, + "envs": {"PYTHONPATH": ".."}, } diff --git a/dataflow/gemma/custom_model_gemma.py b/dataflow/gemma/custom_model_gemma.py index fbf0b975057..456a9680e67 100644 --- a/dataflow/gemma/custom_model_gemma.py +++ b/dataflow/gemma/custom_model_gemma.py @@ -35,7 +35,7 @@ def __init__( self, model_name: str = "gemma_2B", ): - """ Implementation of the ModelHandler interface for Gemma using text as input. + """Implementation of the ModelHandler interface for Gemma using text as input. Example Usage:: @@ -48,7 +48,7 @@ def __init__( self._env_vars = {} def share_model_across_processes(self) -> bool: - """ Indicates if the model should be loaded once-per-VM rather than + """Indicates if the model should be loaded once-per-VM rather than once-per-worker-process on a VM. Because Gemma is a large language model, this will always return True to avoid OOM errors. """ @@ -62,7 +62,7 @@ def run_inference( self, batch: Sequence[str], model: GemmaCausalLM, - inference_args: Optional[dict[str, Any]] = None + inference_args: Optional[dict[str, Any]] = None, ) -> Iterable[PredictionResult]: """Runs inferences on a batch of text strings. 
@@ -85,7 +85,8 @@ def run_inference( class FormatOutput(beam.DoFn): def process(self, element, *args, **kwargs): yield "Input: {input}, Output: {output}".format( - input=element.example, output=element.inference) + input=element.example, output=element.inference + ) if __name__ == "__main__": @@ -119,13 +120,16 @@ def process(self, element, *args, **kwargs): pipeline = beam.Pipeline(options=beam_options) _ = ( - pipeline | "Read Topic" >> - beam.io.ReadFromPubSub(subscription=args.messages_subscription) + pipeline + | "Read Topic" + >> beam.io.ReadFromPubSub(subscription=args.messages_subscription) | "Parse" >> beam.Map(lambda x: x.decode("utf-8")) - | "RunInference-Gemma" >> RunInference( + | "RunInference-Gemma" + >> RunInference( GemmaModelHandler(args.model_path) ) # Send the prompts to the model and get responses. | "Format Output" >> beam.ParDo(FormatOutput()) # Format the output. - | "Publish Result" >> - beam.io.gcp.pubsub.WriteStringsToPubSub(topic=args.responses_topic)) + | "Publish Result" + >> beam.io.gcp.pubsub.WriteStringsToPubSub(topic=args.responses_topic) + ) pipeline.run() diff --git a/dataflow/gemma/e2e_test.py b/dataflow/gemma/e2e_test.py index e2510716f4b..3d3791b5be5 100644 --- a/dataflow/gemma/e2e_test.py +++ b/dataflow/gemma/e2e_test.py @@ -70,8 +70,9 @@ def messages_topic(pubsub_topic: Callable[[str], str]) -> str: @pytest.fixture(scope="session") -def messages_subscription(pubsub_subscription: Callable[[str, str], str], - messages_topic: str) -> str: +def messages_subscription( + pubsub_subscription: Callable[[str, str], str], messages_topic: str +) -> str: return pubsub_subscription("messages", messages_topic) @@ -81,20 +82,21 @@ def responses_topic(pubsub_topic: Callable[[str], str]) -> str: @pytest.fixture(scope="session") -def responses_subscription(pubsub_subscription: Callable[[str, str], str], - responses_topic: str) -> str: +def responses_subscription( + pubsub_subscription: Callable[[str, str], str], responses_topic: str +) -> str: return pubsub_subscription("responses", responses_topic) @pytest.fixture(scope="session") def dataflow_job( - project: str, - bucket_name: str, - location: str, - unique_name: str, - container_image: str, - messages_subscription: str, - responses_topic: str, + project: str, + bucket_name: str, + location: str, + unique_name: str, + container_image: str, + messages_subscription: str, + responses_topic: str, ) -> Iterator[str]: # Launch the streaming Dataflow pipeline. 
conftest.run_cmd( @@ -127,20 +129,18 @@ def dataflow_job( @pytest.mark.timeout(3600) def test_pipeline_dataflow( - project: str, - location: str, - dataflow_job: str, - messages_topic: str, - responses_subscription: str, + project: str, + location: str, + dataflow_job: str, + messages_topic: str, + responses_subscription: str, ) -> None: print(f"Waiting for the Dataflow workers to start: {dataflow_job}") conftest.wait_until( - lambda: conftest.dataflow_num_workers(project, location, dataflow_job) - > 0, + lambda: conftest.dataflow_num_workers(project, location, dataflow_job) > 0, "workers are running", ) - num_workers = conftest.dataflow_num_workers(project, location, - dataflow_job) + num_workers = conftest.dataflow_num_workers(project, location, dataflow_job) print(f"Dataflow job num_workers: {num_workers}") messages = ["This is a test for a Python sample."] diff --git a/dataflow/gemma/noxfile_config.py b/dataflow/gemma/noxfile_config.py index 32432cccd4a..a540525478d 100644 --- a/dataflow/gemma/noxfile_config.py +++ b/dataflow/gemma/noxfile_config.py @@ -20,7 +20,5 @@ # The Python version used is defined by the Dockerfile and the job # submission enviornment must match. "ignored_versions": ["2.7", "3.6", "3.7", "3.8", "3.9", "3.10", "3.12"], - "envs": { - "PYTHONPATH": ".." - }, + "envs": {"PYTHONPATH": ".."}, } diff --git a/dataflow/snippets/read_kafka_multi_topic.py b/dataflow/snippets/read_kafka_multi_topic.py index a3d215bb061..bcb400bf4bb 100644 --- a/dataflow/snippets/read_kafka_multi_topic.py +++ b/dataflow/snippets/read_kafka_multi_topic.py @@ -31,38 +31,47 @@ def read_from_kafka() -> None: class MyOptions(PipelineOptions): @staticmethod def _add_argparse_args(parser: argparse.ArgumentParser) -> None: - parser.add_argument('--bootstrap_server') - parser.add_argument('--output') + parser.add_argument("--bootstrap_server") + parser.add_argument("--output") options = MyOptions() with beam.Pipeline(options=options) as pipeline: # Read from two Kafka topics. - all_topics = pipeline | ReadFromKafka(consumer_config={ - "bootstrap.servers": options.bootstrap_server - }, + all_topics = pipeline | ReadFromKafka( + consumer_config={"bootstrap.servers": options.bootstrap_server}, topics=["topic1", "topic2"], with_metadata=True, max_num_records=10, - start_read_time=0 + start_read_time=0, ) # Filter messages from one topic into one branch of the pipeline. - (all_topics - | beam.Filter(lambda message: message.topic == 'topic1') - | beam.Map(lambda message: message.value.decode('utf-8')) - | "Write topic1" >> WriteToText( - file_path_prefix=options.output + '/topic1/output', - file_name_suffix='.txt', - num_shards=1)) + ( + all_topics + | beam.Filter(lambda message: message.topic == "topic1") + | beam.Map(lambda message: message.value.decode("utf-8")) + | "Write topic1" + >> WriteToText( + file_path_prefix=options.output + "/topic1/output", + file_name_suffix=".txt", + num_shards=1, + ) + ) # Filter messages from the other topic. 
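# The branch below mirrors the topic1 branch above: with_metadata=True makes
# ReadFromKafka emit records that carry their source topic, which is what
# lets a plain beam.Filter on message.topic split a single read into
# per-topic branches -- e.g., for an additional topic (illustrative name):
#
#     topic3 = (
#         all_topics
#         | beam.Filter(lambda message: message.topic == "topic3")
#         | beam.Map(lambda message: message.value.decode("utf-8"))
#     )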
- (all_topics - | beam.Filter(lambda message: message.topic == 'topic2') - | beam.Map(lambda message: message.value.decode('utf-8')) - | "Write topic2" >> WriteToText( - file_path_prefix=options.output + '/topic2/output', - file_name_suffix='.txt', - num_shards=1)) + ( + all_topics + | beam.Filter(lambda message: message.topic == "topic2") + | beam.Map(lambda message: message.value.decode("utf-8")) + | "Write topic2" + >> WriteToText( + file_path_prefix=options.output + "/topic2/output", + file_name_suffix=".txt", + num_shards=1, + ) + ) + + # [END dataflow_kafka_read_multi_topic] diff --git a/dataflow/snippets/tests/test_read_kafka.py b/dataflow/snippets/tests/test_read_kafka.py index 0c62c5e4491..50ba68c396a 100644 --- a/dataflow/snippets/tests/test_read_kafka.py +++ b/dataflow/snippets/tests/test_read_kafka.py @@ -26,23 +26,25 @@ import pytest -BOOTSTRAP_SERVER = 'localhost:9092' -TOPIC_NAMES = ['topic1', 'topic2'] -CONTAINER_IMAGE_NAME = 'kafka-pipeline:1' +BOOTSTRAP_SERVER = "localhost:9092" +TOPIC_NAMES = ["topic1", "topic2"] +CONTAINER_IMAGE_NAME = "kafka-pipeline:1" -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def docker_client() -> DockerClient: # Build a container image for the pipeline. client = docker.from_env() - client.images.build(path='./', tag=CONTAINER_IMAGE_NAME) + client.images.build(path="./", tag=CONTAINER_IMAGE_NAME) yield client -@pytest.fixture(scope='module', autouse=True) +@pytest.fixture(scope="module", autouse=True) def kafka_container(docker_client: DockerClient) -> None: # Start a containerized Kafka server. - container = docker_client.containers.run('apache/kafka:3.7.0', network_mode='host', detach=True) + container = docker_client.containers.run( + "apache/kafka:3.7.0", network_mode="host", detach=True + ) try: create_topics() send_messages(TOPIC_NAMES[0]) @@ -54,7 +56,7 @@ def kafka_container(docker_client: DockerClient) -> None: @pytest.fixture def file_name_prefix() -> str: - return f'output-{uuid.uuid4()}' + return f"output-{uuid.uuid4()}" def create_topics() -> None: @@ -63,8 +65,12 @@ def create_topics() -> None: try: client = KafkaAdminClient(bootstrap_servers=BOOTSTRAP_SERVER) topics = [] - topics.append(NewTopic(name=TOPIC_NAMES[0], num_partitions=1, replication_factor=1)) - topics.append(NewTopic(name=TOPIC_NAMES[1], num_partitions=1, replication_factor=1)) + topics.append( + NewTopic(name=TOPIC_NAMES[0], num_partitions=1, replication_factor=1) + ) + topics.append( + NewTopic(name=TOPIC_NAMES[1], num_partitions=1, replication_factor=1) + ) client.create_topics(topics) break except NoBrokersAvailable: @@ -75,43 +81,51 @@ def send_messages(topic: str) -> None: # Send some messages to Kafka producer = KafkaProducer(bootstrap_servers=BOOTSTRAP_SERVER) for i in range(0, 5): - message = f'{topic}-{i}' + message = f"{topic}-{i}" producer.send(topic, message.encode()) def verify_output(file_name: str, topic: str) -> None: # Verify the pipeline wrote the Kafka messages to the output file. - with open(file_name, 'r') as f: + with open(file_name, "r") as f: text = f.read() for i in range(0, 5): - assert f'{topic}-{i}' in text + assert f"{topic}-{i}" in text -def test_read_kafka(docker_client: DockerClient, tmp_path: Path, file_name_prefix: str) -> None: +def test_read_kafka( + docker_client: DockerClient, tmp_path: Path, file_name_prefix: str +) -> None: topic = TOPIC_NAMES[0] # Run the containerized Dataflow pipeline. 
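# Note: with the default detach=False, docker-py's containers.run waits for
# the container to exit and raises ContainerError on a nonzero exit code, so
# the verification below only runs after the pipeline has flushed its output.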
docker_client.containers.run( image=CONTAINER_IMAGE_NAME, - command=f'/pipeline/read_kafka.py --output /out/{file_name_prefix} --bootstrap_server {BOOTSTRAP_SERVER} --topic {topic}', - volumes=['/var/run/docker.sock:/var/run/docker.sock', f'{tmp_path}/:/out'], - network_mode='host', - entrypoint='python') + command=f"/pipeline/read_kafka.py --output /out/{file_name_prefix} --bootstrap_server {BOOTSTRAP_SERVER} --topic {topic}", + volumes=["/var/run/docker.sock:/var/run/docker.sock", f"{tmp_path}/:/out"], + network_mode="host", + entrypoint="python", + ) # Verify the pipeline wrote the Kafka messages to the output file. - verify_output(f'{tmp_path}/{file_name_prefix}-00000-of-00001.txt', topic) + verify_output(f"{tmp_path}/{file_name_prefix}-00000-of-00001.txt", topic) -def test_read_kafka_multi_topic(docker_client: DockerClient, tmp_path: Path, file_name_prefix: str) -> None: +def test_read_kafka_multi_topic( + docker_client: DockerClient, tmp_path: Path, file_name_prefix: str +) -> None: # Run the containerized Dataflow pipeline. docker_client.containers.run( image=CONTAINER_IMAGE_NAME, - command=f'/pipeline/read_kafka_multi_topic.py --output /out/{file_name_prefix} --bootstrap_server {BOOTSTRAP_SERVER}', - volumes=['/var/run/docker.sock:/var/run/docker.sock', f'{tmp_path}/:/out'], - network_mode='host', - entrypoint='python') + command=f"/pipeline/read_kafka_multi_topic.py --output /out/{file_name_prefix} --bootstrap_server {BOOTSTRAP_SERVER}", + volumes=["/var/run/docker.sock:/var/run/docker.sock", f"{tmp_path}/:/out"], + network_mode="host", + entrypoint="python", + ) # Verify the pipeline wrote the Kafka messages to the output files. # This code snippet writes outputs to separate directories based on the topic name. for topic in TOPIC_NAMES: - verify_output(f'{tmp_path}/{file_name_prefix}/{topic}/output-00000-of-00001.txt', topic) + verify_output( + f"{tmp_path}/{file_name_prefix}/{topic}/output-00000-of-00001.txt", topic + ) diff --git a/endpoints/bookstore-grpc-transcoding/bookstore_pb2.py b/endpoints/bookstore-grpc-transcoding/bookstore_pb2.py index 5d008db5fec..22246da0d6f 100644 --- a/endpoints/bookstore-grpc-transcoding/bookstore_pb2.py +++ b/endpoints/bookstore-grpc-transcoding/bookstore_pb2.py @@ -605,7 +605,7 @@ (_message.Message,), dict( DESCRIPTOR=_SHELF, - __module__="bookstore_pb2" + __module__="bookstore_pb2", # @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.Shelf) ), ) @@ -616,7 +616,7 @@ (_message.Message,), dict( DESCRIPTOR=_BOOK, - __module__="bookstore_pb2" + __module__="bookstore_pb2", # @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.Book) ), ) @@ -627,7 +627,7 @@ (_message.Message,), dict( DESCRIPTOR=_LISTSHELVESRESPONSE, - __module__="bookstore_pb2" + __module__="bookstore_pb2", # @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.ListShelvesResponse) ), ) @@ -638,7 +638,7 @@ (_message.Message,), dict( DESCRIPTOR=_CREATESHELFREQUEST, - __module__="bookstore_pb2" + __module__="bookstore_pb2", # @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.CreateShelfRequest) ), ) @@ -649,7 +649,7 @@ (_message.Message,), dict( DESCRIPTOR=_GETSHELFREQUEST, - __module__="bookstore_pb2" + __module__="bookstore_pb2", # @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.GetShelfRequest) ), ) @@ -660,7 +660,7 @@ (_message.Message,), dict( DESCRIPTOR=_DELETESHELFREQUEST, - __module__="bookstore_pb2" + __module__="bookstore_pb2", # 
@@protoc_insertion_point(class_scope:endpoints.examples.bookstore.DeleteShelfRequest) ), ) @@ -671,7 +671,7 @@ (_message.Message,), dict( DESCRIPTOR=_LISTBOOKSREQUEST, - __module__="bookstore_pb2" + __module__="bookstore_pb2", # @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.ListBooksRequest) ), ) @@ -682,7 +682,7 @@ (_message.Message,), dict( DESCRIPTOR=_LISTBOOKSRESPONSE, - __module__="bookstore_pb2" + __module__="bookstore_pb2", # @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.ListBooksResponse) ), ) @@ -693,7 +693,7 @@ (_message.Message,), dict( DESCRIPTOR=_CREATEBOOKREQUEST, - __module__="bookstore_pb2" + __module__="bookstore_pb2", # @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.CreateBookRequest) ), ) @@ -704,7 +704,7 @@ (_message.Message,), dict( DESCRIPTOR=_GETBOOKREQUEST, - __module__="bookstore_pb2" + __module__="bookstore_pb2", # @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.GetBookRequest) ), ) @@ -715,7 +715,7 @@ (_message.Message,), dict( DESCRIPTOR=_DELETEBOOKREQUEST, - __module__="bookstore_pb2" + __module__="bookstore_pb2", # @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.DeleteBookRequest) ), ) diff --git a/endpoints/bookstore-grpc/bookstore_pb2.py b/endpoints/bookstore-grpc/bookstore_pb2.py index 0578be9832d..9fe1e986b1a 100644 --- a/endpoints/bookstore-grpc/bookstore_pb2.py +++ b/endpoints/bookstore-grpc/bookstore_pb2.py @@ -603,7 +603,7 @@ (_message.Message,), dict( DESCRIPTOR=_SHELF, - __module__="bookstore_pb2" + __module__="bookstore_pb2", # @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.Shelf) ), ) @@ -614,7 +614,7 @@ (_message.Message,), dict( DESCRIPTOR=_BOOK, - __module__="bookstore_pb2" + __module__="bookstore_pb2", # @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.Book) ), ) @@ -625,7 +625,7 @@ (_message.Message,), dict( DESCRIPTOR=_LISTSHELVESRESPONSE, - __module__="bookstore_pb2" + __module__="bookstore_pb2", # @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.ListShelvesResponse) ), ) @@ -636,7 +636,7 @@ (_message.Message,), dict( DESCRIPTOR=_CREATESHELFREQUEST, - __module__="bookstore_pb2" + __module__="bookstore_pb2", # @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.CreateShelfRequest) ), ) @@ -647,7 +647,7 @@ (_message.Message,), dict( DESCRIPTOR=_GETSHELFREQUEST, - __module__="bookstore_pb2" + __module__="bookstore_pb2", # @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.GetShelfRequest) ), ) @@ -658,7 +658,7 @@ (_message.Message,), dict( DESCRIPTOR=_DELETESHELFREQUEST, - __module__="bookstore_pb2" + __module__="bookstore_pb2", # @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.DeleteShelfRequest) ), ) @@ -669,7 +669,7 @@ (_message.Message,), dict( DESCRIPTOR=_LISTBOOKSREQUEST, - __module__="bookstore_pb2" + __module__="bookstore_pb2", # @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.ListBooksRequest) ), ) @@ -680,7 +680,7 @@ (_message.Message,), dict( DESCRIPTOR=_LISTBOOKSRESPONSE, - __module__="bookstore_pb2" + __module__="bookstore_pb2", # @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.ListBooksResponse) ), ) @@ -691,7 +691,7 @@ (_message.Message,), dict( DESCRIPTOR=_CREATEBOOKREQUEST, - __module__="bookstore_pb2" + __module__="bookstore_pb2", # @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.CreateBookRequest) ), ) @@ -702,7 +702,7 @@ (_message.Message,), dict( DESCRIPTOR=_GETBOOKREQUEST, - 
__module__="bookstore_pb2" + __module__="bookstore_pb2", # @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.GetBookRequest) ), ) @@ -713,7 +713,7 @@ (_message.Message,), dict( DESCRIPTOR=_DELETEBOOKREQUEST, - __module__="bookstore_pb2" + __module__="bookstore_pb2", # @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.DeleteBookRequest) ), ) diff --git a/endpoints/getting-started-grpc/helloworld_pb2.py b/endpoints/getting-started-grpc/helloworld_pb2.py index 9d0bfa9ed68..20c81fafdd3 100644 --- a/endpoints/getting-started-grpc/helloworld_pb2.py +++ b/endpoints/getting-started-grpc/helloworld_pb2.py @@ -124,7 +124,7 @@ (_message.Message,), dict( DESCRIPTOR=_HELLOREQUEST, - __module__="helloworld_pb2" + __module__="helloworld_pb2", # @@protoc_insertion_point(class_scope:helloworld.HelloRequest) ), ) @@ -135,7 +135,7 @@ (_message.Message,), dict( DESCRIPTOR=_HELLOREPLY, - __module__="helloworld_pb2" + __module__="helloworld_pb2", # @@protoc_insertion_point(class_scope:helloworld.HelloReply) ), ) diff --git a/functions/v2/deploy-function/deploy_function.py b/functions/v2/deploy-function/deploy_function.py index 465c557854e..c8ef1757135 100644 --- a/functions/v2/deploy-function/deploy_function.py +++ b/functions/v2/deploy-function/deploy_function.py @@ -93,6 +93,7 @@ def create_cloud_function( response = operation.result() print(response) + # [END functions_create_function_v2] diff --git a/functions/v2/label_gce_instance/main.py b/functions/v2/label_gce_instance/main.py index e278fc2d54e..0d0d805f620 100644 --- a/functions/v2/label_gce_instance/main.py +++ b/functions/v2/label_gce_instance/main.py @@ -65,10 +65,10 @@ def label_gce_instance(cloudevent): "zone": instance_zone, "instance": instance_name, } - request_init[ - "instances_set_labels_request_resource" - ] = compute.InstancesSetLabelsRequest( - label_fingerprint=instance.label_fingerprint, labels={"creator": creator} + request_init["instances_set_labels_request_resource"] = ( + compute.InstancesSetLabelsRequest( + label_fingerprint=instance.label_fingerprint, labels={"creator": creator} + ) ) request = compute.SetLabelsInstanceRequest(request_init) diff --git a/generative_ai/batch_predict/test_batch_predict_examples.py b/generative_ai/batch_predict/test_batch_predict_examples.py index 6306a0c2fdf..ef423f615bf 100644 --- a/generative_ai/batch_predict/test_batch_predict_examples.py +++ b/generative_ai/batch_predict/test_batch_predict_examples.py @@ -99,6 +99,6 @@ def test_batch_gemini_predict_bigquery(output_folder: pytest.fixture()) -> None: job = _main_test( test_func=lambda: gemini_batch_predict_bigquery.batch_predict_gemini_createjob( output_uri - ) + ) ) assert OUTPUT_TABLE in job.output_location diff --git a/generative_ai/prompts/prompt_template.py b/generative_ai/prompts/prompt_template.py index cc253aa02a8..24ba4cfac68 100644 --- a/generative_ai/prompts/prompt_template.py +++ b/generative_ai/prompts/prompt_template.py @@ -32,7 +32,7 @@ def prompt_template_example() -> list[GenerationResponse]: variables = [ {"animal": "Eagles", "activity": "eat berries"}, {"animal": "Coyotes", "activity": "jump"}, - {"animal": "Squirrels", "activity": "fly"} + {"animal": "Squirrels", "activity": "fly"}, ] # define prompt template @@ -40,7 +40,7 @@ def prompt_template_example() -> list[GenerationResponse]: prompt_data="Do {animal} {activity}?", model_name="gemini-1.5-flash-002", variables=variables, - system_instruction="You are a helpful zoologist" + system_instruction="You are a helpful zoologist", # 
generation_config=generation_config, # Optional # safety_settings=safety_settings, # Optional ) @@ -57,8 +57,8 @@ def prompt_template_example() -> list[GenerationResponse]: print(response.text, end="") # Example response - # Assembled prompt replacing: 1 instances of variable animal, 1 instances of variable activity - # Eagles are primarily carnivorous. While they might *accidentally* ingest a berry...... + # Assembled prompt replacing: 1 instances of variable animal, 1 instances of variable activity + # Eagles are primarily carnivorous. While they might *accidentally* ingest a berry...... # [END generativeaionvertexai_prompt_template] return responses diff --git a/generative_ai/understand_audio/transcription_example.py b/generative_ai/understand_audio/transcription_example.py index 80550a0a210..9daaed90b6a 100644 --- a/generative_ai/understand_audio/transcription_example.py +++ b/generative_ai/understand_audio/transcription_example.py @@ -40,7 +40,9 @@ def transcript_audio() -> str: contents = [audio_file, prompt] - response = model.generate_content(contents, generation_config=GenerationConfig(audio_timestamp=True)) + response = model.generate_content( + contents, generation_config=GenerationConfig(audio_timestamp=True) + ) print(response.text) # Example response: diff --git a/kubernetes_engine/django_tutorial/polls/migrations/0001_initial.py b/kubernetes_engine/django_tutorial/polls/migrations/0001_initial.py index d89be76e4eb..2738baca38f 100644 --- a/kubernetes_engine/django_tutorial/polls/migrations/0001_initial.py +++ b/kubernetes_engine/django_tutorial/polls/migrations/0001_initial.py @@ -24,36 +24,46 @@ class Migration(migrations.Migration): initial = True - dependencies = [ - ] + dependencies = [] operations = [ migrations.CreateModel( - name='Choice', + name="Choice", fields=[ - ('id', models.AutoField( - auto_created=True, primary_key=True, serialize=False, - verbose_name='ID')), - ('choice_text', models.CharField(max_length=200)), - ('votes', models.IntegerField(default=0)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("choice_text", models.CharField(max_length=200)), + ("votes", models.IntegerField(default=0)), ], ), migrations.CreateModel( - name='Question', + name="Question", fields=[ - ('id', models.AutoField( - auto_created=True, primary_key=True, serialize=False, - verbose_name='ID')), - ('question_text', models.CharField(max_length=200)), - ('pub_date', models.DateTimeField( - verbose_name=b'date published')), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("question_text", models.CharField(max_length=200)), + ("pub_date", models.DateTimeField(verbose_name=b"date published")), ], ), migrations.AddField( - model_name='choice', - name='question', + model_name="choice", + name="question", field=models.ForeignKey( - on_delete=django.db.models.deletion.CASCADE, - to='polls.Question'), + on_delete=django.db.models.deletion.CASCADE, to="polls.Question" + ), ), ] diff --git a/managedkafka/snippets/consumergroups/delete_consumer_group.py b/managedkafka/snippets/consumergroups/delete_consumer_group.py index caf08628eab..a6fd00498ab 100644 --- a/managedkafka/snippets/consumergroups/delete_consumer_group.py +++ b/managedkafka/snippets/consumergroups/delete_consumer_group.py @@ -54,6 +54,8 @@ def delete_consumer_group( client.delete_consumer_group(request=request) print("Deleted consumer group") except NotFound as e: - 
print(f"Failed to delete consumer group {consumer_group_id} with error: {e.message}") + print( + f"Failed to delete consumer group {consumer_group_id} with error: {e.message}" + ) # [END managedkafka_delete_consumergroup] diff --git a/managedkafka/snippets/consumergroups/get_consumer_group.py b/managedkafka/snippets/consumergroups/get_consumer_group.py index 3c4be00866b..98acf04f0d3 100644 --- a/managedkafka/snippets/consumergroups/get_consumer_group.py +++ b/managedkafka/snippets/consumergroups/get_consumer_group.py @@ -54,6 +54,8 @@ def get_consumer_group( consumer_group = client.get_consumer_group(request=request) print("Got consumer group:", consumer_group) except NotFound as e: - print(f"Failed to get consumer group {consumer_group_id} with error: {e.message}") + print( + f"Failed to get consumer group {consumer_group_id} with error: {e.message}" + ) # [END managedkafka_get_consumergroup] diff --git a/managedkafka/snippets/consumergroups/update_consumer_group.py b/managedkafka/snippets/consumergroups/update_consumer_group.py index e2cb847a8ad..7b8832faa73 100644 --- a/managedkafka/snippets/consumergroups/update_consumer_group.py +++ b/managedkafka/snippets/consumergroups/update_consumer_group.py @@ -75,6 +75,8 @@ def update_consumer_group( response = client.update_consumer_group(request=request) print("Updated consumer group:", response) except NotFound as e: - print(f"Failed to update consumer group {consumer_group_id} with error: {e.message}") + print( + f"Failed to update consumer group {consumer_group_id} with error: {e.message}" + ) # [END managedkafka_update_consumergroup] diff --git a/memorystore/memcache/quickstart.py b/memorystore/memcache/quickstart.py index 8a7b05d3eb4..8ca6ece8253 100644 --- a/memorystore/memcache/quickstart.py +++ b/memorystore/memcache/quickstart.py @@ -50,7 +50,9 @@ def create_instance(project_id: str, location_id: str, instance_id: str) -> None print(f"Instance {instance_id} was created") -def get_instance(project_id: str, location_id: str, instance_id: str) -> memcache_v1.Instance: +def get_instance( + project_id: str, location_id: str, instance_id: str +) -> memcache_v1.Instance: """ Get a Memcached instance. @@ -62,9 +64,7 @@ def get_instance(project_id: str, location_id: str, instance_id: str) -> memcach client = memcache_v1.CloudMemcacheClient() name = f"projects/{project_id}/locations/{location_id}/instances/{instance_id}" - request = memcache_v1.GetInstanceRequest( - name=name - ) + request = memcache_v1.GetInstanceRequest(name=name) try: instance = client.get_instance(request=request) @@ -106,9 +106,7 @@ def delete_instance(project_id: str, location_id: str, instance_id: str) -> None client = memcache_v1.CloudMemcacheClient() name = f"projects/{project_id}/locations/{location_id}/instances/{instance_id}" - request = memcache_v1.DeleteInstanceRequest( - name=name - ) + request = memcache_v1.DeleteInstanceRequest(name=name) try: operation = client.delete_instance(request=request) @@ -132,6 +130,7 @@ def quickstart(project_id: str, location_id: str, instance_id: str) -> None: update_instance(instance, "new_name") delete_instance(project_id, location_id, instance_id) + # [END memorystorememcache_quickstart] diff --git a/monitoring/opencensus/main.py b/monitoring/opencensus/main.py index 0330f1fd136..0862ad1ba4a 100644 --- a/monitoring/opencensus/main.py +++ b/monitoring/opencensus/main.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
""" + import random import time diff --git a/monitoring/opencensus/main_test.py b/monitoring/opencensus/main_test.py index d22f7deae7a..ee8aae5c504 100644 --- a/monitoring/opencensus/main_test.py +++ b/monitoring/opencensus/main_test.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """ + import main diff --git a/run/logging-manual/main.py b/run/logging-manual/main.py index 1b4582115ed..4984db9953c 100644 --- a/run/logging-manual/main.py +++ b/run/logging-manual/main.py @@ -48,9 +48,9 @@ def index(): if trace_header and PROJECT: trace = trace_header.split("/") - global_log_fields[ - "logging.googleapis.com/trace" - ] = f"projects/{PROJECT}/traces/{trace[0]}" + global_log_fields["logging.googleapis.com/trace"] = ( + f"projects/{PROJECT}/traces/{trace[0]}" + ) # Complete a structured log entry. entry = dict( diff --git a/securitycenter/snippets_v2/snippets_create_client_v2.py b/securitycenter/snippets_v2/snippets_create_client_v2.py index 5b41d6a7d57..dab50e3a65f 100644 --- a/securitycenter/snippets_v2/snippets_create_client_v2.py +++ b/securitycenter/snippets_v2/snippets_create_client_v2.py @@ -28,6 +28,12 @@ def create_client_with_endpoint(api_endpoint) -> securitycenter_v2.SecurityCente regional_client = securitycenter_v2.SecurityCenterClient( client_options={"api_endpoint": api_endpoint} ) - print("Regional client initiated with endpoint: {}".format(regional_client.api_endpoint)) + print( + "Regional client initiated with endpoint: {}".format( + regional_client.api_endpoint + ) + ) return regional_client + + # [END securitycenter_set_client_endpoint_v2] diff --git a/securitycenter/snippets_v2/snippets_create_client_v2_test.py b/securitycenter/snippets_v2/snippets_create_client_v2_test.py index b7884dae47f..8098caeb39f 100644 --- a/securitycenter/snippets_v2/snippets_create_client_v2_test.py +++ b/securitycenter/snippets_v2/snippets_create_client_v2_test.py @@ -16,5 +16,6 @@ def test_create_client_with_endpoint(): client = snippets_create_client_v2.create_client_with_endpoint( - "securitycenter.me-central2.rep.googleapis.com") + "securitycenter.me-central2.rep.googleapis.com" + ) assert client.api_endpoint == "securitycenter.me-central2.rep.googleapis.com" diff --git a/service_extensions/__init__.py b/service_extensions/__init__.py index 7355ce3bc3a..946e706ce20 100644 --- a/service_extensions/__init__.py +++ b/service_extensions/__init__.py @@ -10,4 +10,4 @@ # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and -# limitations under the License. \ No newline at end of file +# limitations under the License. 
diff --git a/speech/microphone/transcribe_streaming_infinite_v2.py b/speech/microphone/transcribe_streaming_infinite_v2.py index 54b48439131..278461fc1e1 100644 --- a/speech/microphone/transcribe_streaming_infinite_v2.py +++ b/speech/microphone/transcribe_streaming_infinite_v2.py @@ -302,7 +302,7 @@ def main(project_id: str) -> None: explicit_decoding_config=cloud_speech_types.ExplicitDecodingConfig( sample_rate_hertz=SAMPLE_RATE, encoding=cloud_speech_types.ExplicitDecodingConfig.AudioEncoding.LINEAR16, - audio_channel_count=1 + audio_channel_count=1, ), language_codes=["en-US"], model="long", @@ -311,7 +311,7 @@ def main(project_id: str) -> None: config=recognition_config, streaming_features=cloud_speech_types.StreamingRecognitionFeatures( interim_results=True - ) + ), ) config_request = cloud_speech_types.StreamingRecognizeRequest( recognizer=f"projects/{project_id}/locations/global/recognizers/_", @@ -350,7 +350,8 @@ def requests(config: cloud_speech_types.RecognitionConfig, audio: list) -> list: # Transcribes the audio into text responses_iterator = client.streaming_recognize( - requests=requests(config_request, audio_generator)) + requests=requests(config_request, audio_generator) + ) listen_print_loop(responses_iterator, stream) diff --git a/storage/cost-analysis/storage_soft_delete_relative_cost_analyzer.py b/storage/cost-analysis/storage_soft_delete_relative_cost_analyzer.py index 1a6b3c482b8..3b37b6eaeb3 100644 --- a/storage/cost-analysis/storage_soft_delete_relative_cost_analyzer.py +++ b/storage/cost-analysis/storage_soft_delete_relative_cost_analyzer.py @@ -153,20 +153,19 @@ def calculate_soft_delete_costs( # 2. Update how you calculate 'relative_storage_class_cost' to factor in location soft_delete_ratio = data_point.point_data[0].values[0].double_value distribution_storage_class = bucket_name + " - " + storage_class - storage_class_ratio = storage_ratios_by_bucket.get( - distribution_storage_class - ) + storage_class_ratio = storage_ratios_by_bucket.get(distribution_storage_class) if storage_class_ratio is None: - missing_distribution_storage_class.append( - distribution_storage_class) - buckets.setdefault(bucket_name, []).append({ - # Include storage class and location data for additional plotting dimensions. - # "storage_class": storage_class, - # 'location': location, - "soft_delete_ratio": soft_delete_ratio, - "storage_class_ratio": storage_class_ratio, - "relative_storage_class_cost": get_relative_cost(storage_class), - }) + missing_distribution_storage_class.append(distribution_storage_class) + buckets.setdefault(bucket_name, []).append( + { + # Include storage class and location data for additional plotting dimensions. + # "storage_class": storage_class, + # 'location': location, + "soft_delete_ratio": soft_delete_ratio, + "storage_class_ratio": storage_class_ratio, + "relative_storage_class_cost": get_relative_cost(storage_class), + } + ) if missing_distribution_storage_class: print( @@ -237,7 +236,7 @@ def soft_delete_relative_cost_analyzer( agg_days: int = 30, lookback_days: int = 360, list_buckets: bool = False, - ) -> str | dict[str, float]: # Note potential string output +) -> str | dict[str, float]: # Note potential string output """Identifies buckets exceeding the relative cost threshold for enabling soft delete. Args: @@ -308,9 +307,7 @@ def soft_delete_relative_cost_analyzer_main() -> None: "--lookback_days", type=int, default=360, - help=( - "Time window (in days) for considering the how old the bucket to be." 
- ), + help=("Time window (in days) for considering the how old the bucket to be."), ) parser.add_argument( "--list", diff --git a/texttospeech/snippets/streaming_tts_quickstart.py b/texttospeech/snippets/streaming_tts_quickstart.py index b52f94cbbfe..ef4a43a22f8 100644 --- a/texttospeech/snippets/streaming_tts_quickstart.py +++ b/texttospeech/snippets/streaming_tts_quickstart.py @@ -23,27 +23,44 @@ def run_streaming_tts_quickstart(): # [START tts_synthezise_streaming] - """Synthesizes speech from a stream of input text. - """ + """Synthesizes speech from a stream of input text.""" from google.cloud import texttospeech import itertools client = texttospeech.TextToSpeechClient() # See https://cloud.google.com/text-to-speech/docs/voices for all voices. - streaming_config = texttospeech.StreamingSynthesizeConfig(voice=texttospeech.VoiceSelectionParams(name="en-US-Journey-D", language_code="en-US")) + streaming_config = texttospeech.StreamingSynthesizeConfig( + voice=texttospeech.VoiceSelectionParams( + name="en-US-Journey-D", language_code="en-US" + ) + ) # Set the config for your stream. The first request must contain your config, and then each subsequent request must contain text. - config_request = texttospeech.StreamingSynthesizeRequest(streaming_config=streaming_config) + config_request = texttospeech.StreamingSynthesizeRequest( + streaming_config=streaming_config + ) # Request generator. Consider using Gemini or another LLM with output streaming as a generator. def request_generator(): - yield texttospeech.StreamingSynthesizeRequest(input=texttospeech.StreamingSynthesisInput(text="Hello there. ")) - yield texttospeech.StreamingSynthesizeRequest(input=texttospeech.StreamingSynthesisInput(text="How are you ")) - yield texttospeech.StreamingSynthesizeRequest(input=texttospeech.StreamingSynthesisInput(text="today? It's ")) - yield texttospeech.StreamingSynthesizeRequest(input=texttospeech.StreamingSynthesisInput(text="such nice weather outside.")) + yield texttospeech.StreamingSynthesizeRequest( + input=texttospeech.StreamingSynthesisInput(text="Hello there. ") + ) + yield texttospeech.StreamingSynthesizeRequest( + input=texttospeech.StreamingSynthesisInput(text="How are you ") + ) + yield texttospeech.StreamingSynthesizeRequest( + input=texttospeech.StreamingSynthesisInput(text="today? It's ") + ) + yield texttospeech.StreamingSynthesizeRequest( + input=texttospeech.StreamingSynthesisInput( + text="such nice weather outside." + ) + ) - streaming_responses = client.streaming_synthesize(itertools.chain([config_request], request_generator())) + streaming_responses = client.streaming_synthesize( + itertools.chain([config_request], request_generator()) + ) for response in streaming_responses: print(f"Audio content size in bytes is: {len(response.audio_content)}") # [END tts_synthezise_streaming]