chore: fix typos (#760)
Ark-kun committed Mar 15, 2022
1 parent bb60e96 commit 6995e54
Showing 13 changed files with 25 additions and 25 deletions.
4 changes: 2 additions & 2 deletions README.rst
@@ -111,7 +111,7 @@ Initialize the SDK to store common configurations that you use with the SDK.
staging_bucket='gs://my_staging_bucket',
# custom google.auth.credentials.Credentials
- # environment default creds used if not set
+ # environment default credentials used if not set
credentials=my_credentials,
# customer managed encryption key resource name
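For orientation, a minimal sketch of the init call this hunk documents; the project, bucket, and key name are illustrative placeholders, not part of this commit:

```python
import google.auth
from google.cloud import aiplatform

# Environment default credentials; any google.auth.credentials.Credentials
# object (e.g. loaded from a service-account key file) can be passed instead.
my_credentials, _ = google.auth.default()

aiplatform.init(
    project="my-project",                     # placeholder
    location="us-central1",
    staging_bucket="gs://my_staging_bucket",
    credentials=my_credentials,               # environment default used if not set
    # encryption_spec_key_name="projects/.../cryptoKeys/my-key",  # CMEK, optional
)
```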
@@ -188,7 +188,7 @@ Please visit `Using a managed dataset in a custom training application`_ for a d

.. _Using a managed dataset in a custom training application: https://cloud.google.com/vertex-ai/docs/training/using-managed-datasets

- It must write the model artifact to the environment variable populated by the traing service:
+ It must write the model artifact to the environment variable populated by the training service:

.. code-block:: Python
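The README's code block itself is collapsed above. As a hedged sketch: the training service populates the AIP_MODEL_DIR environment variable (usually a gs:// URI), and the training script writes its artifact there. TensorFlow here is an illustrative framework choice, not the README's exact example:

```python
import os
import tensorflow as tf  # illustrative; any framework that can write to gs:// works

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="adam", loss="mse")

# AIP_MODEL_DIR is populated by the Vertex AI training service.
model.save(os.environ["AIP_MODEL_DIR"])
```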
10 changes: 5 additions & 5 deletions google/cloud/aiplatform/base.py
@@ -98,7 +98,7 @@ def log_create_complete(
cls (VertexAiResourceNoun):
Vertex AI Resource Noun class that is being created.
resource (proto.Message):
- Vertex AI Resourc proto.Message
+ Vertex AI Resource proto.Message
variable_name (str): Name of variable to use for code snippet
"""
self._logger.info(f"{cls.__name__} created. Resource name: {resource.name}")
@@ -121,7 +121,7 @@ def log_create_complete_with_getter(
cls (VertexAiResourceNoun):
Vertex AI Resource Noun class that is being created.
resource (proto.Message):
- Vertex AI Resourc proto.Message
+ Vertex AI Resource proto.Message
variable_name (str): Name of variable to use for code snippet
"""
self._logger.info(f"{cls.__name__} created. Resource name: {resource.name}")
@@ -462,7 +462,7 @@ def __init__(
Args:
project(str): Project of the resource noun.
location(str): The location of the resource noun.
- credentials(google.auth.crendentials.Crendentials): Optional custom
+ credentials(google.auth.credentials.Credentials): Optional custom
credentials to use when interacting with the resource noun.
resource_name(str): A fully-qualified resource name or ID.
"""
@@ -842,7 +842,7 @@ def __init__(
Args:
project (str): Optional. Project of the resource noun.
location (str): Optional. The location of the resource noun.
- credentials(google.auth.crendentials.Crendentials):
+ credentials(google.auth.credentials.Credentials):
Optional. Custom credentials to use when interacting with the
resource noun.
resource_name(str): A fully-qualified resource name or ID.
@@ -872,7 +872,7 @@ def _empty_constructor(
Args:
project (str): Optional. Project of the resource noun.
location (str): Optional. The location of the resource noun.
- credentials(google.auth.crendentials.Crendentials):
+ credentials(google.auth.credentials.Credentials):
Optional. Custom credentials to use when interacting with the
resource noun.
resource_name(str): A fully-qualified resource name or ID.
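The credentials type corrected throughout this file is the standard google.auth one; a hedged sketch of passing it to a public resource class (the resource name below is hypothetical):

```python
import google.auth
from google.cloud import aiplatform

credentials, _ = google.auth.default()  # any google.auth.credentials.Credentials

model = aiplatform.Model(
    model_name="projects/123/locations/us-central1/models/456",  # hypothetical
    credentials=credentials,
)
```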
4 changes: 2 additions & 2 deletions google/cloud/aiplatform/datasets/dataset.py
@@ -164,7 +164,7 @@ def create(
be picked randomly. Two DataItems are considered identical
if their content bytes are identical (e.g. image bytes or
pdf bytes). These labels will be overridden by Annotation
- labels specified inside index file refenced by
+ labels specified inside index file referenced by
``import_schema_uri``,
e.g. jsonl file.
project (str):
@@ -488,7 +488,7 @@ def import_data(
be picked randomly. Two DataItems are considered identical
if their content bytes are identical (e.g. image bytes or
pdf bytes). These labels will be overridden by Annotation
- labels specified inside index file refenced by
+ labels specified inside index file referenced by
``import_schema_uri``,
e.g. jsonl file.
sync (bool):
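A hedged sketch of the create/import call these docstrings (and the identical ones in the image, text, and video variants below) describe; the bucket, schema, and labels are placeholders:

```python
from google.cloud import aiplatform

dataset = aiplatform.ImageDataset.create(
    display_name="my-image-dataset",          # placeholder
    gcs_source="gs://my-bucket/index.jsonl",  # the index file referenced above
    import_schema_uri=aiplatform.schema.dataset.ioformat.image.single_label_classification,
    data_item_labels={"source": "batch-import"},  # overridden by labels in the index file
)
```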
2 changes: 1 addition & 1 deletion google/cloud/aiplatform/datasets/image_dataset.py
@@ -82,7 +82,7 @@ def create(
be picked randomly. Two DataItems are considered identical
if their content bytes are identical (e.g. image bytes or
pdf bytes). These labels will be overridden by Annotation
- labels specified inside index file refenced by
+ labels specified inside index file referenced by
``import_schema_uri``,
e.g. jsonl file.
project (str):
2 changes: 1 addition & 1 deletion google/cloud/aiplatform/datasets/text_dataset.py
@@ -89,7 +89,7 @@ def create(
be picked randomly. Two DataItems are considered identical
if their content bytes are identical (e.g. image bytes or
pdf bytes). These labels will be overridden by Annotation
- labels specified inside index file refenced by
+ labels specified inside index file referenced by
``import_schema_uri``,
e.g. jsonl file.
project (str):
2 changes: 1 addition & 1 deletion google/cloud/aiplatform/datasets/video_dataset.py
@@ -82,7 +82,7 @@ def create(
be picked randomly. Two DataItems are considered identical
if their content bytes are identical (e.g. image bytes or
pdf bytes). These labels will be overridden by Annotation
- labels specified inside index file refenced by
+ labels specified inside index file referenced by
``import_schema_uri``,
e.g. jsonl file.
project (str):
10 changes: 5 additions & 5 deletions google/cloud/aiplatform/jobs.py
@@ -325,7 +325,7 @@ def output_info(self,) -> Optional[aiplatform.gapic.BatchPredictionJob.OutputInf
"""Information describing the output of this job, including output location
into which prediction output is written.
- This is only available for batch predicition jobs that have run successfully.
+ This is only available for batch prediction jobs that have run successfully.
"""
self._assert_gca_resource_is_available()
return self._gca_resource.output_info
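A hedged sketch of reading output_info once a batch prediction job has run successfully; the model resource name and bucket paths are placeholders:

```python
from google.cloud import aiplatform

model = aiplatform.Model("projects/123/locations/us-central1/models/456")  # hypothetical
job = model.batch_predict(
    job_display_name="my-batch-job",
    gcs_source="gs://my-bucket/instances.jsonl",
    gcs_destination_prefix="gs://my-bucket/predictions",
    sync=True,
)

# Only populated for jobs that have run successfully.
if job.output_info is not None:
    print(job.output_info.gcs_output_directory)
```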
@@ -839,7 +839,7 @@ def __init__(
Args:
project(str): Project of the resource noun.
location(str): The location of the resource noun.
- credentials(google.auth.crendentials.Crendentials): Optional custom
+ credentials(google.auth.credentials.Credentials): Optional custom
credentials to use when interacting with the resource noun.
"""

@@ -1023,7 +1023,7 @@ def __init__(
encryption_spec_key_name: Optional[str] = None,
staging_bucket: Optional[str] = None,
):
"""Cosntruct a Custom Job with Worker Pool Specs.
"""Constructs a Custom Job with Worker Pool Specs.
```
Example usage:
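The docstring's own usage example is collapsed in this view; a hedged sketch of the constructor, with machine type and container image as placeholders:

```python
from google.cloud import aiplatform

worker_pool_specs = [
    {
        "machine_spec": {"machine_type": "n1-standard-4"},
        "replica_count": 1,
        "container_spec": {"image_uri": "gcr.io/my-project/my-trainer:latest"},  # placeholder
    }
]

job = aiplatform.CustomJob(
    display_name="my-custom-job",
    worker_pool_specs=worker_pool_specs,
    staging_bucket="gs://my_staging_bucket",
)
```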
@@ -1569,7 +1569,7 @@ def __init__(
Required. Configured CustomJob. The worker pool spec from this custom job
applies to the CustomJobs created in all the trials.
metric_spec: Dict[str, str]
- Required. Dicionary representing metrics to optimize. The dictionary key is the metric_id,
+ Required. Dictionary representing metrics to optimize. The dictionary key is the metric_id,
which is reported by your training job, and the dictionary value is the
optimization goal of the metric ('minimize' or 'maximize'). Example:
@@ -1594,7 +1594,7 @@ def __init__(
DoubleParameterSpec, IntegerParameterSpec, CategoricalParameterSpec, DiscreteParameterSpec
max_trial_count (int):
- Reuired. The desired total number of Trials.
+ Required. The desired total number of Trials.
parallel_trial_count (int):
Required. The desired number of Trials to run in parallel.
max_failed_trial_count (int):
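A hedged sketch tying the corrected metric_spec and the parameter descriptions together; the parameter ranges are illustrative, and `job` is the CustomJob sketched earlier:

```python
from google.cloud import aiplatform
from google.cloud.aiplatform import hyperparameter_tuning as hpt

tuning_job = aiplatform.HyperparameterTuningJob(
    display_name="my-tuning-job",
    custom_job=job,  # the CustomJob sketched above
    metric_spec={"loss": "minimize"},  # metric_id -> 'minimize' or 'maximize'
    parameter_spec={
        "learning_rate": hpt.DoubleParameterSpec(min=1e-4, max=1e-1, scale="log"),
        "batch_size": hpt.DiscreteParameterSpec(values=[16, 32, 64], scale="linear"),
    },
    max_trial_count=16,
    parallel_trial_count=4,
)
```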
4 changes: 2 additions & 2 deletions google/cloud/aiplatform/metadata/metadata.py
@@ -155,7 +155,7 @@ def log_metrics(self, metrics: Dict[str, Union[float, int]]):
Args:
metrics (Dict):
- Required. Metrics key/value pairs. Only flot and int are supported format for value.
+ Required. Metrics key/value pairs. Only float and int are supported format for value.
Raises:
TypeError: If value contains unsupported types.
ValueError: If Experiment or Run is not set.
@@ -263,7 +263,7 @@ def _validate_metrics_value_type(metrics: Dict[str, Union[float, int]]):
Args:
metrics (Dict):
- Required. Metrics key/value pairs. Only flot and int are supported format for value.
+ Required. Metrics key/value pairs. Only float and int are supported format for value.
Raises:
TypeError: If value contains unsupported types.
"""
2 changes: 1 addition & 1 deletion google/cloud/aiplatform/tensorboard/uploader_utils.py
@@ -377,7 +377,7 @@ def get_or_create(
Returns:
time_series (tensorboard_time_series.TensorboardTimeSeries):
- A new or existing tensorboard_time_series.TensorbaordTimeSeries.
+ A new or existing tensorboard_time_series.TensorboardTimeSeries.
Raises:
exceptions.InvalidArgument:
4 changes: 2 additions & 2 deletions google/cloud/aiplatform/training_jobs.py
@@ -435,7 +435,7 @@ def _create_input_data_config(
- AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test"
Raises:
ValueError: When more than 1 type of split configuration is passed or when
- the split configuartion passed is incompatible with the dataset schema.
+ the split configuration passed is incompatible with the dataset schema.
"""

input_data_config = None
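A hedged sketch of one valid split configuration; passing more than one style at once (fractions plus filters, say) raises the ValueError described above. The job, dataset, script, and container image are placeholders:

```python
from google.cloud import aiplatform

training_job = aiplatform.CustomTrainingJob(
    display_name="my-training-job",
    script_path="task.py",  # hypothetical trainer script
    container_uri="gcr.io/cloud-aiplatform/training/tf-cpu.2-8:latest",  # illustrative
)
ds = aiplatform.TabularDataset("projects/123/locations/us-central1/datasets/789")  # hypothetical

model = training_job.run(
    dataset=ds,
    training_fraction_split=0.8,    # exactly one split style per run
    validation_fraction_split=0.1,
    test_fraction_split=0.1,
    bigquery_destination="bq://my-project",  # surfaces AIP_*_DATA_URI to the container
)
```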
@@ -5811,7 +5811,7 @@ def __init__(
multiple objects in shots and segments. You can use these
models to track objects in your videos according to your
own pre-defined, custom labels.
"action_recognition" - A video action reconition model pinpoints
"action_recognition" - A video action recognition model pinpoints
the location of actions with short temporal durations (~1 second).
model_type: str = "CLOUD"
Required. One of the following:
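The model_type choices are collapsed below this line; a hedged sketch of the action-recognition training job described above (the display name is a placeholder):

```python
from google.cloud import aiplatform

job = aiplatform.AutoMLVideoTrainingJob(
    display_name="my-video-job",           # placeholder
    prediction_type="action_recognition",  # pinpoints short (~1 second) actions
    model_type="CLOUD",
)
```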
2 changes: 1 addition & 1 deletion google/cloud/aiplatform/utils/pipeline_utils.py
@@ -155,7 +155,7 @@ def _get_vertex_value(
inputs, or value is none.
"""
if value is None:
raise ValueError("None values should be filterd out.")
raise ValueError("None values should be filtered out.")

if name not in self._parameter_types:
raise ValueError(
2 changes: 1 addition & 1 deletion google/cloud/aiplatform/utils/source_utils.py
@@ -71,7 +71,7 @@ class _TrainingScriptPythonPackager:
packager = TrainingScriptPythonPackager('my_script.py', ['pandas', 'pytorch'])
gcs_path = packager.package_and_copy_to_gcs(
gcs_staging_dir='my-bucket',
- project='my-prject')
+ project='my-project')
module_name = packager.module_name
After installation, the package can be executed as:
2 changes: 1 addition & 1 deletion google/cloud/aiplatform/utils/worker_spec_utils.py
@@ -186,7 +186,7 @@ def chief_worker_pool(
reduction_server_replica_count: int = 0,
reduction_server_machine_type: str = None,
) -> "_DistributedTrainingSpec":
"""Parameterizes Config to support only chief with worker replicas.
"""Parametrizes Config to support only chief with worker replicas.
The first replica is assigned to the chief and the remainder to workers. All specs have the
same machine type, accelerator count, and accelerator type.
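A hedged sketch of the private helper this docstring belongs to; it is internal API, so the call below is illustrative only:

```python
from google.cloud.aiplatform.utils import worker_spec_utils

# One replica becomes the chief; the remaining three become workers.
spec = worker_spec_utils._DistributedTrainingSpec.chief_worker_pool(
    replica_count=4,
    machine_type="n1-standard-8",
    accelerator_count=0,
    accelerator_type="ACCELERATOR_TYPE_UNSPECIFIED",
)
pool_specs = spec.pool_specs  # consumed by CustomJob / training jobs
```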
