diff --git a/README.rst b/README.rst
index 21411c486c..c0bb3fd5b3 100644
--- a/README.rst
+++ b/README.rst
@@ -111,7 +111,7 @@ Initialize the SDK to store common configurations that you use with the SDK.
     staging_bucket='gs://my_staging_bucket',
 
     # custom google.auth.credentials.Credentials
-    # environment default creds used if not set
+    # environment default credentials used if not set
    credentials=my_credentials,
 
     # customer managed encryption key resource name
@@ -188,7 +188,7 @@ Please visit `Using a managed dataset in a custom training application`_ for a d
 
 .. _Using a managed dataset in a custom training application: https://cloud.google.com/vertex-ai/docs/training/using-managed-datasets
 
-It must write the model artifact to the environment variable populated by the traing service:
+It must write the model artifact to the environment variable populated by the training service:
 
 .. code-block:: Python
diff --git a/google/cloud/aiplatform/base.py b/google/cloud/aiplatform/base.py
index f9b70c3bd5..ace8e84b36 100644
--- a/google/cloud/aiplatform/base.py
+++ b/google/cloud/aiplatform/base.py
@@ -98,7 +98,7 @@ def log_create_complete(
             cls (VertexAiResourceNoun):
                 Vertex AI Resource Noun class that is being created.
             resource (proto.Message):
-                Vertex AI Resourc proto.Message
+                Vertex AI Resource proto.Message
             variable_name (str): Name of variable to use for code snippet
         """
         self._logger.info(f"{cls.__name__} created. Resource name: {resource.name}")
@@ -121,7 +121,7 @@ def log_create_complete_with_getter(
             cls (VertexAiResourceNoun):
                 Vertex AI Resource Noun class that is being created.
             resource (proto.Message):
-                Vertex AI Resourc proto.Message
+                Vertex AI Resource proto.Message
             variable_name (str): Name of variable to use for code snippet
         """
         self._logger.info(f"{cls.__name__} created. Resource name: {resource.name}")
@@ -462,7 +462,7 @@ def __init__(
         Args:
             project(str): Project of the resource noun.
             location(str): The location of the resource noun.
-            credentials(google.auth.crendentials.Crendentials): Optional custom
+            credentials(google.auth.credentials.Credentials): Optional custom
                 credentials to use when accessing interacting with resource noun.
             resource_name(str): A fully-qualified resource name or ID.
         """
@@ -842,7 +842,7 @@ def __init__(
         Args:
             project (str): Optional. Project of the resource noun.
             location (str): Optional. The location of the resource noun.
-            credentials(google.auth.crendentials.Crendentials):
+            credentials(google.auth.credentials.Credentials):
                 Optional. custom credentials to use when accessing interacting with
                 resource noun.
             resource_name(str): A fully-qualified resource name or ID.
@@ -872,7 +872,7 @@ def _empty_constructor(
         Args:
             project (str): Optional. Project of the resource noun.
             location (str): Optional. The location of the resource noun.
-            credentials(google.auth.crendentials.Crendentials):
+            credentials(google.auth.credentials.Credentials):
                 Optional. custom credentials to use when accessing interacting with
                 resource noun.
             resource_name(str): A fully-qualified resource name or ID.
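For context on the README hunk above: ``aiplatform.init`` takes all of these settings as optional keyword arguments, and anything left unset falls back to an environment default. A minimal sketch of the documented call (the project ID and bucket name are the README's placeholders):

.. code-block:: Python

    from google.cloud import aiplatform

    # Every argument is optional; unset values fall back to the environment
    # (gcloud config and Application Default Credentials).
    aiplatform.init(
        project='my-project',
        location='us-central1',
        staging_bucket='gs://my_staging_bucket',
        # custom google.auth.credentials.Credentials;
        # environment default credentials used if not set
        # credentials=my_credentials,
    )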
diff --git a/google/cloud/aiplatform/datasets/dataset.py b/google/cloud/aiplatform/datasets/dataset.py
index 30f518fb71..9adcaca7dc 100644
--- a/google/cloud/aiplatform/datasets/dataset.py
+++ b/google/cloud/aiplatform/datasets/dataset.py
@@ -164,7 +164,7 @@ def create(
                 be picked randomly. Two DataItems are considered identical
                 if their content bytes are identical (e.g. image bytes or
                 pdf bytes). These labels will be overridden by Annotation
-                labels specified inside index file refenced by
+                labels specified inside index file referenced by
                 ``import_schema_uri``, e.g. jsonl file.
             project (str):
@@ -488,7 +488,7 @@ def import_data(
                 be picked randomly. Two DataItems are considered identical
                 if their content bytes are identical (e.g. image bytes or
                 pdf bytes). These labels will be overridden by Annotation
-                labels specified inside index file refenced by
+                labels specified inside index file referenced by
                 ``import_schema_uri``, e.g. jsonl file.
             sync (bool):
diff --git a/google/cloud/aiplatform/datasets/image_dataset.py b/google/cloud/aiplatform/datasets/image_dataset.py
index bebc75beab..14b26cc6de 100644
--- a/google/cloud/aiplatform/datasets/image_dataset.py
+++ b/google/cloud/aiplatform/datasets/image_dataset.py
@@ -82,7 +82,7 @@ def create(
                 be picked randomly. Two DataItems are considered identical
                 if their content bytes are identical (e.g. image bytes or
                 pdf bytes). These labels will be overridden by Annotation
-                labels specified inside index file refenced by
+                labels specified inside index file referenced by
                 ``import_schema_uri``, e.g. jsonl file.
             project (str):
diff --git a/google/cloud/aiplatform/datasets/text_dataset.py b/google/cloud/aiplatform/datasets/text_dataset.py
index 140fd17335..8fa28c3f31 100644
--- a/google/cloud/aiplatform/datasets/text_dataset.py
+++ b/google/cloud/aiplatform/datasets/text_dataset.py
@@ -89,7 +89,7 @@ def create(
                 be picked randomly. Two DataItems are considered identical
                 if their content bytes are identical (e.g. image bytes or
                 pdf bytes). These labels will be overridden by Annotation
-                labels specified inside index file refenced by
+                labels specified inside index file referenced by
                 ``import_schema_uri``, e.g. jsonl file.
             project (str):
diff --git a/google/cloud/aiplatform/datasets/video_dataset.py b/google/cloud/aiplatform/datasets/video_dataset.py
index 2964b77f19..a758339b71 100644
--- a/google/cloud/aiplatform/datasets/video_dataset.py
+++ b/google/cloud/aiplatform/datasets/video_dataset.py
@@ -82,7 +82,7 @@ def create(
                 be picked randomly. Two DataItems are considered identical
                 if their content bytes are identical (e.g. image bytes or
                 pdf bytes). These labels will be overridden by Annotation
-                labels specified inside index file refenced by
+                labels specified inside index file referenced by
                 ``import_schema_uri``, e.g. jsonl file.
             project (str):
diff --git a/google/cloud/aiplatform/jobs.py b/google/cloud/aiplatform/jobs.py
index 042245e5b9..9d53b13bee 100644
--- a/google/cloud/aiplatform/jobs.py
+++ b/google/cloud/aiplatform/jobs.py
@@ -325,7 +325,7 @@ def output_info(self,) -> Optional[aiplatform.gapic.BatchPredictionJob.OutputInf
        """Information describing the output of this job, including output location
        into which prediction output is written.
 
-        This is only available for batch predicition jobs that have run successfully.
+        This is only available for batch prediction jobs that have run successfully.
        """
        self._assert_gca_resource_is_available()
        return self._gca_resource.output_info
@@ -839,7 +839,7 @@ def __init__(
        Args:
            project(str): Project of the resource noun.
            location(str): The location of the resource noun.
-            credentials(google.auth.crendentials.Crendentials): Optional custom
+            credentials(google.auth.credentials.Credentials): Optional custom
                credentials to use when accessing interacting with resource noun.
        """
@@ -1023,7 +1023,7 @@ def __init__(
        encryption_spec_key_name: Optional[str] = None,
        staging_bucket: Optional[str] = None,
    ):
-        """Cosntruct a Custom Job with Worker Pool Specs.
+        """Constructs a Custom Job with Worker Pool Specs.
 
        ```
        Example usage:
@@ -1569,7 +1569,7 @@ def __init__(
                Required. Configured CustomJob. The worker pool spec from this custom job
                applies to the CustomJobs created in all the trials.
            metric_spec: Dict[str, str]
-                Required. Dicionary representing metrics to optimize. The dictionary key is the metric_id,
+                Required. Dictionary representing metrics to optimize. The dictionary key is the metric_id,
                which is reported by your training job, and the dictionary value is the
                optimization goal of the metric('minimize' or 'maximize'). example:
@@ -1594,7 +1594,7 @@ def __init__(
                DoubleParameterSpec, IntegerParameterSpec, CategoricalParameterSpace,
                DiscreteParameterSpec
            max_trial_count (int):
-                Reuired. The desired total number of Trials.
+                Required. The desired total number of Trials.
            parallel_trial_count (int):
                Required. The desired number of Trials to run in parallel.
            max_failed_trial_count (int):
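The jobs.py hunks above correct the ``CustomJob`` and ``HyperparameterTuningJob`` docstrings. A minimal sketch of how the corrected ``metric_spec`` and ``max_trial_count`` wording maps onto the API (the display names, container image, and the ``loss`` metric id are hypothetical):

.. code-block:: Python

    from google.cloud import aiplatform
    from google.cloud.aiplatform import hyperparameter_tuning as hpt

    # Worker pool spec shared by every trial; the image URI is hypothetical.
    custom_job = aiplatform.CustomJob(
        display_name='my-custom-job',
        worker_pool_specs=[{
            'machine_spec': {'machine_type': 'n1-standard-4'},
            'replica_count': 1,
            'container_spec': {'image_uri': 'gcr.io/my-project/my-trainer'},
        }],
    )

    hp_job = aiplatform.HyperparameterTuningJob(
        display_name='my-hp-job',
        custom_job=custom_job,
        # Keys are metric ids reported by the training code; values are the
        # optimization goal of each metric, 'minimize' or 'maximize'.
        metric_spec={'loss': 'minimize'},
        parameter_spec={
            'learning_rate': hpt.DoubleParameterSpec(min=1e-5, max=1e-1, scale='log'),
        },
        max_trial_count=16,       # desired total number of Trials
        parallel_trial_count=4,   # Trials to run in parallel
    )
    hp_job.run()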
""" @@ -1023,7 +1023,7 @@ def __init__( encryption_spec_key_name: Optional[str] = None, staging_bucket: Optional[str] = None, ): - """Cosntruct a Custom Job with Worker Pool Specs. + """Constructs a Custom Job with Worker Pool Specs. ``` Example usage: @@ -1569,7 +1569,7 @@ def __init__( Required. Configured CustomJob. The worker pool spec from this custom job applies to the CustomJobs created in all the trials. metric_spec: Dict[str, str] - Required. Dicionary representing metrics to optimize. The dictionary key is the metric_id, + Required. Dictionary representing metrics to optimize. The dictionary key is the metric_id, which is reported by your training job, and the dictionary value is the optimization goal of the metric('minimize' or 'maximize'). example: @@ -1594,7 +1594,7 @@ def __init__( DoubleParameterSpec, IntegerParameterSpec, CategoricalParameterSpace, DiscreteParameterSpec max_trial_count (int): - Reuired. The desired total number of Trials. + Required. The desired total number of Trials. parallel_trial_count (int): Required. The desired number of Trials to run in parallel. max_failed_trial_count (int): diff --git a/google/cloud/aiplatform/metadata/metadata.py b/google/cloud/aiplatform/metadata/metadata.py index 6ba664916e..e6462745cb 100644 --- a/google/cloud/aiplatform/metadata/metadata.py +++ b/google/cloud/aiplatform/metadata/metadata.py @@ -155,7 +155,7 @@ def log_metrics(self, metrics: Dict[str, Union[float, int]]): Args: metrics (Dict): - Required. Metrics key/value pairs. Only flot and int are supported format for value. + Required. Metrics key/value pairs. Only float and int are supported format for value. Raises: TypeError: If value contains unsupported types. ValueError: If Experiment or Run is not set. @@ -263,7 +263,7 @@ def _validate_metrics_value_type(metrics: Dict[str, Union[float, int]]): Args: metrics (Dict): - Required. Metrics key/value pairs. Only flot and int are supported format for value. + Required. Metrics key/value pairs. Only float and int are supported format for value. Raises: TypeError: If value contains unsupported types. """ diff --git a/google/cloud/aiplatform/tensorboard/uploader_utils.py b/google/cloud/aiplatform/tensorboard/uploader_utils.py index 1396f6cc78..8fac59f55b 100644 --- a/google/cloud/aiplatform/tensorboard/uploader_utils.py +++ b/google/cloud/aiplatform/tensorboard/uploader_utils.py @@ -377,7 +377,7 @@ def get_or_create( Returns: time_series (tensorboard_time_series.TensorboardTimeSeries): - A new or existing tensorboard_time_series.TensorbaordTimeSeries. + A new or existing tensorboard_time_series.TensorboardTimeSeries. Raises: exceptions.InvalidArgument: diff --git a/google/cloud/aiplatform/training_jobs.py b/google/cloud/aiplatform/training_jobs.py index 44f66187cc..f84a4136d8 100644 --- a/google/cloud/aiplatform/training_jobs.py +++ b/google/cloud/aiplatform/training_jobs.py @@ -435,7 +435,7 @@ def _create_input_data_config( - AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test" Raises: ValueError: When more than 1 type of split configuration is passed or when - the split configuartion passed is incompatible with the dataset schema. + the split configuration passed is incompatible with the dataset schema. """ input_data_config = None @@ -5811,7 +5811,7 @@ def __init__( multiple objects in shots and segments. You can use these models to track objects in your videos according to your own pre-defined, custom labels. 
- "action_recognition" - A video action reconition model pinpoints + "action_recognition" - A video action recognition model pinpoints the location of actions with short temporal durations (~1 second). model_type: str = "CLOUD" Required. One of the following: diff --git a/google/cloud/aiplatform/utils/pipeline_utils.py b/google/cloud/aiplatform/utils/pipeline_utils.py index 5a09a98f18..96d662ce05 100644 --- a/google/cloud/aiplatform/utils/pipeline_utils.py +++ b/google/cloud/aiplatform/utils/pipeline_utils.py @@ -155,7 +155,7 @@ def _get_vertex_value( inputs, or value is none. """ if value is None: - raise ValueError("None values should be filterd out.") + raise ValueError("None values should be filtered out.") if name not in self._parameter_types: raise ValueError( diff --git a/google/cloud/aiplatform/utils/source_utils.py b/google/cloud/aiplatform/utils/source_utils.py index 660e7af497..2a13daf452 100644 --- a/google/cloud/aiplatform/utils/source_utils.py +++ b/google/cloud/aiplatform/utils/source_utils.py @@ -71,7 +71,7 @@ class _TrainingScriptPythonPackager: packager = TrainingScriptPythonPackager('my_script.py', ['pandas', 'pytorch']) gcs_path = packager.package_and_copy_to_gcs( gcs_staging_dir='my-bucket', - project='my-prject') + project='my-project') module_name = packager.module_name The package after installed can be executed as: diff --git a/google/cloud/aiplatform/utils/worker_spec_utils.py b/google/cloud/aiplatform/utils/worker_spec_utils.py index 6cad7d562f..2de1bf2f28 100644 --- a/google/cloud/aiplatform/utils/worker_spec_utils.py +++ b/google/cloud/aiplatform/utils/worker_spec_utils.py @@ -186,7 +186,7 @@ def chief_worker_pool( reduction_server_replica_count: int = 0, reduction_server_machine_type: str = None, ) -> "_DistributedTrainingSpec": - """Parameterizes Config to support only chief with worker replicas. + """Parametrizes Config to support only chief with worker replicas. For replica is assigned to chief and the remainder to workers. All spec have the same machine type, accelerator count, and accelerator type.