From 64fec26586aa2f2c42e962156c6d8e38e511a410 Mon Sep 17 00:00:00 2001
From: sagemaker-bot
Date: Mon, 21 Apr 2025 10:09:34 +0000
Subject: [PATCH] Daily Sync with Botocore v1.37.37 on 2025/04/21

---
 sample/sagemaker/2017-07-24/service-2.json          | 10 ++++++----
 src/sagemaker_core/main/code_injection/shape_dag.py |  8 +++++++-
 src/sagemaker_core/main/shapes.py                   | 12 +++++++-----
 src/sagemaker_core/tools/constants.py               |  2 +-
 4 files changed, 21 insertions(+), 11 deletions(-)

diff --git a/sample/sagemaker/2017-07-24/service-2.json b/sample/sagemaker/2017-07-24/service-2.json
index 5e78d27a..89cc7ffa 100644
--- a/sample/sagemaker/2017-07-24/service-2.json
+++ b/sample/sagemaker/2017-07-24/service-2.json
@@ -30368,7 +30368,8 @@
       "ModelApprovalStatus":{
         "shape":"ModelApprovalStatus",
         "documentation":"

The approval status of the model. This can be one of the following values:
  • APPROVED - The model is approved.
  • REJECTED - The model is rejected.
  • PENDING_MANUAL_APPROVAL - The model is waiting for manual approval.
"
-      }
+      },
+      "ModelLifeCycle":{"shape":"ModelLifeCycle"}
     },
     "documentation":"Provides summary information about a model package."
   },
@@ -33816,7 +33817,7 @@
     },
     "InferenceAmiVersion":{
       "shape":"ProductionVariantInferenceAmiVersion",
-      "documentation":"

Specifies an option from a collection of preconfigured Amazon Machine Image (AMI) images. Each image is configured by Amazon Web Services with a set of software and driver versions. Amazon Web Services optimizes these configurations for different machine learning workloads.

By selecting an AMI version, you can ensure that your inference environment is compatible with specific software requirements, such as CUDA driver versions, Linux kernel versions, or Amazon Web Services Neuron driver versions.

The AMI version names, and their configurations, are the following:

al2-ami-sagemaker-inference-gpu-2
  • Accelerator: GPU
  • NVIDIA driver version: 535
  • CUDA version: 12.2

al2-ami-sagemaker-inference-gpu-2-1
  • Accelerator: GPU
  • NVIDIA driver version: 535
  • CUDA version: 12.2
  • NVIDIA Container Toolkit with disabled CUDA-compat mounting

al2-ami-sagemaker-inference-gpu-3-1
  • Accelerator: GPU
  • NVIDIA driver version: 550
  • CUDA version: 12.4
  • NVIDIA Container Toolkit with disabled CUDA-compat mounting
" + "documentation":"

Specifies an option from a collection of preconfigured Amazon Machine Image (AMI) images. Each image is configured by Amazon Web Services with a set of software and driver versions. Amazon Web Services optimizes these configurations for different machine learning workloads.

By selecting an AMI version, you can ensure that your inference environment is compatible with specific software requirements, such as CUDA driver versions, Linux kernel versions, or Amazon Web Services Neuron driver versions.

The AMI version names, and their configurations, are the following:

al2-ami-sagemaker-inference-gpu-2
  • Accelerator: GPU
  • NVIDIA driver version: 535
  • CUDA version: 12.2

al2-ami-sagemaker-inference-gpu-2-1
  • Accelerator: GPU
  • NVIDIA driver version: 535
  • CUDA version: 12.2
  • NVIDIA Container Toolkit with disabled CUDA-compat mounting

al2-ami-sagemaker-inference-gpu-3-1
  • Accelerator: GPU
  • NVIDIA driver version: 550
  • CUDA version: 12.4
  • NVIDIA Container Toolkit with disabled CUDA-compat mounting

al2-ami-sagemaker-inference-neuron-2
  • Accelerator: Inferentia2 and Trainium
  • Neuron driver version: 2.19
" } }, "documentation":"

Identifies a model that you want to host and the resources chosen to deploy for hosting it. If you are deploying multiple models, tell SageMaker how to distribute traffic among the models by specifying variant weights. For more information on production variants, check Production variants.

" @@ -33857,7 +33858,8 @@ "enum":[ "al2-ami-sagemaker-inference-gpu-2", "al2-ami-sagemaker-inference-gpu-2-1", - "al2-ami-sagemaker-inference-gpu-3-1" + "al2-ami-sagemaker-inference-gpu-3-1", + "al2-ami-sagemaker-inference-neuron-2" ] }, "ProductionVariantInstanceType":{ @@ -37744,7 +37746,7 @@ }, "MaxPendingTimeInSeconds":{ "shape":"MaxPendingTimeInSeconds", - "documentation":"

The maximum length of time, in seconds, that a training or compilation job can be pending before it is stopped.

" + "documentation":"

The maximum length of time, in seconds, that a training or compilation job can be pending before it is stopped.

When working with training jobs that use capacity from training plans, not all Pending job states count against the MaxPendingTimeInSeconds limit. The following scenarios do not increment the MaxPendingTimeInSeconds counter:
  • The plan is in a Scheduled state: jobs queued (in Pending status) before a plan's start date, waiting for the scheduled start time
  • Between capacity reservations: jobs temporarily back in Pending status between two capacity reservation periods

MaxPendingTimeInSeconds only increments when jobs are actively waiting for capacity in an Active plan.

" } }, "documentation":"

Specifies a limit to how long a job can run. When the job reaches the time limit, SageMaker ends the job. Use this API to cap costs.

To stop a training job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

The training algorithms provided by SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best effort, as the model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel.

The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete.

" diff --git a/src/sagemaker_core/main/code_injection/shape_dag.py b/src/sagemaker_core/main/code_injection/shape_dag.py index ad7fbfff..241ce7f1 100644 --- a/src/sagemaker_core/main/code_injection/shape_dag.py +++ b/src/sagemaker_core/main/code_injection/shape_dag.py @@ -10885,6 +10885,7 @@ {"name": "CreationTime", "shape": "CreationTime", "type": "timestamp"}, {"name": "ModelPackageStatus", "shape": "ModelPackageStatus", "type": "string"}, {"name": "ModelApprovalStatus", "shape": "ModelApprovalStatus", "type": "string"}, + {"name": "ModelLifeCycle", "shape": "ModelLifeCycle", "type": "structure"}, ], "type": "structure", }, @@ -11153,7 +11154,12 @@ "MonitoringDatasetFormat": { "members": [ {"name": "Csv", "shape": "MonitoringCsvDatasetFormat", "type": "structure"}, - {"name": "Json", "shape": "MonitoringJsonDatasetFormat", "type": "structure"}, + { + "alias": "json", + "name": "JsonFormat", + "shape": "MonitoringJsonDatasetFormat", + "type": "structure", + }, {"name": "Parquet", "shape": "MonitoringParquetDatasetFormat", "type": "structure"}, ], "type": "structure", diff --git a/src/sagemaker_core/main/shapes.py b/src/sagemaker_core/main/shapes.py index d680b7ff..5a3508d0 100644 --- a/src/sagemaker_core/main/shapes.py +++ b/src/sagemaker_core/main/shapes.py @@ -12,7 +12,7 @@ # language governing permissions and limitations under the License. import datetime -from pydantic import BaseModel, ConfigDict +from pydantic import BaseModel, ConfigDict, Field from typing import List, Dict, Optional, Any, Union from sagemaker_core.main.utils import Unassigned @@ -1013,7 +1013,7 @@ class StoppingCondition(Base): ---------------------- max_runtime_in_seconds: The maximum length of time, in seconds, that a training or compilation job can run before it is stopped. For compilation jobs, if the job does not complete during this time, a TimeOut error is generated. We recommend starting with 900 seconds and increasing as necessary based on your model. For all other jobs, if the job does not complete during this time, SageMaker ends the job. When RetryStrategy is specified in the job request, MaxRuntimeInSeconds specifies the maximum time for all of the attempts in total, not each individual attempt. The default value is 1 day. The maximum value is 28 days. The maximum time that a TrainingJob can run in total, including any time spent publishing metrics or archiving and uploading models after it has been stopped, is 30 days. max_wait_time_in_seconds: The maximum length of time, in seconds, that a managed Spot training job has to complete. It is the amount of time spent waiting for Spot capacity plus the amount of time the job can run. It must be equal to or greater than MaxRuntimeInSeconds. If the job does not complete during this time, SageMaker ends the job. When RetryStrategy is specified in the job request, MaxWaitTimeInSeconds specifies the maximum time for all of the attempts in total, not each individual attempt. - max_pending_time_in_seconds: The maximum length of time, in seconds, that a training or compilation job can be pending before it is stopped. + max_pending_time_in_seconds: The maximum length of time, in seconds, that a training or compilation job can be pending before it is stopped. When working with training jobs that use capacity from training plans, not all Pending job states count against the MaxPendingTimeInSeconds limit. 
      The following scenarios do not increment the MaxPendingTimeInSeconds counter: the plan is in a Scheduled state (jobs queued in Pending status before a plan's start date, waiting for the scheduled start time); between capacity reservations (jobs temporarily back in Pending status between two capacity reservation periods). MaxPendingTimeInSeconds only increments when jobs are actively waiting for capacity in an Active plan.
     """

     max_runtime_in_seconds: Optional[int] = Unassigned()
@@ -2543,12 +2543,12 @@ class MonitoringDatasetFormat(Base):
     Attributes
     ----------------------
     csv: The CSV dataset used in the monitoring job.
-    json: The JSON dataset used in the monitoring job
+    json_format: The JSON dataset used in the monitoring job.
     parquet: The Parquet dataset used in the monitoring job.
     """

     csv: Optional[MonitoringCsvDatasetFormat] = Unassigned()
-    json: Optional[MonitoringJsonDatasetFormat] = Unassigned()
+    json_format: Optional[MonitoringJsonDatasetFormat] = Field(default=Unassigned(), alias="json")
     parquet: Optional[MonitoringParquetDatasetFormat] = Unassigned()
@@ -4877,7 +4877,7 @@ class ProductionVariant(Base):
     enable_ssm_access: You can use this parameter to turn on native Amazon Web Services Systems Manager (SSM) access for a production variant behind an endpoint. By default, SSM access is disabled for all production variants behind an endpoint. You can turn on or turn off SSM access for a production variant behind an existing endpoint by creating a new endpoint configuration and calling UpdateEndpoint.
     managed_instance_scaling: Settings that control the range in the number of instances that the endpoint provisions as it scales up or down to accommodate traffic.
     routing_config: Settings that control how the endpoint routes incoming traffic to the instances that the endpoint hosts.
-    inference_ami_version: Specifies an option from a collection of preconfigured Amazon Machine Image (AMI) images. Each image is configured by Amazon Web Services with a set of software and driver versions. Amazon Web Services optimizes these configurations for different machine learning workloads. By selecting an AMI version, you can ensure that your inference environment is compatible with specific software requirements, such as CUDA driver versions, Linux kernel versions, or Amazon Web Services Neuron driver versions. The AMI version names, and their configurations, are the following: al2-ami-sagemaker-inference-gpu-2 Accelerator: GPU NVIDIA driver version: 535 CUDA version: 12.2 al2-ami-sagemaker-inference-gpu-2-1 Accelerator: GPU NVIDIA driver version: 535 CUDA version: 12.2 NVIDIA Container Toolkit with disabled CUDA-compat mounting al2-ami-sagemaker-inference-gpu-3-1 Accelerator: GPU NVIDIA driver version: 550 CUDA version: 12.4 NVIDIA Container Toolkit with disabled CUDA-compat mounting
+    inference_ami_version: Specifies an option from a collection of preconfigured Amazon Machine Image (AMI) images. Each image is configured by Amazon Web Services with a set of software and driver versions. Amazon Web Services optimizes these configurations for different machine learning workloads. By selecting an AMI version, you can ensure that your inference environment is compatible with specific software requirements, such as CUDA driver versions, Linux kernel versions, or Amazon Web Services Neuron driver versions.
      The AMI version names, and their configurations, are the following: al2-ami-sagemaker-inference-gpu-2 (Accelerator: GPU; NVIDIA driver version: 535; CUDA version: 12.2); al2-ami-sagemaker-inference-gpu-2-1 (Accelerator: GPU; NVIDIA driver version: 535; CUDA version: 12.2; NVIDIA Container Toolkit with disabled CUDA-compat mounting); al2-ami-sagemaker-inference-gpu-3-1 (Accelerator: GPU; NVIDIA driver version: 550; CUDA version: 12.4; NVIDIA Container Toolkit with disabled CUDA-compat mounting); al2-ami-sagemaker-inference-neuron-2 (Accelerator: Inferentia2 and Trainium; Neuron driver version: 2.19).
     """

     variant_name: str
@@ -10680,6 +10680,7 @@ class ModelPackageSummary(Base):
     creation_time: A timestamp that shows when the model package was created.
     model_package_status: The overall status of the model package.
     model_approval_status: The approval status of the model. This can be one of the following values: APPROVED - The model is approved. REJECTED - The model is rejected. PENDING_MANUAL_APPROVAL - The model is waiting for manual approval.
+    model_life_cycle: A structure describing the current state of the model in its life cycle.
     """

     model_package_arn: str
@@ -10690,6 +10691,7 @@
     model_package_version: Optional[int] = Unassigned()
     model_package_description: Optional[str] = Unassigned()
     model_approval_status: Optional[str] = Unassigned()
+    model_life_cycle: Optional[ModelLifeCycle] = Unassigned()


 class ModelSummary(Base):

diff --git a/src/sagemaker_core/tools/constants.py b/src/sagemaker_core/tools/constants.py
index 8986a572..e789d387 100644
--- a/src/sagemaker_core/tools/constants.py
+++ b/src/sagemaker_core/tools/constants.py
@@ -98,4 +98,4 @@

 API_COVERAGE_JSON_FILE_PATH = os.getcwd() + "/src/sagemaker_core/tools/api_coverage.json"

-SHAPES_WITH_JSON_FIELD_ALIAS = ["MonitoringDatasetFormat"] # Shapes with field name with "json"
+SHAPES_WITH_JSON_FIELD_ALIAS = ["MonitoringDatasetFormat"]  # Shapes with a field named "json"
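The json_format rename in shapes.py leans on pydantic's field aliasing so callers can keep using the service's wire key "json". A minimal sketch of the pattern in plain pydantic v2, standing in for the generated Base class (the nested member and the None defaults here are illustrative; the library uses its Unassigned() sentinel instead):

```python
# Sketch of the aliasing pattern used for MonitoringDatasetFormat: the
# attribute is renamed to json_format (a bare "json" field would shadow
# pydantic's BaseModel.json() method), while alias="json" keeps the wire name.
from typing import Optional

from pydantic import BaseModel, ConfigDict, Field


class MonitoringJsonDatasetFormat(BaseModel):
    line: Optional[bool] = None  # illustrative member, not the full shape


class MonitoringDatasetFormat(BaseModel):
    # populate_by_name lets callers pass either "json" (the API's key)
    # or "json_format" (the Python-safe attribute name).
    model_config = ConfigDict(populate_by_name=True)

    json_format: Optional[MonitoringJsonDatasetFormat] = Field(default=None, alias="json")


# Deserializing the service's key lands on the renamed attribute...
fmt = MonitoringDatasetFormat.model_validate({"json": {"line": True}})
assert fmt.json_format is not None and fmt.json_format.line

# ...and dumping by alias restores the original wire shape.
assert fmt.model_dump(by_alias=True, exclude_none=True) == {"json": {"line": True}}
```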
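The expanded MaxPendingTimeInSeconds documentation maps onto the existing StoppingCondition shape. A small usage sketch (the values are arbitrary):

```python
# Sketch: cap both run time and Pending time for a training job. For jobs
# drawing capacity from a training plan, only time spent waiting for
# capacity in an Active plan counts toward the pending limit.
from sagemaker_core.main.shapes import StoppingCondition

stopping_condition = StoppingCondition(
    max_runtime_in_seconds=3600,       # stop the job one hour after it starts running
    max_pending_time_in_seconds=1800,  # stop it if it stays Pending for 30 minutes
)
```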
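The new al2-ami-sagemaker-inference-neuron-2 enum value likewise flows through the regenerated ProductionVariant shape. A hypothetical sketch (the variant name, model name, and instance type are made up, not taken from the patch):

```python
# Hypothetical sketch: pin a variant to the new Neuron inference AMI
# (Inferentia2/Trainium accelerators, Neuron driver 2.19).
from sagemaker_core.main.shapes import ProductionVariant

variant = ProductionVariant(
    variant_name="neuron-variant",
    model_name="my-neuron-model",
    instance_type="ml.inf2.xlarge",
    initial_instance_count=1,
    inference_ami_version="al2-ami-sagemaker-inference-neuron-2",
)
```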