From a20b75632bd8b4f21897dd14d15caf0ea4153378 Mon Sep 17 00:00:00 2001
From: sagemaker-bot
Date: Fri, 14 Feb 2025 10:09:06 +0000
Subject: [PATCH] Daily Sync with Botocore v1.36.20 on 2025/02/14

---
 sample/sagemaker/2017-07-24/service-2.json | 8 ++++++--
 src/sagemaker_core/main/shapes.py          | 2 +-
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/sample/sagemaker/2017-07-24/service-2.json b/sample/sagemaker/2017-07-24/service-2.json
index c7d2b172..c2736086 100644
--- a/sample/sagemaker/2017-07-24/service-2.json
+++ b/sample/sagemaker/2017-07-24/service-2.json
@@ -33607,7 +33607,7 @@
       },
       "InferenceAmiVersion":{
         "shape":"ProductionVariantInferenceAmiVersion",
-        "documentation":"

Specifies an option from a collection of preconfigured Amazon Machine Image (AMI) images. Each image is configured by Amazon Web Services with a set of software and driver versions. Amazon Web Services optimizes these configurations for different machine learning workloads.

By selecting an AMI version, you can ensure that your inference environment is compatible with specific software requirements, such as CUDA driver versions, Linux kernel versions, or Amazon Web Services Neuron driver versions.

The AMI version names, and their configurations, are the following:

al2-ami-sagemaker-inference-gpu-2
  • Accelerator: GPU

  • NVIDIA driver version: 535.54.03

  • CUDA driver version: 12.2

  • Supported instance types: ml.g4dn.*, ml.g5.*, ml.g6.*, ml.p3.*, ml.p4d.*, ml.p4de.*, ml.p5.*

"
+        "documentation":"

Specifies an option from a collection of preconfigured Amazon Machine Image (AMI) images. Each image is configured by Amazon Web Services with a set of software and driver versions. Amazon Web Services optimizes these configurations for different machine learning workloads.

By selecting an AMI version, you can ensure that your inference environment is compatible with specific software requirements, such as CUDA driver versions, Linux kernel versions, or Amazon Web Services Neuron driver versions.

The AMI version names, and their configurations, are the following:

al2-ami-sagemaker-inference-gpu-2
  • Accelerator: GPU

  • NVIDIA driver version: 535.54.03

  • CUDA version: 12.2

al2-ami-sagemaker-inference-gpu-2-1
  • Accelerator: GPU

  • NVIDIA driver version: 535.54.03

  • CUDA version: 12.2

  • NVIDIA Container Toolkit with disabled CUDA-compat mounting

al2-ami-sagemaker-inference-gpu-3-1
  • Accelerator: GPU

  • NVIDIA driver version: 550.144.01

  • CUDA version: 12.4

  • NVIDIA Container Toolkit with disabled CUDA-compat mounting

"
        }
      },
      "documentation":"

Identifies a model that you want to host and the resources chosen to deploy for hosting it. If you are deploying multiple models, tell SageMaker how to distribute traffic among the models by specifying variant weights. For more information on production variants, check Production variants.

"
@@ -33645,7 +33645,11 @@
     },
     "ProductionVariantInferenceAmiVersion":{
       "type":"string",
-      "enum":["al2-ami-sagemaker-inference-gpu-2"]
+      "enum":[
+        "al2-ami-sagemaker-inference-gpu-2",
+        "al2-ami-sagemaker-inference-gpu-2-1",
+        "al2-ami-sagemaker-inference-gpu-3-1"
+      ]
     },
     "ProductionVariantInstanceType":{
       "type":"string",
diff --git a/src/sagemaker_core/main/shapes.py b/src/sagemaker_core/main/shapes.py
index 5024b8e5..6ba2fe1c 100644
--- a/src/sagemaker_core/main/shapes.py
+++ b/src/sagemaker_core/main/shapes.py
@@ -4858,7 +4858,7 @@ class ProductionVariant(Base):
         enable_ssm_access: You can use this parameter to turn on native Amazon Web Services Systems Manager (SSM) access for a production variant behind an endpoint. By default, SSM access is disabled for all production variants behind an endpoint. You can turn on or turn off SSM access for a production variant behind an existing endpoint by creating a new endpoint configuration and calling UpdateEndpoint.
         managed_instance_scaling: Settings that control the range in the number of instances that the endpoint provisions as it scales up or down to accommodate traffic.
         routing_config: Settings that control how the endpoint routes incoming traffic to the instances that the endpoint hosts.
-        inference_ami_version: Specifies an option from a collection of preconfigured Amazon Machine Image (AMI) images. Each image is configured by Amazon Web Services with a set of software and driver versions. Amazon Web Services optimizes these configurations for different machine learning workloads. By selecting an AMI version, you can ensure that your inference environment is compatible with specific software requirements, such as CUDA driver versions, Linux kernel versions, or Amazon Web Services Neuron driver versions. The AMI version names, and their configurations, are the following: al2-ami-sagemaker-inference-gpu-2 Accelerator: GPU NVIDIA driver version: 535.54.03 CUDA driver version: 12.2 Supported instance types: ml.g4dn.*, ml.g5.*, ml.g6.*, ml.p3.*, ml.p4d.*, ml.p4de.*, ml.p5.*
+        inference_ami_version: Specifies an option from a collection of preconfigured Amazon Machine Image (AMI) images. Each image is configured by Amazon Web Services with a set of software and driver versions. Amazon Web Services optimizes these configurations for different machine learning workloads. By selecting an AMI version, you can ensure that your inference environment is compatible with specific software requirements, such as CUDA driver versions, Linux kernel versions, or Amazon Web Services Neuron driver versions. The AMI version names, and their configurations, are the following: al2-ami-sagemaker-inference-gpu-2 Accelerator: GPU NVIDIA driver version: 535.54.03 CUDA version: 12.2 al2-ami-sagemaker-inference-gpu-2-1 Accelerator: GPU NVIDIA driver version: 535.54.03 CUDA version: 12.2 NVIDIA Container Toolkit with disabled CUDA-compat mounting al2-ami-sagemaker-inference-gpu-3-1 Accelerator: GPU NVIDIA driver version: 550.144.01 CUDA version: 12.4 NVIDIA Container Toolkit with disabled CUDA-compat mounting
     """

     variant_name: str