diff --git a/clients/client-sagemaker-runtime/src/commands/InvokeEndpointCommand.ts b/clients/client-sagemaker-runtime/src/commands/InvokeEndpointCommand.ts index d071fbf14006..baf8ab9d0aa5 100644 --- a/clients/client-sagemaker-runtime/src/commands/InvokeEndpointCommand.ts +++ b/clients/client-sagemaker-runtime/src/commands/InvokeEndpointCommand.ts @@ -94,6 +94,7 @@ export interface InvokeEndpointCommandOutput extends InvokeEndpointCommandOutput * TargetContainerHostname: "STRING_VALUE", * InferenceId: "STRING_VALUE", * EnableExplanations: "STRING_VALUE", + * InferenceComponentName: "STRING_VALUE", * }; * const command = new InvokeEndpointCommand(input); * const response = await client.send(command); diff --git a/clients/client-sagemaker-runtime/src/commands/InvokeEndpointWithResponseStreamCommand.ts b/clients/client-sagemaker-runtime/src/commands/InvokeEndpointWithResponseStreamCommand.ts index a0610fe14aaf..de3081f44250 100644 --- a/clients/client-sagemaker-runtime/src/commands/InvokeEndpointWithResponseStreamCommand.ts +++ b/clients/client-sagemaker-runtime/src/commands/InvokeEndpointWithResponseStreamCommand.ts @@ -72,6 +72,9 @@ export interface InvokeEndpointWithResponseStreamCommandOutput *

For information about how to process the streaming response, see Invoke real-time endpoints.

* * + *

Before you can use this operation, your IAM permissions must allow the + * sagemaker:InvokeEndpoint action. For more information about Amazon SageMaker actions for IAM policies, see Actions, resources, and condition keys for Amazon SageMaker in the IAM Service Authorization + * Reference.

*

Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add * additional headers. You should not rely on the behavior of headers outside those * enumerated in the request syntax.

@@ -93,6 +96,7 @@ export interface InvokeEndpointWithResponseStreamCommandOutput * TargetVariant: "STRING_VALUE", * TargetContainerHostname: "STRING_VALUE", * InferenceId: "STRING_VALUE", + * InferenceComponentName: "STRING_VALUE", * }; * const command = new InvokeEndpointWithResponseStreamCommand(input); * const response = await client.send(command); diff --git a/clients/client-sagemaker-runtime/src/models/models_0.ts b/clients/client-sagemaker-runtime/src/models/models_0.ts index ab2a54fa0497..d2afd5c77813 100644 --- a/clients/client-sagemaker-runtime/src/models/models_0.ts +++ b/clients/client-sagemaker-runtime/src/models/models_0.ts @@ -139,6 +139,13 @@ export interface InvokeEndpointInput { *

*/ EnableExplanations?: string; + + /** + * @public + *

If the endpoint hosts one or more inference components, this parameter specifies the
 + * name of the inference component to invoke.

+ */ + InferenceComponentName?: string; } /** @@ -494,6 +501,13 @@ export interface InvokeEndpointWithResponseStreamInput { *

An identifier that you assign to your request.

*/ InferenceId?: string; + + /** + * @public + *

If the endpoint hosts one or more inference components, this parameter specifies the
 + * name of the inference component to invoke for a streaming response.

+ */ + InferenceComponentName?: string; } /** diff --git a/clients/client-sagemaker-runtime/src/protocols/Aws_restJson1.ts b/clients/client-sagemaker-runtime/src/protocols/Aws_restJson1.ts index 65d8febd6906..d1102e06cbe0 100644 --- a/clients/client-sagemaker-runtime/src/protocols/Aws_restJson1.ts +++ b/clients/client-sagemaker-runtime/src/protocols/Aws_restJson1.ts @@ -60,6 +60,7 @@ export const se_InvokeEndpointCommand = async ( "x-amzn-sagemaker-target-container-hostname": input.TargetContainerHostname!, "x-amzn-sagemaker-inference-id": input.InferenceId!, "x-amzn-sagemaker-enable-explanations": input.EnableExplanations!, + "x-amzn-sagemaker-inference-component": input.InferenceComponentName!, }); let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/endpoints/{EndpointName}/invocations"; @@ -147,6 +148,7 @@ export const se_InvokeEndpointWithResponseStreamCommand = async ( "x-amzn-sagemaker-target-variant": input.TargetVariant!, "x-amzn-sagemaker-target-container-hostname": input.TargetContainerHostname!, "x-amzn-sagemaker-inference-id": input.InferenceId!, + "x-amzn-sagemaker-inference-component": input.InferenceComponentName!, }); let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + diff --git a/codegen/sdk-codegen/aws-models/sagemaker-runtime.json b/codegen/sdk-codegen/aws-models/sagemaker-runtime.json index 3d039d3a3e88..b3d910354a5f 100644 --- a/codegen/sdk-codegen/aws-models/sagemaker-runtime.json +++ b/codegen/sdk-codegen/aws-models/sagemaker-runtime.json @@ -1130,6 +1130,16 @@ "smithy.api#pattern": "^\\p{ASCII}*$" } }, + "com.amazonaws.sagemakerruntime#InferenceComponentHeader": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 63 + }, + "smithy.api#pattern": "^[a-zA-Z0-9]([\\-a-zA-Z0-9]*[a-zA-Z0-9])?$" + } + }, "com.amazonaws.sagemakerruntime#InferenceId": { "type": "string", "traits": { @@ -1432,6 +1442,13 @@ "smithy.api#documentation": "

An optional JMESPath expression used to override the EnableExplanations\n parameter of the ClarifyExplainerConfig API. See the EnableExplanations section in the developer guide for more information.\n

", "smithy.api#httpHeader": "X-Amzn-SageMaker-Enable-Explanations" } + }, + "InferenceComponentName": { + "target": "com.amazonaws.sagemakerruntime#InferenceComponentHeader", + "traits": { + "smithy.api#documentation": "

If the endpoint hosts one or more inference components, this parameter specifies the\n name of the inference component to invoke.

", + "smithy.api#httpHeader": "X-Amzn-SageMaker-Inference-Component" + } } }, "traits": { @@ -1505,7 +1522,7 @@ } ], "traits": { - "smithy.api#documentation": "

Invokes a model at the specified endpoint to return the inference response as a\n stream. The inference stream provides the response payload incrementally as a series of\n parts. Before you can get an inference stream, you must have access to a model that's\n deployed using Amazon SageMaker hosting services, and the container for that model\n must support inference streaming.

\n

For more information that can help you use this API, see the following sections in the\n Amazon SageMaker Developer Guide:

\n \n

Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add\n additional headers. You should not rely on the behavior of headers outside those\n enumerated in the request syntax.

\n

Calls to InvokeEndpointWithResponseStream are authenticated by using\n Amazon Web Services Signature Version 4. For information, see Authenticating Requests (Amazon Web Services Signature Version 4) in the\n Amazon S3 API Reference.

", + "smithy.api#documentation": "

Invokes a model at the specified endpoint to return the inference response as a\n stream. The inference stream provides the response payload incrementally as a series of\n parts. Before you can get an inference stream, you must have access to a model that's\n deployed using Amazon SageMaker hosting services, and the container for that model\n must support inference streaming.

\n

For more information that can help you use this API, see the following sections in the\n Amazon SageMaker Developer Guide:

\n \n

Before you can use this operation, your IAM permissions must allow the\n sagemaker:InvokeEndpoint action. For more information about Amazon SageMaker actions for IAM policies, see Actions, resources, and condition keys for Amazon SageMaker in the IAM Service Authorization\n Reference.

\n

Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add\n additional headers. You should not rely on the behavior of headers outside those\n enumerated in the request syntax.

\n

Calls to InvokeEndpointWithResponseStream are authenticated by using\n Amazon Web Services Signature Version 4. For information, see Authenticating Requests (Amazon Web Services Signature Version 4) in the\n Amazon S3 API Reference.

", "smithy.api#http": { "method": "POST", "uri": "/endpoints/{EndpointName}/invocations-response-stream", @@ -1574,6 +1591,13 @@ "smithy.api#documentation": "

An identifier that you assign to your request.

", "smithy.api#httpHeader": "X-Amzn-SageMaker-Inference-Id" } + }, + "InferenceComponentName": { + "target": "com.amazonaws.sagemakerruntime#InferenceComponentHeader", + "traits": { + "smithy.api#documentation": "

If the endpoint hosts one or more inference components, this parameter specifies the\n name of the inference component to invoke for a streaming response.

", + "smithy.api#httpHeader": "X-Amzn-SageMaker-Inference-Component" + } } }, "traits": {