/
api_op_CreateBatchInferenceJob.go
200 lines (175 loc) · 6.9 KB
/
api_op_CreateBatchInferenceJob.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
// Code generated by smithy-go-codegen DO NOT EDIT.
package personalize
import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/service/personalize/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Generates batch recommendations based on a list of items or users stored in
// Amazon S3 and exports the recommendations to an Amazon S3 bucket. To generate
// batch recommendations, specify the ARN of a solution version and an Amazon S3
// URI for the input and output data. For user personalization, popular items, and
// personalized ranking solutions, the batch inference job generates a list of
// recommended items for each user ID in the input file. For related items
// solutions, the job generates a list of recommended items for each item ID in the
// input file. For more information, see Creating a batch inference job (https://docs.aws.amazon.com/personalize/latest/dg/getting-batch-recommendations.html)
// . If you use the Similar-Items recipe, Amazon Personalize can add descriptive
// themes to batch recommendations. To generate themes, set the job's mode to
// THEME_GENERATION and specify the name of the field that contains item names in
// the input data. For more information about generating themes, see Batch
// recommendations with themes from Content Generator (https://docs.aws.amazon.com/personalize/latest/dg/themed-batch-recommendations.html)
// . You can't get batch recommendations with the Trending-Now or Next-Best-Action
// recipes.
func (c *Client) CreateBatchInferenceJob(ctx context.Context, params *CreateBatchInferenceJobInput, optFns ...func(*Options)) (*CreateBatchInferenceJobOutput, error) {
	// Substitute an empty input so the middleware pipeline never observes nil params.
	input := params
	if input == nil {
		input = &CreateBatchInferenceJobInput{}
	}

	result, metadata, err := c.invokeOperation(ctx, "CreateBatchInferenceJob", input, optFns, c.addOperationCreateBatchInferenceJobMiddlewares)
	if err != nil {
		return nil, err
	}

	// The generic invoke path returns an untyped result; narrow it and attach
	// the operation metadata before handing it back to the caller.
	response := result.(*CreateBatchInferenceJobOutput)
	response.ResultMetadata = metadata
	return response, nil
}
// CreateBatchInferenceJobInput is the request parameters for the
// CreateBatchInferenceJob operation.
type CreateBatchInferenceJobInput struct {

	// The Amazon S3 path that leads to the input file to base your recommendations
	// on. The input material must be in JSON format.
	//
	// This member is required.
	JobInput *types.BatchInferenceJobInput

	// The name of the batch inference job to create.
	//
	// This member is required.
	JobName *string

	// The path to the Amazon S3 bucket where the job's output will be stored.
	//
	// This member is required.
	JobOutput *types.BatchInferenceJobOutput

	// The ARN of the Amazon Identity and Access Management role that has permissions
	// to read and write to your input and output Amazon S3 buckets respectively.
	//
	// This member is required.
	RoleArn *string

	// The Amazon Resource Name (ARN) of the solution version that will be used to
	// generate the batch inference recommendations.
	//
	// This member is required.
	SolutionVersionArn *string

	// The configuration details of a batch inference job.
	BatchInferenceJobConfig *types.BatchInferenceJobConfig

	// The mode of the batch inference job. To generate descriptive themes for groups
	// of similar items, set the job mode to THEME_GENERATION . If you don't want to
	// generate themes, use the default BATCH_INFERENCE . When you get batch
	// recommendations with themes, you will incur additional costs. For more
	// information, see Amazon Personalize pricing (https://aws.amazon.com/personalize/pricing/)
	// .
	BatchInferenceJobMode types.BatchInferenceJobMode

	// The ARN of the filter to apply to the batch inference job. For more information
	// on using filters, see Filtering batch recommendations (https://docs.aws.amazon.com/personalize/latest/dg/filter-batch.html)
	// .
	FilterArn *string

	// The number of recommendations to retrieve.
	NumResults *int32

	// A list of tags (https://docs.aws.amazon.com/personalize/latest/dg/tagging-resources.html)
	// to apply to the batch inference job.
	Tags []types.Tag

	// For theme generation jobs, specify the name of the column in your Items dataset
	// that contains each item's name.
	ThemeGenerationConfig *types.ThemeGenerationConfig

	noSmithyDocumentSerde
}
// CreateBatchInferenceJobOutput is the response from the
// CreateBatchInferenceJob operation.
type CreateBatchInferenceJobOutput struct {

	// The ARN of the batch inference job.
	BatchInferenceJobArn *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata

	noSmithyDocumentSerde
}
// addOperationCreateBatchInferenceJobMiddlewares assembles the middleware stack
// for the CreateBatchInferenceJob operation: serialization, deserialization,
// endpoint resolution, signing, retry, logging, and input validation. The
// registration order is significant — middlewares are added to specific stack
// phases (Serialize, Deserialize, Initialize, Finalize) relative to one another.
func (c *Client) addOperationCreateBatchInferenceJobMiddlewares(stack *middleware.Stack, options Options) (err error) {
	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
		return err
	}
	// Protocol (AWS JSON 1.1) request serializer and response deserializer.
	err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateBatchInferenceJob{}, middleware.After)
	if err != nil {
		return err
	}
	err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateBatchInferenceJob{}, middleware.After)
	if err != nil {
		return err
	}
	// Endpoint/auth finalizers must be registered before the remaining
	// transport-level middlewares below.
	if err := addProtocolFinalizerMiddlewares(stack, options, "CreateBatchInferenceJob"); err != nil {
		return fmt.Errorf("add protocol finalizers: %v", err)
	}
	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
		return err
	}
	if err = addSetLoggerMiddleware(stack, options); err != nil {
		return err
	}
	if err = addClientRequestID(stack); err != nil {
		return err
	}
	if err = addComputeContentLength(stack); err != nil {
		return err
	}
	if err = addResolveEndpointMiddleware(stack, options); err != nil {
		return err
	}
	// Payload hash is required for SigV4 request signing.
	if err = addComputePayloadSHA256(stack); err != nil {
		return err
	}
	if err = addRetry(stack, options); err != nil {
		return err
	}
	if err = addRawResponseToMetadata(stack); err != nil {
		return err
	}
	if err = addRecordResponseTiming(stack); err != nil {
		return err
	}
	if err = addClientUserAgent(stack, options); err != nil {
		return err
	}
	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
		return err
	}
	// Validate required input members before the request is serialized/sent.
	if err = addOpCreateBatchInferenceJobValidationMiddleware(stack); err != nil {
		return err
	}
	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateBatchInferenceJob(options.Region), middleware.Before); err != nil {
		return err
	}
	if err = addRecursionDetection(stack); err != nil {
		return err
	}
	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
		return err
	}
	if err = addResponseErrorMiddleware(stack); err != nil {
		return err
	}
	if err = addRequestResponseLogging(stack, options); err != nil {
		return err
	}
	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
		return err
	}
	return nil
}
// newServiceMetadataMiddleware_opCreateBatchInferenceJob builds the middleware
// that records the service ID, region, and operation name for this call.
func newServiceMetadataMiddleware_opCreateBatchInferenceJob(region string) *awsmiddleware.RegisterServiceMetadata {
	metadata := awsmiddleware.RegisterServiceMetadata{
		Region:        region,
		ServiceID:     ServiceID,
		OperationName: "CreateBatchInferenceJob",
	}
	return &metadata
}