// Code generated by smithy-go-codegen DO NOT EDIT.

package personalize

import (
	"context"
	"errors"
	"fmt"
	"github.com/aws/aws-sdk-go-v2/aws"
	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
	"github.com/aws/aws-sdk-go-v2/service/personalize/types"
	smithyendpoints "github.com/aws/smithy-go/endpoints"
	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

// Creates a job that imports training data from your data source (an Amazon S3
// bucket) to an Amazon Personalize dataset. To allow Amazon Personalize to import
// the training data, you must specify an IAM service role that has permission to
// read from the data source, as Amazon Personalize makes a copy of your data and
// processes it internally. For information on granting access to your Amazon S3
// bucket, see Giving Amazon Personalize Access to Amazon S3 Resources (https://docs.aws.amazon.com/personalize/latest/dg/granting-personalize-s3-access.html).
//
// By default, a dataset import job replaces any existing data in the dataset
// that you imported in bulk. To add new records without replacing existing data,
// specify INCREMENTAL for the import mode in the CreateDatasetImportJob
// operation.
//
// Status
//
// A dataset import job can be in one of the following states:
//   - CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
//
// To get the status of the import job, call DescribeDatasetImportJob (https://docs.aws.amazon.com/personalize/latest/dg/API_DescribeDatasetImportJob.html),
// providing the Amazon Resource Name (ARN) of the dataset import job. The dataset
// import is complete when the status shows as ACTIVE. If the status shows as
// CREATE FAILED, the response includes a failureReason key, which describes why
// the job failed.
//
// Importing takes time. You must wait until the status shows as ACTIVE before
// training a model using the dataset.
//
// Related APIs
//   - ListDatasetImportJobs (https://docs.aws.amazon.com/personalize/latest/dg/API_ListDatasetImportJobs.html)
//   - DescribeDatasetImportJob (https://docs.aws.amazon.com/personalize/latest/dg/API_DescribeDatasetImportJob.html)
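//
// A minimal usage sketch (illustrative only, not part of the generated client;
// the job name, ARNs, and S3 path below are placeholders, and the snippet
// assumes the aws, config, personalize, and types packages of the AWS SDK for
// Go v2 are imported and a context.Context named ctx is in scope):
//
//	cfg, err := config.LoadDefaultConfig(ctx)
//	if err != nil {
//		// handle error
//	}
//	client := personalize.NewFromConfig(cfg)
//	out, err := client.CreateDatasetImportJob(ctx, &personalize.CreateDatasetImportJobInput{
//		JobName:    aws.String("my-import-job"),
//		DatasetArn: aws.String("arn:aws:personalize:us-west-2:123456789012:dataset/my-dataset-group/INTERACTIONS"),
//		RoleArn:    aws.String("arn:aws:iam::123456789012:role/PersonalizeS3Role"),
//		DataSource: &types.DataSource{
//			DataLocation: aws.String("s3://my-bucket/interactions.csv"),
//		},
//		ImportMode: types.ImportModeIncremental,
//	})
//	if err != nil {
//		// handle error
//	}
//	_ = out.DatasetImportJobArn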
func (c *Client) CreateDatasetImportJob(ctx context.Context, params *CreateDatasetImportJobInput, optFns ...func(*Options)) (*CreateDatasetImportJobOutput, error) {
	if params == nil {
		params = &CreateDatasetImportJobInput{}
	}

	result, metadata, err := c.invokeOperation(ctx, "CreateDatasetImportJob", params, optFns, c.addOperationCreateDatasetImportJobMiddlewares)
	if err != nil {
		return nil, err
	}

	out := result.(*CreateDatasetImportJobOutput)
	out.ResultMetadata = metadata
	return out, nil
}

type CreateDatasetImportJobInput struct {

	// The Amazon S3 bucket that contains the training data to import.
	//
	// This member is required.
	DataSource *types.DataSource

	// The ARN of the dataset that receives the imported data.
	//
	// This member is required.
	DatasetArn *string

	// The name for the dataset import job.
	//
	// This member is required.
	JobName *string

	// The ARN of the IAM role that has permissions to read from the Amazon S3 data
	// source.
	//
	// This member is required.
	RoleArn *string

	// Specify how to add the new records to an existing dataset. The default import
	// mode is FULL. If you haven't imported bulk records into the dataset
	// previously, you can only specify FULL.
	//   - Specify FULL to overwrite all existing bulk data in your dataset. Data you
	//     imported individually is not replaced.
	//   - Specify INCREMENTAL to append the new records to the existing data in your
	//     dataset. Amazon Personalize replaces any record with the same ID with the
	//     new one.
	ImportMode types.ImportMode

	// If you created a metric attribution, specify whether to publish metrics for
	// this import job to Amazon S3.
	PublishAttributionMetricsToS3 *bool

	// A list of tags (https://docs.aws.amazon.com/personalize/latest/dg/tagging-resources.html)
	// to apply to the dataset import job.
	Tags []types.Tag

	noSmithyDocumentSerde
}

type CreateDatasetImportJobOutput struct {

	// The ARN of the dataset import job.
	DatasetImportJobArn *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata

	noSmithyDocumentSerde
}

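// A hedged polling sketch (illustrative only, not part of the generated client):
// once CreateDatasetImportJob returns, the ARN in DatasetImportJobArn can be
// passed to DescribeDatasetImportJob until the status shows ACTIVE or CREATE
// FAILED. The client, ctx, and out values and the 30-second interval below are
// assumptions carried over from the usage sketch above.
//
//	for {
//		desc, err := client.DescribeDatasetImportJob(ctx, &personalize.DescribeDatasetImportJobInput{
//			DatasetImportJobArn: out.DatasetImportJobArn,
//		})
//		if err != nil {
//			// handle error
//		}
//		status := aws.ToString(desc.DatasetImportJob.Status)
//		if status == "ACTIVE" || status == "CREATE FAILED" {
//			break
//		}
//		time.Sleep(30 * time.Second)
//	}
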
func (c *Client) addOperationCreateDatasetImportJobMiddlewares(stack *middleware.Stack, options Options) (err error) {
	err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateDatasetImportJob{}, middleware.After)
	if err != nil {
		return err
	}
	err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateDatasetImportJob{}, middleware.After)
	if err != nil {
		return err
	}
	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
		return err
	}
	if err = addSetLoggerMiddleware(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
		return err
	}
	if err = addResolveEndpointMiddleware(stack, options); err != nil {
		return err
	}
	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
		return err
	}
	if err = addRetryMiddlewares(stack, options); err != nil {
		return err
	}
	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
		return err
	}
	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
		return err
	}
	if err = addClientUserAgent(stack, options); err != nil {
		return err
	}
	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = addCreateDatasetImportJobResolveEndpointMiddleware(stack, options); err != nil {
		return err
	}
	if err = addOpCreateDatasetImportJobValidationMiddleware(stack); err != nil {
		return err
	}
	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateDatasetImportJob(options.Region), middleware.Before); err != nil {
		return err
	}
	if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
		return err
	}
	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
		return err
	}
	if err = addResponseErrorMiddleware(stack); err != nil {
		return err
	}
	if err = addRequestResponseLogging(stack, options); err != nil {
		return err
	}
	if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
		return err
	}
	return nil
}

func newServiceMetadataMiddleware_opCreateDatasetImportJob(region string) *awsmiddleware.RegisterServiceMetadata {
	return &awsmiddleware.RegisterServiceMetadata{
		Region:        region,
		ServiceID:     ServiceID,
		SigningName:   "personalize",
		OperationName: "CreateDatasetImportJob",
	}
}

type opCreateDatasetImportJobResolveEndpointMiddleware struct {
	EndpointResolver EndpointResolverV2
	BuiltInResolver  builtInParameterResolver
}

func (*opCreateDatasetImportJobResolveEndpointMiddleware) ID() string {
	return "ResolveEndpointV2"
}

func (m *opCreateDatasetImportJobResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
	if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
		return next.HandleSerialize(ctx, in)
	}

	req, ok := in.Request.(*smithyhttp.Request)
	if !ok {
		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
	}

	if m.EndpointResolver == nil {
		return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
	}

	params := EndpointParameters{}

	m.BuiltInResolver.ResolveBuiltIns(&params)

	var resolvedEndpoint smithyendpoints.Endpoint
	resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
	if err != nil {
		return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
	}

	req.URL = &resolvedEndpoint.URI

	for k := range resolvedEndpoint.Headers {
		req.Header.Set(
			k,
			resolvedEndpoint.Headers.Get(k),
		)
	}

	authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
	if err != nil {
		var nfe *internalauth.NoAuthenticationSchemesFoundError
		if errors.As(err, &nfe) {
			// if no auth scheme is found, default to sigv4
			signingName := "personalize"
			signingRegion := m.BuiltInResolver.(*builtInResolver).Region
			ctx = awsmiddleware.SetSigningName(ctx, signingName)
			ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
		}
		var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
		if errors.As(err, &ue) {
			return out, metadata, fmt.Errorf(
				"This operation requests signer version(s) %v but the client only supports %v",
				ue.UnsupportedSchemes,
				internalauth.SupportedSchemes,
			)
		}
	}

	for _, authScheme := range authSchemes {
		switch authScheme.(type) {
		case *internalauth.AuthenticationSchemeV4:
			v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
			var signingName, signingRegion string
			if v4Scheme.SigningName == nil {
				signingName = "personalize"
			} else {
				signingName = *v4Scheme.SigningName
			}
			if v4Scheme.SigningRegion == nil {
				signingRegion = m.BuiltInResolver.(*builtInResolver).Region
			} else {
				signingRegion = *v4Scheme.SigningRegion
			}
			if v4Scheme.DisableDoubleEncoding != nil {
				// The signer sets an equivalent value at client initialization time.
				// Setting this context value will cause the signer to extract it
				// and override the value set at client initialization time.
				ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
			}
			ctx = awsmiddleware.SetSigningName(ctx, signingName)
			ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
			break
		case *internalauth.AuthenticationSchemeV4A:
			v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
			if v4aScheme.SigningName == nil {
				v4aScheme.SigningName = aws.String("personalize")
			}
			if v4aScheme.DisableDoubleEncoding != nil {
				// The signer sets an equivalent value at client initialization time.
				// Setting this context value will cause the signer to extract it
				// and override the value set at client initialization time.
				ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
			}
			ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
			ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
			break
		case *internalauth.AuthenticationSchemeNone:
			break
		}
	}

	return next.HandleSerialize(ctx, in)
}

func addCreateDatasetImportJobResolveEndpointMiddleware(stack *middleware.Stack, options Options) error {
	return stack.Serialize.Insert(&opCreateDatasetImportJobResolveEndpointMiddleware{
		EndpointResolver: options.EndpointResolverV2,
		BuiltInResolver: &builtInResolver{
			Region:       options.Region,
			UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
			UseFIPS:      options.EndpointOptions.UseFIPSEndpoint,
			Endpoint:     options.BaseEndpoint,
		},
	}, "ResolveEndpoint", middleware.After)
}