/
api_op_CreateInferenceScheduler.go
266 lines (234 loc) · 9.4 KB
/
api_op_CreateInferenceScheduler.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
// Code generated by smithy-go-codegen DO NOT EDIT.
package lookoutequipment
import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/service/lookoutequipment/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// CreateInferenceScheduler creates a scheduled inference. Scheduling an
// inference is setting up a continuous real-time inference plan to analyze new
// measurement data. When setting up the schedule, you provide an S3 bucket
// location for the input data, assign it a delimiter between separate entries
// in the data, set an offset delay if desired, and set the frequency of
// inferencing. You must also provide an S3 bucket location for the output data.
func (c *Client) CreateInferenceScheduler(ctx context.Context, params *CreateInferenceSchedulerInput, optFns ...func(*Options)) (*CreateInferenceSchedulerOutput, error) {
	// Guard against a nil request so the middleware stack always receives a
	// valid input value.
	if params == nil {
		params = &CreateInferenceSchedulerInput{}
	}

	res, md, err := c.invokeOperation(ctx, "CreateInferenceScheduler", params, optFns, c.addOperationCreateInferenceSchedulerMiddlewares)
	if err != nil {
		return nil, err
	}

	output := res.(*CreateInferenceSchedulerOutput)
	output.ResultMetadata = md
	return output, nil
}
// CreateInferenceSchedulerInput is the request payload for the
// CreateInferenceScheduler operation.
type CreateInferenceSchedulerInput struct {

	// A unique identifier for the request. If you do not set the client request
	// token, Amazon Lookout for Equipment generates one.
	//
	// This member is required.
	ClientToken *string

	// Specifies configuration information for the input data for the inference
	// scheduler, including delimiter, format, and dataset location.
	//
	// This member is required.
	DataInputConfiguration *types.InferenceInputConfiguration

	// Specifies configuration information for the output results for the inference
	// scheduler, including the S3 location for the output.
	//
	// This member is required.
	DataOutputConfiguration *types.InferenceOutputConfiguration

	// How often data is uploaded to the source Amazon S3 bucket for the input data.
	// The value chosen is the length of time between data uploads. For instance, if
	// you select 5 minutes, Amazon Lookout for Equipment will upload the real-time
	// data to the source bucket once every 5 minutes. This frequency also determines
	// how often Amazon Lookout for Equipment runs inference on your data.
	//
	// For more information, see [Understanding the inference process].
	//
	// [Understanding the inference process]: https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/understanding-inference-process.html
	//
	// This member is required.
	DataUploadFrequency types.DataUploadFrequency

	// The name of the inference scheduler being created.
	//
	// This member is required.
	InferenceSchedulerName *string

	// The name of the previously trained machine learning model being used to create
	// the inference scheduler.
	//
	// This member is required.
	ModelName *string

	// The Amazon Resource Name (ARN) of a role with permission to access the data
	// source being used for the inference.
	//
	// This member is required.
	RoleArn *string

	// The interval (in minutes) of planned delay at the start of each inference
	// segment. For example, if inference is set to run every ten minutes, the delay is
	// set to five minutes and the time is 09:08. The inference scheduler will wake up
	// at the configured interval (which, without a delay configured, would be 09:10)
	// plus the additional five minute delay time (so 09:15) to check your Amazon S3
	// bucket. The delay provides a buffer for you to upload data at the same
	// frequency, so that you don't have to stop and restart the scheduler when
	// uploading new data.
	//
	// For more information, see [Understanding the inference process].
	//
	// [Understanding the inference process]: https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/understanding-inference-process.html
	DataDelayOffsetInMinutes *int64

	// Provides the identifier of the KMS key used to encrypt inference scheduler data
	// by Amazon Lookout for Equipment.
	ServerSideKmsKeyId *string

	// Any tags associated with the inference scheduler.
	Tags []types.Tag

	noSmithyDocumentSerde
}
// CreateInferenceSchedulerOutput is the response payload for the
// CreateInferenceScheduler operation.
type CreateInferenceSchedulerOutput struct {

	// The Amazon Resource Name (ARN) of the inference scheduler being created.
	InferenceSchedulerArn *string

	// The name of inference scheduler being created.
	InferenceSchedulerName *string

	// Provides a quality assessment for a model that uses labels. If Lookout for
	// Equipment determines that the model quality is poor based on training metrics,
	// the value is POOR_QUALITY_DETECTED . Otherwise, the value is
	// QUALITY_THRESHOLD_MET .
	//
	// If the model is unlabeled, the model quality can't be assessed and the value of
	// ModelQuality is CANNOT_DETERMINE_QUALITY . In this situation, you can get a
	// model quality assessment by adding labels to the input dataset and retraining
	// the model.
	//
	// For information about using labels with your models, see [Understanding labeling].
	//
	// For information about improving the quality of a model, see [Best practices with Amazon Lookout for Equipment].
	//
	// [Best practices with Amazon Lookout for Equipment]: https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/best-practices.html
	// [Understanding labeling]: https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/understanding-labeling.html
	ModelQuality types.ModelQuality

	// Indicates the status of the CreateInferenceScheduler operation.
	Status types.InferenceSchedulerStatus

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata

	noSmithyDocumentSerde
}
// addOperationCreateInferenceSchedulerMiddlewares wires the full middleware
// stack for the CreateInferenceScheduler operation: serialization,
// deserialization, endpoint resolution, retries, signing context, idempotency
// token auto-fill, input validation, and logging.
//
// NOTE(review): the registration order below is significant — each Add call
// positions a step relative to previously registered ones. Do not reorder.
func (c *Client) addOperationCreateInferenceSchedulerMiddlewares(stack *middleware.Stack, options Options) (err error) {
	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
		return err
	}
	err = stack.Serialize.Add(&awsAwsjson10_serializeOpCreateInferenceScheduler{}, middleware.After)
	if err != nil {
		return err
	}
	err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpCreateInferenceScheduler{}, middleware.After)
	if err != nil {
		return err
	}
	if err := addProtocolFinalizerMiddlewares(stack, options, "CreateInferenceScheduler"); err != nil {
		return fmt.Errorf("add protocol finalizers: %v", err)
	}
	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
		return err
	}
	if err = addSetLoggerMiddleware(stack, options); err != nil {
		return err
	}
	if err = addClientRequestID(stack); err != nil {
		return err
	}
	if err = addComputeContentLength(stack); err != nil {
		return err
	}
	if err = addResolveEndpointMiddleware(stack, options); err != nil {
		return err
	}
	if err = addComputePayloadSHA256(stack); err != nil {
		return err
	}
	if err = addRetry(stack, options); err != nil {
		return err
	}
	if err = addRawResponseToMetadata(stack); err != nil {
		return err
	}
	if err = addRecordResponseTiming(stack); err != nil {
		return err
	}
	if err = addClientUserAgent(stack, options); err != nil {
		return err
	}
	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
		return err
	}
	// Auto-fill ClientToken before validation so a caller-omitted token does
	// not fail the required-member check.
	if err = addIdempotencyToken_opCreateInferenceSchedulerMiddleware(stack, options); err != nil {
		return err
	}
	if err = addOpCreateInferenceSchedulerValidationMiddleware(stack); err != nil {
		return err
	}
	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateInferenceScheduler(options.Region), middleware.Before); err != nil {
		return err
	}
	if err = addRecursionDetection(stack); err != nil {
		return err
	}
	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
		return err
	}
	if err = addResponseErrorMiddleware(stack); err != nil {
		return err
	}
	if err = addRequestResponseLogging(stack, options); err != nil {
		return err
	}
	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
		return err
	}
	return nil
}
// idempotencyToken_initializeOpCreateInferenceScheduler is an Initialize-phase
// middleware that populates ClientToken on the operation input when the caller
// leaves it unset.
type idempotencyToken_initializeOpCreateInferenceScheduler struct {
	// tokenProvider generates idempotency tokens; nil disables auto-fill.
	tokenProvider IdempotencyTokenProvider
}
// ID returns the unique identifier of this middleware within the stack.
func (*idempotencyToken_initializeOpCreateInferenceScheduler) ID() string {
	const middlewareID = "OperationIdempotencyTokenAutoFill"
	return middlewareID
}
// HandleInitialize fills in the input's ClientToken with a freshly generated
// idempotency token when the caller did not supply one. If no token provider
// is configured, the input passes through unchanged.
func (m *idempotencyToken_initializeOpCreateInferenceScheduler) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
	if m.tokenProvider == nil {
		return next.HandleInitialize(ctx, in)
	}

	input, ok := in.Parameters.(*CreateInferenceSchedulerInput)
	if !ok {
		// Fix: the generated message carried a stray trailing space inside
		// the type name; removed so logs and error matching are clean.
		return out, metadata, fmt.Errorf("expected middleware input to be of type *CreateInferenceSchedulerInput")
	}

	// Only auto-fill when the caller left the token unset; never overwrite an
	// explicitly provided token.
	if input.ClientToken == nil {
		t, err := m.tokenProvider.GetIdempotencyToken()
		if err != nil {
			return out, metadata, err
		}
		input.ClientToken = &t
	}
	return next.HandleInitialize(ctx, in)
}
// addIdempotencyToken_opCreateInferenceSchedulerMiddleware registers the
// idempotency-token auto-fill middleware at the front of the Initialize phase.
func addIdempotencyToken_opCreateInferenceSchedulerMiddleware(stack *middleware.Stack, cfg Options) error {
	mw := &idempotencyToken_initializeOpCreateInferenceScheduler{
		tokenProvider: cfg.IdempotencyTokenProvider,
	}
	return stack.Initialize.Add(mw, middleware.Before)
}
// newServiceMetadataMiddleware_opCreateInferenceScheduler builds the middleware
// that records region, service, and operation metadata for this call.
func newServiceMetadataMiddleware_opCreateInferenceScheduler(region string) *awsmiddleware.RegisterServiceMetadata {
	md := awsmiddleware.RegisterServiceMetadata{
		Region:        region,
		ServiceID:     ServiceID,
		OperationName: "CreateInferenceScheduler",
	}
	return &md
}