// Code generated by smithy-go-codegen DO NOT EDIT.
package rekognition
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/rekognition/types"
"github.com/awslabs/smithy-go/middleware"
smithyhttp "github.com/awslabs/smithy-go/transport/http"
)
// StartContentModeration begins asynchronous detection of unsafe content in a
// video stored in an Amazon S3 bucket. Use Video to name the bucket and the
// video file. The call returns a job identifier (JobId) used to retrieve the
// analysis results. When the analysis completes, Amazon Rekognition Video
// publishes a completion status to the Amazon SNS topic given in
// NotificationChannel; once that status is SUCCEEDED, call
// GetContentModeration with the JobId from this call to fetch the results.
// See Detecting Unsafe Content in the Amazon Rekognition Developer Guide for
// more information.
func (c *Client) StartContentModeration(ctx context.Context, params *StartContentModerationInput, optFns ...func(*Options)) (*StartContentModerationOutput, error) {
	// Never pass a nil input down the stack; substitute an empty request.
	if params == nil {
		params = &StartContentModerationInput{}
	}
	result, metadata, err := c.invokeOperation(ctx, "StartContentModeration", params, optFns, addOperationStartContentModerationMiddlewares)
	if err != nil {
		return nil, err
	}
	output := result.(*StartContentModerationOutput)
	output.ResultMetadata = metadata
	return output, nil
}
// StartContentModerationInput carries the parameters for the
// StartContentModeration operation.
type StartContentModerationInput struct {
// The video in which you want to detect unsafe content. The video must be stored
// in an Amazon S3 bucket.
//
// This member is required.
Video *types.Video
// Idempotent token used to identify the start request. If you use the same token
// with multiple StartContentModeration requests, the same JobId is returned. Use
// ClientRequestToken to prevent the same job from being accidentally started more
// than once.
ClientRequestToken *string
// An identifier you specify that's returned in the completion notification that's
// published to your Amazon Simple Notification Service topic. For example, you can
// use JobTag to group related jobs and identify them in the completion
// notification.
JobTag *string
// Specifies the minimum confidence that Amazon Rekognition must have in order to
// return a moderated content label. Confidence represents how certain Amazon
// Rekognition is that the moderated content is correctly identified. 0 is the
// lowest confidence. 100 is the highest confidence. Amazon Rekognition doesn't
// return any moderated content labels with a confidence level lower than this
// specified value. If you don't specify MinConfidence, GetContentModeration
// returns labels with confidence values greater than or equal to 50 percent.
MinConfidence *float32
// The Amazon SNS topic ARN that you want Amazon Rekognition Video to publish the
// completion status of the unsafe content analysis to.
NotificationChannel *types.NotificationChannel
}
// StartContentModerationOutput is the response from the StartContentModeration
// operation.
type StartContentModerationOutput struct {
// The identifier for the unsafe content analysis job. Use JobId to identify the
// job in a subsequent call to GetContentModeration.
JobId *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
}
// addOperationStartContentModerationMiddlewares assembles the full middleware
// stack for the StartContentModeration operation: serialization,
// deserialization, endpoint resolution, payload hashing, retries, SigV4
// signing, input validation, and response handling.
//
// Every registration's error is now checked — the original discarded the
// error results of twelve of these calls, so a middleware that failed to
// register would surface only as a broken request at call time instead of an
// error at client-setup time.
func addOperationStartContentModerationMiddlewares(stack *middleware.Stack, options Options) (err error) {
	err = stack.Serialize.Add(&awsAwsjson11_serializeOpStartContentModeration{}, middleware.After)
	if err != nil {
		return err
	}
	err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpStartContentModeration{}, middleware.After)
	if err != nil {
		return err
	}
	if err = awsmiddleware.AddRequestInvocationIDMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddContentLengthMiddleware(stack); err != nil {
		return err
	}
	if err = addResolveEndpointMiddleware(stack, options); err != nil {
		return err
	}
	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
		return err
	}
	if err = addRetryMiddlewares(stack, options); err != nil {
		return err
	}
	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddAttemptClockSkewMiddleware(stack); err != nil {
		return err
	}
	if err = addClientUserAgent(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = addOpStartContentModerationValidationMiddleware(stack); err != nil {
		return err
	}
	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartContentModeration(options.Region), middleware.Before); err != nil {
		return err
	}
	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
		return err
	}
	if err = addResponseErrorMiddleware(stack); err != nil {
		return err
	}
	return nil
}
// newServiceMetadataMiddleware_opStartContentModeration builds the service
// metadata registration for this operation, tagging requests with the target
// region, service identifiers, and operation name.
func newServiceMetadataMiddleware_opStartContentModeration(region string) awsmiddleware.RegisterServiceMetadata {
	metadata := awsmiddleware.RegisterServiceMetadata{
		ServiceID:     ServiceID,
		SigningName:   "rekognition",
		OperationName: "StartContentModeration",
	}
	metadata.Region = region
	return metadata
}