api_op_DetectLabels.go
// Code generated by smithy-go-codegen DO NOT EDIT.

package rekognition

import (
    "context"

    awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
    "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
    "github.com/aws/aws-sdk-go-v2/service/rekognition/types"
    "github.com/aws/smithy-go/middleware"
    smithyhttp "github.com/aws/smithy-go/transport/http"
)

// Detects instances of real-world entities within an image (JPEG or PNG)
// provided as input. This includes objects like flower, tree, and table;
// events like wedding, graduation, and birthday party; and concepts like
// landscape, evening, and nature. For an example, see Analyzing Images Stored
// in an Amazon S3 Bucket in the Amazon Rekognition Developer Guide.
//
// DetectLabels does not support the detection of activities. However, activity
// detection is supported for label detection in videos. For more information,
// see StartLabelDetection in the Amazon Rekognition Developer Guide.
//
// You pass the input image as base64-encoded image bytes or as a reference to
// an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
// Rekognition operations, passing image bytes is not supported. The image must
// be either a PNG or JPEG formatted file.
//
// For each object, scene, and concept the API returns one or more labels. Each
// label provides the object name, and the level of confidence that the image
// contains the object. For example, suppose the input image has a lighthouse,
// the sea, and a rock. The response includes all three labels, one for each
// object:
//
//    {Name: lighthouse, Confidence: 98.4629}
//    {Name: rock, Confidence: 79.2097}
//    {Name: sea, Confidence: 75.061}
//
// In the preceding example, the operation returns one label for each of the
// three objects. The operation can also return multiple labels for the same
// object in the image. For example, if the input image shows a flower (for
// example, a tulip), the operation might return the following three labels:
//
//    {Name: flower, Confidence: 99.0562}
//    {Name: plant, Confidence: 99.0562}
//    {Name: tulip, Confidence: 99.0562}
//
// In this example, the detection algorithm more precisely identifies the
// flower as a tulip.
//
// In response, the API returns an array of labels. In addition, the response
// also includes the orientation correction. Optionally, you can specify
// MinConfidence to control the confidence threshold for the labels returned.
// The default is 55%. You can also add the MaxLabels parameter to limit the
// number of labels returned.
//
// If the object detected is a person, the operation doesn't provide the same
// facial details that the DetectFaces operation provides.
//
// DetectLabels returns bounding boxes for instances of common object labels in
// an array of Instance objects. An Instance object contains a BoundingBox
// object, for the location of the label on the image. It also includes the
// confidence by which the bounding box was detected.
//
// DetectLabels also returns a hierarchical taxonomy of detected labels. For
// example, a detected car might be assigned the label car. The label car has
// two parent labels: Vehicle (its parent) and Transportation (its
// grandparent). The response returns the entire list of ancestors for a label.
// Each ancestor is a unique label in the response. In the previous example,
// Car, Vehicle, and Transportation are returned as unique labels in the
// response.
//
// This is a stateless API operation. That is, the operation does not persist
// any data.
//
// This operation requires permissions to perform the rekognition:DetectLabels
// action.
func (c *Client) DetectLabels(ctx context.Context, params *DetectLabelsInput, optFns ...func(*Options)) (*DetectLabelsOutput, error) {
    if params == nil {
        params = &DetectLabelsInput{}
    }

    result, metadata, err := c.invokeOperation(ctx, "DetectLabels", params, optFns, c.addOperationDetectLabelsMiddlewares)
    if err != nil {
        return nil, err
    }

    out := result.(*DetectLabelsOutput)
    out.ResultMetadata = metadata
    return out, nil
}
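
// Illustrative usage sketch (not part of the generated file): calling
// DetectLabels with an image stored in S3. The bucket and object key are
// hypothetical; client construction follows the usual aws-sdk-go-v2 pattern
// (config.LoadDefaultConfig + rekognition.NewFromConfig), and the snippet
// assumes imports of context, log, github.com/aws/aws-sdk-go-v2/aws, and
// github.com/aws/aws-sdk-go-v2/config alongside this package:
//
//    cfg, err := config.LoadDefaultConfig(context.TODO())
//    if err != nil {
//        log.Fatal(err)
//    }
//    client := rekognition.NewFromConfig(cfg)
//
//    out, err := client.DetectLabels(context.TODO(), &rekognition.DetectLabelsInput{
//        Image: &types.Image{
//            S3Object: &types.S3Object{
//                Bucket: aws.String("my-bucket"), // hypothetical bucket
//                Name:   aws.String("photo.jpg"), // hypothetical key
//            },
//        },
//        MaxLabels:     aws.Int32(10),   // return at most the 10 highest-confidence labels
//        MinConfidence: aws.Float32(75), // drop labels below 75% confidence
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
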
type DetectLabelsInput struct {

    // The input image as base64-encoded bytes or an S3 object. If you use the AWS
    // CLI to call Amazon Rekognition operations, passing image bytes is not
    // supported. Images stored in an S3 bucket do not need to be base64-encoded.
    // If you are using an AWS SDK to call Amazon Rekognition, you might not need
    // to base64-encode image bytes passed using the Bytes field. For more
    // information, see Images in the Amazon Rekognition developer guide.
    //
    // This member is required.
    Image *types.Image

    // Maximum number of labels you want the service to return in the response.
    // The service returns the specified number of highest confidence labels.
    MaxLabels *int32

    // Specifies the minimum confidence level for the labels to return. Amazon
    // Rekognition doesn't return any labels with confidence lower than this
    // specified value. If MinConfidence is not specified, the operation returns
    // labels with confidence values greater than or equal to 55 percent.
    MinConfidence *float32

    noSmithyDocumentSerde
}
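
// Illustrative sketch of the two ways to populate Image described above: raw
// bytes via the Bytes field (as the docs note, an AWS SDK does not require you
// to base64-encode them yourself) or a reference to an object in S3. The file
// path, bucket, and key are hypothetical, and os/log imports are assumed:
//
//    imgBytes, err := os.ReadFile("photo.jpg") // hypothetical local file
//    if err != nil {
//        log.Fatal(err)
//    }
//    fromBytes := &types.Image{Bytes: imgBytes}
//
//    fromS3 := &types.Image{
//        S3Object: &types.S3Object{
//            Bucket: aws.String("my-bucket"), // hypothetical bucket
//            Name:   aws.String("photo.jpg"), // hypothetical key
//        },
//    }
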
type DetectLabelsOutput struct {

    // Version number of the label detection model that was used to detect labels.
    LabelModelVersion *string

    // An array of labels for the real-world objects detected.
    Labels []types.Label

    // The value of OrientationCorrection is always null. If the input image is in
    // .jpeg format, it might contain exchangeable image file format (Exif)
    // metadata that includes the image's orientation. Amazon Rekognition uses
    // this orientation information to perform image correction. The bounding box
    // coordinates are translated to represent object locations after the
    // orientation information in the Exif metadata is used to correct the image
    // orientation. Images in .png format don't contain Exif metadata. Amazon
    // Rekognition doesn't perform image correction for images in .png format and
    // .jpeg images without orientation information in the image Exif metadata.
    // The bounding box coordinates aren't translated and represent the object
    // locations before the image is rotated.
    OrientationCorrection types.OrientationCorrection

    // Metadata pertaining to the operation's result.
    ResultMetadata middleware.Metadata

    noSmithyDocumentSerde
}
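
// Illustrative sketch of reading the response: each Label carries a name and
// confidence, per-instance bounding boxes, and its parent labels in the
// hierarchical taxonomy described in the operation docs. out is assumed to be
// a *DetectLabelsOutput, with fmt and the aws helper package imported:
//
//    for _, label := range out.Labels {
//        fmt.Printf("%s (%.2f%%)\n", aws.ToString(label.Name), aws.ToFloat32(label.Confidence))
//        for _, inst := range label.Instances {
//            if box := inst.BoundingBox; box != nil {
//                fmt.Printf("  box: left=%.3f top=%.3f w=%.3f h=%.3f\n",
//                    aws.ToFloat32(box.Left), aws.ToFloat32(box.Top),
//                    aws.ToFloat32(box.Width), aws.ToFloat32(box.Height))
//            }
//        }
//        for _, parent := range label.Parents {
//            fmt.Printf("  parent: %s\n", aws.ToString(parent.Name))
//        }
//    }
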
func (c *Client) addOperationDetectLabelsMiddlewares(stack *middleware.Stack, options Options) (err error) {
    err = stack.Serialize.Add(&awsAwsjson11_serializeOpDetectLabels{}, middleware.After)
    if err != nil {
        return err
    }
    err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDetectLabels{}, middleware.After)
    if err != nil {
        return err
    }
    if err = addSetLoggerMiddleware(stack, options); err != nil {
        return err
    }
    if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
        return err
    }
    if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
        return err
    }
    if err = addResolveEndpointMiddleware(stack, options); err != nil {
        return err
    }
    if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
        return err
    }
    if err = addRetryMiddlewares(stack, options); err != nil {
        return err
    }
    if err = addHTTPSignerV4Middleware(stack, options); err != nil {
        return err
    }
    if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
        return err
    }
    if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
        return err
    }
    if err = addClientUserAgent(stack); err != nil {
        return err
    }
    if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
        return err
    }
    if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
        return err
    }
    if err = addOpDetectLabelsValidationMiddleware(stack); err != nil {
        return err
    }
    if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDetectLabels(options.Region), middleware.Before); err != nil {
        return err
    }
    if err = addRequestIDRetrieverMiddleware(stack); err != nil {
        return err
    }
    if err = addResponseErrorMiddleware(stack); err != nil {
        return err
    }
    if err = addRequestResponseLogging(stack, options); err != nil {
        return err
    }
    return nil
}

func newServiceMetadataMiddleware_opDetectLabels(region string) *awsmiddleware.RegisterServiceMetadata {
    return &awsmiddleware.RegisterServiceMetadata{
        Region:        region,
        ServiceID:     ServiceID,
        SigningName:   "rekognition",
        OperationName: "DetectLabels",
    }
}