/
rekognition.jl
3981 lines (3697 loc) · 184 KB
/
rekognition.jl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# This file is auto-generated by AWSMetadata.jl
using AWS
using AWS.AWSServices: rekognition
using AWS.Compat
using AWS.UUIDs
"""
associate_faces(collection_id, face_ids, user_id)
associate_faces(collection_id, face_ids, user_id, params::Dict{String,<:Any})
Associates one or more faces with an existing UserID. Takes an array of FaceIds. Each
FaceId that are present in the FaceIds list is associated with the provided UserID. The
maximum number of total FaceIds per UserID is 100. The UserMatchThreshold parameter
specifies the minimum user match confidence required for the face to be associated with a
UserID that has at least one FaceID already associated. This ensures that the FaceIds are
associated with the right UserID. The value ranges from 0-100 and default value is 75. If
successful, an array of AssociatedFace objects containing the associated FaceIds is
returned. If a given face is already associated with the given UserID, it will be ignored
and will not be returned in the response. If a given face is already associated to a
different UserID, isn't found in the collection, doesn’t meet the UserMatchThreshold, or
there are already 100 faces associated with the UserID, it will be returned as part of an
array of UnsuccessfulFaceAssociations. The UserStatus reflects the status of an operation
which updates a UserID representation with a list of given faces. The UserStatus can be:
ACTIVE - All associations or disassociations of FaceID(s) for a UserID are complete.
CREATED - A UserID has been created, but has no FaceID(s) associated with it. UPDATING -
A UserID is being updated and there are current associations or disassociations of
FaceID(s) taking place.
# Arguments
- `collection_id`: The ID of an existing collection containing the UserID.
- `face_ids`: An array of FaceIDs to associate with the UserID.
- `user_id`: The ID for the existing UserID.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"ClientRequestToken"`: Idempotent token used to identify the request to AssociateFaces.
If you use the same token with multiple AssociateFaces requests, the same response is
returned. Use ClientRequestToken to prevent the same request from being processed more than
once.
- `"UserMatchThreshold"`: An optional value specifying the minimum confidence in the UserID
match to return. The default value is 75.
"""
function associate_faces(
    CollectionId, FaceIds, UserId; aws_config::AbstractAWSConfig=global_aws_config()
)
    # Assemble the required request fields; a fresh idempotency token is
    # generated here so retries of this exact call are deduplicated server-side.
    args = Dict{String,Any}(
        "CollectionId" => CollectionId,
        "FaceIds" => FaceIds,
        "UserId" => UserId,
        "ClientRequestToken" => string(uuid4()),
    )
    return rekognition(
        "AssociateFaces", args; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
    )
end
function associate_faces(
    CollectionId,
    FaceIds,
    UserId,
    params::AbstractDict{String};
    aws_config::AbstractAWSConfig=global_aws_config(),
)
    # Required fields (plus a generated idempotency token) are merged with the
    # caller-supplied optional params; on a key collision `_merge` combines the
    # values, letting explicit params override the generated defaults.
    required = Dict{String,Any}(
        "CollectionId" => CollectionId,
        "FaceIds" => FaceIds,
        "UserId" => UserId,
        "ClientRequestToken" => string(uuid4()),
    )
    payload = Dict{String,Any}(mergewith(_merge, required, params))
    return rekognition(
        "AssociateFaces", payload; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
    )
end
"""
compare_faces(source_image, target_image)
compare_faces(source_image, target_image, params::Dict{String,<:Any})
Compares a face in the source input image with each of the 100 largest faces detected in
the target input image. If the source image contains multiple faces, the service detects
the largest face and compares it with each face detected in the target image.
CompareFaces uses machine learning algorithms, which are probabilistic. A false negative is
an incorrect prediction that a face in the target image has a low similarity confidence
score when compared to the face in the source image. To reduce the probability of false
negatives, we recommend that you compare the target image against multiple source images.
If you plan to use CompareFaces to make a decision that impacts an individual's rights,
privacy, or access to services, we recommend that you pass the result to a human for review
and further validation before taking action. You pass the input and target images either
as base64-encoded image bytes or as references to images in an Amazon S3 bucket. If you use
the AWS CLI to call Amazon Rekognition operations, passing image bytes isn't supported. The
image must be formatted as a PNG or JPEG file. In response, the operation returns an array
of face matches ordered by similarity score in descending order. For each face match, the
response provides a bounding box of the face, facial landmarks, pose details (pitch, roll,
and yaw), quality (brightness and sharpness), and confidence value (indicating the level of
confidence that the bounding box contains a face). The response also provides a similarity
score, which indicates how closely the faces match. By default, only faces with a
similarity score of greater than or equal to 80% are returned in the response. You can
change this value by specifying the SimilarityThreshold parameter. CompareFaces also
returns an array of faces that don't match the source image. For each face, it returns a
bounding box, confidence value, landmarks, pose details, and quality. The response also
returns information about the face in the source image, including the bounding box of the
face and confidence value. The QualityFilter input parameter allows you to filter out
detected faces that don’t meet a required quality bar. The quality bar is based on a
variety of common use cases. Use QualityFilter to set the quality bar by specifying LOW,
MEDIUM, or HIGH. If you do not want to filter detected faces, specify NONE. The default
value is NONE. If the image doesn't contain Exif metadata, CompareFaces returns
orientation information for the source and target images. Use these values to display the
images with the correct image orientation. If no faces are detected in the source or target
images, CompareFaces returns an InvalidParameterException error. This is a stateless API
operation. That is, data returned by this operation doesn't persist. For an example, see
Comparing Faces in Images in the Amazon Rekognition Developer Guide. This operation
requires permissions to perform the rekognition:CompareFaces action.
# Arguments
- `source_image`: The input image as base64-encoded bytes or an S3 object. If you use the
AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not
supported. If you are using an AWS SDK to call Amazon Rekognition, you might not need to
base64-encode image bytes passed using the Bytes field. For more information, see Images in
the Amazon Rekognition developer guide.
- `target_image`: The target image as base64-encoded bytes or an S3 object. If you use the
AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not
supported. If you are using an AWS SDK to call Amazon Rekognition, you might not need to
base64-encode image bytes passed using the Bytes field. For more information, see Images in
the Amazon Rekognition developer guide.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"QualityFilter"`: A filter that specifies a quality bar for how much filtering is done
to identify faces. Filtered faces aren't compared. If you specify AUTO, Amazon Rekognition
chooses the quality bar. If you specify LOW, MEDIUM, or HIGH, filtering removes all faces
that don’t meet the chosen quality bar. The quality bar is based on a variety of common
use cases. Low-quality detections can occur for a number of reasons. Some examples are an
object that's misidentified as a face, a face that's too blurry, or a face with a pose
that's too extreme to use. If you specify NONE, no filtering is performed. The default
value is NONE. To use quality filtering, the collection you are using must be associated
with version 3 of the face model or higher.
- `"SimilarityThreshold"`: The minimum level of confidence in the face matches that a match
must meet to be included in the FaceMatches array.
"""
function compare_faces(
    SourceImage, TargetImage; aws_config::AbstractAWSConfig=global_aws_config()
)
    # Both images are required; optional settings (e.g. SimilarityThreshold)
    # go through the params method below.
    args = Dict{String,Any}("SourceImage" => SourceImage, "TargetImage" => TargetImage)
    return rekognition(
        "CompareFaces", args; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
    )
end
function compare_faces(
    SourceImage,
    TargetImage,
    params::AbstractDict{String};
    aws_config::AbstractAWSConfig=global_aws_config(),
)
    # Merge the two required image fields with the caller-supplied optional
    # params; `_merge` resolves any key collisions.
    required = Dict{String,Any}(
        "SourceImage" => SourceImage, "TargetImage" => TargetImage
    )
    payload = Dict{String,Any}(mergewith(_merge, required, params))
    return rekognition(
        "CompareFaces", payload; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
    )
end
"""
copy_project_version(destination_project_arn, output_config, source_project_arn, source_project_version_arn, version_name)
copy_project_version(destination_project_arn, output_config, source_project_arn, source_project_version_arn, version_name, params::Dict{String,<:Any})
Copies a version of an Amazon Rekognition Custom Labels model from a source project to a
destination project. The source and destination projects can be in different AWS accounts
but must be in the same AWS Region. You can't copy a model to another AWS service. To copy
a model version to a different AWS account, you need to create a resource-based policy
known as a project policy. You attach the project policy to the source project by calling
PutProjectPolicy. The project policy gives permission to copy the model version from a
trusting AWS account to a trusted account. For more information creating and attaching a
project policy, see Attaching a project policy (SDK) in the Amazon Rekognition Custom
Labels Developer Guide. If you are copying a model version to a project in the same AWS
account, you don't need to create a project policy. To copy a model, the destination
project, source project, and source model version must already exist. Copying a model
version takes a while to complete. To get the current status, call DescribeProjectVersions
and check the value of Status in the ProjectVersionDescription object. The copy operation
has finished when the value of Status is COPYING_COMPLETED. This operation requires
permissions to perform the rekognition:CopyProjectVersion action.
# Arguments
- `destination_project_arn`: The ARN of the project in the trusted AWS account that you
want to copy the model version to.
- `output_config`: The S3 bucket and folder location where the training output for the
source model version is placed.
- `source_project_arn`: The ARN of the source project in the trusting AWS account.
- `source_project_version_arn`: The ARN of the model version in the source project that you
want to copy to a destination project.
- `version_name`: A name for the version of the model that's copied to the destination
project.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"KmsKeyId"`: The identifier for your AWS Key Management Service key (AWS KMS key). You
can supply the Amazon Resource Name (ARN) of your KMS key, the ID of your KMS key, an alias
for your KMS key, or an alias ARN. The key is used to encrypt training results and manifest
files written to the output Amazon S3 bucket (OutputConfig). If you choose to use your own
KMS key, you need the following permissions on the KMS key. kms:CreateGrant
kms:DescribeKey kms:GenerateDataKey kms:Decrypt If you don't specify a value for
KmsKeyId, images copied into the service are encrypted using a key that AWS owns and
manages.
- `"Tags"`: The key-value tags to assign to the model version.
"""
function copy_project_version(
    DestinationProjectArn,
    OutputConfig,
    SourceProjectArn,
    SourceProjectVersionArn,
    VersionName;
    aws_config::AbstractAWSConfig=global_aws_config(),
)
    # Assemble the five required request fields for CopyProjectVersion.
    args = Dict{String,Any}(
        "DestinationProjectArn" => DestinationProjectArn,
        "OutputConfig" => OutputConfig,
        "SourceProjectArn" => SourceProjectArn,
        "SourceProjectVersionArn" => SourceProjectVersionArn,
        "VersionName" => VersionName,
    )
    return rekognition(
        "CopyProjectVersion", args; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
    )
end
function copy_project_version(
    DestinationProjectArn,
    OutputConfig,
    SourceProjectArn,
    SourceProjectVersionArn,
    VersionName,
    params::AbstractDict{String};
    aws_config::AbstractAWSConfig=global_aws_config(),
)
    # Required fields are merged with caller-supplied optional params
    # (e.g. KmsKeyId, Tags); `_merge` resolves any key collisions.
    required = Dict{String,Any}(
        "DestinationProjectArn" => DestinationProjectArn,
        "OutputConfig" => OutputConfig,
        "SourceProjectArn" => SourceProjectArn,
        "SourceProjectVersionArn" => SourceProjectVersionArn,
        "VersionName" => VersionName,
    )
    payload = Dict{String,Any}(mergewith(_merge, required, params))
    return rekognition(
        "CopyProjectVersion",
        payload;
        aws_config=aws_config,
        feature_set=SERVICE_FEATURE_SET,
    )
end
"""
create_collection(collection_id)
create_collection(collection_id, params::Dict{String,<:Any})
Creates a collection in an AWS Region. You can add faces to the collection using the
IndexFaces operation. For example, you might create collections, one for each of your
application users. A user can then index faces using the IndexFaces operation and persist
results in a specific collection. Then, a user can search the collection for faces in the
user-specific container. When you create a collection, it is associated with the latest
version of the face model version. Collection names are case-sensitive. This operation
requires permissions to perform the rekognition:CreateCollection action. If you want to tag
your collection, you also require permission to perform the rekognition:TagResource
operation.
# Arguments
- `collection_id`: ID for the collection that you are creating.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"Tags"`: A set of tags (key-value pairs) that you want to attach to the collection.
"""
function create_collection(CollectionId; aws_config::AbstractAWSConfig=global_aws_config())
    # Single required field; optional Tags go through the params method below.
    args = Dict{String,Any}("CollectionId" => CollectionId)
    return rekognition(
        "CreateCollection", args; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
    )
end
function create_collection(
    CollectionId,
    params::AbstractDict{String};
    aws_config::AbstractAWSConfig=global_aws_config(),
)
    # Merge the required CollectionId with caller-supplied optional params;
    # `_merge` resolves any key collisions.
    required = Dict{String,Any}("CollectionId" => CollectionId)
    payload = Dict{String,Any}(mergewith(_merge, required, params))
    return rekognition(
        "CreateCollection", payload; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
    )
end
"""
create_dataset(dataset_type, project_arn)
create_dataset(dataset_type, project_arn, params::Dict{String,<:Any})
Creates a new Amazon Rekognition Custom Labels dataset. You can create a dataset by using
an Amazon Sagemaker format manifest file or by copying an existing Amazon Rekognition
Custom Labels dataset. To create a training dataset for a project, specify train for the
value of DatasetType. To create the test dataset for a project, specify test for the value
of DatasetType. The response from CreateDataset is the Amazon Resource Name (ARN) for the
dataset. Creating a dataset takes a while to complete. Use DescribeDataset to check the
current status. The dataset created successfully if the value of Status is CREATE_COMPLETE.
To check if any non-terminal errors occurred, call ListDatasetEntries and check for the
presence of errors lists in the JSON Lines. Dataset creation fails if a terminal error
occurs (Status = CREATE_FAILED). Currently, you can't access the terminal error
information. For more information, see Creating dataset in the Amazon Rekognition Custom
Labels Developer Guide. This operation requires permissions to perform the
rekognition:CreateDataset action. If you want to copy an existing dataset, you also require
permission to perform the rekognition:ListDatasetEntries action.
# Arguments
- `dataset_type`: The type of the dataset. Specify train to create a training dataset.
Specify test to create a test dataset.
- `project_arn`: The ARN of the Amazon Rekognition Custom Labels project to which you want
to assign the dataset.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"DatasetSource"`: The source files for the dataset. You can specify the ARN of an
existing dataset or specify the Amazon S3 bucket location of an Amazon Sagemaker format
manifest file. If you don't specify datasetSource, an empty dataset is created. To add
labeled images to the dataset, You can use the console or call UpdateDatasetEntries.
"""
function create_dataset(
    DatasetType, ProjectArn; aws_config::AbstractAWSConfig=global_aws_config()
)
    # Two required fields; an optional DatasetSource goes through the params
    # method below.
    args = Dict{String,Any}("DatasetType" => DatasetType, "ProjectArn" => ProjectArn)
    return rekognition(
        "CreateDataset", args; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
    )
end
function create_dataset(
    DatasetType,
    ProjectArn,
    params::AbstractDict{String};
    aws_config::AbstractAWSConfig=global_aws_config(),
)
    # Merge required fields with caller-supplied optional params
    # (e.g. DatasetSource); `_merge` resolves any key collisions.
    required = Dict{String,Any}(
        "DatasetType" => DatasetType, "ProjectArn" => ProjectArn
    )
    payload = Dict{String,Any}(mergewith(_merge, required, params))
    return rekognition(
        "CreateDataset", payload; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
    )
end
"""
create_face_liveness_session()
create_face_liveness_session(params::Dict{String,<:Any})
This API operation initiates a Face Liveness session. It returns a SessionId, which you can
use to start streaming Face Liveness video and get the results for a Face Liveness session.
You can use the OutputConfig option in the Settings parameter to provide an Amazon S3
bucket location. The Amazon S3 bucket stores reference images and audit images. You can use
AuditImagesLimit to limit the number of audit images returned. This number is between 0 and
4. By default, it is set to 0. The limit is best effort and based on the duration of the
selfie-video.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"ClientRequestToken"`: Idempotent token is used to recognize the Face Liveness request.
If the same token is used with multiple CreateFaceLivenessSession requests, the same
session is returned. This token is employed to avoid unintentionally creating the same
session multiple times.
- `"KmsKeyId"`: The identifier for your AWS Key Management Service key (AWS KMS key). Used
to encrypt audit images and reference images.
- `"Settings"`: A session settings object. It contains settings for the operation to be
performed. For Face Liveness, it accepts OutputConfig and AuditImagesLimit.
"""
function create_face_liveness_session(; aws_config::AbstractAWSConfig=global_aws_config())
    # No required request fields; the service creates and returns a SessionId.
    return rekognition(
        "CreateFaceLivenessSession";
        aws_config=aws_config,
        feature_set=SERVICE_FEATURE_SET,
    )
end
function create_face_liveness_session(
    params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
)
    # Every field is optional, so the caller-supplied params are forwarded
    # unchanged — no merging with generated defaults is needed.
    return rekognition(
        "CreateFaceLivenessSession",
        params;
        aws_config=aws_config,
        feature_set=SERVICE_FEATURE_SET,
    )
end
"""
create_project(project_name)
create_project(project_name, params::Dict{String,<:Any})
Creates a new Amazon Rekognition Custom Labels project. A project is a group of resources
(datasets, model versions) that you use to create and manage Amazon Rekognition Custom
Labels models. This operation requires permissions to perform the
rekognition:CreateProject action.
# Arguments
- `project_name`: The name of the project to create.
"""
function create_project(ProjectName; aws_config::AbstractAWSConfig=global_aws_config())
    # Single required field for CreateProject.
    args = Dict{String,Any}("ProjectName" => ProjectName)
    return rekognition(
        "CreateProject", args; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
    )
end
function create_project(
    ProjectName,
    params::AbstractDict{String};
    aws_config::AbstractAWSConfig=global_aws_config(),
)
    # Merge the required ProjectName with caller-supplied optional params;
    # `_merge` resolves any key collisions.
    required = Dict{String,Any}("ProjectName" => ProjectName)
    payload = Dict{String,Any}(mergewith(_merge, required, params))
    return rekognition(
        "CreateProject", payload; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
    )
end
"""
create_project_version(output_config, project_arn, version_name)
create_project_version(output_config, project_arn, version_name, params::Dict{String,<:Any})
Creates a new version of a model and begins training. Models are managed as part of an
Amazon Rekognition Custom Labels project. The response from CreateProjectVersion is an
Amazon Resource Name (ARN) for the version of the model. Training uses the training and
test datasets associated with the project. For more information, see Creating training and
test dataset in the Amazon Rekognition Custom Labels Developer Guide. You can train a
model in a project that doesn't have associated datasets by specifying manifest files in
the TrainingData and TestingData fields. If you open the console after training a model
with manifest files, Amazon Rekognition Custom Labels creates the datasets for you using
the most recent manifest files. You can no longer train a model version for the project by
specifying manifest files. Instead of training with a project without associated datasets,
we recommend that you use the manifest files to create training and test datasets for the
project. Training takes a while to complete. You can get the current status by calling
DescribeProjectVersions. Training completed successfully if the value of the Status field
is TRAINING_COMPLETED. If training fails, see Debugging a failed model training in the
Amazon Rekognition Custom Labels developer guide. Once training has successfully
completed, call DescribeProjectVersions to get the training results and evaluate the model.
For more information, see Improving a trained Amazon Rekognition Custom Labels model in the
Amazon Rekognition Custom Labels developers guide. After evaluating the model, you start
the model by calling StartProjectVersion. This operation requires permissions to perform
the rekognition:CreateProjectVersion action.
# Arguments
- `output_config`: The Amazon S3 bucket location to store the results of training. The S3
bucket can be in any AWS account as long as the caller has s3:PutObject permissions on the
S3 bucket.
- `project_arn`: The ARN of the Amazon Rekognition Custom Labels project that manages the
model that you want to train.
- `version_name`: A name for the version of the model. This value must be unique.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"KmsKeyId"`: The identifier for your AWS Key Management Service key (AWS KMS key). You
can supply the Amazon Resource Name (ARN) of your KMS key, the ID of your KMS key, an alias
for your KMS key, or an alias ARN. The key is used to encrypt training and test images
copied into the service for model training. Your source images are unaffected. The key is
also used to encrypt training results and manifest files written to the output Amazon S3
bucket (OutputConfig). If you choose to use your own KMS key, you need the following
permissions on the KMS key. kms:CreateGrant kms:DescribeKey kms:GenerateDataKey
kms:Decrypt If you don't specify a value for KmsKeyId, images copied into the service are
encrypted using a key that AWS owns and manages.
- `"Tags"`: A set of tags (key-value pairs) that you want to attach to the model.
- `"TestingData"`: Specifies an external manifest that the service uses to test the model.
If you specify TestingData you must also specify TrainingData. The project must not have
any associated datasets.
- `"TrainingData"`: Specifies an external manifest that the services uses to train the
model. If you specify TrainingData you must also specify TestingData. The project must not
have any associated datasets.
"""
function create_project_version(
    OutputConfig, ProjectArn, VersionName; aws_config::AbstractAWSConfig=global_aws_config()
)
    # Assemble the three required request fields for CreateProjectVersion.
    args = Dict{String,Any}(
        "OutputConfig" => OutputConfig,
        "ProjectArn" => ProjectArn,
        "VersionName" => VersionName,
    )
    return rekognition(
        "CreateProjectVersion", args; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
    )
end
function create_project_version(
    OutputConfig,
    ProjectArn,
    VersionName,
    params::AbstractDict{String};
    aws_config::AbstractAWSConfig=global_aws_config(),
)
    # Merge required fields with caller-supplied optional params
    # (e.g. KmsKeyId, Tags, TestingData, TrainingData); `_merge` resolves
    # any key collisions.
    required = Dict{String,Any}(
        "OutputConfig" => OutputConfig,
        "ProjectArn" => ProjectArn,
        "VersionName" => VersionName,
    )
    payload = Dict{String,Any}(mergewith(_merge, required, params))
    return rekognition(
        "CreateProjectVersion",
        payload;
        aws_config=aws_config,
        feature_set=SERVICE_FEATURE_SET,
    )
end
"""
create_stream_processor(input, name, output, role_arn, settings)
create_stream_processor(input, name, output, role_arn, settings, params::Dict{String,<:Any})
Creates an Amazon Rekognition stream processor that you can use to detect and recognize
faces or to detect labels in a streaming video. Amazon Rekognition Video is a consumer of
live video from Amazon Kinesis Video Streams. There are two different settings for stream
processors in Amazon Rekognition: detecting faces and detecting labels. If you are
creating a stream processor for detecting faces, you provide as input a Kinesis video
stream (Input) and a Kinesis data stream (Output) stream for receiving the output. You must
use the FaceSearch option in Settings, specifying the collection that contains the faces
you want to recognize. After you have finished analyzing a streaming video, use
StopStreamProcessor to stop processing. If you are creating a stream processor to detect
labels, you provide as input a Kinesis video stream (Input), Amazon S3 bucket information
(Output), and an Amazon SNS topic ARN (NotificationChannel). You can also provide a KMS key
ID to encrypt the data sent to your Amazon S3 bucket. You specify what you want to detect
by using the ConnectedHome option in settings, and selecting one of the following: PERSON,
PET, PACKAGE, ALL You can also specify where in the frame you want Amazon Rekognition to
monitor with RegionsOfInterest. When you run the StartStreamProcessor operation on a label
detection stream processor, you input start and stop information to determine the length of
the processing time. Use Name to assign an identifier for the stream processor. You use
Name to manage the stream processor. For example, you can start processing the source video
by calling StartStreamProcessor with the Name field. This operation requires permissions
to perform the rekognition:CreateStreamProcessor action. If you want to tag your stream
processor, you also require permission to perform the rekognition:TagResource operation.
# Arguments
- `input`: Kinesis video stream stream that provides the source streaming video. If you are
using the AWS CLI, the parameter name is StreamProcessorInput. This is required for both
face search and label detection stream processors.
- `name`: An identifier you assign to the stream processor. You can use Name to manage the
stream processor. For example, you can get the current status of the stream processor by
calling DescribeStreamProcessor. Name is idempotent. This is required for both face search
and label detection stream processors.
- `output`: Kinesis data stream stream or Amazon S3 bucket location to which Amazon
Rekognition Video puts the analysis results. If you are using the AWS CLI, the parameter
name is StreamProcessorOutput. This must be a S3Destination of an Amazon S3 bucket that you
own for a label detection stream processor or a Kinesis data stream ARN for a face search
stream processor.
- `role_arn`: The Amazon Resource Number (ARN) of the IAM role that allows access to the
stream processor. The IAM role provides Rekognition read permissions for a Kinesis stream.
It also provides write permissions to an Amazon S3 bucket and Amazon Simple Notification
Service topic for a label detection stream processor. This is required for both face search
and label detection stream processors.
- `settings`: Input parameters used in a streaming video analyzed by a stream processor.
You can use FaceSearch to recognize faces in a streaming video, or you can use
ConnectedHome to detect labels.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"DataSharingPreference"`: Shows whether you are sharing data with Rekognition to
improve model performance. You can choose this option at the account level or on a
per-stream basis. Note that if you opt out at the account level this setting is ignored on
individual streams.
- `"KmsKeyId"`: The identifier for your AWS Key Management Service key (AWS KMS key). This
is an optional parameter for label detection stream processors and should not be used to
create a face search stream processor. You can supply the Amazon Resource Name (ARN) of
your KMS key, the ID of your KMS key, an alias for your KMS key, or an alias ARN. The key
is used to encrypt results and data published to your Amazon S3 bucket, which includes
image frames and hero images. Your source images are unaffected.
- `"NotificationChannel"`:
- `"RegionsOfInterest"`: Specifies locations in the frames where Amazon Rekognition checks
for objects or people. You can specify up to 10 regions of interest, and each region has
either a polygon or a bounding box. This is an optional parameter for label detection
stream processors and should not be used to create a face search stream processor.
- `"Tags"`: A set of tags (key-value pairs) that you want to attach to the stream
processor.
"""
function create_stream_processor(
    Input,
    Name,
    Output,
    RoleArn,
    Settings;
    aws_config::AbstractAWSConfig=global_aws_config(),
)
    # Required request fields for the CreateStreamProcessor API call.
    request = Dict{String,Any}(
        "Input" => Input,
        "Name" => Name,
        "Output" => Output,
        "RoleArn" => RoleArn,
        "Settings" => Settings,
    )
    return rekognition(
        "CreateStreamProcessor",
        request;
        aws_config=aws_config,
        feature_set=SERVICE_FEATURE_SET,
    )
end
function create_stream_processor(
    Input,
    Name,
    Output,
    RoleArn,
    Settings,
    params::AbstractDict{String};
    aws_config::AbstractAWSConfig=global_aws_config(),
)
    # Required fields; caller-supplied optional parameters are merged on top,
    # with `_merge` resolving any key collisions.
    required = Dict{String,Any}(
        "Input" => Input,
        "Name" => Name,
        "Output" => Output,
        "RoleArn" => RoleArn,
        "Settings" => Settings,
    )
    return rekognition(
        "CreateStreamProcessor",
        Dict{String,Any}(mergewith(_merge, required, params));
        aws_config=aws_config,
        feature_set=SERVICE_FEATURE_SET,
    )
end
"""
create_user(collection_id, user_id)
create_user(collection_id, user_id, params::Dict{String,<:Any})
Creates a new User within a collection specified by CollectionId. Takes UserId as a
parameter, which is a user provided ID which should be unique within the collection. The
provided UserId will alias the system generated UUID to make the UserId more user friendly.
Uses a ClientToken, an idempotency token that ensures a call to CreateUser completes only
once. If the value is not supplied, the AWS SDK generates an idempotency token for the
requests. This prevents retries after a network error results from making multiple
CreateUser calls.
# Arguments
- `collection_id`: The ID of an existing collection to which the new UserID needs to be
created.
- `user_id`: ID for the UserID to be created. This ID needs to be unique within the
collection.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"ClientRequestToken"`: Idempotent token used to identify the request to CreateUser. If
you use the same token with multiple CreateUser requests, the same response is returned.
Use ClientRequestToken to prevent the same request from being processed more than once.
"""
function create_user(
CollectionId, UserId; aws_config::AbstractAWSConfig=global_aws_config()
)
return rekognition(
"CreateUser",
Dict{String,Any}(
"CollectionId" => CollectionId,
"UserId" => UserId,
"ClientRequestToken" => string(uuid4()),
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function create_user(
CollectionId,
UserId,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return rekognition(
"CreateUser",
Dict{String,Any}(
mergewith(
_merge,
Dict{String,Any}(
"CollectionId" => CollectionId,
"UserId" => UserId,
"ClientRequestToken" => string(uuid4()),
),
params,
),
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
delete_collection(collection_id)
delete_collection(collection_id, params::Dict{String,<:Any})
Deletes the specified collection. Note that this operation removes all faces in the
collection. For an example, see Deleting a collection. This operation requires permissions
to perform the rekognition:DeleteCollection action.
# Arguments
- `collection_id`: ID of the collection to delete.
"""
function delete_collection(CollectionId; aws_config::AbstractAWSConfig=global_aws_config())
return rekognition(
"DeleteCollection",
Dict{String,Any}("CollectionId" => CollectionId);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function delete_collection(
CollectionId,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return rekognition(
"DeleteCollection",
Dict{String,Any}(
mergewith(_merge, Dict{String,Any}("CollectionId" => CollectionId), params)
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
delete_dataset(dataset_arn)
delete_dataset(dataset_arn, params::Dict{String,<:Any})
Deletes an existing Amazon Rekognition Custom Labels dataset. Deleting a dataset might take
while. Use DescribeDataset to check the current status. The dataset is still deleting if
the value of Status is DELETE_IN_PROGRESS. If you try to access the dataset after it is
deleted, you get a ResourceNotFoundException exception. You can't delete a dataset while
it is creating (Status = CREATE_IN_PROGRESS) or if the dataset is updating (Status =
UPDATE_IN_PROGRESS). This operation requires permissions to perform the
rekognition:DeleteDataset action.
# Arguments
- `dataset_arn`: The ARN of the Amazon Rekognition Custom Labels dataset that you want to
delete.
"""
function delete_dataset(DatasetArn; aws_config::AbstractAWSConfig=global_aws_config())
return rekognition(
"DeleteDataset",
Dict{String,Any}("DatasetArn" => DatasetArn);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function delete_dataset(
DatasetArn,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return rekognition(
"DeleteDataset",
Dict{String,Any}(
mergewith(_merge, Dict{String,Any}("DatasetArn" => DatasetArn), params)
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
delete_faces(collection_id, face_ids)
delete_faces(collection_id, face_ids, params::Dict{String,<:Any})
Deletes faces from a collection. You specify a collection ID and an array of face IDs to
remove from the collection. This operation requires permissions to perform the
rekognition:DeleteFaces action.
# Arguments
- `collection_id`: Collection from which to remove the specific faces.
- `face_ids`: An array of face IDs to delete.
"""
function delete_faces(
CollectionId, FaceIds; aws_config::AbstractAWSConfig=global_aws_config()
)
return rekognition(
"DeleteFaces",
Dict{String,Any}("CollectionId" => CollectionId, "FaceIds" => FaceIds);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function delete_faces(
CollectionId,
FaceIds,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return rekognition(
"DeleteFaces",
Dict{String,Any}(
mergewith(
_merge,
Dict{String,Any}("CollectionId" => CollectionId, "FaceIds" => FaceIds),
params,
),
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
delete_project(project_arn)
delete_project(project_arn, params::Dict{String,<:Any})
Deletes an Amazon Rekognition Custom Labels project. To delete a project you must first
delete all models associated with the project. To delete a model, see DeleteProjectVersion.
DeleteProject is an asynchronous operation. To check if the project is deleted, call
DescribeProjects. The project is deleted when the project no longer appears in the
response. Be aware that deleting a given project will also delete any ProjectPolicies
associated with that project. This operation requires permissions to perform the
rekognition:DeleteProject action.
# Arguments
- `project_arn`: The Amazon Resource Name (ARN) of the project that you want to delete.
"""
function delete_project(ProjectArn; aws_config::AbstractAWSConfig=global_aws_config())
return rekognition(
"DeleteProject",
Dict{String,Any}("ProjectArn" => ProjectArn);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function delete_project(
ProjectArn,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return rekognition(
"DeleteProject",
Dict{String,Any}(
mergewith(_merge, Dict{String,Any}("ProjectArn" => ProjectArn), params)
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
delete_project_policy(policy_name, project_arn)
delete_project_policy(policy_name, project_arn, params::Dict{String,<:Any})
Deletes an existing project policy. To get a list of project policies attached to a
project, call ListProjectPolicies. To attach a project policy to a project, call
PutProjectPolicy. This operation requires permissions to perform the
rekognition:DeleteProjectPolicy action.
# Arguments
- `policy_name`: The name of the policy that you want to delete.
- `project_arn`: The Amazon Resource Name (ARN) of the project that the project policy you
want to delete is attached to.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"PolicyRevisionId"`: The ID of the project policy revision that you want to delete.
"""
function delete_project_policy(
PolicyName, ProjectArn; aws_config::AbstractAWSConfig=global_aws_config()
)
return rekognition(
"DeleteProjectPolicy",
Dict{String,Any}("PolicyName" => PolicyName, "ProjectArn" => ProjectArn);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function delete_project_policy(
PolicyName,
ProjectArn,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return rekognition(
"DeleteProjectPolicy",
Dict{String,Any}(
mergewith(
_merge,
Dict{String,Any}("PolicyName" => PolicyName, "ProjectArn" => ProjectArn),
params,
),
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
delete_project_version(project_version_arn)
delete_project_version(project_version_arn, params::Dict{String,<:Any})
Deletes an Amazon Rekognition Custom Labels model. You can't delete a model if it is
running or if it is training. To check the status of a model, use the Status field returned
from DescribeProjectVersions. To stop a running model call StopProjectVersion. If the model
is training, wait until it finishes. This operation requires permissions to perform the
rekognition:DeleteProjectVersion action.
# Arguments
- `project_version_arn`: The Amazon Resource Name (ARN) of the model version that you want
to delete.
"""
function delete_project_version(
ProjectVersionArn; aws_config::AbstractAWSConfig=global_aws_config()
)
return rekognition(
"DeleteProjectVersion",
Dict{String,Any}("ProjectVersionArn" => ProjectVersionArn);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function delete_project_version(
ProjectVersionArn,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return rekognition(
"DeleteProjectVersion",
Dict{String,Any}(
mergewith(
_merge, Dict{String,Any}("ProjectVersionArn" => ProjectVersionArn), params
),
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
delete_stream_processor(name)
delete_stream_processor(name, params::Dict{String,<:Any})
Deletes the stream processor identified by Name. You assign the value for Name when you
create the stream processor with CreateStreamProcessor. You might not be able to use the
same name for a stream processor for a few seconds after calling DeleteStreamProcessor.
# Arguments
- `name`: The name of the stream processor you want to delete.
"""
function delete_stream_processor(Name; aws_config::AbstractAWSConfig=global_aws_config())
return rekognition(
"DeleteStreamProcessor",
Dict{String,Any}("Name" => Name);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function delete_stream_processor(
Name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
)
return rekognition(
"DeleteStreamProcessor",
Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Name" => Name), params));
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,