package face
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
	"context"
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
	"github.com/Azure/go-autorest/autorest/validation"
	"github.com/Azure/go-autorest/tracing"
	"io"
	"net/http"
)
// Client is an API client for face detection, verification, and identification.
type Client struct {
	BaseClient
}
// NewClient creates an instance of the Client.
func NewClient(endpoint string) Client {
	return Client{New(endpoint)}
}
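
// Usage sketch (not part of the generated client): constructing a client and
// attaching a Cognitive Services authorizer. The endpoint and key below are
// placeholders; autorest.NewCognitiveServicesAuthorizer comes from
// github.com/Azure/go-autorest/autorest.
//
//	client := face.NewClient("https://westus.api.cognitive.microsoft.com")
//	client.Authorizer = autorest.NewCognitiveServicesAuthorizer("<subscription-key>")
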
// DetectWithStream detects human faces in an image and returns face rectangles, optionally with faceIds, landmarks,
// and attributes.
// * No image will be stored. Only the extracted face feature will be stored on the server. The faceId is an identifier of
// the face feature and will be used in [Face -
// Identify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/identify), [Face -
// Verify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/verifyfacetoface), and [Face - Find
// Similar](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/findsimilar). The stored face feature(s)
// will expire and be deleted 24 hours after the original detection call.
// * Optional parameters include faceId, landmarks, and attributes. Attributes include age, gender, headPose, smile,
// facialHair, glasses, emotion, hair, makeup, occlusion, accessories, blur, exposure and noise. Some of the results
// returned for specific attributes may not be highly accurate.
// * JPEG, PNG, GIF (the first frame), and BMP formats are supported. The allowed image file size is from 1KB to 6MB.
// * Up to 100 faces can be returned for an image. Faces are ranked by face rectangle size from large to small.
// * For optimal results when querying [Face -
// Identify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/identify), [Face -
// Verify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/verifyfacetoface), and [Face - Find
// Similar](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/findsimilar) ('returnFaceId' is true),
// please use faces that are: frontal, clear, and with a minimum size of 200x200 pixels (100 pixels between eyes).
// * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with
// dimensions larger than 1920x1080 pixels will need a proportionally larger minimum face size.
// * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to
// [How to specify a detection
// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model)
// | Model | Recommended use-case(s) |
// | ---------- | -------- |
// | 'detection_01' | The default detection model for [Face -
// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl). Recommended for near-frontal
// face detection. Faces with exceptionally large head-pose angles, occluded faces, or wrong image orientation may not
// be detected. |
// | 'detection_02' | Detection model released in May 2019 with improved accuracy, especially on small, side-view and
// blurry faces. |
//
// * Different 'recognitionModel' values are provided. If follow-up operations like Verify, Identify or Find Similar
// are needed, specify the recognition model with the 'recognitionModel' parameter. The default value for
// 'recognitionModel' is 'recognition_01'; if the latest model is needed, explicitly specify the model you need in this
// parameter. Once specified, the detected faceIds will be associated with the specified recognition model. For more
// details, refer to [How to specify a recognition
// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model)
// | Model | Recommended use-case(s) |
// | ---------- | -------- |
// | 'recognition_01' | The default recognition model for [Face -
// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl). All faceIds created
// before March 2019 are bound to this recognition model. |
// | 'recognition_02' | Recognition model released in March 2019. |
// | 'recognition_03' | Recognition model released in May 2020. 'recognition_03' is recommended since its overall
// accuracy is improved compared with 'recognition_01' and 'recognition_02'. |
// Parameters:
// imageParameter - an image stream.
// returnFaceID - a value indicating whether the operation should return faceIds of detected faces.
// returnFaceLandmarks - a value indicating whether the operation should return landmarks of the detected
// faces.
// returnFaceAttributes - analyze and return one or more specified face attributes as a comma-separated
// string, for example "returnFaceAttributes=age,gender". Supported face attributes include age, gender, headPose,
// smile, facialHair, glasses and emotion. Note that each face attribute analysis has additional computational
// and time cost.
// recognitionModel - name of the recognition model. The recognition model is used when the face features are
// extracted and associated with detected faceIds, (Large)FaceList or (Large)PersonGroup. A recognition model name can
// be provided when performing Face - Detect or (Large)FaceList - Create or (Large)PersonGroup - Create. The
// default value is 'recognition_01'; if the latest model is needed, explicitly specify the model you need.
// returnRecognitionModel - a value indicating whether the operation should return 'recognitionModel' in
// response.
// detectionModel - name of the detection model. The detection model is used to detect faces in the submitted image. A
// detection model name can be provided when performing Face - Detect or (Large)FaceList - Add Face or
// (Large)PersonGroup - Add Face. The default value is 'detection_01'; if another model is needed, explicitly
// specify it.
func (client Client) DetectWithStream(ctx context.Context, imageParameter io.ReadCloser, returnFaceID *bool, returnFaceLandmarks *bool, returnFaceAttributes []AttributeType, recognitionModel RecognitionModel, returnRecognitionModel *bool, detectionModel DetectionModel) (result ListDetectedFace, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/Client.DetectWithStream")
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	req, err := client.DetectWithStreamPreparer(ctx, imageParameter, returnFaceID, returnFaceLandmarks, returnFaceAttributes, recognitionModel, returnRecognitionModel, detectionModel)
	if err != nil {
		err = autorest.NewErrorWithError(err, "face.Client", "DetectWithStream", nil, "Failure preparing request")
		return
	}

	resp, err := client.DetectWithStreamSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "face.Client", "DetectWithStream", resp, "Failure sending request")
		return
	}

	result, err = client.DetectWithStreamResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "face.Client", "DetectWithStream", resp, "Failure responding to request")
	}

	return
}

// DetectWithStreamPreparer prepares the DetectWithStream request.
func (client Client) DetectWithStreamPreparer(ctx context.Context, imageParameter io.ReadCloser, returnFaceID *bool, returnFaceLandmarks *bool, returnFaceAttributes []AttributeType, recognitionModel RecognitionModel, returnRecognitionModel *bool, detectionModel DetectionModel) (*http.Request, error) {
	urlParameters := map[string]interface{}{
		"Endpoint": client.Endpoint,
	}

	queryParameters := map[string]interface{}{}
	if returnFaceID != nil {
		queryParameters["returnFaceId"] = autorest.Encode("query", *returnFaceID)
	} else {
		queryParameters["returnFaceId"] = autorest.Encode("query", true)
	}
	if returnFaceLandmarks != nil {
		queryParameters["returnFaceLandmarks"] = autorest.Encode("query", *returnFaceLandmarks)
	} else {
		queryParameters["returnFaceLandmarks"] = autorest.Encode("query", false)
	}
	if returnFaceAttributes != nil && len(returnFaceAttributes) > 0 {
		queryParameters["returnFaceAttributes"] = autorest.Encode("query", returnFaceAttributes, ",")
	}
	if len(string(recognitionModel)) > 0 {
		queryParameters["recognitionModel"] = autorest.Encode("query", recognitionModel)
	} else {
		queryParameters["recognitionModel"] = autorest.Encode("query", "recognition_01")
	}
	if returnRecognitionModel != nil {
		queryParameters["returnRecognitionModel"] = autorest.Encode("query", *returnRecognitionModel)
	} else {
		queryParameters["returnRecognitionModel"] = autorest.Encode("query", false)
	}
	if len(string(detectionModel)) > 0 {
		queryParameters["detectionModel"] = autorest.Encode("query", detectionModel)
	} else {
		queryParameters["detectionModel"] = autorest.Encode("query", "detection_01")
	}

	preparer := autorest.CreatePreparer(
		autorest.AsContentType("application/octet-stream"),
		autorest.AsPost(),
		autorest.WithCustomBaseURL("{Endpoint}/face/v1.0", urlParameters),
		autorest.WithPath("/detect"),
		autorest.WithFile(imageParameter),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// DetectWithStreamSender sends the DetectWithStream request. The method will close the
// http.Response Body if it receives an error.
func (client Client) DetectWithStreamSender(req *http.Request) (*http.Response, error) {
	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}

// DetectWithStreamResponder handles the response to the DetectWithStream request. The method always
// closes the http.Response Body.
func (client Client) DetectWithStreamResponder(resp *http.Response) (result ListDetectedFace, err error) {
	err = autorest.Respond(
		resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result.Value),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
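
// Usage sketch for DetectWithStream (not part of the generated client): detect
// faces in a local image file. The file name is a placeholder, the caller is
// assumed to import "os" and "fmt", and the model constants are assumed from
// this package's RecognitionModel and DetectionModel enums.
//
//	img, err := os.Open("photo.jpg")
//	if err != nil {
//		// handle error
//	}
//	detected, err := client.DetectWithStream(ctx, img, nil, nil, nil, face.Recognition03, nil, face.Detection02)
//	if err == nil && detected.Value != nil {
//		for _, f := range *detected.Value {
//			fmt.Println(f.FaceID, f.FaceRectangle)
//		}
//	}
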
// DetectWithURL detects human faces in an image and returns face rectangles, optionally with faceIds, landmarks,
// and attributes.
// * No image will be stored. Only the extracted face feature will be stored on the server. The faceId is an identifier of
// the face feature and will be used in [Face -
// Identify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/identify), [Face -
// Verify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/verifyfacetoface), and [Face - Find
// Similar](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/findsimilar). The stored face feature(s)
// will expire and be deleted 24 hours after the original detection call.
// * Optional parameters include faceId, landmarks, and attributes. Attributes include age, gender, headPose, smile,
// facialHair, glasses, emotion, hair, makeup, occlusion, accessories, blur, exposure and noise. Some of the results
// returned for specific attributes may not be highly accurate.
// * JPEG, PNG, GIF (the first frame), and BMP formats are supported. The allowed image file size is from 1KB to 6MB.
// * Up to 100 faces can be returned for an image. Faces are ranked by face rectangle size from large to small.
// * For optimal results when querying [Face -
// Identify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/identify), [Face -
// Verify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/verifyfacetoface), and [Face - Find
// Similar](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/findsimilar) ('returnFaceId' is true),
// please use faces that are: frontal, clear, and with a minimum size of 200x200 pixels (100 pixels between eyes).
// * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with
// dimensions larger than 1920x1080 pixels will need a proportionally larger minimum face size.
// * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to
// [How to specify a detection
// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model)
// | Model | Recommended use-case(s) |
// | ---------- | -------- |
// | 'detection_01' | The default detection model for [Face -
// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl). Recommended for near-frontal
// face detection. Faces with exceptionally large head-pose angles, occluded faces, or wrong image orientation may not
// be detected. |
// | 'detection_02' | Detection model released in May 2019 with improved accuracy, especially on small, side-view and
// blurry faces. |
//
// * Different 'recognitionModel' values are provided. If follow-up operations like Verify, Identify or Find Similar
// are needed, specify the recognition model with the 'recognitionModel' parameter. The default value for
// 'recognitionModel' is 'recognition_01'; if the latest model is needed, explicitly specify the model you need in this
// parameter. Once specified, the detected faceIds will be associated with the specified recognition model. For more
// details, refer to [How to specify a recognition
// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model)
// | Model | Recommended use-case(s) |
// | ---------- | -------- |
// | 'recognition_01' | The default recognition model for [Face -
// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl). All faceIds created
// before March 2019 are bound to this recognition model. |
// | 'recognition_02' | Recognition model released in March 2019. |
// | 'recognition_03' | Recognition model released in May 2020. 'recognition_03' is recommended since its overall
// accuracy is improved compared with 'recognition_01' and 'recognition_02'. |
// Parameters:
// imageURL - a JSON document with a URL pointing to the image that is to be analyzed.
// returnFaceID - a value indicating whether the operation should return faceIds of detected faces.
// returnFaceLandmarks - a value indicating whether the operation should return landmarks of the detected
// faces.
// returnFaceAttributes - analyze and return one or more specified face attributes as a comma-separated
// string, for example "returnFaceAttributes=age,gender". Supported face attributes include age, gender, headPose,
// smile, facialHair, glasses and emotion. Note that each face attribute analysis has additional computational
// and time cost.
// recognitionModel - name of the recognition model. The recognition model is used when the face features are
// extracted and associated with detected faceIds, (Large)FaceList or (Large)PersonGroup. A recognition model name can
// be provided when performing Face - Detect or (Large)FaceList - Create or (Large)PersonGroup - Create. The
// default value is 'recognition_01'; if the latest model is needed, explicitly specify the model you need.
// returnRecognitionModel - a value indicating whether the operation should return 'recognitionModel' in
// response.
// detectionModel - name of the detection model. The detection model is used to detect faces in the submitted image. A
// detection model name can be provided when performing Face - Detect or (Large)FaceList - Add Face or
// (Large)PersonGroup - Add Face. The default value is 'detection_01'; if another model is needed, explicitly
// specify it.
func (client Client) DetectWithURL(ctx context.Context, imageURL ImageURL, returnFaceID *bool, returnFaceLandmarks *bool, returnFaceAttributes []AttributeType, recognitionModel RecognitionModel, returnRecognitionModel *bool, detectionModel DetectionModel) (result ListDetectedFace, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/Client.DetectWithURL")
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	if err := validation.Validate([]validation.Validation{
		{TargetValue: imageURL,
			Constraints: []validation.Constraint{{Target: "imageURL.URL", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
		return result, validation.NewError("face.Client", "DetectWithURL", err.Error())
	}

	req, err := client.DetectWithURLPreparer(ctx, imageURL, returnFaceID, returnFaceLandmarks, returnFaceAttributes, recognitionModel, returnRecognitionModel, detectionModel)
	if err != nil {
		err = autorest.NewErrorWithError(err, "face.Client", "DetectWithURL", nil, "Failure preparing request")
		return
	}

	resp, err := client.DetectWithURLSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "face.Client", "DetectWithURL", resp, "Failure sending request")
		return
	}

	result, err = client.DetectWithURLResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "face.Client", "DetectWithURL", resp, "Failure responding to request")
	}

	return
}

// DetectWithURLPreparer prepares the DetectWithURL request.
func (client Client) DetectWithURLPreparer(ctx context.Context, imageURL ImageURL, returnFaceID *bool, returnFaceLandmarks *bool, returnFaceAttributes []AttributeType, recognitionModel RecognitionModel, returnRecognitionModel *bool, detectionModel DetectionModel) (*http.Request, error) {
	urlParameters := map[string]interface{}{
		"Endpoint": client.Endpoint,
	}

	queryParameters := map[string]interface{}{}
	if returnFaceID != nil {
		queryParameters["returnFaceId"] = autorest.Encode("query", *returnFaceID)
	} else {
		queryParameters["returnFaceId"] = autorest.Encode("query", true)
	}
	if returnFaceLandmarks != nil {
		queryParameters["returnFaceLandmarks"] = autorest.Encode("query", *returnFaceLandmarks)
	} else {
		queryParameters["returnFaceLandmarks"] = autorest.Encode("query", false)
	}
	if returnFaceAttributes != nil && len(returnFaceAttributes) > 0 {
		queryParameters["returnFaceAttributes"] = autorest.Encode("query", returnFaceAttributes, ",")
	}
	if len(string(recognitionModel)) > 0 {
		queryParameters["recognitionModel"] = autorest.Encode("query", recognitionModel)
	} else {
		queryParameters["recognitionModel"] = autorest.Encode("query", "recognition_01")
	}
	if returnRecognitionModel != nil {
		queryParameters["returnRecognitionModel"] = autorest.Encode("query", *returnRecognitionModel)
	} else {
		queryParameters["returnRecognitionModel"] = autorest.Encode("query", false)
	}
	if len(string(detectionModel)) > 0 {
		queryParameters["detectionModel"] = autorest.Encode("query", detectionModel)
	} else {
		queryParameters["detectionModel"] = autorest.Encode("query", "detection_01")
	}

	preparer := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPost(),
		autorest.WithCustomBaseURL("{Endpoint}/face/v1.0", urlParameters),
		autorest.WithPath("/detect"),
		autorest.WithJSON(imageURL),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// DetectWithURLSender sends the DetectWithURL request. The method will close the
// http.Response Body if it receives an error.
func (client Client) DetectWithURLSender(req *http.Request) (*http.Response, error) {
	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}

// DetectWithURLResponder handles the response to the DetectWithURL request. The method always
// closes the http.Response Body.
func (client Client) DetectWithURLResponder(resp *http.Response) (result ListDetectedFace, err error) {
	err = autorest.Respond(
		resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result.Value),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
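
// Usage sketch for DetectWithURL (not part of the generated client): detect
// faces in a publicly reachable image. The URL is a placeholder and the model
// constants are assumed from this package's enum definitions.
//
//	imgURL := "https://example.com/photo.jpg"
//	detected, err := client.DetectWithURL(ctx, face.ImageURL{URL: &imgURL}, nil, nil, nil, face.Recognition03, nil, face.Detection01)
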
// FindSimilar searches for faces similar to a query face (given its faceId) in a faceId array, a face list or a
// large face list. A faceId array contains faces created by [Face -
// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl), which expire 24 hours
// after creation. A "faceListId" is created by [FaceList -
// Create](https://docs.microsoft.com/rest/api/cognitiveservices/face/facelist/create) and contains persistedFaceIds
// that will not expire. A "largeFaceListId" is created by [LargeFaceList -
// Create](https://docs.microsoft.com/rest/api/cognitiveservices/face/largefacelist/create) and also contains
// persistedFaceIds that will not expire. Depending on the input, the returned list of similar faces contains faceIds
// or persistedFaceIds ranked by similarity.
// Find Similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default mode; it tries to
// return faces of the same person by applying internal same-person thresholds. It is useful for finding a known
// person's other photos. Note that an empty list will be returned if no faces pass the internal thresholds.
// "matchFace" mode ignores same-person thresholds and returns ranked similar faces anyway, even when the similarity
// is low. It can be used in cases like searching for celebrity-looking faces.
// The 'recognitionModel' associated with the query face's faceId should be the same as the 'recognitionModel'
// used by the target faceId array, face list or large face list.
// Parameters:
// body - request body for Find Similar.
func (client Client) FindSimilar(ctx context.Context, body FindSimilarRequest) (result ListSimilarFace, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/Client.FindSimilar")
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	if err := validation.Validate([]validation.Validation{
		{TargetValue: body,
			Constraints: []validation.Constraint{{Target: "body.FaceID", Name: validation.Null, Rule: true, Chain: nil},
				{Target: "body.FaceListID", Name: validation.Null, Rule: false,
					Chain: []validation.Constraint{{Target: "body.FaceListID", Name: validation.MaxLength, Rule: 64, Chain: nil},
						{Target: "body.FaceListID", Name: validation.Pattern, Rule: `^[a-z0-9-_]+$`, Chain: nil},
					}},
				{Target: "body.LargeFaceListID", Name: validation.Null, Rule: false,
					Chain: []validation.Constraint{{Target: "body.LargeFaceListID", Name: validation.MaxLength, Rule: 64, Chain: nil},
						{Target: "body.LargeFaceListID", Name: validation.Pattern, Rule: `^[a-z0-9-_]+$`, Chain: nil},
					}},
				{Target: "body.FaceIds", Name: validation.Null, Rule: false,
					Chain: []validation.Constraint{{Target: "body.FaceIds", Name: validation.MaxItems, Rule: 1000, Chain: nil}}},
				{Target: "body.MaxNumOfCandidatesReturned", Name: validation.Null, Rule: false,
					Chain: []validation.Constraint{{Target: "body.MaxNumOfCandidatesReturned", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},
						{Target: "body.MaxNumOfCandidatesReturned", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil},
					}}}}}); err != nil {
		return result, validation.NewError("face.Client", "FindSimilar", err.Error())
	}

	req, err := client.FindSimilarPreparer(ctx, body)
	if err != nil {
		err = autorest.NewErrorWithError(err, "face.Client", "FindSimilar", nil, "Failure preparing request")
		return
	}

	resp, err := client.FindSimilarSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "face.Client", "FindSimilar", resp, "Failure sending request")
		return
	}

	result, err = client.FindSimilarResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "face.Client", "FindSimilar", resp, "Failure responding to request")
	}

	return
}

// FindSimilarPreparer prepares the FindSimilar request.
func (client Client) FindSimilarPreparer(ctx context.Context, body FindSimilarRequest) (*http.Request, error) {
	urlParameters := map[string]interface{}{
		"Endpoint": client.Endpoint,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPost(),
		autorest.WithCustomBaseURL("{Endpoint}/face/v1.0", urlParameters),
		autorest.WithPath("/findsimilars"),
		autorest.WithJSON(body))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// FindSimilarSender sends the FindSimilar request. The method will close the
// http.Response Body if it receives an error.
func (client Client) FindSimilarSender(req *http.Request) (*http.Response, error) {
	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}

// FindSimilarResponder handles the response to the FindSimilar request. The method always
// closes the http.Response Body.
func (client Client) FindSimilarResponder(resp *http.Response) (result ListSimilarFace, err error) {
	err = autorest.Respond(
		resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result.Value),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
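
// Usage sketch for FindSimilar (not part of the generated client): search a
// large face list for faces similar to a query faceId. The list ID is a
// placeholder and queryFaceID is assumed to be a faceId pointer returned by a
// previous Detect call using the same 'recognitionModel'.
//
//	listID := "my-large-face-list"
//	similar, err := client.FindSimilar(ctx, face.FindSimilarRequest{
//		FaceID:          queryFaceID,
//		LargeFaceListID: &listID,
//	})
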
// Group divides candidate faces into groups based on face similarity.
// * The output is one or more disjoint face groups and a messyGroup. A face group contains faces that look similar,
// often belonging to the same person. Face groups are ranked by group size, i.e. number of faces. Notice that faces
// belonging to the same person might be split into several groups in the result.
// * MessyGroup is a special face group containing faces for which no similar counterpart was found among the
// original faces. The messyGroup will not appear in the result if all faces found their counterparts.
// * The Group API needs at least 2 candidate faces and at most 1000. We suggest trying [Face -
// Verify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/verifyfacetoface) when you only have 2
// candidate faces.
// * The 'recognitionModel' associated with the query faces' faceIds should be the same.
// Parameters:
// body - request body for grouping.
func (client Client) Group(ctx context.Context, body GroupRequest) (result GroupResult, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/Client.Group")
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	if err := validation.Validate([]validation.Validation{
		{TargetValue: body,
			Constraints: []validation.Constraint{{Target: "body.FaceIds", Name: validation.Null, Rule: true,
				Chain: []validation.Constraint{{Target: "body.FaceIds", Name: validation.MaxItems, Rule: 1000, Chain: nil}}}}}}); err != nil {
		return result, validation.NewError("face.Client", "Group", err.Error())
	}

	req, err := client.GroupPreparer(ctx, body)
	if err != nil {
		err = autorest.NewErrorWithError(err, "face.Client", "Group", nil, "Failure preparing request")
		return
	}

	resp, err := client.GroupSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "face.Client", "Group", resp, "Failure sending request")
		return
	}

	result, err = client.GroupResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "face.Client", "Group", resp, "Failure responding to request")
	}

	return
}

// GroupPreparer prepares the Group request.
func (client Client) GroupPreparer(ctx context.Context, body GroupRequest) (*http.Request, error) {
	urlParameters := map[string]interface{}{
		"Endpoint": client.Endpoint,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPost(),
		autorest.WithCustomBaseURL("{Endpoint}/face/v1.0", urlParameters),
		autorest.WithPath("/group"),
		autorest.WithJSON(body))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// GroupSender sends the Group request. The method will close the
// http.Response Body if it receives an error.
func (client Client) GroupSender(req *http.Request) (*http.Response, error) {
	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}

// GroupResponder handles the response to the Group request. The method always
// closes the http.Response Body.
func (client Client) GroupResponder(resp *http.Response) (result GroupResult, err error) {
	err = autorest.Respond(
		resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
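
// Usage sketch for Group (not part of the generated client): cluster detected
// faceIds by similarity. faceIDs is a placeholder slice of faceIds collected
// from Detect calls; the result field names are assumed from this package's
// GroupResult model.
//
//	res, err := client.Group(ctx, face.GroupRequest{FaceIds: &faceIDs})
//	// res.Groups holds the disjoint face groups; res.MessyGroup holds the
//	// faces with no similar counterpart.
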
// Identify performs 1-to-many identification to find the closest matches of the specific query person face from a
// person group or large person group.
// For each face in the faceIds array, Face Identify will compute similarities between the query face and all the
// faces in the person group (given by personGroupId) or large person group (given by largePersonGroupId), and return
// candidate person(s) for that face ranked by similarity confidence. The person group/large person group should be
// trained to make it ready for identification. See more in [PersonGroup -
// Train](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroup/train) and [LargePersonGroup -
// Train](https://docs.microsoft.com/rest/api/cognitiveservices/face/largepersongroup/train).
//
// Remarks:
// * The algorithm allows more than one face to be identified independently in the same request, but no more than 10
// faces.
// * Each person in the person group/large person group could have more than one face, but no more than 248 faces.
// * Higher face image quality means better identification precision. Please consider high-quality faces: frontal,
// clear, with a face size of 200x200 pixels (100 pixels between eyes) or bigger.
// * Number of candidates returned is restricted by maxNumOfCandidatesReturned and confidenceThreshold. If no person is
// identified, the returned candidates will be an empty array.
// * Try [Face - Find Similar](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/findsimilar) when you
// need to find similar faces from a face list/large face list instead of a person group/large person group.
// * The 'recognitionModel' associated with the query faces' faceIds should be the same as the 'recognitionModel' used
// by the target person group or large person group.
// Parameters:
// body - request body for identify operation.
func (client Client) Identify(ctx context.Context, body IdentifyRequest) (result ListIdentifyResult, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/Client.Identify")
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	if err := validation.Validate([]validation.Validation{
		{TargetValue: body,
			Constraints: []validation.Constraint{{Target: "body.FaceIds", Name: validation.Null, Rule: true,
				Chain: []validation.Constraint{{Target: "body.FaceIds", Name: validation.MaxItems, Rule: 10, Chain: nil}}},
				{Target: "body.PersonGroupID", Name: validation.Null, Rule: false,
					Chain: []validation.Constraint{{Target: "body.PersonGroupID", Name: validation.MaxLength, Rule: 64, Chain: nil},
						{Target: "body.PersonGroupID", Name: validation.Pattern, Rule: `^[a-z0-9-_]+$`, Chain: nil},
					}},
				{Target: "body.LargePersonGroupID", Name: validation.Null, Rule: false,
					Chain: []validation.Constraint{{Target: "body.LargePersonGroupID", Name: validation.MaxLength, Rule: 64, Chain: nil},
						{Target: "body.LargePersonGroupID", Name: validation.Pattern, Rule: `^[a-z0-9-_]+$`, Chain: nil},
					}},
				{Target: "body.MaxNumOfCandidatesReturned", Name: validation.Null, Rule: false,
					Chain: []validation.Constraint{{Target: "body.MaxNumOfCandidatesReturned", Name: validation.InclusiveMaximum, Rule: int64(5), Chain: nil},
						{Target: "body.MaxNumOfCandidatesReturned", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil},
					}}}}}); err != nil {
		return result, validation.NewError("face.Client", "Identify", err.Error())
	}

	req, err := client.IdentifyPreparer(ctx, body)
	if err != nil {
		err = autorest.NewErrorWithError(err, "face.Client", "Identify", nil, "Failure preparing request")
		return
	}

	resp, err := client.IdentifySender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "face.Client", "Identify", resp, "Failure sending request")
		return
	}

	result, err = client.IdentifyResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "face.Client", "Identify", resp, "Failure responding to request")
	}

	return
}

// IdentifyPreparer prepares the Identify request.
func (client Client) IdentifyPreparer(ctx context.Context, body IdentifyRequest) (*http.Request, error) {
	urlParameters := map[string]interface{}{
		"Endpoint": client.Endpoint,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPost(),
		autorest.WithCustomBaseURL("{Endpoint}/face/v1.0", urlParameters),
		autorest.WithPath("/identify"),
		autorest.WithJSON(body))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// IdentifySender sends the Identify request. The method will close the
// http.Response Body if it receives an error.
func (client Client) IdentifySender(req *http.Request) (*http.Response, error) {
	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}

// IdentifyResponder handles the response to the Identify request. The method always
// closes the http.Response Body.
func (client Client) IdentifyResponder(resp *http.Response) (result ListIdentifyResult, err error) {
	err = autorest.Respond(
		resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result.Value),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
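
// Usage sketch for Identify (not part of the generated client): match detected
// faces against a trained person group. The group ID is a placeholder, faceIDs
// is a placeholder slice of faceIds, and the group must already be trained.
//
//	groupID := "employees"
//	res, err := client.Identify(ctx, face.IdentifyRequest{
//		FaceIds:       &faceIDs,
//		PersonGroupID: &groupID,
//	})
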
// VerifyFaceToFace verifies whether two faces belong to the same person or whether one face belongs to a person.
//
// Remarks:
// * Higher face image quality means better identification precision. Please consider high-quality faces: frontal,
// clear, with a face size of 200x200 pixels (100 pixels between eyes) or bigger.
// * For scenarios that are sensitive to accuracy, please make your own judgment.
// * The 'recognitionModel' associated with the query faces' faceIds should be the same as the 'recognitionModel' used
// by the target face, person group or large person group.
// Parameters:
// body - request body for face to face verification.
func (client Client) VerifyFaceToFace(ctx context.Context, body VerifyFaceToFaceRequest) (result VerifyResult, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/Client.VerifyFaceToFace")
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	if err := validation.Validate([]validation.Validation{
		{TargetValue: body,
			Constraints: []validation.Constraint{{Target: "body.FaceID1", Name: validation.Null, Rule: true, Chain: nil},
				{Target: "body.FaceID2", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
		return result, validation.NewError("face.Client", "VerifyFaceToFace", err.Error())
	}

	req, err := client.VerifyFaceToFacePreparer(ctx, body)
	if err != nil {
		err = autorest.NewErrorWithError(err, "face.Client", "VerifyFaceToFace", nil, "Failure preparing request")
		return
	}

	resp, err := client.VerifyFaceToFaceSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "face.Client", "VerifyFaceToFace", resp, "Failure sending request")
		return
	}

	result, err = client.VerifyFaceToFaceResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "face.Client", "VerifyFaceToFace", resp, "Failure responding to request")
	}

	return
}

// VerifyFaceToFacePreparer prepares the VerifyFaceToFace request.
func (client Client) VerifyFaceToFacePreparer(ctx context.Context, body VerifyFaceToFaceRequest) (*http.Request, error) {
	urlParameters := map[string]interface{}{
		"Endpoint": client.Endpoint,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPost(),
		autorest.WithCustomBaseURL("{Endpoint}/face/v1.0", urlParameters),
		autorest.WithPath("/verify"),
		autorest.WithJSON(body))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// VerifyFaceToFaceSender sends the VerifyFaceToFace request. The method will close the
// http.Response Body if it receives an error.
func (client Client) VerifyFaceToFaceSender(req *http.Request) (*http.Response, error) {
	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}

// VerifyFaceToFaceResponder handles the response to the VerifyFaceToFace request. The method always
// closes the http.Response Body.
func (client Client) VerifyFaceToFaceResponder(resp *http.Response) (result VerifyResult, err error) {
	err = autorest.Respond(
		resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
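
// Usage sketch for VerifyFaceToFace (not part of the generated client):
// compare two faceIds from earlier Detect calls. faceID1 and faceID2 are
// placeholders.
//
//	res, err := client.VerifyFaceToFace(ctx, face.VerifyFaceToFaceRequest{
//		FaceID1: faceID1,
//		FaceID2: faceID2,
//	})
//	// res.IsIdentical and res.Confidence report the verification outcome.
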
// VerifyFaceToPerson verifies whether a face belongs to a person by comparing a faceId with a personId.
// Parameters:
// body - request body for face to person verification.
func (client Client) VerifyFaceToPerson(ctx context.Context, body VerifyFaceToPersonRequest) (result VerifyResult, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/Client.VerifyFaceToPerson")
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	if err := validation.Validate([]validation.Validation{
		{TargetValue: body,
			Constraints: []validation.Constraint{{Target: "body.FaceID", Name: validation.Null, Rule: true, Chain: nil},
				{Target: "body.PersonGroupID", Name: validation.Null, Rule: false,
					Chain: []validation.Constraint{{Target: "body.PersonGroupID", Name: validation.MaxLength, Rule: 64, Chain: nil},
						{Target: "body.PersonGroupID", Name: validation.Pattern, Rule: `^[a-z0-9-_]+$`, Chain: nil},
					}},
				{Target: "body.LargePersonGroupID", Name: validation.Null, Rule: false,
					Chain: []validation.Constraint{{Target: "body.LargePersonGroupID", Name: validation.MaxLength, Rule: 64, Chain: nil},
						{Target: "body.LargePersonGroupID", Name: validation.Pattern, Rule: `^[a-z0-9-_]+$`, Chain: nil},
					}},
				{Target: "body.PersonID", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
		return result, validation.NewError("face.Client", "VerifyFaceToPerson", err.Error())
	}

	req, err := client.VerifyFaceToPersonPreparer(ctx, body)
	if err != nil {
		err = autorest.NewErrorWithError(err, "face.Client", "VerifyFaceToPerson", nil, "Failure preparing request")
		return
	}

	resp, err := client.VerifyFaceToPersonSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "face.Client", "VerifyFaceToPerson", resp, "Failure sending request")
		return
	}

	result, err = client.VerifyFaceToPersonResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "face.Client", "VerifyFaceToPerson", resp, "Failure responding to request")
	}

	return
}

// VerifyFaceToPersonPreparer prepares the VerifyFaceToPerson request.
func (client Client) VerifyFaceToPersonPreparer(ctx context.Context, body VerifyFaceToPersonRequest) (*http.Request, error) {
	urlParameters := map[string]interface{}{
		"Endpoint": client.Endpoint,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPost(),
		autorest.WithCustomBaseURL("{Endpoint}/face/v1.0", urlParameters),
		autorest.WithPath("/verify"),
		autorest.WithJSON(body))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// VerifyFaceToPersonSender sends the VerifyFaceToPerson request. The method will close the
// http.Response Body if it receives an error.
func (client Client) VerifyFaceToPersonSender(req *http.Request) (*http.Response, error) {
	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}

// VerifyFaceToPersonResponder handles the response to the VerifyFaceToPerson request. The method always
// closes the http.Response Body.
func (client Client) VerifyFaceToPersonResponder(resp *http.Response) (result VerifyResult, err error) {
	err = autorest.Respond(
		resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
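
// Usage sketch for VerifyFaceToPerson (not part of the generated client):
// check whether a detected face belongs to a known person in a person group.
// groupID is a placeholder; faceID and personID are assumed to be pointer IDs
// obtained from earlier Detect and PersonGroup Person calls.
//
//	groupID := "employees"
//	res, err := client.VerifyFaceToPerson(ctx, face.VerifyFaceToPersonRequest{
//		FaceID:        faceID,
//		PersonGroupID: &groupID,
//		PersonID:      personID,
//	})
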