Merge branch 'release-1.12.6'
* release-1.12.6:
  Bumping version to 1.12.6
  Update to latest models
  Add a changelog for #1557 fix
  Ensure dict bodies are utf-8 encoded before being url encoded
awstools committed Sep 18, 2018
2 parents 3ff30de + 08faf03 commit df53d9a
Showing 8 changed files with 148 additions and 4 deletions.
17 changes: 17 additions & 0 deletions .changes/1.12.6.json
@@ -0,0 +1,17 @@
[
{
"category": "Serialization",
"description": "Fixes `#1557 <https://github.com/boto/botocore/issues/1557>`__. Fixed a regression in serialization where request bodies would be improperly encoded.",
"type": "bugfix"
},
{
"category": "``es``",
"description": "Update es client to latest version",
"type": "api-change"
},
{
"category": "``rekognition``",
"description": "Update rekognition client to latest version",
"type": "api-change"
}
]
8 changes: 8 additions & 0 deletions CHANGELOG.rst
@@ -2,6 +2,14 @@
CHANGELOG
=========

1.12.6
======

* bugfix:Serialization: Fixes `#1557 <https://github.com/boto/botocore/issues/1557>`__. Fixed a regression in serialization where request bodies would be improperly encoded.
* api-change:``es``: Update es client to latest version
* api-change:``rekognition``: Update rekognition client to latest version


1.12.5
======

2 changes: 1 addition & 1 deletion botocore/__init__.py
@@ -16,7 +16,7 @@
import re
import logging

__version__ = '1.12.5'
__version__ = '1.12.6'


class NullHandler(logging.Handler):
11 changes: 10 additions & 1 deletion botocore/awsrequest.py
@@ -464,6 +464,15 @@ def prepare_headers(self, headers):
headers = headers or {}
self.headers = HeadersDict(headers.items())

def _to_utf8(self, item):
key, value = item
if isinstance(key, six.text_type):
key = key.encode('utf-8')
if isinstance(value, six.text_type):
value = value.encode('utf-8')

return key, value

def prepare_body(self, data):
"""Prepares the given HTTP body data."""
self.body = data
@@ -478,7 +487,7 @@ def prepare_body(self, data):
self.headers['Content-Length'] = '0'

if isinstance(self.body, dict):
params = list(self.body.items())
params = [self._to_utf8(item) for item in self.body.items()]
self.body = urlencode(params, doseq=True)

try:
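
The _to_utf8 helper above is the heart of the #1557 fix: dict bodies are
url-encoded, and text keys and values are now encoded to UTF-8 bytes first, so
multibyte characters serialize as percent-escaped UTF-8 instead of mis-encoding
(or raising) under Python 2's urlencode. A minimal standalone sketch of the
same idea, using only the Python 3 standard library in place of botocore's
six-based compatibility shim:

    from urllib.parse import urlencode

    def to_utf8(item):
        # Mirrors the patched _to_utf8: encode text keys/values to UTF-8 bytes.
        key, value = item
        if isinstance(key, str):
            key = key.encode('utf-8')
        if isinstance(value, str):
            value = value.encode('utf-8')
        return key, value

    body = {'Text': '\u30c6\u30b9\u30c8 string'}
    params = [to_utf8(item) for item in body.items()]
    print(urlencode(params, doseq=True))
    # Text=%E3%83%86%E3%82%B9%E3%83%88+string (matches the new unit test below)
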
40 changes: 40 additions & 0 deletions botocore/data/es/2015-01-01/service-2.json
@@ -545,6 +545,10 @@
"shape":"EncryptionAtRestOptions",
"documentation":"<p>Specifies the Encryption At Rest Options.</p>"
},
"NodeToNodeEncryptionOptions":{
"shape":"NodeToNodeEncryptionOptions",
"documentation":"<p>Specifies the NodeToNodeEncryptionOptions.</p>"
},
"AdvancedOptions":{
"shape":"AdvancedOptions",
"documentation":"<p> Option to allow references to indices in an HTTP request body. Must be <code>false</code> when configuring access to individual sub-resources. By default, the value is <code>true</code>. See <a href=\"http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options\" target=\"_blank\">Configuration Advanced Options</a> for more information.</p>"
@@ -978,6 +982,10 @@
"shape":"EncryptionAtRestOptionsStatus",
"documentation":"<p>Specifies the <code>EncryptionAtRestOptions</code> for the Elasticsearch domain.</p>"
},
"NodeToNodeEncryptionOptions":{
"shape":"NodeToNodeEncryptionOptionsStatus",
"documentation":"<p>Specifies the <code>NodeToNodeEncryptionOptions</code> for the Elasticsearch domain.</p>"
},
"AdvancedOptions":{
"shape":"AdvancedOptionsStatus",
"documentation":"<p>Specifies the <code>AdvancedOptions</code> for the domain. See <a href=\"http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options\" target=\"_blank\">Configuring Advanced Options</a> for more information.</p>"
@@ -1063,6 +1071,10 @@
"shape":"EncryptionAtRestOptions",
"documentation":"<p> Specifies the status of the <code>EncryptionAtRestOptions</code>.</p>"
},
"NodeToNodeEncryptionOptions":{
"shape":"NodeToNodeEncryptionOptions",
"documentation":"<p>Specifies the status of the <code>NodeToNodeEncryptionOptions</code>.</p>"
},
"AdvancedOptions":{
"shape":"AdvancedOptions",
"documentation":"<p>Specifies the status of the <code>AdvancedOptions</code></p>"
@@ -1486,6 +1498,34 @@
"type":"string",
"documentation":"<p> Paginated APIs accepts NextToken input to returns next page results and provides a NextToken output in the response which can be used by the client to retrieve more results. </p>"
},
"NodeToNodeEncryptionOptions":{
"type":"structure",
"members":{
"Enabled":{
"shape":"Boolean",
"documentation":"<p>Specify true to enable node-to-node encryption.</p>"
}
},
"documentation":"<p>Specifies the node-to-node encryption options.</p>"
},
"NodeToNodeEncryptionOptionsStatus":{
"type":"structure",
"required":[
"Options",
"Status"
],
"members":{
"Options":{
"shape":"NodeToNodeEncryptionOptions",
"documentation":"<p>Specifies the node-to-node encryption options for the specified Elasticsearch domain.</p>"
},
"Status":{
"shape":"OptionStatus",
"documentation":"<p>Specifies the status of the node-to-node encryption options for the specified Elasticsearch domain.</p>"
}
},
"documentation":"<p>Status of the node-to-node encryption options for the specified Elasticsearch domain.</p>"
},
"OptionState":{
"type":"string",
"documentation":"<p>The state of a requested change. One of the following:</p> <ul> <li>Processing: The request change is still in-process.</li> <li>Active: The request change is processed and deployed to the Elasticsearch domain.</li> </ul>",
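
For callers, the practical effect of the new es shapes is that node-to-node
encryption can be requested when a domain is created and read back from the
domain status. A sketch of such a call, assuming boto3 running on this
botocore release, configured AWS credentials, and a hypothetical domain name:

    import boto3

    es = boto3.client('es')
    # NodeToNodeEncryptionOptions is the structure added in this model update;
    # 'my-domain' is a hypothetical name and the call assumes valid credentials.
    response = es.create_elasticsearch_domain(
        DomainName='my-domain',
        ElasticsearchVersion='6.0',
        NodeToNodeEncryptionOptions={'Enabled': True},
    )
    # The returned domain status echoes the option back (ElasticsearchDomainStatus).
    print(response['DomainStatus']['NodeToNodeEncryptionOptions'])
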
58 changes: 57 additions & 1 deletion botocore/data/rekognition/2016-06-27/service-2.json
@@ -391,7 +391,7 @@
{"shape":"ResourceNotFoundException"},
{"shape":"InvalidImageFormatException"}
],
"documentation":"<p>Detects faces in the input image and adds them to the specified collection. </p> <p>Amazon Rekognition does not save the actual faces detected. Instead, the underlying detection algorithm first detects the faces in the input image, and for each face extracts facial features into a feature vector, and stores it in the back-end database. Amazon Rekognition uses feature vectors when performing face match and search operations using the and operations.</p> <p>To get the number of faces in a collection, call . </p> <p>If you are using version 1.0 of the face detection model, <code>IndexFaces</code> indexes the 15 largest faces in the input image. Later versions of the face detection model index the 100 largest faces in the input image. To determine which version of the model you are using, call and supply the collection ID. You also get the model version from the value of <code>FaceModelVersion</code> in the response from <code>IndexFaces</code>. </p> <p>For more information, see Model Versioning in the Amazon Rekognition Developer Guide.</p> <p>If you provide the optional <code>ExternalImageID</code> for the input image you provided, Amazon Rekognition associates this ID with all faces that it detects. When you call the operation, the response returns the external ID. You can use this external image ID to create a client-side index to associate the faces with each image. You can then use the index to find all faces in an image. </p> <p>In response, the operation returns an array of metadata for all detected faces. This includes, the bounding box of the detected face, confidence value (indicating the bounding box contains a face), a face ID assigned by the service for each face that is detected and stored, and an image ID assigned by the service for the input image. If you request all facial attributes (using the <code>detectionAttributes</code> parameter, Amazon Rekognition returns detailed facial attributes such as facial landmarks (for example, location of eye and mouth) and other facial attributes such gender. If you provide the same image, specify the same collection, and use the same external ID in the <code>IndexFaces</code> operation, Amazon Rekognition doesn't save duplicate face metadata.</p> <p>For more information, see Adding Faces to a Collection in the Amazon Rekognition Developer Guide.</p> <p>The input image is passed either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. </p> <p>This operation requires permissions to perform the <code>rekognition:IndexFaces</code> action.</p>"
"documentation":"<p>Detects faces in the input image and adds them to the specified collection. </p> <p>Amazon Rekognition does not save the actual faces detected. Instead, the underlying detection algorithm first detects the faces in the input image, and for each face extracts facial features into a feature vector, and stores it in the back-end database. Amazon Rekognition uses feature vectors when performing face match and search operations using the and operations.</p> <p>To get the number of faces in a collection, call . </p> <p>If you are using version 1.0 of the face detection model, <code>IndexFaces</code> indexes the 15 largest faces in the input image. Later versions of the face detection model index the 100 largest faces in the input image. To determine which version of the model you are using, call and supply the collection ID. You also get the model version from the value of <code>FaceModelVersion</code> in the response from <code>IndexFaces</code>. </p> <p>For more information, see Model Versioning in the Amazon Rekognition Developer Guide.</p> <p>If you provide the optional <code>ExternalImageID</code> for the input image you provided, Amazon Rekognition associates this ID with all faces that it detects. When you call the operation, the response returns the external ID. You can use this external image ID to create a client-side index to associate the faces with each image. You can then use the index to find all faces in an image.</p> <p>You can specify the maximum number of faces to index with the <code>MaxFaces</code> input parameter. This is useful when you want to index the largest faces in an image, and you don't want to index other faces detected in the image.</p> <p>The <code>QualityFilter</code> input parameter allows you to filter out detected faces that don’t meet the required quality bar chosen by Amazon Rekognition. The quality bar is based on a variety of common use cases.</p> <p>In response, the operation returns an array of metadata for all detected faces, <code>FaceRecords</code>. This includes: </p> <ul> <li> <p>The bounding box, <code>BoundingBox</code>, of the detected face. </p> </li> <li> <p>A confidence value, <code>Confidence</code>, indicating the confidence that the bounding box contains a face.</p> </li> <li> <p>A face ID, <code>faceId</code>, assigned by the service for each face that is detected and stored.</p> </li> <li> <p>An image ID, <code>ImageId</code>, assigned by the service for the input image.</p> </li> </ul> <p>If you request all facial attributes (using the <code>detectionAttributes</code> parameter), Amazon Rekognition returns detailed facial attributes such as facial landmarks (for example, location of eye and mouth) and other facial attributes such gender. If you provide the same image, specify the same collection, and use the same external ID in the <code>IndexFaces</code> operation, Amazon Rekognition doesn't save duplicate face metadata.</p> <p>Information about faces detected in an image, but not indexed, is returned in an array of objects, <code>UnindexedFaces</code>. 
Faces are not indexed for reasons such as:</p> <ul> <li> <p>The face is too blurry.</p> </li> <li> <p>The image is too dark.</p> </li> <li> <p>The face has an extreme pose.</p> </li> <li> <p>The face is too small.</p> </li> <li> <p>The number of faces detected exceeds the value of the <code>MaxFaces</code> request parameter.</p> </li> </ul> <p/> <p>For more information, see Adding Faces to a Collection in the Amazon Rekognition Developer Guide.</p> <p>The input image is passed either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. </p> <p>This operation requires permissions to perform the <code>rekognition:IndexFaces</code> action.</p>"
},
"ListCollections":{
"name":"ListCollections",
@@ -1980,6 +1980,14 @@
"DetectionAttributes":{
"shape":"Attributes",
"documentation":"<p>An array of facial attributes that you want to be returned. This can be the default list of attributes or all attributes. If you don't specify a value for <code>Attributes</code> or if you specify <code>[\"DEFAULT\"]</code>, the API returns the following subset of facial attributes: <code>BoundingBox</code>, <code>Confidence</code>, <code>Pose</code>, <code>Quality</code> and <code>Landmarks</code>. If you provide <code>[\"ALL\"]</code>, all facial attributes are returned but the operation will take longer to complete.</p> <p>If you provide both, <code>[\"ALL\", \"DEFAULT\"]</code>, the service uses a logical AND operator to determine which attributes to return (in this case, all attributes). </p>"
},
"MaxFaces":{
"shape":"MaxFacesToIndex",
"documentation":"<p>The maximum number of faces to index. The value of <code>MaxFaces</code> must be greater than or equal to 1. <code>IndexFaces</code> returns no more that 100 detected faces in an image, even if you specify a larger value for <code>MaxFaces</code>.</p> <p>If <code>IndexFaces</code> detects more faces than the value of <code>MaxFaces</code>, the faces with the lowest quality are filtered out first. If there are still more faces than the value of <code>MaxFaces</code>, the faces with the smallest bounding boxes are filtered out (up to the number needed to satisfy the value of <code>MaxFaces</code>). Information about the unindexed faces is available in the <code>UnindexedFaces</code> array. </p> <p>The faces returned by <code>IndexFaces</code> are sorted, in descending order, by the largest face bounding box size, to the smallest.</p>"
},
"QualityFilter":{
"shape":"QualityFilter",
"documentation":"<p>Specifies how much filtering is done to identify faces detected with low quality. Filtered faces are not indexed. If you specify <code>AUTO</code>, filtering prioritizes the identification of faces that don’t meet the required quality bar chosen by Amazon Rekognition. The quality bar is based on a variety of common use cases. Low quality detections can arise for a number of reasons. For example, an object misidentified as a face, a face that is too blurry, or a face with a pose that is too extreme to use. If you specify <code>NONE</code>, no filtering is performed. The default value is NONE.</p>"
}
}
},
@@ -1997,6 +2005,10 @@
"FaceModelVersion":{
"shape":"String",
"documentation":"<p>Version number of the face detection model associated with the input collection (<code>CollectionId</code>).</p>"
},
"UnindexedFaces":{
"shape":"UnindexedFaces",
"documentation":"<p>An array of faces that detected in the image but not indexed either because the quality filter deemed them to be of low-quality or the <code>MaxFaces</code> request parameter filtered them out. To use the quality filter, you specify the <code>QualityFilter</code> request parameter.</p>"
}
}
},
@@ -2274,6 +2286,10 @@
"max":4096,
"min":1
},
"MaxFacesToIndex":{
"type":"integer",
"min":1
},
"MaxResults":{
"type":"integer",
"min":1
@@ -2478,6 +2494,28 @@
"documentation":"<p>The number of requests exceeded your throughput limit. If you want to increase this limit, contact Amazon Rekognition.</p>",
"exception":true
},
"QualityFilter":{
"type":"string",
"enum":[
"NONE",
"AUTO"
]
},
"Reason":{
"type":"string",
"enum":[
"EXCEEDS_MAX_FACES",
"EXTREME_POSE",
"LOW_BRIGHTNESS",
"LOW_SHARPNESS",
"LOW_CONFIDENCE",
"SMALL_BOUNDING_BOX"
]
},
"Reasons":{
"type":"list",
"member":{"shape":"Reason"}
},
"RecognizeCelebritiesRequest":{
"type":"structure",
"required":["Image"],
@@ -3055,6 +3093,24 @@
"type":"long",
"min":0
},
"UnindexedFace":{
"type":"structure",
"members":{
"Reasons":{
"shape":"Reasons",
"documentation":"<p>An array of reasons specifying why a face was not indexed. </p> <ul> <li> <p>EXTREME_POSE - The face is at a pose that can't be detected. For example, the head is turned too far away from the camera.</p> </li> <li> <p>EXCEEDS_MAX_FACES - The number of faces detected is already higher than that specified by the <code>MaxFaces</code> input parameter for <code>IndexFaces</code>.</p> </li> <li> <p>LOW_BRIGHTNESS - The image is too dark.</p> </li> <li> <p>LOW_SHARPNESS - The image is too blurry.</p> </li> <li> <p>LOW_CONFIDENCE - The face was detected with a low confidence.</p> </li> <li> <p>SMALL_BOUNDING_BOX - The bounding box around the face is too small.</p> </li> </ul>"
},
"FaceDetail":{
"shape":"FaceDetail",
"documentation":"<p>Structure containing attributes of a face that was detected, but not indexed, by <code>IndexFaces</code>. </p>"
}
},
"documentation":"<p>A face detected by but not indexed. Use the <code>Reasons</code> response attribute to determine why a face is not indexed.</p>"
},
"UnindexedFaces":{
"type":"list",
"member":{"shape":"UnindexedFace"}
},
"Url":{"type":"string"},
"Urls":{
"type":"list",
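
The new Rekognition inputs surface directly on IndexFaces: MaxFaces caps how
many faces are indexed, QualityFilter screens out low-quality detections, and
skipped faces come back in UnindexedFaces with machine-readable Reasons. A
sketch assuming boto3 on this botocore release, an existing collection, and an
image already in S3 (all names hypothetical):

    import boto3

    rekognition = boto3.client('rekognition')
    response = rekognition.index_faces(
        CollectionId='my-collection',  # hypothetical; must already exist
        Image={'S3Object': {'Bucket': 'my-bucket', 'Name': 'photo.jpg'}},
        MaxFaces=5,                    # new: index at most the 5 best faces
        QualityFilter='AUTO',          # new: drop low-quality detections
    )
    # Faces that were detected but filtered out are reported with reasons
    # such as EXCEEDS_MAX_FACES or LOW_SHARPNESS.
    for face in response.get('UnindexedFaces', []):
        print(face['Reasons'])
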
2 changes: 1 addition & 1 deletion docs/source/conf.py
@@ -54,7 +54,7 @@
# The short X.Y version.
version = '1.12'
# The full version, including alpha/beta/rc tags.
release = '1.12.5'
release = '1.12.6'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
14 changes: 14 additions & 0 deletions tests/unit/test_awsrequest.py
@@ -128,6 +128,20 @@ def test_can_prepare_dict_body(self):
prepared_request = request.prepare()
self.assertEqual(prepared_request.body, 'dead=beef')

def test_can_prepare_dict_body_unicode_values(self):
body = {'Text': u'\u30c6\u30b9\u30c8 string'}
expected_body = 'Text=%E3%83%86%E3%82%B9%E3%83%88+string'
request = AWSRequest(url='http://example.com/', data=body)
prepared_request = request.prepare()
self.assertEqual(prepared_request.body, expected_body)

def test_can_prepare_dict_body_unicode_keys(self):
body = {u'\u30c6\u30b9\u30c8': 'string'}
expected_body = '%E3%83%86%E3%82%B9%E3%83%88=string'
request = AWSRequest(url='http://example.com/', data=body)
prepared_request = request.prepare()
self.assertEqual(prepared_request.body, expected_body)

def test_can_prepare_empty_body(self):
request = AWSRequest(url='http://example.com/', data=b'')
prepared_request = request.prepare()
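
The two new cases pin down the expected percent-encoding for unicode keys and
values. To run just them, a sketch assuming a checkout of this commit with
pytest installed (the suite of this era also ran under nose):

    import pytest

    # -k selects the new unicode-body tests by name (hypothetical invocation).
    pytest.main(['tests/unit/test_awsrequest.py', '-k', 'unicode'])
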
