69 changes: 69 additions & 0 deletions shotgun_api3/lib/httplib2/auth.py
@@ -0,0 +1,69 @@
import base64
import re

from ... import pyparsing as pp

from .error import *


try: # pyparsing>=3.0.0
downcaseTokens = pp.common.downcaseTokens
except AttributeError:
downcaseTokens = pp.downcaseTokens

UNQUOTE_PAIRS = re.compile(r"\\(.)")
unquote = lambda s, l, t: UNQUOTE_PAIRS.sub(r"\1", t[0][1:-1])

# https://tools.ietf.org/html/rfc7235#section-1.2
# https://tools.ietf.org/html/rfc7235#appendix-B
tchar = "!#$%&'*+-.^_`|~" + pp.nums + pp.alphas
token = pp.Word(tchar).setName("token")
token68 = pp.Combine(pp.Word("-._~+/" + pp.nums + pp.alphas) + pp.Optional(pp.Word("=").leaveWhitespace())).setName(
"token68"
)

quoted_string = pp.dblQuotedString.copy().setName("quoted-string").setParseAction(unquote)
auth_param_name = token.copy().setName("auth-param-name").addParseAction(downcaseTokens)
auth_param = auth_param_name + pp.Suppress("=") + (quoted_string | token)
params = pp.Dict(pp.delimitedList(pp.Group(auth_param)))

scheme = token("scheme")
challenge = scheme + (params("params") | token68("token"))

authentication_info = params.copy()
www_authenticate = pp.delimitedList(pp.Group(challenge))


def _parse_authentication_info(headers, headername="authentication-info"):
"""https://tools.ietf.org/html/rfc7615
"""
header = headers.get(headername, "").strip()
if not header:
return {}
try:
parsed = authentication_info.parseString(header)
except pp.ParseException as ex:
# print(ex.explain(ex))
raise MalformedHeader(headername)

return parsed.asDict()


def _parse_www_authenticate(headers, headername="www-authenticate"):
"""Returns a dictionary of dictionaries, one dict per auth_scheme."""
header = headers.get(headername, "").strip()
if not header:
return {}
try:
parsed = www_authenticate.parseString(header)
except pp.ParseException as ex:
# print(ex.explain(ex))
raise MalformedHeader(headername)

retval = {
challenge["scheme"].lower(): challenge["params"].asDict()
if "params" in challenge
else {"token": challenge.get("token")}
for challenge in parsed
}
return retval
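
For reference only (not part of the diff): a minimal usage sketch of the two parsers above, assuming the vendored module imports as shotgun_api3.lib.httplib2.auth; the header values are invented for illustration.

from shotgun_api3.lib.httplib2 import auth  # hypothetical import path for the vendored copy

# Challenge header -> one dict per scheme; scheme and param names are lowercased by the grammar above.
headers = {"www-authenticate": 'Digest realm="me@example.com", qop="auth", nonce="abc123"'}
challenges = auth._parse_www_authenticate(headers)
# Expected shape: {"digest": {"realm": "me@example.com", "qop": "auth", "nonce": "abc123"}}

# Authentication-Info header -> flat dict of auth-params.
info = auth._parse_authentication_info({"authentication-info": 'nextnonce="def456", qop=auth'})
# Expected shape: {"nextnonce": "def456", "qop": "auth"}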
2,225 changes: 2,225 additions & 0 deletions shotgun_api3/lib/httplib2/cacerts.txt

Large diffs are not rendered by default.

42 changes: 42 additions & 0 deletions shotgun_api3/lib/httplib2/certs.py
@@ -0,0 +1,42 @@
"""Utilities for certificate management."""

import os

certifi_available = False
certifi_where = None
try:
from certifi import where as certifi_where
certifi_available = True
except ImportError:
pass

custom_ca_locater_available = False
custom_ca_locater_where = None
try:
from ca_certs_locater import get as custom_ca_locater_where
custom_ca_locater_available = True
except ImportError:
pass


BUILTIN_CA_CERTS = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "cacerts.txt"
)


def where():
env = os.environ.get("HTTPLIB2_CA_CERTS")
if env is not None:
if os.path.isfile(env):
return env
else:
raise RuntimeError("Environment variable HTTPLIB2_CA_CERTS not a valid file")
if custom_ca_locater_available:
return custom_ca_locater_where()
if certifi_available:
return certifi_where()
return BUILTIN_CA_CERTS


if __name__ == "__main__":
print(where())
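
For context (not part of the diff): where() resolves the CA bundle in a fixed order — the HTTPLIB2_CA_CERTS environment variable, then ca_certs_locater, then certifi, then the bundled cacerts.txt. A minimal sketch; the import path and the file path are assumptions for illustration.

import os
from shotgun_api3.lib.httplib2 import certs  # hypothetical import path for the vendored copy

# The env var wins, but it must point at an existing file or where() raises RuntimeError.
os.environ["HTTPLIB2_CA_CERTS"] = "/etc/ssl/certs/ca-bundle.crt"  # example path, adjust locally
print(certs.where())  # -> /etc/ssl/certs/ca-bundle.crt (if that file exists)

del os.environ["HTTPLIB2_CA_CERTS"]
print(certs.where())  # -> ca_certs_locater result, else certifi's bundle, else the bundled cacerts.txt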
48 changes: 48 additions & 0 deletions shotgun_api3/lib/httplib2/error.py
@@ -0,0 +1,48 @@
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception):
pass


# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
def __init__(self, desc, response, content):
self.response = response
self.content = content
HttpLib2Error.__init__(self, desc)


class RedirectMissingLocation(HttpLib2ErrorWithResponse):
pass


class RedirectLimit(HttpLib2ErrorWithResponse):
pass


class FailedToDecompressContent(HttpLib2ErrorWithResponse):
pass


class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse):
pass


class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse):
pass


class MalformedHeader(HttpLib2Error):
pass


class RelativeURIError(HttpLib2Error):
pass


class ServerNotFoundError(HttpLib2Error):
pass


class ProxiesUnavailableError(HttpLib2Error):
pass
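
For context (not part of the diff): every exception above shares the HttpLib2Error base, and the *WithResponse variants also carry the response and body that triggered them, so callers can catch broadly or narrowly. A small sketch with invented arguments:

from shotgun_api3.lib.httplib2 import error  # hypothetical import path for the vendored copy

try:
    raise error.RedirectLimit("Redirected more times than allowed", response={}, content=b"")
except error.HttpLib2ErrorWithResponse as ex:
    print(ex, ex.response, ex.content)  # description plus the offending response/body
except error.HttpLib2Error:
    pass  # errors without a response (e.g. ServerNotFoundError) land here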
124 changes: 124 additions & 0 deletions shotgun_api3/lib/httplib2/iri2uri.py
@@ -0,0 +1,124 @@
# -*- coding: utf-8 -*-
"""Converts an IRI to a URI."""

__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"

import urllib.parse

# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to encode and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD

escape_range = [
(0xA0, 0xD7FF),
(0xE000, 0xF8FF),
(0xF900, 0xFDCF),
(0xFDF0, 0xFFEF),
(0x10000, 0x1FFFD),
(0x20000, 0x2FFFD),
(0x30000, 0x3FFFD),
(0x40000, 0x4FFFD),
(0x50000, 0x5FFFD),
(0x60000, 0x6FFFD),
(0x70000, 0x7FFFD),
(0x80000, 0x8FFFD),
(0x90000, 0x9FFFD),
(0xA0000, 0xAFFFD),
(0xB0000, 0xBFFFD),
(0xC0000, 0xCFFFD),
(0xD0000, 0xDFFFD),
(0xE1000, 0xEFFFD),
(0xF0000, 0xFFFFD),
(0x100000, 0x10FFFD),
]


def encode(c):
retval = c
i = ord(c)
for low, high in escape_range:
if i < low:
break
if i >= low and i <= high:
retval = "".join(["%%%2X" % o for o in c.encode("utf-8")])
break
return retval


def iri2uri(uri):
"""Convert an IRI to a URI. Note that IRIs must be
passed in as unicode strings. That is, do not utf-8 encode
the IRI before passing it into the function."""
if isinstance(uri, str):
(scheme, authority, path, query, fragment) = urllib.parse.urlsplit(uri)
authority = authority.encode("idna").decode("utf-8")
# For each character in 'ucschar' or 'iprivate'
# 1. encode as utf-8
# 2. then %-encode each octet of that utf-8
uri = urllib.parse.urlunsplit((scheme, authority, path, query, fragment))
uri = "".join([encode(c) for c in uri])
return uri


if __name__ == "__main__":
import unittest

class Test(unittest.TestCase):
def test_uris(self):
"""Test that URIs are invariant under the transformation."""
invariant = [
"ftp://ftp.is.co.za/rfc/rfc1808.txt",
"http://www.ietf.org/rfc/rfc2396.txt",
"ldap://[2001:db8::7]/c=GB?objectClass?one",
"mailto:John.Doe@example.com",
"news:comp.infosystems.www.servers.unix",
"tel:+1-816-555-1212",
"telnet://192.0.2.16:80/",
"urn:oasis:names:specification:docbook:dtd:xml:4.1.2",
]
for uri in invariant:
self.assertEqual(uri, iri2uri(uri))

def test_iri(self):
"""Test that the right type of escaping is done for each part of the URI."""
self.assertEqual(
"http://xn--o3h.com/%E2%98%84",
iri2uri("http://\N{COMET}.com/\N{COMET}"),
)
self.assertEqual(
"http://bitworking.org/?fred=%E2%98%84",
iri2uri("http://bitworking.org/?fred=\N{COMET}"),
)
self.assertEqual(
"http://bitworking.org/#%E2%98%84",
iri2uri("http://bitworking.org/#\N{COMET}"),
)
self.assertEqual("#%E2%98%84", iri2uri("#\N{COMET}"))
self.assertEqual(
"/fred?bar=%E2%98%9A#%E2%98%84",
iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"),
)
self.assertEqual(
"/fred?bar=%E2%98%9A#%E2%98%84",
iri2uri(iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")),
)
self.assertNotEqual(
"/fred?bar=%E2%98%9A#%E2%98%84",
iri2uri(
"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode("utf-8")
),
)

unittest.main()
36 changes: 10 additions & 26 deletions shotgun_api3/lib/httplib2/python2/__init__.py
@@ -17,9 +17,10 @@
"Sam Ruby",
"Louis Nyffenegger",
"Alex Yu",
"Lai Han",
]
__license__ = "MIT"
__version__ = "0.19.1"
__version__ = "0.22.0"

import base64
import calendar
@@ -467,7 +468,10 @@ def _decompressContent(response, new_content):
if encoding == "gzip":
content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
if encoding == "deflate":
content = zlib.decompress(content, -zlib.MAX_WBITS)
try:
content = zlib.decompress(content, zlib.MAX_WBITS)
except (IOError, zlib.error):
content = zlib.decompress(content, -zlib.MAX_WBITS)
response["content-length"] = str(len(content))
# Record the historical presence of the encoding in a way that won't interfere.
response["-content-encoding"] = response["content-encoding"]
@@ -961,34 +965,14 @@ def proxy_info_from_url(url, method="http", noproxy=None):
"""Construct a ProxyInfo from a URL (such as http_proxy env var)
"""
url = urlparse.urlparse(url)
username = None
password = None
port = None
if "@" in url[1]:
ident, host_port = url[1].split("@", 1)
if ":" in ident:
username, password = ident.split(":", 1)
else:
password = ident
else:
host_port = url[1]
if ":" in host_port:
host, port = host_port.split(":", 1)
else:
host = host_port

if port:
port = int(port)
else:
port = dict(https=443, http=80)[method]

proxy_type = 3 # socks.PROXY_TYPE_HTTP
pi = ProxyInfo(
proxy_type=proxy_type,
proxy_host=host,
proxy_port=port,
proxy_user=username or None,
proxy_pass=password or None,
proxy_host=url.hostname,
proxy_port=url.port or dict(https=443, http=80)[method],
proxy_user=url.username or None,
proxy_pass=url.password or None,
proxy_headers=None,
)
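
For context (not part of the diff): the rewritten proxy_info_from_url reads hostname/port/username/password straight off the parse result instead of hand-splitting the netloc. A sketch of what those attributes yield, shown with Python 3's urllib.parse (the python2 copy uses the equivalent urlparse module); the URL is invented:

from urllib.parse import urlparse

url = urlparse("http://scott:tiger@proxy.example.com:3128")
print(url.hostname)  # 'proxy.example.com'
print(url.port)      # 3128
print(url.username)  # 'scott'
print(url.password)  # 'tiger'
# With no explicit port in the URL, proxy_info_from_url falls back to 80 or 443 based on `method`.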
