diff --git a/.codeclimate.yml b/.codeclimate.yml
index 8eec6c427..d89f24814 100644
--- a/.codeclimate.yml
+++ b/.codeclimate.yml
@@ -27,6 +27,7 @@ exclude_paths:
- "resources/skins/"
- "resources/settings.xml"
- "tests/"
+ - "packages/"
- "addon.xml"
- "changelog.txt"
- "Contributing.md"
diff --git a/addon.xml b/addon.xml
index feeb896dc..2adc63eb0 100644
--- a/addon.xml
+++ b/addon.xml
@@ -5,7 +5,6 @@
-
diff --git a/codecov.yml b/codecov.yml
index 04f82dff0..a6cb92940 100644
--- a/codecov.yml
+++ b/codecov.yml
@@ -10,3 +10,4 @@ coverage:
comment: false
ignore:
- tests/
+- packages/
diff --git a/packages/__init__.py b/packages/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/packages/certifi/__init__.py b/packages/certifi/__init__.py
new file mode 100644
index 000000000..eebdf8886
--- /dev/null
+++ b/packages/certifi/__init__.py
@@ -0,0 +1,3 @@
+from .core import contents, where
+
+__version__ = "2021.05.30"
diff --git a/packages/certifi/__main__.py b/packages/certifi/__main__.py
new file mode 100644
index 000000000..8945b5da8
--- /dev/null
+++ b/packages/certifi/__main__.py
@@ -0,0 +1,12 @@
+import argparse
+
+from certifi import contents, where
+
+parser = argparse.ArgumentParser()
+parser.add_argument("-c", "--contents", action="store_true")
+args = parser.parse_args()
+
+if args.contents:
+ print(contents())
+else:
+ print(where())
diff --git a/packages/certifi/cacert.pem b/packages/certifi/cacert.pem
new file mode 100644
index 000000000..96e2fc65a
--- /dev/null
+++ b/packages/certifi/cacert.pem
@@ -0,0 +1,4257 @@
+
+# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
+# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
+# Label: "GlobalSign Root CA"
+# Serial: 4835703278459707669005204
+# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a
+# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c
+# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99
+-----BEGIN CERTIFICATE-----
+MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG
+A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv
+b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw
+MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i
+YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT
+aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ
+jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp
+xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp
+1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG
+snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ
+U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8
+9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E
+BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B
+AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz
+yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE
+38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP
+AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad
+DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME
+HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A==
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2
+# Label: "GlobalSign Root CA - R2"
+# Serial: 4835703278459682885658125
+# MD5 Fingerprint: 94:14:77:7e:3e:5e:fd:8f:30:bd:41:b0:cf:e7:d0:30
+# SHA1 Fingerprint: 75:e0:ab:b6:13:85:12:27:1c:04:f8:5f:dd:de:38:e4:b7:24:2e:fe
+# SHA256 Fingerprint: ca:42:dd:41:74:5f:d0:b8:1e:b9:02:36:2c:f9:d8:bf:71:9d:a1:bd:1b:1e:fc:94:6f:5b:4c:99:f4:2c:1b:9e
+-----BEGIN CERTIFICATE-----
+MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G
+A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp
+Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1
+MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG
+A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL
+v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8
+eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq
+tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd
+C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa
+zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB
+mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH
+V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n
+bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG
+3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs
+J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO
+291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS
+ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd
+AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7
+TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Label: "Entrust.net Premium 2048 Secure Server CA"
+# Serial: 946069240
+# MD5 Fingerprint: ee:29:31:bc:32:7e:9a:e6:e8:b5:f7:51:b4:34:71:90
+# SHA1 Fingerprint: 50:30:06:09:1d:97:d4:f5:ae:39:f7:cb:e7:92:7d:7d:65:2d:34:31
+# SHA256 Fingerprint: 6d:c4:71:72:e0:1c:bc:b0:bf:62:58:0d:89:5f:e2:b8:ac:9a:d4:f8:73:80:1e:0c:10:b9:c8:37:d2:1e:b1:77
+-----BEGIN CERTIFICATE-----
+MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML
+RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp
+bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5
+IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3
+MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3
+LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp
+YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG
+A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq
+K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe
+sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX
+MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT
+XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/
+HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH
+4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub
+j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo
+U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf
+zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b
+u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+
+bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er
+fF6adulZkMV8gzURZVE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
+# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
+# Label: "Baltimore CyberTrust Root"
+# Serial: 33554617
+# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4
+# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74
+# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ
+RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD
+VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX
+DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y
+ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy
+VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr
+mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr
+IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK
+mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu
+XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy
+dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye
+jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1
+BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3
+DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92
+9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx
+jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0
+Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz
+ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS
+R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
+# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
+# Label: "Entrust Root Certification Authority"
+# Serial: 1164660820
+# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4
+# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9
+# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c
+-----BEGIN CERTIFICATE-----
+MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0
+Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW
+KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl
+cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw
+NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw
+NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy
+ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV
+BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo
+Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4
+4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9
+KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI
+rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi
+94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB
+sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi
+gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo
+kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE
+vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA
+A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t
+O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua
+AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP
+9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/
+eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m
+0vdXcDazv/wor3ElhVsT/h5/WrQ8
+-----END CERTIFICATE-----
+
+# Issuer: CN=AAA Certificate Services O=Comodo CA Limited
+# Subject: CN=AAA Certificate Services O=Comodo CA Limited
+# Label: "Comodo AAA Services root"
+# Serial: 1
+# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0
+# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49
+# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4
+-----BEGIN CERTIFICATE-----
+MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb
+MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow
+GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj
+YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM
+GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua
+BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe
+3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4
+YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR
+rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm
+ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU
+oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF
+MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v
+QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t
+b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF
+AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q
+GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz
+Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2
+G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi
+l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3
+smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 2"
+# Serial: 1289
+# MD5 Fingerprint: 5e:39:7b:dd:f8:ba:ec:82:e9:ac:62:ba:0c:54:00:2b
+# SHA1 Fingerprint: ca:3a:fb:cf:12:40:36:4b:44:b2:16:20:88:80:48:39:19:93:7c:f7
+# SHA256 Fingerprint: 85:a0:dd:7d:d7:20:ad:b7:ff:05:f8:3d:54:2b:20:9d:c7:ff:45:28:f7:d6:77:b1:83:89:fe:a5:e5:c4:9e:86
+-----BEGIN CERTIFICATE-----
+MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
+GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
+b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV
+BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
+YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa
+GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg
+Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J
+WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB
+rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp
++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1
+ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i
+Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz
+PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og
+/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH
+oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI
+yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud
+EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2
+A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL
+MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT
+ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f
+BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn
+g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl
+fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K
+WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha
+B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc
+hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR
+TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD
+mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z
+ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y
+4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza
+8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 3"
+# Serial: 1478
+# MD5 Fingerprint: 31:85:3c:62:94:97:63:b9:aa:fd:89:4e:af:6f:e0:cf
+# SHA1 Fingerprint: 1f:49:14:f7:d8:74:95:1d:dd:ae:02:c0:be:fd:3a:2d:82:75:51:85
+# SHA256 Fingerprint: 18:f1:fc:7f:20:5d:f8:ad:dd:eb:7f:e0:07:dd:57:e3:af:37:5a:9c:4d:8d:73:54:6b:f4:f1:fe:d1:e1:8d:35
+-----BEGIN CERTIFICATE-----
+MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
+GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
+b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV
+BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
+YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM
+V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB
+4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr
+H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd
+8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv
+vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT
+mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe
+btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc
+T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt
+WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ
+c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A
+4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD
+VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG
+CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0
+aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0
+aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu
+dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw
+czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G
+A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC
+TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg
+Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0
+7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem
+d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd
++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B
+4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN
+t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x
+DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57
+k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s
+zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j
+Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT
+mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK
+4SVhM7JZG+Ju1zdXtg2pEto=
+-----END CERTIFICATE-----
+
+# Issuer: O=SECOM Trust.net OU=Security Communication RootCA1
+# Subject: O=SECOM Trust.net OU=Security Communication RootCA1
+# Label: "Security Communication Root CA"
+# Serial: 0
+# MD5 Fingerprint: f1:bc:63:6a:54:e0:b5:27:f5:cd:e7:1a:e3:4d:6e:4a
+# SHA1 Fingerprint: 36:b1:2b:49:f9:81:9e:d7:4c:9e:bc:38:0f:c6:56:8f:5d:ac:b2:f7
+# SHA256 Fingerprint: e7:5e:72:ed:9f:56:0e:ec:6e:b4:80:00:73:a4:3f:c3:ad:19:19:5a:39:22:82:01:78:95:97:4a:99:02:6b:6c
+-----BEGIN CERTIFICATE-----
+MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY
+MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t
+dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5
+WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD
+VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8
+9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ
+DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9
+Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N
+QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ
+xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G
+A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T
+AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG
+kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr
+Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5
+Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU
+JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot
+RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
+# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
+# Label: "XRamp Global CA Root"
+# Serial: 107108908803651509692980124233745014957
+# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1
+# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6
+# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2
+-----BEGIN CERTIFICATE-----
+MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB
+gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk
+MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY
+UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx
+NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3
+dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy
+dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB
+dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6
+38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP
+KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q
+DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4
+qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa
+JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi
+PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P
+BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs
+jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0
+eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD
+ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR
+vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt
+qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa
+IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy
+i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ
+O+7ETPTsJ3xCwnR8gooJybQDJbw=
+-----END CERTIFICATE-----
+
+# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
+# Subject: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
+# Label: "Go Daddy Class 2 CA"
+# Serial: 0
+# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67
+# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4
+# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4
+-----BEGIN CERTIFICATE-----
+MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh
+MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE
+YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3
+MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo
+ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg
+MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN
+ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA
+PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w
+wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi
+EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY
+avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+
+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE
+sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h
+/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5
+IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD
+ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy
+OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P
+TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ
+HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER
+dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf
+ReYNnyicsbkqWletNw+vHX/bvZ8=
+-----END CERTIFICATE-----
+
+# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
+# Subject: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
+# Label: "Starfield Class 2 CA"
+# Serial: 0
+# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24
+# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a
+# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58
+-----BEGIN CERTIFICATE-----
+MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl
+MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp
+U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw
+NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE
+ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp
+ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3
+DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf
+8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN
++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0
+X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa
+K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA
+1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G
+A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR
+zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0
+YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD
+bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w
+DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3
+L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D
+eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl
+xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp
+VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY
+WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q=
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root CA"
+# Serial: 17154717934120587862167794914071425081
+# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72
+# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43
+# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c
+-----BEGIN CERTIFICATE-----
+MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
+b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
+cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c
+JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP
+mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+
+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4
+VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/
+AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB
+AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
+BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun
+pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC
+dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf
+fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm
+NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx
+H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe
++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root CA"
+# Serial: 10944719598952040374951832963794454346
+# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e
+# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36
+# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61
+-----BEGIN CERTIFICATE-----
+MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD
+QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB
+CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97
+nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt
+43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P
+T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4
+gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO
+BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR
+TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw
+DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr
+hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg
+06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF
+PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls
+YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk
+CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4=
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert High Assurance EV Root CA"
+# Serial: 3553400076410547919724730734378100087
+# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a
+# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25
+# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j
+ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3
+LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug
+RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm
++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW
+PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM
+xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB
+Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3
+hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg
+EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA
+FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec
+nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z
+eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF
+hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2
+Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe
+vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep
++OkuE6N36B9K
+-----END CERTIFICATE-----
+
+# Issuer: CN=DST Root CA X3 O=Digital Signature Trust Co.
+# Subject: CN=DST Root CA X3 O=Digital Signature Trust Co.
+# Label: "DST Root CA X3"
+# Serial: 91299735575339953335919266965803778155
+# MD5 Fingerprint: 41:03:52:dc:0f:f7:50:1b:16:f0:02:8e:ba:6f:45:c5
+# SHA1 Fingerprint: da:c9:02:4f:54:d8:f6:df:94:93:5f:b1:73:26:38:ca:6a:d7:7c:13
+# SHA256 Fingerprint: 06:87:26:03:31:a7:24:03:d9:09:f1:05:e6:9b:cf:0d:32:e1:bd:24:93:ff:c6:d9:20:6d:11:bc:d6:77:07:39
+-----BEGIN CERTIFICATE-----
+MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/
+MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT
+DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow
+PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD
+Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
+AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O
+rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq
+OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b
+xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw
+7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD
+aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV
+HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG
+SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69
+ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr
+AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz
+R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5
+JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo
+Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ
+-----END CERTIFICATE-----
+
+# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG
+# Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG
+# Label: "SwissSign Gold CA - G2"
+# Serial: 13492815561806991280
+# MD5 Fingerprint: 24:77:d9:a8:91:d1:3b:fa:88:2d:c2:ff:f8:cd:33:93
+# SHA1 Fingerprint: d8:c5:38:8a:b7:30:1b:1b:6e:d4:7a:e6:45:25:3a:6f:9f:1a:27:61
+# SHA256 Fingerprint: 62:dd:0b:e9:b9:f5:0a:16:3e:a0:f8:e7:5c:05:3b:1e:ca:57:ea:55:c8:68:8f:64:7c:68:81:f2:c8:35:7b:95
+-----BEGIN CERTIFICATE-----
+MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
+BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln
+biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF
+MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT
+d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC
+CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8
+76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+
+bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c
+6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE
+emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd
+MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt
+MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y
+MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y
+FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi
+aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM
+gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB
+qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7
+lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn
+8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov
+L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6
+45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO
+UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5
+O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC
+bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv
+GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a
+77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC
+hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3
+92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp
+Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w
+ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt
+Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ
+-----END CERTIFICATE-----
+
+# Issuer: CN=SwissSign Silver CA - G2 O=SwissSign AG
+# Subject: CN=SwissSign Silver CA - G2 O=SwissSign AG
+# Label: "SwissSign Silver CA - G2"
+# Serial: 5700383053117599563
+# MD5 Fingerprint: e0:06:a1:c9:7d:cf:c9:fc:0d:c0:56:75:96:d8:62:13
+# SHA1 Fingerprint: 9b:aa:e5:9f:56:ee:21:cb:43:5a:be:25:93:df:a7:f0:40:d1:1d:cb
+# SHA256 Fingerprint: be:6c:4d:a2:bb:b9:ba:59:b6:f3:93:97:68:37:42:46:c3:c0:05:99:3f:a9:8f:02:0d:1d:ed:be:d4:8a:81:d5
+-----BEGIN CERTIFICATE-----
+MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE
+BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu
+IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow
+RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY
+U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A
+MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv
+Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br
+YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF
+nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH
+6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt
+eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/
+c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ
+MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH
+HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf
+jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6
+5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB
+rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
+F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c
+wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0
+cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB
+AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp
+WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9
+xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ
+2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ
+IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8
+aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X
+em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR
+dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/
+OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+
+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy
+tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u
+-----END CERTIFICATE-----
+
+# Issuer: CN=SecureTrust CA O=SecureTrust Corporation
+# Subject: CN=SecureTrust CA O=SecureTrust Corporation
+# Label: "SecureTrust CA"
+# Serial: 17199774589125277788362757014266862032
+# MD5 Fingerprint: dc:32:c3:a7:6d:25:57:c7:68:09:9d:ea:2d:a9:a2:d1
+# SHA1 Fingerprint: 87:82:c6:c3:04:35:3b:cf:d2:96:92:d2:59:3e:7d:44:d9:34:ff:11
+# SHA256 Fingerprint: f1:c1:b5:0a:e5:a2:0d:d8:03:0e:c9:f6:bc:24:82:3d:d3:67:b5:25:57:59:b4:e7:1b:61:fc:e9:f7:37:5d:73
+-----BEGIN CERTIFICATE-----
+MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz
+MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv
+cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz
+Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO
+0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao
+wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj
+7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS
+8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT
+BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg
+JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC
+NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3
+6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/
+3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm
+D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS
+CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR
+3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Secure Global CA O=SecureTrust Corporation
+# Subject: CN=Secure Global CA O=SecureTrust Corporation
+# Label: "Secure Global CA"
+# Serial: 9751836167731051554232119481456978597
+# MD5 Fingerprint: cf:f4:27:0d:d4:ed:dc:65:16:49:6d:3d:da:bf:6e:de
+# SHA1 Fingerprint: 3a:44:73:5a:e5:81:90:1f:24:86:61:46:1e:3b:9c:c4:5f:f5:3a:1b
+# SHA256 Fingerprint: 42:00:f5:04:3a:c8:59:0e:bb:52:7d:20:9e:d1:50:30:29:fb:cb:d4:1c:a1:b5:06:ec:27:f1:5a:de:7d:ac:69
+-----BEGIN CERTIFICATE-----
+MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx
+MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg
+Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ
+iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa
+/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ
+jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI
+HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7
+sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w
+gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw
+KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG
+AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L
+URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO
+H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm
+I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY
+iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc
+f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO Certification Authority O=COMODO CA Limited
+# Label: "COMODO Certification Authority"
+# Serial: 104350513648249232941998508985834464573
+# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75
+# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b
+# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66
+-----BEGIN CERTIFICATE-----
+MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB
+gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV
+BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw
+MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl
+YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P
+RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3
+UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI
+2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8
+Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp
++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+
+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O
+nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW
+/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g
+PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u
+QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY
+SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv
+IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/
+RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4
+zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd
+BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB
+ZQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C.
+# Subject: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C.
+# Label: "Network Solutions Certificate Authority"
+# Serial: 116697915152937497490437556386812487904
+# MD5 Fingerprint: d3:f3:a6:16:c0:fa:6b:1d:59:b1:2d:96:4d:0e:11:2e
+# SHA1 Fingerprint: 74:f8:a3:c3:ef:e7:b3:90:06:4b:83:90:3c:21:64:60:20:e5:df:ce
+# SHA256 Fingerprint: 15:f0:ba:00:a3:ac:7a:f3:ac:88:4c:07:2b:10:11:a0:77:bd:77:c0:97:f4:01:64:b2:f8:59:8a:bd:83:86:0c
+-----BEGIN CERTIFICATE-----
+MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi
+MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu
+MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp
+dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV
+UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO
+ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz
+c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP
+OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl
+mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF
+BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4
+qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw
+gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB
+BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu
+bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp
+dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8
+6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/
+h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH
+/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv
+wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN
+pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+# Label: "COMODO ECC Certification Authority"
+# Serial: 41578283867086692638256921589707938090
+# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23
+# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11
+# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7
+-----BEGIN CERTIFICATE-----
+MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT
+IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw
+MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy
+ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N
+T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv
+biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR
+FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J
+cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW
+BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm
+fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv
+GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certigna O=Dhimyotis
+# Subject: CN=Certigna O=Dhimyotis
+# Label: "Certigna"
+# Serial: 18364802974209362175
+# MD5 Fingerprint: ab:57:a6:5b:7d:42:82:19:b5:d8:58:26:28:5e:fd:ff
+# SHA1 Fingerprint: b1:2e:13:63:45:86:a4:6f:1a:b2:60:68:37:58:2d:c4:ac:fd:94:97
+# SHA256 Fingerprint: e3:b6:a2:db:2e:d7:ce:48:84:2f:7a:c5:32:41:c7:b7:1d:54:14:4b:fb:40:c1:1f:3f:1d:0b:42:f5:ee:a1:2d
+-----BEGIN CERTIFICATE-----
+MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV
+BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X
+DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ
+BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4
+QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny
+gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw
+zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q
+130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2
+JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw
+DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw
+ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT
+AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj
+AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG
+9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h
+bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc
+fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu
+HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w
+t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw
+WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc
+# Subject: CN=Cybertrust Global Root O=Cybertrust, Inc
+# Label: "Cybertrust Global Root"
+# Serial: 4835703278459682877484360
+# MD5 Fingerprint: 72:e4:4a:87:e3:69:40:80:77:ea:bc:e3:f4:ff:f0:e1
+# SHA1 Fingerprint: 5f:43:e5:b1:bf:f8:78:8c:ac:1c:c7:ca:4a:9a:c6:22:2b:cc:34:c6
+# SHA256 Fingerprint: 96:0a:df:00:63:e9:63:56:75:0c:29:65:dd:0a:08:67:da:0b:9c:bd:6e:77:71:4a:ea:fb:23:49:ab:39:3d:a3
+-----BEGIN CERTIFICATE-----
+MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG
+A1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh
+bCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE
+ChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS
+b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5
+7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS
+J8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y
+HLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP
+t3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz
+FtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY
+XSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/
+MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw
+hi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js
+MB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA
+A4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj
+Wqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx
+XOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o
+omcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc
+A06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW
+WL1WMRJOEcgh4LMRkWXbtKaIOM5V
+-----END CERTIFICATE-----
+
+# Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
+# Subject: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
+# Label: "ePKI Root Certification Authority"
+# Serial: 28956088682735189655030529057352760477
+# MD5 Fingerprint: 1b:2e:00:ca:26:06:90:3d:ad:fe:6f:15:68:d3:6b:b3
+# SHA1 Fingerprint: 67:65:0d:f1:7e:8e:7e:5b:82:40:a4:f4:56:4b:cf:e2:3d:69:c6:f0
+# SHA256 Fingerprint: c0:a6:f4:dc:63:a2:4b:fd:cf:54:ef:2a:6a:08:2a:0a:72:de:35:80:3e:2f:f5:ff:52:7a:e5:d8:72:06:df:d5
+-----BEGIN CERTIFICATE-----
+MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe
+MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0
+ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
+Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw
+IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL
+SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH
+SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh
+ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X
+DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1
+TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ
+fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA
+sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU
+WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS
+nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH
+dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip
+NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC
+AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF
+MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH
+ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB
+uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl
+PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP
+JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/
+gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2
+j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6
+5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB
+o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS
+/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z
+Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE
+W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D
+hNQ+IIX3Sj0rnP0qCglN6oH4EZw=
+-----END CERTIFICATE-----
+
+# Issuer: O=certSIGN OU=certSIGN ROOT CA
+# Subject: O=certSIGN OU=certSIGN ROOT CA
+# Label: "certSIGN ROOT CA"
+# Serial: 35210227249154
+# MD5 Fingerprint: 18:98:c0:d6:e9:3a:fc:f9:b0:f5:0c:f7:4b:01:44:17
+# SHA1 Fingerprint: fa:b7:ee:36:97:26:62:fb:2d:b0:2a:f6:bf:03:fd:e8:7c:4b:2f:9b
+# SHA256 Fingerprint: ea:a9:62:c4:fa:4a:6b:af:eb:e4:15:19:6d:35:1c:cd:88:8d:4f:53:f3:fa:8a:e6:d7:c4:66:a9:4e:60:42:bb
+-----BEGIN CERTIFICATE-----
+MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT
+AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD
+QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP
+MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do
+0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ
+UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d
+RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ
+OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv
+JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C
+AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O
+BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ
+LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY
+MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ
+44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I
+Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw
+i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN
+9u6wWk5JRFRYX0KD
+-----END CERTIFICATE-----
+
+# Issuer: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services)
+# Subject: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services)
+# Label: "NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny"
+# Serial: 80544274841616
+# MD5 Fingerprint: c5:a1:b7:ff:73:dd:d6:d7:34:32:18:df:fc:3c:ad:88
+# SHA1 Fingerprint: 06:08:3f:59:3f:15:a1:04:a0:69:a4:6b:a9:03:d0:06:b7:97:09:91
+# SHA256 Fingerprint: 6c:61:da:c3:a2:de:f0:31:50:6b:e0:36:d2:a6:fe:40:19:94:fb:d1:3d:f9:c8:d4:66:59:92:74:c4:46:ec:98
+-----BEGIN CERTIFICATE-----
+MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG
+EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3
+MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl
+cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR
+dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB
+pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM
+b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm
+aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz
+IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT
+lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz
+AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5
+VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG
+ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2
+BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG
+AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M
+U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh
+bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C
++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC
+bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F
+uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2
+XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hongkong Post Root CA 1 O=Hongkong Post
+# Subject: CN=Hongkong Post Root CA 1 O=Hongkong Post
+# Label: "Hongkong Post Root CA 1"
+# Serial: 1000
+# MD5 Fingerprint: a8:0d:6f:39:78:b9:43:6d:77:42:6d:98:5a:cc:23:ca
+# SHA1 Fingerprint: d6:da:a8:20:8d:09:d2:15:4d:24:b5:2f:cb:34:6e:b2:58:b2:8a:58
+# SHA256 Fingerprint: f9:e6:7d:33:6c:51:00:2a:c0:54:c6:32:02:2d:66:dd:a2:e7:e3:ff:f1:0a:d0:61:ed:31:d8:bb:b4:10:cf:b2
+-----BEGIN CERTIFICATE-----
+MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx
+FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg
+Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG
+A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr
+b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ
+jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn
+PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh
+ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9
+nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h
+q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED
+MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC
+mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3
+7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB
+oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs
+EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO
+fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi
+AmvZWg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=SecureSign RootCA11 O=Japan Certification Services, Inc.
+# Subject: CN=SecureSign RootCA11 O=Japan Certification Services, Inc.
+# Label: "SecureSign RootCA11"
+# Serial: 1
+# MD5 Fingerprint: b7:52:74:e2:92:b4:80:93:f2:75:e4:cc:d7:f2:ea:26
+# SHA1 Fingerprint: 3b:c4:9f:48:f8:f3:73:a0:9c:1e:bd:f8:5b:b1:c3:65:c7:d8:11:b3
+# SHA256 Fingerprint: bf:0f:ee:fb:9e:3a:58:1a:d5:f9:e9:db:75:89:98:57:43:d2:61:08:5c:4d:31:4f:6f:5d:72:59:aa:42:16:12
+-----BEGIN CERTIFICATE-----
+MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDEr
+MCkGA1UEChMiSmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoG
+A1UEAxMTU2VjdXJlU2lnbiBSb290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0
+MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSswKQYDVQQKEyJKYXBhbiBDZXJ0aWZp
+Y2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1cmVTaWduIFJvb3RD
+QTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvLTJsz
+i1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8
+h9uuywGOwvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOV
+MdrAG/LuYpmGYz+/3ZMqg6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9
+UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rPO7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni
+8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitAbpSACW22s293bzUIUPsC
+h8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZXt94wDgYD
+VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB
+AKChOBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xm
+KbabfSVSSUOrTC4rbnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQ
+X5Ucv+2rIrVls4W6ng+4reV6G4pQOh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWr
+QbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01y8hSyn+B/tlr0/cR7SXf+Of5
+pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN
+QSdJQO7e5iNEOdyhIta6A/I=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd.
+# Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd.
+# Label: "Microsec e-Szigno Root CA 2009"
+# Serial: 14014712776195784473
+# MD5 Fingerprint: f8:49:f4:03:bc:44:2d:83:be:48:69:7d:29:64:fc:b1
+# SHA1 Fingerprint: 89:df:74:fe:5c:f4:0f:4a:80:f9:e3:37:7d:54:da:91:e1:01:31:8e
+# SHA256 Fingerprint: 3c:5f:81:fe:a5:fa:b8:2c:64:bf:a2:ea:ec:af:cd:e8:e0:77:fc:86:20:a7:ca:e5:37:16:3d:f3:6e:db:f3:78
+-----BEGIN CERTIFICATE-----
+MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD
+VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0
+ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G
+CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y
+OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx
+FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp
+Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o
+dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP
+kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc
+cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U
+fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7
+N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC
+xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1
++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G
+A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM
+Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG
+SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h
+mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk
+ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775
+tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c
+2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t
+HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
+# Label: "GlobalSign Root CA - R3"
+# Serial: 4835703278459759426209954
+# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28
+# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad
+# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b
+-----BEGIN CERTIFICATE-----
+MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G
+A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp
+Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4
+MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG
+A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8
+RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT
+gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm
+KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd
+QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ
+XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw
+DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o
+LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU
+RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp
+jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK
+6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX
+mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs
+Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH
+WD9f
+-----END CERTIFICATE-----
+
+# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068"
+# Serial: 6047274297262753887
+# MD5 Fingerprint: 73:3a:74:7a:ec:bb:a3:96:a6:c2:e4:e2:c8:9b:c0:c3
+# SHA1 Fingerprint: ae:c5:fb:3f:c8:e1:bf:c4:e5:4f:03:07:5a:9a:e8:00:b7:f7:b6:fa
+# SHA256 Fingerprint: 04:04:80:28:bf:1f:28:64:d4:8f:9a:d4:d8:32:94:36:6a:82:88:56:55:3f:3b:14:30:3f:90:14:7f:5d:40:ef
+-----BEGIN CERTIFICATE-----
+MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE
+BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h
+cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy
+MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg
+Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9
+thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM
+cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG
+L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i
+NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h
+X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b
+m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy
+Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja
+EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T
+KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF
+6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh
+OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD
+VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD
+VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp
+cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv
+ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl
+AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF
+661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9
+am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1
+ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481
+PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS
+3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k
+SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF
+3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM
+ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g
+StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz
+Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB
+jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V
+-----END CERTIFICATE-----
+
+# Issuer: CN=Izenpe.com O=IZENPE S.A.
+# Subject: CN=Izenpe.com O=IZENPE S.A.
+# Label: "Izenpe.com"
+# Serial: 917563065490389241595536686991402621
+# MD5 Fingerprint: a6:b0:cd:85:80:da:5c:50:34:a3:39:90:2f:55:67:73
+# SHA1 Fingerprint: 2f:78:3d:25:52:18:a7:4a:65:39:71:b5:2c:a2:9c:45:15:6f:e9:19
+# SHA256 Fingerprint: 25:30:cc:8e:98:32:15:02:ba:d9:6f:9b:1f:ba:1b:09:9e:2d:29:9e:0f:45:48:bb:91:4f:36:3b:c0:d4:53:1f
+-----BEGIN CERTIFICATE-----
+MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4
+MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6
+ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD
+VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j
+b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq
+scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO
+xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H
+LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX
+uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD
+yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+
+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q
+rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN
+BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L
+hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB
+QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+
+HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu
+Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg
+QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB
+BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx
+MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA
+A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb
+laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56
+awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo
+JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw
+LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT
+VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk
+LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb
+UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/
+QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+
+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls
+QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
+# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
+# Label: "Go Daddy Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01
+# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b
+# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT
+EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp
+ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz
+NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH
+EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE
+AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD
+E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH
+/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy
+DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh
+GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR
+tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA
+AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
+FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX
+WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu
+9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr
+gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo
+2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO
+LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI
+4uJEvlz36hz1
+-----END CERTIFICATE-----
+
+# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Label: "Starfield Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96
+# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e
+# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5
+-----BEGIN CERTIFICATE-----
+MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs
+ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw
+MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6
+b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj
+aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp
+Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg
+nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1
+HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N
+Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN
+dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0
+HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G
+CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU
+sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3
+4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg
+8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K
+pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1
+mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0
+-----END CERTIFICATE-----
+
+# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Label: "Starfield Services Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2
+# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f
+# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5
+-----BEGIN CERTIFICATE-----
+MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs
+ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5
+MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD
+VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy
+ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy
+dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p
+OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2
+8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K
+Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe
+hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk
+6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw
+DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q
+AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI
+bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB
+ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z
+qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd
+iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn
+0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN
+sSi6
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Commercial O=AffirmTrust
+# Subject: CN=AffirmTrust Commercial O=AffirmTrust
+# Label: "AffirmTrust Commercial"
+# Serial: 8608355977964138876
+# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7
+# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7
+# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP
+Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr
+ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL
+MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1
+yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr
+VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/
+nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG
+XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj
+vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt
+Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g
+N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC
+nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Networking O=AffirmTrust
+# Subject: CN=AffirmTrust Networking O=AffirmTrust
+# Label: "AffirmTrust Networking"
+# Serial: 8957382827206547757
+# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f
+# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f
+# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y
+YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua
+kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL
+QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp
+6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG
+yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i
+QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO
+tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu
+QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ
+Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u
+olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48
+x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Premium O=AffirmTrust
+# Subject: CN=AffirmTrust Premium O=AffirmTrust
+# Label: "AffirmTrust Premium"
+# Serial: 7893706540734352110
+# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57
+# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27
+# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a
+-----BEGIN CERTIFICATE-----
+MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz
+dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG
+A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U
+cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf
+qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ
+JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ
++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS
+s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5
+HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7
+70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG
+V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S
+qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S
+5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia
+C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX
+OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE
+FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
+BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2
+KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg
+Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B
+8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ
+MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc
+0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ
+u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF
+u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH
+YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8
+GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO
+RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e
+KeC2uAloGRwYQw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust
+# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust
+# Label: "AffirmTrust Premium ECC"
+# Serial: 8401224907861490260
+# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d
+# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb
+# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23
+-----BEGIN CERTIFICATE-----
+MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC
+VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ
+cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ
+BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt
+VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D
+0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9
+ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G
+A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs
+aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I
+flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Subject: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Label: "Certum Trusted Network CA"
+# Serial: 279744
+# MD5 Fingerprint: d5:e9:81:40:c5:18:69:fc:46:2c:89:75:62:0f:aa:78
+# SHA1 Fingerprint: 07:e0:32:e0:20:b7:2c:3f:19:2f:06:28:a2:59:3a:19:a7:0f:06:9e
+# SHA256 Fingerprint: 5c:58:46:8d:55:f5:8e:49:7e:74:39:82:d2:b5:00:10:b6:d1:65:37:4a:cf:83:a7:d4:a3:2d:b7:68:c4:40:8e
+-----BEGIN CERTIFICATE-----
+MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM
+MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D
+ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU
+cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3
+WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg
+Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw
+IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH
+UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM
+TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU
+BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM
+kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x
+AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV
+HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y
+sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL
+I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8
+J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY
+VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI
+03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
+# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
+# Label: "TWCA Root Certification Authority"
+# Serial: 1
+# MD5 Fingerprint: aa:08:8f:f6:f9:7b:b7:f2:b1:a7:1e:9b:ea:ea:bd:79
+# SHA1 Fingerprint: cf:9e:87:6d:d3:eb:fc:42:26:97:a3:b5:a3:7a:a0:76:a9:06:23:48
+# SHA256 Fingerprint: bf:d8:8f:e1:10:1c:41:ae:3e:80:1b:f8:be:56:35:0e:e9:ba:d1:a6:b9:bd:51:5e:dc:5c:6d:5b:87:11:ac:44
+-----BEGIN CERTIFICATE-----
+MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES
+MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU
+V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz
+WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO
+LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
+AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE
+AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH
+K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX
+RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z
+rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx
+3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq
+hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC
+MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls
+XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D
+lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn
+aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ
+YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw==
+-----END CERTIFICATE-----
+
+# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
+# Subject: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
+# Label: "Security Communication RootCA2"
+# Serial: 0
+# MD5 Fingerprint: 6c:39:7d:a4:0e:55:59:b2:3f:d6:41:b1:12:50:de:43
+# SHA1 Fingerprint: 5f:3b:8c:f2:f8:10:b3:7d:78:b4:ce:ec:19:19:c3:73:34:b9:c7:74
+# SHA256 Fingerprint: 51:3b:2c:ec:b8:10:d4:cd:e5:dd:85:39:1a:df:c6:c2:dd:60:d8:7b:b7:36:d2:b5:21:48:4a:a4:7a:0e:be:f6
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl
+MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe
+U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX
+DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy
+dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj
+YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV
+OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr
+zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM
+VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ
+hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO
+ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw
+awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs
+OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3
+DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF
+coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc
+okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8
+t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy
+1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/
+SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03
+-----END CERTIFICATE-----
+
+# Issuer: CN=EC-ACC O=Agencia Catalana de Certificacio (NIF Q-0801176-I) OU=Serveis Publics de Certificacio/Vegeu https://www.catcert.net/verarrel (c)03/Jerarquia Entitats de Certificacio Catalanes
+# Subject: CN=EC-ACC O=Agencia Catalana de Certificacio (NIF Q-0801176-I) OU=Serveis Publics de Certificacio/Vegeu https://www.catcert.net/verarrel (c)03/Jerarquia Entitats de Certificacio Catalanes
+# Label: "EC-ACC"
+# Serial: -23701579247955709139626555126524820479
+# MD5 Fingerprint: eb:f5:9d:29:0d:61:f9:42:1f:7c:c2:ba:6d:e3:15:09
+# SHA1 Fingerprint: 28:90:3a:63:5b:52:80:fa:e6:77:4c:0b:6d:a7:d6:ba:a6:4a:f2:e8
+# SHA256 Fingerprint: 88:49:7f:01:60:2f:31:54:24:6a:e2:8c:4d:5a:ef:10:f1:d8:7e:bb:76:62:6f:4a:e0:b7:f9:5b:a7:96:87:99
+-----BEGIN CERTIFICATE-----
+MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB
+8zELMAkGA1UEBhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2Vy
+dGlmaWNhY2lvIChOSUYgUS0wODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1
+YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYDVQQLEyxWZWdldSBodHRwczovL3d3
+dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UECxMsSmVyYXJxdWlh
+IEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMTBkVD
+LUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQG
+EwJFUzE7MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8g
+KE5JRiBRLTA4MDExNzYtSSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBD
+ZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZlZ2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQu
+bmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJhcnF1aWEgRW50aXRhdHMg
+ZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUNDMIIBIjAN
+BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R
+85iKw5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm
+4CgPukLjbo73FCeTae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaV
+HMf5NLWUhdWZXqBIoH7nF2W4onW4HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNd
+QlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0aE9jD2z3Il3rucO2n5nzbcc8t
+lGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw0JDnJwIDAQAB
+o4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E
+BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4
+opvpXY0wfwYDVR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBo
+dHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidW
+ZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAwDQYJKoZIhvcN
+AQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJlF7W2u++AVtd0x7Y
+/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNaAl6k
+SBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhy
+Rp/7SNVel+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOS
+Agu+TGbrIP65y7WZf+a2E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xl
+nJ2lYJU6Un/10asIbvPuW/mIPX64b24D5EI=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority
+# Subject: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority
+# Label: "Hellenic Academic and Research Institutions RootCA 2011"
+# Serial: 0
+# MD5 Fingerprint: 73:9f:4c:4b:73:5b:79:e9:fa:ba:1c:ef:6e:cb:d5:c9
+# SHA1 Fingerprint: fe:45:65:9b:79:03:5b:98:a1:61:b5:51:2e:ac:da:58:09:48:22:4d
+# SHA256 Fingerprint: bc:10:4f:15:a4:8b:e7:09:dc:a5:42:a7:e1:d4:b9:df:6f:05:45:27:e8:02:ea:a9:2d:59:54:44:25:8a:fe:71
+-----BEGIN CERTIFICATE-----
+MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1Ix
+RDBCBgNVBAoTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1
+dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1p
+YyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIFJvb3RDQSAyMDExMB4XDTExMTIw
+NjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYTAkdSMUQwQgYDVQQK
+EztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIENl
+cnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
+c2VhcmNoIEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPz
+dYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJ
+fel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa71HFK9+WXesyHgLacEns
+bgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u8yBRQlqD
+75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSP
+FEDH3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNV
+HRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp
+5dgTBCPuQSUwRwYDVR0eBEAwPqA8MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQu
+b3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQub3JnMA0GCSqGSIb3DQEBBQUA
+A4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVtXdMiKahsog2p
+6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8
+TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7
+dIsXRSZMFpGD/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8Acys
+Nnq/onN694/BtZqhFLKPM58N7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXI
+l7WdmplNsDz4SgCbZN2fOUvRJ9e4
+-----END CERTIFICATE-----
+
+# Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
+# Subject: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
+# Label: "Actalis Authentication Root CA"
+# Serial: 6271844772424770508
+# MD5 Fingerprint: 69:c1:0d:4f:07:a3:1b:c3:fe:56:3d:04:bc:11:f6:a6
+# SHA1 Fingerprint: f3:73:b3:87:06:5a:28:84:8a:f2:f3:4a:ce:19:2b:dd:c7:8e:9c:ac
+# SHA256 Fingerprint: 55:92:60:84:ec:96:3a:64:b9:6e:2a:be:01:ce:0b:a8:6a:64:fb:fe:bc:c7:aa:b5:af:c1:55:b3:7f:d7:60:66
+-----BEGIN CERTIFICATE-----
+MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE
+BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w
+MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290
+IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC
+SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1
+ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv
+UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX
+4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9
+KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/
+gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb
+rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ
+51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F
+be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe
+KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F
+v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn
+fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7
+jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz
+ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt
+ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL
+e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70
+jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz
+WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V
+SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j
+pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX
+X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok
+fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R
+K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU
+ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU
+LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT
+LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327
+# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327
+# Label: "Buypass Class 2 Root CA"
+# Serial: 2
+# MD5 Fingerprint: 46:a7:d2:fe:45:fb:64:5a:a8:59:90:9b:78:44:9b:29
+# SHA1 Fingerprint: 49:0a:75:74:de:87:0a:47:fe:58:ee:f6:c7:6b:eb:c6:0b:12:40:99
+# SHA256 Fingerprint: 9a:11:40:25:19:7c:5b:b9:5d:94:e6:3d:55:cd:43:79:08:47:b6:46:b2:3c:df:11:ad:a4:a0:0e:ff:15:fb:48
+-----BEGIN CERTIFICATE-----
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd
+MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg
+Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow
+TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw
+HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB
+BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr
+6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV
+L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91
+1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx
+MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ
+QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB
+arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr
+Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi
+FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS
+P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN
+9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP
+AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz
+uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h
+9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s
+A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t
+OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo
++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7
+KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2
+DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us
+H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ
+I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7
+5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h
+3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz
+Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Buypass Class 3 Root CA O=Buypass AS-983163327
+# Subject: CN=Buypass Class 3 Root CA O=Buypass AS-983163327
+# Label: "Buypass Class 3 Root CA"
+# Serial: 2
+# MD5 Fingerprint: 3d:3b:18:9e:2c:64:5a:e8:d5:88:ce:0e:f9:37:c2:ec
+# SHA1 Fingerprint: da:fa:f7:fa:66:84:ec:06:8f:14:50:bd:c7:c2:81:a5:bc:a9:64:57
+# SHA256 Fingerprint: ed:f7:eb:bc:a2:7a:2a:38:4d:38:7b:7d:40:10:c6:66:e2:ed:b4:84:3e:4c:29:b4:ae:1d:5b:93:32:e6:b2:4d
+-----BEGIN CERTIFICATE-----
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd
+MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg
+Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow
+TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw
+HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB
+BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y
+ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E
+N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9
+tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX
+0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c
+/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X
+KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY
+zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS
+O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D
+34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP
+K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3
+AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv
+Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj
+QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV
+cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS
+IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2
+HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa
+O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv
+033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u
+dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE
+kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41
+3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD
+u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq
+4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc=
+-----END CERTIFICATE-----
+
+# Issuer: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Subject: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Label: "T-TeleSec GlobalRoot Class 3"
+# Serial: 1
+# MD5 Fingerprint: ca:fb:40:a8:4e:39:92:8a:1d:fe:8e:2f:c4:27:ea:ef
+# SHA1 Fingerprint: 55:a6:72:3e:cb:f2:ec:cd:c3:23:74:70:19:9d:2a:be:11:e3:81:d1
+# SHA256 Fingerprint: fd:73:da:d3:1c:64:4f:f1:b4:3b:ef:0c:cd:da:96:71:0b:9c:d9:87:5e:ca:7e:31:70:7a:f3:e9:6d:52:2b:bd
+-----BEGIN CERTIFICATE-----
+MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
+KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
+BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
+YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1
+OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
+aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
+ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN
+8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/
+RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4
+hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5
+ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM
+EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1
+A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy
+WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ
+1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30
+6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT
+91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml
+e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p
+TpPDpFQUWw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH
+# Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH
+# Label: "D-TRUST Root Class 3 CA 2 2009"
+# Serial: 623603
+# MD5 Fingerprint: cd:e0:25:69:8d:47:ac:9c:89:35:90:f7:fd:51:3d:2f
+# SHA1 Fingerprint: 58:e8:ab:b0:36:15:33:fb:80:f7:9b:1b:6d:29:d3:ff:8d:5f:00:f0
+# SHA256 Fingerprint: 49:e7:a4:42:ac:f0:ea:62:87:05:00:54:b5:25:64:b6:50:e4:f4:9e:42:e3:48:d6:aa:38:e0:39:e9:57:b1:c1
+-----BEGIN CERTIFICATE-----
+MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF
+MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD
+bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha
+ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM
+HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03
+UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42
+tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R
+ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM
+lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp
+/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G
+A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G
+A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj
+dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy
+MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl
+cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js
+L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL
+BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni
+acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0
+o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K
+zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8
+PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y
+Johw1+qRzT65ysCQblrGXnRl11z+o+I=
+-----END CERTIFICATE-----
+
+# Issuer: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH
+# Subject: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH
+# Label: "D-TRUST Root Class 3 CA 2 EV 2009"
+# Serial: 623604
+# MD5 Fingerprint: aa:c6:43:2c:5e:2d:cd:c4:34:c0:50:4f:11:02:4f:b6
+# SHA1 Fingerprint: 96:c9:1b:0b:95:b4:10:98:42:fa:d0:d8:22:79:fe:60:fa:b9:16:83
+# SHA256 Fingerprint: ee:c5:49:6b:98:8c:e9:86:25:b9:34:09:2e:ec:29:08:be:d0:b0:f3:16:c2:d4:73:0c:84:ea:f1:f3:d3:48:81
+-----BEGIN CERTIFICATE-----
+MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF
+MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD
+bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw
+NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV
+BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn
+ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0
+3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z
+qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR
+p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8
+HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw
+ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea
+HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw
+Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh
+c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E
+RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt
+dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku
+Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp
+3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05
+nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF
+CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na
+xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX
+KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1
+-----END CERTIFICATE-----
+
+# Issuer: CN=CA Disig Root R2 O=Disig a.s.
+# Subject: CN=CA Disig Root R2 O=Disig a.s.
+# Label: "CA Disig Root R2"
+# Serial: 10572350602393338211
+# MD5 Fingerprint: 26:01:fb:d8:27:a7:17:9a:45:54:38:1a:43:01:3b:03
+# SHA1 Fingerprint: b5:61:eb:ea:a4:de:e4:25:4b:69:1a:98:a5:57:47:c2:34:c7:d9:71
+# SHA256 Fingerprint: e2:3d:4a:03:6d:7b:70:e9:f5:95:b1:42:20:79:d2:b9:1e:df:bb:1f:b6:51:a0:63:3e:aa:8a:9d:c5:f8:07:03
+-----BEGIN CERTIFICATE-----
+MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV
+BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu
+MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy
+MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx
+EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw
+ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe
+NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH
+PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I
+x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe
+QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR
+yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO
+QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912
+H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ
+QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD
+i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs
+nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1
+rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud
+DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI
+hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM
+tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf
+GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb
+lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka
++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal
+TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i
+nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3
+gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr
+G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os
+zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x
+L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL
+-----END CERTIFICATE-----
+
+# Issuer: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV
+# Subject: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV
+# Label: "ACCVRAIZ1"
+# Serial: 6828503384748696800
+# MD5 Fingerprint: d0:a0:5a:ee:05:b6:09:94:21:a1:7d:f1:b2:29:82:02
+# SHA1 Fingerprint: 93:05:7a:88:15:c6:4f:ce:88:2f:fa:91:16:52:28:78:bc:53:64:17
+# SHA256 Fingerprint: 9a:6e:c0:12:e1:a7:da:9d:be:34:19:4d:47:8a:d7:c0:db:18:22:fb:07:1d:f1:29:81:49:6e:d1:04:38:41:13
+-----BEGIN CERTIFICATE-----
+MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE
+AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw
+CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ
+BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND
+VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb
+qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY
+HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo
+G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA
+lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr
+IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/
+0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH
+k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47
+4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO
+m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa
+cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl
+uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI
+KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls
+ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG
+AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2
+VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT
+VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG
+CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA
+cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA
+QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA
+7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA
+cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA
+QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA
+czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu
+aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt
+aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud
+DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF
+BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp
+D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU
+JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m
+AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD
+vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms
+tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH
+7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h
+I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA
+h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF
+d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H
+pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7
+-----END CERTIFICATE-----
+
+# Issuer: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA
+# Subject: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA
+# Label: "TWCA Global Root CA"
+# Serial: 3262
+# MD5 Fingerprint: f9:03:7e:cf:e6:9e:3c:73:7a:2a:90:07:69:ff:2b:96
+# SHA1 Fingerprint: 9c:bb:48:53:f6:a4:f6:d3:52:a4:e8:32:52:55:60:13:f5:ad:af:65
+# SHA256 Fingerprint: 59:76:90:07:f7:68:5d:0f:cd:50:87:2f:9f:95:d5:75:5a:5b:2b:45:7d:81:f3:69:2b:61:0a:98:67:2f:0e:1b
+-----BEGIN CERTIFICATE-----
+MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx
+EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT
+VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5
+NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT
+B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF
+10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz
+0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh
+MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH
+zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc
+46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2
+yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi
+laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP
+oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA
+BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE
+qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm
+4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
+/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL
+1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn
+LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF
+H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo
+RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+
+nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh
+15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW
+6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW
+nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j
+wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz
+aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy
+KwbQBM0=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TeliaSonera Root CA v1 O=TeliaSonera
+# Subject: CN=TeliaSonera Root CA v1 O=TeliaSonera
+# Label: "TeliaSonera Root CA v1"
+# Serial: 199041966741090107964904287217786801558
+# MD5 Fingerprint: 37:41:49:1b:18:56:9a:26:f5:ad:c2:66:fb:40:a5:4c
+# SHA1 Fingerprint: 43:13:bb:96:f1:d5:86:9b:c1:4e:6a:92:f6:cf:f6:34:69:87:82:37
+# SHA256 Fingerprint: dd:69:36:fe:21:f8:f0:77:c1:23:a1:a5:21:c1:22:24:f7:22:55:b7:3e:03:a7:26:06:93:e8:a2:4b:0f:a3:89
+-----BEGIN CERTIFICATE-----
+MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw
+NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv
+b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD
+VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F
+VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1
+7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X
+Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+
+/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs
+81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm
+dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe
+Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu
+sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4
+pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs
+slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ
+arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD
+VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG
+9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl
+dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx
+0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj
+TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed
+Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7
+Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI
+OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7
+vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW
+t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn
+HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx
+SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. OU=E-Tugra Sertifikasyon Merkezi
+# Subject: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. OU=E-Tugra Sertifikasyon Merkezi
+# Label: "E-Tugra Certification Authority"
+# Serial: 7667447206703254355
+# MD5 Fingerprint: b8:a1:03:63:b0:bd:21:71:70:8a:6f:13:3a:bb:79:49
+# SHA1 Fingerprint: 51:c6:e7:08:49:06:6e:f3:92:d4:5c:a0:0d:6d:a3:62:8f:c3:52:39
+# SHA256 Fingerprint: b0:bf:d5:2b:b0:d7:d9:bd:92:bf:5d:4d:c1:3d:a2:55:c0:2c:54:2f:37:83:65:ea:89:39:11:f5:5e:55:f2:3c
+-----BEGIN CERTIFICATE-----
+MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV
+BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC
+aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV
+BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1
+Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz
+MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+
+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp
+em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN
+ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY
+B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH
+D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF
+Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo
+q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D
+k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH
+fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut
+dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM
+ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8
+zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn
+rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX
+U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6
+Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5
+XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF
+Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR
+HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY
+GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c
+77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3
++GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK
+vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6
+FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl
+yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P
+AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD
+y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d
+NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Label: "T-TeleSec GlobalRoot Class 2"
+# Serial: 1
+# MD5 Fingerprint: 2b:9b:9e:e4:7b:6c:1f:00:72:1a:cc:c1:77:79:df:6a
+# SHA1 Fingerprint: 59:0d:2d:7d:88:4f:40:2e:61:7e:a5:62:32:17:65:cf:17:d8:94:e9
+# SHA256 Fingerprint: 91:e2:f5:78:8d:58:10:eb:a7:ba:58:73:7d:e1:54:8a:8e:ca:cd:01:45:98:bc:0b:14:3e:04:1b:17:05:25:52
+-----BEGIN CERTIFICATE-----
+MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
+KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
+BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
+YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1
+OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
+aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
+ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd
+AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC
+FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi
+1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq
+jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ
+wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/
+WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy
+NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC
+uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw
+IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6
+g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN
+9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP
+BSeOE6Fuwg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Atos TrustedRoot 2011 O=Atos
+# Subject: CN=Atos TrustedRoot 2011 O=Atos
+# Label: "Atos TrustedRoot 2011"
+# Serial: 6643877497813316402
+# MD5 Fingerprint: ae:b9:c4:32:4b:ac:7f:5d:66:cc:77:94:bb:2a:77:56
+# SHA1 Fingerprint: 2b:b1:f5:3e:55:0c:1d:c5:f1:d4:e6:b7:6a:46:4b:55:06:02:ac:21
+# SHA256 Fingerprint: f3:56:be:a2:44:b7:a9:1e:b3:5d:53:ca:9a:d7:86:4a:ce:01:8e:2d:35:d5:f8:f9:6d:df:68:a6:f4:1a:a4:74
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE
+AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG
+EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM
+FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC
+REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp
+Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM
+VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+
+SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ
+4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L
+cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi
+eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV
+HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG
+A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3
+DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j
+vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP
+DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc
+maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D
+lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv
+KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 1 G3"
+# Serial: 687049649626669250736271037606554624078720034195
+# MD5 Fingerprint: a4:bc:5b:3f:fe:37:9a:fa:64:f0:e2:fa:05:3d:0b:ab
+# SHA1 Fingerprint: 1b:8e:ea:57:96:29:1a:c9:39:ea:b8:0a:81:1a:73:73:c0:93:79:67
+# SHA256 Fingerprint: 8a:86:6f:d1:b2:76:b5:7e:57:8e:92:1c:65:82:8a:2b:ed:58:e9:f2:f2:88:05:41:34:b7:f1:f4:bf:c9:cc:74
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00
+MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV
+wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe
+rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341
+68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh
+4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp
+UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o
+abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc
+3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G
+KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt
+hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO
+Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt
+zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD
+ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC
+MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2
+cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN
+qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5
+YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv
+b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2
+8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k
+NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj
+ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp
+q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt
+nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 2 G3"
+# Serial: 390156079458959257446133169266079962026824725800
+# MD5 Fingerprint: af:0c:86:6e:bf:40:2d:7f:0b:3e:12:50:ba:12:3d:06
+# SHA1 Fingerprint: 09:3c:61:f3:8b:8b:dc:7d:55:df:75:38:02:05:00:e1:25:f5:c8:36
+# SHA256 Fingerprint: 8f:e4:fb:0a:f9:3a:4d:0d:67:db:0b:eb:b2:3e:37:c7:1b:f3:25:dc:bc:dd:24:0e:a0:4d:af:58:b4:7e:18:40
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00
+MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf
+qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW
+n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym
+c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+
+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1
+o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j
+IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq
+IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz
+8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh
+vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l
+7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG
+cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD
+ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66
+AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC
+roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga
+W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n
+lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE
++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV
+csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd
+dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg
+KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM
+HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4
+WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 3 G3"
+# Serial: 268090761170461462463995952157327242137089239581
+# MD5 Fingerprint: df:7d:b9:ad:54:6f:68:a1:df:89:57:03:97:43:b0:d7
+# SHA1 Fingerprint: 48:12:bd:92:3c:a8:c4:39:06:e7:30:6d:27:96:e6:a4:cf:22:2e:7d
+# SHA256 Fingerprint: 88:ef:81:de:20:2e:b0:18:45:2e:43:f8:64:72:5c:ea:5f:bd:1f:c2:d9:d2:05:73:07:09:c5:d8:b8:69:0f:46
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00
+MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR
+/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu
+FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR
+U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c
+ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR
+FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k
+A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw
+eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl
+sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp
+VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q
+A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+
+ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD
+ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px
+KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI
+FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv
+oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg
+u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP
+0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf
+3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl
+8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+
+DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN
+PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/
+ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root G2"
+# Serial: 15385348160840213938643033620894905419
+# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d
+# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f
+# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85
+-----BEGIN CERTIFICATE-----
+MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
+b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
+cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA
+n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc
+biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp
+EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA
+bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu
+YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB
+AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW
+BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI
+QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I
+0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni
+lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9
+B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv
+ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo
+IhNzbM8m9Yop5w==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root G3"
+# Serial: 15459312981008553731928384953135426796
+# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb
+# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89
+# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2
+-----BEGIN CERTIFICATE-----
+MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw
+CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu
+ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg
+RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV
+UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
+Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq
+hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf
+Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q
+RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
+BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD
+AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY
+JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv
+6pZjamVFkpUBtA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root G2"
+# Serial: 4293743540046975378534879503202253541
+# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44
+# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4
+# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f
+-----BEGIN CERTIFICATE-----
+MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH
+MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI
+2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx
+1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ
+q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz
+tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ
+vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP
+BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV
+5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY
+1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4
+NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG
+Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91
+8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe
+pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl
+MrY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root G3"
+# Serial: 7089244469030293291760083333884364146
+# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca
+# SHA1 Fingerprint: 7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e
+# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0
+-----BEGIN CERTIFICATE-----
+MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw
+CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu
+ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe
+Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw
+EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x
+IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF
+K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG
+fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO
+Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd
+BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx
+AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/
+oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8
+sycX
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Trusted Root G4"
+# Serial: 7451500558977370777930084869016614236
+# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49
+# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4
+# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88
+-----BEGIN CERTIFICATE-----
+MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg
+RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV
+UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
+Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y
+ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If
+xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV
+ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO
+DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ
+jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/
+CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi
+EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM
+fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY
+uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK
+chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t
+9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD
+ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2
+SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd
++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc
+fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa
+sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N
+cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N
+0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie
+4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI
+r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1
+/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm
+gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited
+# Label: "COMODO RSA Certification Authority"
+# Serial: 101909084537582093308941363524873193117
+# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18
+# SHA1 Fingerprint: af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4
+# SHA256 Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34
+-----BEGIN CERTIFICATE-----
+MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB
+hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV
+BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5
+MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT
+EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR
+Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR
+6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X
+pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC
+9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV
+/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf
+Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z
++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w
+qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah
+SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC
+u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf
+Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq
+crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E
+FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB
+/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl
+wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM
+4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV
+2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna
+FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ
+CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK
+boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke
+jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL
+S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb
+QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl
+0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB
+NVOFBkpdn627G190
+-----END CERTIFICATE-----
+
+# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network
+# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network
+# Label: "USERTrust RSA Certification Authority"
+# Serial: 2645093764781058787591871645665788717
+# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5
+# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e
+# SHA256 Fingerprint: e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2
+-----BEGIN CERTIFICATE-----
+MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB
+iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl
+cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV
+BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw
+MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV
+BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU
+aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy
+dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B
+3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY
+tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/
+Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2
+VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT
+79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6
+c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT
+Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l
+c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee
+UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE
+Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd
+BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G
+A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF
+Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO
+VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3
+ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs
+8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR
+iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze
+Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ
+XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/
+qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB
+VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB
+L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG
+jjxDah2nGN59PRbxYvnKkKj9
+-----END CERTIFICATE-----
+
+# Issuer: CN=USERTrust ECC Certification Authority O=The USERTRUST Network
+# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network
+# Label: "USERTrust ECC Certification Authority"
+# Serial: 123013823720199481456569720443997572134
+# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1
+# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0
+# SHA256 Fingerprint: 4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a
+-----BEGIN CERTIFICATE-----
+MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL
+MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl
+eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT
+JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx
+MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT
+Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg
+VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo
+I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng
+o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G
+A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD
+VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB
+zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW
+RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
+# Label: "GlobalSign ECC Root CA - R4"
+# Serial: 14367148294922964480859022125800977897474
+# MD5 Fingerprint: 20:f0:27:68:d1:7e:a0:9d:0e:e6:2a:ca:df:5c:89:8e
+# SHA1 Fingerprint: 69:69:56:2e:40:80:f4:24:a1:e7:19:9f:14:ba:f3:ee:58:ab:6a:bb
+# SHA256 Fingerprint: be:c9:49:11:c2:95:56:76:db:6c:0a:55:09:86:d7:6e:3b:a0:05:66:7c:44:2c:97:62:b4:fb:b7:73:de:22:8c
+-----BEGIN CERTIFICATE-----
+MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEk
+MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpH
+bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX
+DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD
+QSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu
+MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprlOQcJ
+FspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAw
+DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61F
+uOJAf/sKbvu+M8k8o4TVMAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGX
+kPoUVy0D7O48027KqGx2vKLeuwIgJ6iFJzWbVsaj8kfSt24bAgAXqmemFZHe+pTs
+ewv4n4Q=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5
+# Label: "GlobalSign ECC Root CA - R5"
+# Serial: 32785792099990507226680698011560947931244
+# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08
+# SHA1 Fingerprint: 1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa
+# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24
+-----BEGIN CERTIFICATE-----
+MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk
+MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH
+bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX
+DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD
+QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu
+MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc
+8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke
+hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD
+VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI
+KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg
+515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO
+xwy8p2Fp8fc74SrL+SvzZpA3
+-----END CERTIFICATE-----
+
+# Issuer: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden
+# Subject: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden
+# Label: "Staat der Nederlanden EV Root CA"
+# Serial: 10000013
+# MD5 Fingerprint: fc:06:af:7b:e8:1a:f1:9a:b4:e8:d2:70:1f:c0:f5:ba
+# SHA1 Fingerprint: 76:e2:7e:c1:4f:db:82:c1:c0:a6:75:b5:05:be:3d:29:b4:ed:db:bb
+# SHA256 Fingerprint: 4d:24:91:41:4c:fe:95:67:46:ec:4c:ef:a6:cf:6f:72:e2:8a:13:29:43:2f:9d:8a:90:7a:c4:cb:5d:ad:c1:5a
+-----BEGIN CERTIFICATE-----
+MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO
+TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh
+dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y
+MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg
+TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS
+b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS
+M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC
+UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d
+Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p
+rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l
+pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb
+j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC
+KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS
+/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X
+cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH
+1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP
+px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB
+/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7
+MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI
+eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u
+2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS
+v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC
+wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy
+CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e
+vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6
+Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa
+Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL
+eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8
+FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc
+7uzXLg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust
+# Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust
+# Label: "IdenTrust Commercial Root CA 1"
+# Serial: 13298821034946342390520003877796839426
+# MD5 Fingerprint: b3:3e:77:73:75:ee:a0:d3:e3:7e:49:63:49:59:bb:c7
+# SHA1 Fingerprint: df:71:7e:aa:4a:d9:4e:c9:55:84:99:60:2d:48:de:5f:bc:f0:3a:25
+# SHA256 Fingerprint: 5d:56:49:9b:e4:d2:e0:8b:cf:ca:d0:8a:3e:38:72:3d:50:50:3b:de:70:69:48:e4:2f:55:60:30:19:e5:28:ae
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK
+MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu
+VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw
+MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw
+JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT
+3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU
++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp
+S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1
+bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi
+T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL
+vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK
+Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK
+dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT
+c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv
+l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N
+iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD
+ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH
+6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt
+LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93
+nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3
++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK
+W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT
+AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq
+l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG
+4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ
+mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A
+7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H
+-----END CERTIFICATE-----
+
+# Issuer: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust
+# Subject: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust
+# Label: "IdenTrust Public Sector Root CA 1"
+# Serial: 13298821034946342390521976156843933698
+# MD5 Fingerprint: 37:06:a5:b0:fc:89:9d:ba:f4:6b:8c:1a:64:cd:d5:ba
+# SHA1 Fingerprint: ba:29:41:60:77:98:3f:f4:f3:ef:f2:31:05:3b:2e:ea:6d:4d:45:fd
+# SHA256 Fingerprint: 30:d0:89:5a:9a:44:8a:26:20:91:63:55:22:d1:f5:20:10:b5:86:7a:ca:e1:2c:78:ef:95:8f:d4:f4:38:9f:2f
+-----BEGIN CERTIFICATE-----
+MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN
+MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu
+VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN
+MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0
+MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7
+ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy
+RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS
+bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF
+/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R
+3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw
+EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy
+9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V
+GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ
+2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV
+WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD
+W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN
+AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj
+t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV
+DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9
+TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G
+lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW
+mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df
+WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5
++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ
+tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA
+GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv
+8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - G2"
+# Serial: 1246989352
+# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2
+# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4
+# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39
+-----BEGIN CERTIFICATE-----
+MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50
+cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs
+IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz
+dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy
+NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu
+dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt
+dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0
+aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T
+RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN
+cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW
+wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1
+U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0
+jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP
+BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN
+BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/
+jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ
+Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v
+1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R
+nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH
+VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - EC1"
+# Serial: 51543124481930649114116133369
+# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc
+# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47
+# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5
+-----BEGIN CERTIFICATE-----
+MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG
+A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3
+d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu
+dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq
+RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy
+MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD
+VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0
+L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g
+Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD
+ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi
+A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt
+ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH
+Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O
+BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC
+R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX
+hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G
+-----END CERTIFICATE-----
+
+# Issuer: CN=CFCA EV ROOT O=China Financial Certification Authority
+# Subject: CN=CFCA EV ROOT O=China Financial Certification Authority
+# Label: "CFCA EV ROOT"
+# Serial: 407555286
+# MD5 Fingerprint: 74:e1:b6:ed:26:7a:7a:44:30:33:94:ab:7b:27:81:30
+# SHA1 Fingerprint: e2:b8:29:4b:55:84:ab:6b:58:c2:90:46:6c:ac:3f:b8:39:8f:84:83
+# SHA256 Fingerprint: 5c:c3:d7:8e:4e:1d:5e:45:54:7a:04:e6:87:3e:64:f9:0c:f9:53:6d:1c:cc:2e:f8:00:f3:55:c4:c5:fd:70:fd
+-----BEGIN CERTIFICATE-----
+MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD
+TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx
+MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j
+aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP
+T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03
+sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL
+TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5
+/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp
+7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz
+EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt
+hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP
+a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot
+aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg
+TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV
+PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv
+cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL
+tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd
+BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB
+ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT
+ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL
+jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS
+ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy
+P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19
+xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d
+Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN
+5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe
+/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z
+AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ
+5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su
+-----END CERTIFICATE-----
+
+# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
+# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
+# Label: "OISTE WISeKey Global Root GB CA"
+# Serial: 157768595616588414422159278966750757568
+# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d
+# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed
+# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6
+-----BEGIN CERTIFICATE-----
+MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt
+MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg
+Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i
+YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x
+CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG
+b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh
+bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3
+HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx
+WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX
+1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk
+u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P
+99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r
+M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw
+AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB
+BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh
+cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5
+gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO
+ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf
+aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic
+Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM=
+-----END CERTIFICATE-----
+
+# Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A.
+# Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A.
+# Label: "SZAFIR ROOT CA2"
+# Serial: 357043034767186914217277344587386743377558296292
+# MD5 Fingerprint: 11:64:c1:89:b0:24:b1:8c:b1:07:7e:89:9e:51:9e:99
+# SHA1 Fingerprint: e2:52:fa:95:3f:ed:db:24:60:bd:6e:28:f3:9c:cc:cf:5e:b3:3f:de
+# SHA256 Fingerprint: a1:33:9d:33:28:1a:0b:56:e5:57:d3:d3:2b:1c:e7:f9:36:7e:b0:94:bd:5f:a7:2a:7e:50:04:c8:de:d7:ca:fe
+-----BEGIN CERTIFICATE-----
+MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQEL
+BQAwUTELMAkGA1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6
+ZW5pb3dhIFMuQS4xGDAWBgNVBAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkw
+NzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L
+cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYDVQQDDA9TWkFGSVIg
+Uk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5QqEvN
+QLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT
+3PSQ1hNKDJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw
+3gAeqDRHu5rr/gsUvTaE2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr6
+3fE9biCloBK0TXC5ztdyO4mTp4CEHCdJckm1/zuVnsHMyAHs6A6KCpbns6aH5db5
+BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwiieDhZNRnvDF5YTy7ykHN
+XGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD
+AgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsF
+AAOCAQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw
+8PRBEew/R40/cof5O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOG
+nXkZ7/e7DDWQw4rtTw/1zBLZpD67oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCP
+oky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul4+vJhaAlIDf7js4MNIThPIGy
+d05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6+/NNIxuZMzSg
+LvWpCz/UXeHPhJ/iGcJfitYgHuNztw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Subject: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Label: "Certum Trusted Network CA 2"
+# Serial: 44979900017204383099463764357512596969
+# MD5 Fingerprint: 6d:46:9e:d9:25:6d:08:23:5b:5e:74:7d:1e:27:db:f2
+# SHA1 Fingerprint: d3:dd:48:3e:2b:bf:4c:05:e8:af:10:f5:fa:76:26:cf:d3:dc:30:92
+# SHA256 Fingerprint: b6:76:f2:ed:da:e8:77:5c:d3:6c:b0:f6:3c:d1:d4:60:39:61:f4:9e:62:65:ba:01:3a:2f:03:07:b6:d0:b8:04
+-----BEGIN CERTIFICATE-----
+MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB
+gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu
+QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG
+A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz
+OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ
+VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3
+b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA
+DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn
+0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB
+OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE
+fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E
+Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m
+o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i
+sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW
+OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez
+Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS
+adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n
+3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC
+AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ
+F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf
+CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29
+XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm
+djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/
+WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb
+AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq
+P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko
+b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj
+XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P
+5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi
+DrW5viSP
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Subject: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Label: "Hellenic Academic and Research Institutions RootCA 2015"
+# Serial: 0
+# MD5 Fingerprint: ca:ff:e2:db:03:d9:cb:4b:e9:0f:ad:84:fd:7b:18:ce
+# SHA1 Fingerprint: 01:0c:06:95:a6:98:19:14:ff:bf:5f:c6:b0:b6:95:ea:29:e9:12:a6
+# SHA256 Fingerprint: a0:40:92:9a:02:ce:53:b4:ac:f4:f2:ff:c6:98:1c:e4:49:6f:75:5e:6d:45:fe:0b:2a:69:2b:cd:52:52:3f:36
+-----BEGIN CERTIFICATE-----
+MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1Ix
+DzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5k
+IFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMT
+N0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9v
+dENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAxMTIxWjCBpjELMAkG
+A1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNh
+ZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkx
+QDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1
+dGlvbnMgUm9vdENBIDIwMTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
+AQDC+Kk/G4n8PDwEXT2QNrCROnk8ZlrvbTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA
+4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+ehiGsxr/CL0BgzuNtFajT0
+AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+6PAQZe10
+4S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06C
+ojXdFPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV
+9Cz82XBST3i4vTwri5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrD
+gfgXy5I2XdGj2HUb4Ysn6npIQf1FGQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6
+Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2fu/Z8VFRfS0myGlZYeCsargq
+NhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9muiNX6hME6wGko
+LfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc
+Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNV
+HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVd
+ctA4GGqd83EkVAswDQYJKoZIhvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0I
+XtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+D1hYc2Ryx+hFjtyp8iY/xnmMsVMI
+M4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrMd/K4kPFox/la/vot
+9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+yd+2V
+Z5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/ea
+j8GsGsVn82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnh
+X9izjFk0WaSrT2y7HxjbdavYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQ
+l033DlZdwJVqwjbDG2jJ9SrcR5q+ss7FJej6A7na+RZukYT1HCjI/CbM1xyQVqdf
+bzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVtJ94Cj8rDtSvK6evIIVM4
+pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGaJI7ZjnHK
+e7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0
+vm9qp/UsQu0yrbYhnr68
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Subject: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Label: "Hellenic Academic and Research Institutions ECC RootCA 2015"
+# Serial: 0
+# MD5 Fingerprint: 81:e5:b4:17:eb:c2:f5:e1:4b:0d:41:7b:49:92:fe:ef
+# SHA1 Fingerprint: 9f:f1:71:8d:92:d5:9a:f3:7d:74:97:b4:bc:6f:84:68:0b:ba:b6:66
+# SHA256 Fingerprint: 44:b5:45:aa:8a:25:e6:5a:73:ca:15:dc:27:fc:36:d2:4c:1c:b9:95:3a:06:65:39:b1:15:82:dc:48:7b:48:33
+-----BEGIN CERTIFICATE-----
+MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzAN
+BgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
+c2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hl
+bGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgRUNDIFJv
+b3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEwMzcxMlowgaoxCzAJ
+BgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmljIEFj
+YWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5
+MUQwQgYDVQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0
+dXRpb25zIEVDQyBSb290Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKg
+QehLgoRc4vgxEZmGZE4JJS+dQS8KrjVPdJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJa
+jq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoKVlp8aQuqgAkkbH7BRqNC
+MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLQi
+C4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaep
+lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof
+TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR
+-----END CERTIFICATE-----
+
+# Issuer: CN=ISRG Root X1 O=Internet Security Research Group
+# Subject: CN=ISRG Root X1 O=Internet Security Research Group
+# Label: "ISRG Root X1"
+# Serial: 172886928669790476064670243504169061120
+# MD5 Fingerprint: 0c:d2:f9:e0:da:17:73:e9:ed:86:4d:a5:e3:70:e7:4e
+# SHA1 Fingerprint: ca:bd:2a:79:a1:07:6a:31:f2:1d:25:36:35:cb:03:9d:43:29:a5:e8
+# SHA256 Fingerprint: 96:bc:ec:06:26:49:76:f3:74:60:77:9a:cf:28:c5:a7:cf:e8:a3:c0:aa:e1:1a:8f:fc:ee:05:c0:bd:df:08:c6
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
+
+# Issuer: O=FNMT-RCM OU=AC RAIZ FNMT-RCM
+# Subject: O=FNMT-RCM OU=AC RAIZ FNMT-RCM
+# Label: "AC RAIZ FNMT-RCM"
+# Serial: 485876308206448804701554682760554759
+# MD5 Fingerprint: e2:09:04:b4:d3:bd:d1:a0:14:fd:1a:d2:47:c4:57:1d
+# SHA1 Fingerprint: ec:50:35:07:b2:15:c4:95:62:19:e2:a8:9a:5b:42:99:2c:4c:2c:20
+# SHA256 Fingerprint: eb:c5:57:0c:29:01:8c:4d:67:b1:aa:12:7b:af:12:f7:03:b4:61:1e:bc:17:b7:da:b5:57:38:94:17:9b:93:fa
+-----BEGIN CERTIFICATE-----
+MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsx
+CzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJ
+WiBGTk1ULVJDTTAeFw0wODEwMjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJ
+BgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBG
+Tk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALpxgHpMhm5/
+yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcfqQgf
+BBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAz
+WHFctPVrbtQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxF
+tBDXaEAUwED653cXeuYLj2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z
+374jNUUeAlz+taibmSXaXvMiwzn15Cou08YfxGyqxRxqAQVKL9LFwag0Jl1mpdIC
+IfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mwWsXmo8RZZUc1g16p6DUL
+mbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnTtOmlcYF7
+wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peS
+MKGJ47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2
+ZSysV4999AeU14ECll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMet
+UqIJ5G+GR4of6ygnXYMgrwTJbFaai0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUw
+AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPd9xf3E6Jobd2Sn9R2gzL+H
+YJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1odHRwOi8vd3d3
+LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD
+nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1
+RXxlDPiyN8+sD8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYM
+LVN0V2Ue1bLdI4E7pWYjJ2cJj+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf
+77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrTQfv6MooqtyuGC2mDOL7Nii4LcK2N
+JpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW+YJF1DngoABd15jm
+fZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7Ixjp
+6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp
+1txyM/1d8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B
+9kiABdcPUXmsEKvU7ANm5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wok
+RqEIr9baRRmW1FMdW4R58MD3R++Lj8UGrp1MYp3/RgT408m2ECVAdf4WqslKYIYv
+uu8wd+RU4riEmViAqhOLUTpPSPaLtrM=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 1 O=Amazon
+# Subject: CN=Amazon Root CA 1 O=Amazon
+# Label: "Amazon Root CA 1"
+# Serial: 143266978916655856878034712317230054538369994
+# MD5 Fingerprint: 43:c6:bf:ae:ec:fe:ad:2f:18:c6:88:68:30:fc:c8:e6
+# SHA1 Fingerprint: 8d:a7:f9:65:ec:5e:fc:37:91:0f:1c:6e:59:fd:c1:cc:6a:6e:de:16
+# SHA256 Fingerprint: 8e:cd:e6:88:4f:3d:87:b1:12:5b:a3:1a:c3:fc:b1:3d:70:16:de:7f:57:cc:90:4f:e1:cb:97:c6:ae:98:19:6e
+-----BEGIN CERTIFICATE-----
+MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF
+ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
+b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
+b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj
+ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM
+9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw
+IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6
+VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L
+93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm
+jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA
+A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI
+U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs
+N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv
+o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU
+5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy
+rqXRfboQnoZsG4q5WTP468SQvvG5
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 2 O=Amazon
+# Subject: CN=Amazon Root CA 2 O=Amazon
+# Label: "Amazon Root CA 2"
+# Serial: 143266982885963551818349160658925006970653239
+# MD5 Fingerprint: c8:e5:8d:ce:a8:42:e2:7a:c0:2a:5c:7c:9e:26:bf:66
+# SHA1 Fingerprint: 5a:8c:ef:45:d7:a6:98:59:76:7a:8c:8b:44:96:b5:78:cf:47:4b:1a
+# SHA256 Fingerprint: 1b:a5:b2:aa:8c:65:40:1a:82:96:01:18:f8:0b:ec:4f:62:30:4d:83:ce:c4:71:3a:19:c3:9c:01:1e:a4:6d:b4
+-----BEGIN CERTIFICATE-----
+MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF
+ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
+b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
+b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK
+gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ
+W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg
+1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K
+8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r
+2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me
+z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR
+8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj
+mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz
+7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6
++XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI
+0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB
+Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm
+UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2
+LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY
++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS
+k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl
+7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm
+btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl
+urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+
+fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63
+n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE
+76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H
+9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT
+4PsJYGw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 3 O=Amazon
+# Subject: CN=Amazon Root CA 3 O=Amazon
+# Label: "Amazon Root CA 3"
+# Serial: 143266986699090766294700635381230934788665930
+# MD5 Fingerprint: a0:d4:ef:0b:f7:b5:d8:49:95:2a:ec:f5:c4:fc:81:87
+# SHA1 Fingerprint: 0d:44:dd:8c:3c:8c:1a:1a:58:75:64:81:e9:0f:2e:2a:ff:b3:d2:6e
+# SHA256 Fingerprint: 18:ce:6c:fe:7b:f1:4e:60:b2:e3:47:b8:df:e8:68:cb:31:d0:2e:bb:3a:da:27:15:69:f5:03:43:b4:6d:b3:a4
+-----BEGIN CERTIFICATE-----
+MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5
+MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g
+Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG
+A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg
+Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl
+ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr
+ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr
+BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM
+YyRIHN8wfdVoOw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 4 O=Amazon
+# Subject: CN=Amazon Root CA 4 O=Amazon
+# Label: "Amazon Root CA 4"
+# Serial: 143266989758080763974105200630763877849284878
+# MD5 Fingerprint: 89:bc:27:d5:eb:17:8d:06:6a:69:d5:fd:89:47:b4:cd
+# SHA1 Fingerprint: f6:10:84:07:d6:f8:bb:67:98:0c:c2:e2:44:c2:eb:ae:1c:ef:63:be
+# SHA256 Fingerprint: e3:5d:28:41:9e:d0:20:25:cf:a6:90:38:cd:62:39:62:45:8d:a5:c6:95:fb:de:a3:c2:2b:0b:fb:25:89:70:92
+-----BEGIN CERTIFICATE-----
+MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5
+MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g
+Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG
+A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg
+Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi
+9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk
+M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB
+/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB
+MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw
+CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW
+1KyLa2tJElMzrdfkviT8tQp21KW8EA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
+# Subject: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
+# Label: "TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1"
+# Serial: 1
+# MD5 Fingerprint: dc:00:81:dc:69:2f:3e:2f:b0:3b:f6:3d:5a:91:8e:49
+# SHA1 Fingerprint: 31:43:64:9b:ec:ce:27:ec:ed:3a:3f:0b:8f:0d:e4:e8:91:dd:ee:ca
+# SHA256 Fingerprint: 46:ed:c3:68:90:46:d5:3a:45:3f:b3:10:4a:b8:0d:ca:ec:65:8b:26:60:ea:16:29:dd:7e:86:79:90:64:87:16
+-----BEGIN CERTIFICATE-----
+MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIx
+GDAWBgNVBAcTD0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxp
+bXNlbCB2ZSBUZWtub2xvamlrIEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0w
+KwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24gTWVya2V6aSAtIEthbXUgU00xNjA0
+BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRpZmlrYXNpIC0gU3Vy
+dW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYDVQQG
+EwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXll
+IEJpbGltc2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklU
+QUsxLTArBgNVBAsTJEthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBT
+TTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11IFNNIFNTTCBLb2sgU2VydGlmaWthc2kg
+LSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr3UwM6q7
+a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y86Ij5iySr
+LqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INr
+N3wcwv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2X
+YacQuFWQfw4tJzh03+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/
+iSIzL+aFCr2lqBs23tPcLG07xxO9WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4f
+AJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQUZT/HiobGPN08VFw1+DrtUgxH
+V8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL
+BQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh
+AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPf
+IPP54+M638yclNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4
+lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c
+8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf
+lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD.
+# Subject: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD.
+# Label: "GDCA TrustAUTH R5 ROOT"
+# Serial: 9009899650740120186
+# MD5 Fingerprint: 63:cc:d9:3d:34:35:5c:6f:53:a3:e2:08:70:48:1f:b4
+# SHA1 Fingerprint: 0f:36:38:5b:81:1a:25:c3:9b:31:4e:83:ca:e9:34:66:70:cc:74:b4
+# SHA256 Fingerprint: bf:ff:8f:d0:44:33:48:7d:6a:8a:a6:0c:1a:29:76:7a:9f:c2:bb:b0:5e:42:0f:71:3a:13:b9:92:89:1d:38:93
+-----BEGIN CERTIFICATE-----
+MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE
+BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ
+IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0
+MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV
+BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w
+HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj
+Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj
+TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u
+KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj
+qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm
+MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12
+ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP
+zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk
+L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC
+jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA
+HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC
+AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB
+/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg
+p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm
+DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5
+COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry
+L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf
+JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg
+IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io
+2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV
+09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ
+XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq
+T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe
+MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+# Subject: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+# Label: "TrustCor RootCert CA-1"
+# Serial: 15752444095811006489
+# MD5 Fingerprint: 6e:85:f1:dc:1a:00:d3:22:d5:b2:b2:ac:6b:37:05:45
+# SHA1 Fingerprint: ff:bd:cd:e7:82:c8:43:5e:3c:6f:26:86:5c:ca:a8:3a:45:5b:c3:0a
+# SHA256 Fingerprint: d4:0e:9c:86:cd:8f:e4:68:c1:77:69:59:f4:9e:a7:74:fa:54:86:84:b6:c4:06:f3:90:92:61:f4:dc:e2:57:5c
+-----BEGIN CERTIFICATE-----
+MIIEMDCCAxigAwIBAgIJANqb7HHzA7AZMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYD
+VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk
+MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U
+cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRydXN0Q29y
+IFJvb3RDZXJ0IENBLTEwHhcNMTYwMjA0MTIzMjE2WhcNMjkxMjMxMTcyMzE2WjCB
+pDELMAkGA1UEBhMCUEExDzANBgNVBAgMBlBhbmFtYTEUMBIGA1UEBwwLUGFuYW1h
+IENpdHkxJDAiBgNVBAoMG1RydXN0Q29yIFN5c3RlbXMgUy4gZGUgUi5MLjEnMCUG
+A1UECwweVHJ1c3RDb3IgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR8wHQYDVQQDDBZU
+cnVzdENvciBSb290Q2VydCBDQS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
+CgKCAQEAv463leLCJhJrMxnHQFgKq1mqjQCj/IDHUHuO1CAmujIS2CNUSSUQIpid
+RtLByZ5OGy4sDjjzGiVoHKZaBeYei0i/mJZ0PmnK6bV4pQa81QBeCQryJ3pS/C3V
+seq0iWEk8xoT26nPUu0MJLq5nux+AHT6k61sKZKuUbS701e/s/OojZz0JEsq1pme
+9J7+wH5COucLlVPat2gOkEz7cD+PSiyU8ybdY2mplNgQTsVHCJCZGxdNuWxu72CV
+EY4hgLW9oHPY0LJ3xEXqWib7ZnZ2+AYfYW0PVcWDtxBWcgYHpfOxGgMFZA6dWorW
+hnAbJN7+KIor0Gqw/Hqi3LJ5DotlDwIDAQABo2MwYTAdBgNVHQ4EFgQU7mtJPHo/
+DeOxCbeKyKsZn3MzUOcwHwYDVR0jBBgwFoAU7mtJPHo/DeOxCbeKyKsZn3MzUOcw
+DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD
+ggEBACUY1JGPE+6PHh0RU9otRCkZoB5rMZ5NDp6tPVxBb5UrJKF5mDo4Nvu7Zp5I
+/5CQ7z3UuJu0h3U/IJvOcs+hVcFNZKIZBqEHMwwLKeXx6quj7LUKdJDHfXLy11yf
+ke+Ri7fc7Waiz45mO7yfOgLgJ90WmMCV1Aqk5IGadZQ1nJBfiDcGrVmVCrDRZ9MZ
+yonnMlo2HD6CqFqTvsbQZJG2z9m2GM/bftJlo6bEjhcxwft+dtvTheNYsnd6djts
+L1Ac59v2Z3kf9YKVmgenFK+P3CghZwnS1k1aHBkcjndcw5QkPTJrS37UeJSDvjdN
+zl/HHk484IkzlQsPpTLWPFp5LBk=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+# Subject: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+# Label: "TrustCor RootCert CA-2"
+# Serial: 2711694510199101698
+# MD5 Fingerprint: a2:e1:f8:18:0b:ba:45:d5:c7:41:2a:bb:37:52:45:64
+# SHA1 Fingerprint: b8:be:6d:cb:56:f1:55:b9:63:d4:12:ca:4e:06:34:c7:94:b2:1c:c0
+# SHA256 Fingerprint: 07:53:e9:40:37:8c:1b:d5:e3:83:6e:39:5d:ae:a5:cb:83:9e:50:46:f1:bd:0e:ae:19:51:cf:10:fe:c7:c9:65
+-----BEGIN CERTIFICATE-----
+MIIGLzCCBBegAwIBAgIIJaHfyjPLWQIwDQYJKoZIhvcNAQELBQAwgaQxCzAJBgNV
+BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw
+IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy
+dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEfMB0GA1UEAwwWVHJ1c3RDb3Ig
+Um9vdENlcnQgQ0EtMjAeFw0xNjAyMDQxMjMyMjNaFw0zNDEyMzExNzI2MzlaMIGk
+MQswCQYDVQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEg
+Q2l0eTEkMCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYD
+VQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRy
+dXN0Q29yIFJvb3RDZXJ0IENBLTIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+AoICAQCnIG7CKqJiJJWQdsg4foDSq8GbZQWU9MEKENUCrO2fk8eHyLAnK0IMPQo+
+QVqedd2NyuCb7GgypGmSaIwLgQ5WoD4a3SwlFIIvl9NkRvRUqdw6VC0xK5mC8tkq
+1+9xALgxpL56JAfDQiDyitSSBBtlVkxs1Pu2YVpHI7TYabS3OtB0PAx1oYxOdqHp
+2yqlO/rOsP9+aij9JxzIsekp8VduZLTQwRVtDr4uDkbIXvRR/u8OYzo7cbrPb1nK
+DOObXUm4TOJXsZiKQlecdu/vvdFoqNL0Cbt3Nb4lggjEFixEIFapRBF37120Hape
+az6LMvYHL1cEksr1/p3C6eizjkxLAjHZ5DxIgif3GIJ2SDpxsROhOdUuxTTCHWKF
+3wP+TfSvPd9cW436cOGlfifHhi5qjxLGhF5DUVCcGZt45vz27Ud+ez1m7xMTiF88
+oWP7+ayHNZ/zgp6kPwqcMWmLmaSISo5uZk3vFsQPeSghYA2FFn3XVDjxklb9tTNM
+g9zXEJ9L/cb4Qr26fHMC4P99zVvh1Kxhe1fVSntb1IVYJ12/+CtgrKAmrhQhJ8Z3
+mjOAPF5GP/fDsaOGM8boXg25NSyqRsGFAnWAoOsk+xWq5Gd/bnc/9ASKL3x74xdh
+8N0JqSDIvgmk0H5Ew7IwSjiqqewYmgeCK9u4nBit2uBGF6zPXQIDAQABo2MwYTAd
+BgNVHQ4EFgQU2f4hQG6UnrybPZx9mCAZ5YwwYrIwHwYDVR0jBBgwFoAU2f4hQG6U
+nrybPZx9mCAZ5YwwYrIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYw
+DQYJKoZIhvcNAQELBQADggIBAJ5Fngw7tu/hOsh80QA9z+LqBrWyOrsGS2h60COX
+dKcs8AjYeVrXWoSK2BKaG9l9XE1wxaX5q+WjiYndAfrs3fnpkpfbsEZC89NiqpX+
+MWcUaViQCqoL7jcjx1BRtPV+nuN79+TMQjItSQzL/0kMmx40/W5ulop5A7Zv2wnL
+/V9lFDfhOPXzYRZY5LVtDQsEGz9QLX+zx3oaFoBg+Iof6Rsqxvm6ARppv9JYx1RX
+CI/hOWB3S6xZhBqI8d3LT3jX5+EzLfzuQfogsL7L9ziUwOHQhQ+77Sxzq+3+knYa
+ZH9bDTMJBzN7Bj8RpFxwPIXAz+OQqIN3+tvmxYxoZxBnpVIt8MSZj3+/0WvitUfW
+2dCFmU2Umw9Lje4AWkcdEQOsQRivh7dvDDqPys/cA8GiCcjl/YBeyGBCARsaU1q7
+N6a3vLqE6R5sGtRk2tRD/pOLS/IseRYQ1JMLiI+h2IYURpFHmygk71dSTlxCnKr3
+Sewn6EAes6aJInKc9Q0ztFijMDvd1GpUk74aTfOTlPf8hAs/hCBcNANExdqtvArB
+As8e5ZTZ845b2EzwnexhF7sUMlQMAimTHpKG9n/v55IFDlndmQguLvqcAFLTxWYp
+5KeXRKQOKIETNcX2b2TmQcTVL8w0RSXPQQCWPUouwpaYT05KnJe32x+SMsj/D1Fu
+1uwJ
+-----END CERTIFICATE-----
+
+# Issuer: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+# Subject: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+# Label: "TrustCor ECA-1"
+# Serial: 9548242946988625984
+# MD5 Fingerprint: 27:92:23:1d:0a:f5:40:7c:e9:e6:6b:9d:d8:f5:e7:6c
+# SHA1 Fingerprint: 58:d1:df:95:95:67:6b:63:c0:f0:5b:1c:17:4d:8b:84:0b:c8:78:bd
+# SHA256 Fingerprint: 5a:88:5d:b1:9c:01:d9:12:c5:75:93:88:93:8c:af:bb:df:03:1a:b2:d4:8e:91:ee:15:58:9b:42:97:1d:03:9c
+-----BEGIN CERTIFICATE-----
+MIIEIDCCAwigAwIBAgIJAISCLF8cYtBAMA0GCSqGSIb3DQEBCwUAMIGcMQswCQYD
+VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk
+MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U
+cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxFzAVBgNVBAMMDlRydXN0Q29y
+IEVDQS0xMB4XDTE2MDIwNDEyMzIzM1oXDTI5MTIzMTE3MjgwN1owgZwxCzAJBgNV
+BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw
+IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy
+dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEXMBUGA1UEAwwOVHJ1c3RDb3Ig
+RUNBLTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPj+ARtZ+odnbb
+3w9U73NjKYKtR8aja+3+XzP4Q1HpGjORMRegdMTUpwHmspI+ap3tDvl0mEDTPwOA
+BoJA6LHip1GnHYMma6ve+heRK9jGrB6xnhkB1Zem6g23xFUfJ3zSCNV2HykVh0A5
+3ThFEXXQmqc04L/NyFIduUd+Dbi7xgz2c1cWWn5DkR9VOsZtRASqnKmcp0yJF4Ou
+owReUoCLHhIlERnXDH19MURB6tuvsBzvgdAsxZohmz3tQjtQJvLsznFhBmIhVE5/
+wZ0+fyCMgMsq2JdiyIMzkX2woloPV+g7zPIlstR8L+xNxqE6FXrntl019fZISjZF
+ZtS6mFjBAgMBAAGjYzBhMB0GA1UdDgQWBBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAf
+BgNVHSMEGDAWgBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAPBgNVHRMBAf8EBTADAQH/
+MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAQEABT41XBVwm8nHc2Fv
+civUwo/yQ10CzsSUuZQRg2dd4mdsdXa/uwyqNsatR5Nj3B5+1t4u/ukZMjgDfxT2
+AHMsWbEhBuH7rBiVDKP/mZb3Kyeb1STMHd3BOuCYRLDE5D53sXOpZCz2HAF8P11F
+hcCF5yWPldwX8zyfGm6wyuMdKulMY/okYWLW2n62HGz1Ah3UKt1VkOsqEUc8Ll50
+soIipX1TH0XsJ5F95yIW6MBoNtjG8U+ARDL54dHRHareqKucBK+tIA5kmE2la8BI
+WJZpTdwHjFGTot+fDz2LYLSCjaoITmJF4PkL0uDgPFveXHEnJcLmA4GLEFPjx1Wi
+tJ/X5g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation
+# Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation
+# Label: "SSL.com Root Certification Authority RSA"
+# Serial: 8875640296558310041
+# MD5 Fingerprint: 86:69:12:c0:70:f1:ec:ac:ac:c2:d5:bc:a5:5b:a1:29
+# SHA1 Fingerprint: b7:ab:33:08:d1:ea:44:77:ba:14:80:12:5a:6f:bd:a9:36:49:0c:bb
+# SHA256 Fingerprint: 85:66:6a:56:2e:e0:be:5c:e9:25:c1:d8:89:0a:6f:76:a8:7e:c1:6d:4d:7d:5f:29:ea:74:19:cf:20:12:3b:69
+-----BEGIN CERTIFICATE-----
+MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE
+BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK
+DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz
+OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv
+dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv
+bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN
+AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R
+xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX
+qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC
+C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3
+6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh
+/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF
+YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E
+JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc
+US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8
+ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm
++Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi
+M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV
+HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G
+A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV
+cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc
+Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs
+PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/
+q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0
+cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr
+a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I
+H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y
+K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu
+nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf
+oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY
+Ic2wBlX7Jz9TkHCpBB5XJ7k=
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com Root Certification Authority ECC O=SSL Corporation
+# Subject: CN=SSL.com Root Certification Authority ECC O=SSL Corporation
+# Label: "SSL.com Root Certification Authority ECC"
+# Serial: 8495723813297216424
+# MD5 Fingerprint: 2e:da:e4:39:7f:9c:8f:37:d1:70:9f:26:17:51:3a:8e
+# SHA1 Fingerprint: c3:19:7c:39:24:e6:54:af:1b:c4:ab:20:95:7a:e2:c3:0e:13:02:6a
+# SHA256 Fingerprint: 34:17:bb:06:cc:60:07:da:1b:96:1c:92:0b:8a:b4:ce:3f:ad:82:0e:4a:a3:0b:9a:cb:c4:a7:4e:bd:ce:bc:65
+-----BEGIN CERTIFICATE-----
+MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC
+VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T
+U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0
+aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz
+WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0
+b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS
+b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI
+7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg
+CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud
+EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD
+VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T
+kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+
+gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation
+# Subject: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation
+# Label: "SSL.com EV Root Certification Authority RSA R2"
+# Serial: 6248227494352943350
+# MD5 Fingerprint: e1:1e:31:58:1a:ae:54:53:02:f6:17:6a:11:7b:4d:95
+# SHA1 Fingerprint: 74:3a:f0:52:9b:d0:32:a0:f4:4a:83:cd:d4:ba:a9:7b:7c:2e:c4:9a
+# SHA256 Fingerprint: 2e:7b:f1:6c:c2:24:85:a7:bb:e2:aa:86:96:75:07:61:b0:ae:39:be:3b:2f:e9:d0:cc:6d:4e:f7:34:91:42:5c
+-----BEGIN CERTIFICATE-----
+MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV
+BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE
+CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy
+dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy
+MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G
+A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD
+DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq
+M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf
+OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa
+4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9
+HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR
+aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA
+b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ
+Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV
+PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO
+pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu
+UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY
+MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV
+HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4
+9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW
+s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5
+Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg
+cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM
+79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz
+/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt
+ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm
+Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK
+QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ
+w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi
+S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07
+mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w==
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation
+# Subject: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation
+# Label: "SSL.com EV Root Certification Authority ECC"
+# Serial: 3182246526754555285
+# MD5 Fingerprint: 59:53:22:65:83:42:01:54:c0:ce:42:b9:5a:7c:f2:90
+# SHA1 Fingerprint: 4c:dd:51:a3:d1:f5:20:32:14:b0:c6:c5:32:23:03:91:c7:46:42:6d
+# SHA256 Fingerprint: 22:a2:c1:f7:bd:ed:70:4c:c1:e7:01:b5:f4:08:c3:10:88:0f:e9:56:b5:de:2a:4a:44:f9:9c:87:3a:25:a7:c8
+-----BEGIN CERTIFICATE-----
+MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMC
+VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T
+U0wgQ29ycG9yYXRpb24xNDAyBgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNTIzWhcNNDEwMjEyMTgx
+NTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv
+dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NMLmNv
+bSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49
+AgEGBSuBBAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMA
+VIbc/R/fALhBYlzccBYy3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1Kthku
+WnBaBu2+8KGwytAJKaNjMGEwHQYDVR0OBBYEFFvKXuXe0oGqzagtZFG22XKbl+ZP
+MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX
+5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJN+vp1RPZ
+ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg
+h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6
+# Label: "GlobalSign Root CA - R6"
+# Serial: 1417766617973444989252670301619537
+# MD5 Fingerprint: 4f:dd:07:e4:d4:22:64:39:1e:0c:37:42:ea:d1:c6:ae
+# SHA1 Fingerprint: 80:94:64:0e:b5:a7:a1:ca:11:9c:1f:dd:d5:9f:81:02:63:a7:fb:d1
+# SHA256 Fingerprint: 2c:ab:ea:fe:37:d0:6c:a2:2a:ba:73:91:c0:03:3d:25:98:29:52:c4:53:64:73:49:76:3a:3a:b5:ad:6c:cf:69
+-----BEGIN CERTIFICATE-----
+MIIFgzCCA2ugAwIBAgIORea7A4Mzw4VlSOb/RVEwDQYJKoZIhvcNAQEMBQAwTDEg
+MB4GA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjYxEzARBgNVBAoTCkdsb2Jh
+bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTQxMjEwMDAwMDAwWhcNMzQx
+MjEwMDAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSNjET
+MBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCAiIwDQYJ
+KoZIhvcNAQEBBQADggIPADCCAgoCggIBAJUH6HPKZvnsFMp7PPcNCPG0RQssgrRI
+xutbPK6DuEGSMxSkb3/pKszGsIhrxbaJ0cay/xTOURQh7ErdG1rG1ofuTToVBu1k
+ZguSgMpE3nOUTvOniX9PeGMIyBJQbUJmL025eShNUhqKGoC3GYEOfsSKvGRMIRxD
+aNc9PIrFsmbVkJq3MQbFvuJtMgamHvm566qjuL++gmNQ0PAYid/kD3n16qIfKtJw
+LnvnvJO7bVPiSHyMEAc4/2ayd2F+4OqMPKq0pPbzlUoSB239jLKJz9CgYXfIWHSw
+1CM69106yqLbnQneXUQtkPGBzVeS+n68UARjNN9rkxi+azayOeSsJDa38O+2HBNX
+k7besvjihbdzorg1qkXy4J02oW9UivFyVm4uiMVRQkQVlO6jxTiWm05OWgtH8wY2
+SXcwvHE35absIQh1/OZhFj931dmRl4QKbNQCTXTAFO39OfuD8l4UoQSwC+n+7o/h
+bguyCLNhZglqsQY6ZZZZwPA1/cnaKI0aEYdwgQqomnUdnjqGBQCe24DWJfncBZ4n
+WUx2OVvq+aWh2IMP0f/fMBH5hc8zSPXKbWQULHpYT9NLCEnFlWQaYw55PfWzjMpY
+rZxCRXluDocZXFSxZba/jJvcE+kNb7gu3GduyYsRtYQUigAZcIN5kZeR1Bonvzce
+MgfYFGM8KEyvAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdDgQWBBSubAWjkxPioufi1xzWx/B/yGdToDAfBgNVHSMEGDAWgBSu
+bAWjkxPioufi1xzWx/B/yGdToDANBgkqhkiG9w0BAQwFAAOCAgEAgyXt6NH9lVLN
+nsAEoJFp5lzQhN7craJP6Ed41mWYqVuoPId8AorRbrcWc+ZfwFSY1XS+wc3iEZGt
+Ixg93eFyRJa0lV7Ae46ZeBZDE1ZXs6KzO7V33EByrKPrmzU+sQghoefEQzd5Mr61
+55wsTLxDKZmOMNOsIeDjHfrYBzN2VAAiKrlNIC5waNrlU/yDXNOd8v9EDERm8tLj
+vUYAGm0CuiVdjaExUd1URhxN25mW7xocBFymFe944Hn+Xds+qkxV/ZoVqW/hpvvf
+cDDpw+5CRu3CkwWJ+n1jez/QcYF8AOiYrg54NMMl+68KnyBr3TsTjxKM4kEaSHpz
+oHdpx7Zcf4LIHv5YGygrqGytXm3ABdJ7t+uA/iU3/gKbaKxCXcPu9czc8FB10jZp
+nOZ7BN9uBmm23goJSFmH63sUYHpkqmlD75HHTOwY3WzvUy2MmeFe8nI+z1TIvWfs
+pA9MRf/TuTAjB0yPEL+GltmZWrSZVxykzLsViVO6LAUP5MSeGbEYNNVMnbrt9x+v
+JJUEeKgDu+6B5dpffItKoZB0JaezPkvILFa9x8jvOOJckvB595yEunQtYQEgfn7R
+8k8HWV+LLUNS60YMlOH1Zkd5d9VUWx+tJDfLRVpOoERIyNiwmcUVhAn21klJwGW4
+5hpxbqCo8YLoRT5s1gLXCmeDBVrJpBA=
+-----END CERTIFICATE-----
+
+# Issuer: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed
+# Subject: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed
+# Label: "OISTE WISeKey Global Root GC CA"
+# Serial: 44084345621038548146064804565436152554
+# MD5 Fingerprint: a9:d6:b9:2d:2f:93:64:f8:a5:69:ca:91:e9:68:07:23
+# SHA1 Fingerprint: e0:11:84:5e:34:de:be:88:81:b9:9c:f6:16:26:d1:96:1f:c3:b9:31
+# SHA256 Fingerprint: 85:60:f9:1c:36:24:da:ba:95:70:b5:fe:a0:db:e3:6f:f1:1a:83:23:be:94:86:85:4f:b3:f3:4a:55:71:19:8d
+-----BEGIN CERTIFICATE-----
+MIICaTCCAe+gAwIBAgIQISpWDK7aDKtARb8roi066jAKBggqhkjOPQQDAzBtMQsw
+CQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUgRm91
+bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwg
+Um9vdCBHQyBDQTAeFw0xNzA1MDkwOTQ4MzRaFw00MjA1MDkwOTU4MzNaMG0xCzAJ
+BgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBGb3Vu
+ZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2JhbCBS
+b290IEdDIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAETOlQwMYPchi82PG6s4ni
+eUqjFqdrVCTbUf/q9Akkwwsin8tqJ4KBDdLArzHkdIJuyiXZjHWd8dvQmqJLIX4W
+p2OQ0jnUsYd4XxiWD1AbNTcPasbc2RNNpI6QN+a9WzGRo1QwUjAOBgNVHQ8BAf8E
+BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUSIcUrOPDnpBgOtfKie7T
+rYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0EAwMDaAAwZQIwJsdpW9zV
+57LnyAyMjMPdeYwbY9XJUpROTYJKcx6ygISpJcBMWm1JKWB4E+J+SOtkAjEA2zQg
+Mgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R1 O=Google Trust Services LLC
+# Subject: CN=GTS Root R1 O=Google Trust Services LLC
+# Label: "GTS Root R1"
+# Serial: 146587175971765017618439757810265552097
+# MD5 Fingerprint: 82:1a:ef:d4:d2:4a:f2:9f:e2:3d:97:06:14:70:72:85
+# SHA1 Fingerprint: e1:c9:50:e6:ef:22:f8:4c:56:45:72:8b:92:20:60:d7:d5:a7:a3:e8
+# SHA256 Fingerprint: 2a:57:54:71:e3:13:40:bc:21:58:1c:bd:2c:f1:3e:15:84:63:20:3e:ce:94:bc:f9:d3:cc:19:6b:f0:9a:54:72
+-----BEGIN CERTIFICATE-----
+MIIFWjCCA0KgAwIBAgIQbkepxUtHDA3sM9CJuRz04TANBgkqhkiG9w0BAQwFADBH
+MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM
+QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy
+MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl
+cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaM
+f/vo27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vX
+mX7wCl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7
+zUjwTcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0P
+fyblqAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtc
+vfaHszVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4
+Zor8Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUsp
+zBmkMiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOO
+Rc92wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYW
+k70paDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+
+DVrNVjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgF
+lQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBADiW
+Cu49tJYeX++dnAsznyvgyv3SjgofQXSlfKqE1OXyHuY3UjKcC9FhHb8owbZEKTV1
+d5iyfNm9dKyKaOOpMQkpAWBz40d8U6iQSifvS9efk+eCNs6aaAyC58/UEBZvXw6Z
+XPYfcX3v73svfuo21pdwCxXu11xWajOl40k4DLh9+42FpLFZXvRq4d2h9mREruZR
+gyFmxhE+885H7pwoHyXa/6xmld01D1zvICxi/ZG6qcz8WpyTgYMpl0p8WnK0OdC3
+d8t5/Wk6kjftbjhlRn7pYL15iJdfOBL07q9bgsiG1eGZbYwE8na6SfZu6W0eX6Dv
+J4J2QPim01hcDyxC2kLGe4g0x8HYRZvBPsVhHdljUEn2NIVq4BjFbkerQUIpm/Zg
+DdIx02OYI5NaAIFItO/Nis3Jz5nu2Z6qNuFoS3FJFDYoOj0dzpqPJeaAcWErtXvM
++SUWgeExX6GjfhaknBZqlxi9dnKlC54dNuYvoS++cJEPqOba+MSSQGwlfnuzCdyy
+F62ARPBopY+Udf90WuioAnwMCeKpSwughQtiue+hMZL77/ZRBIls6Kl0obsXs7X9
+SQ98POyDGCBDTtWTurQ0sR8WNh8M5mQ5Fkzc4P4dyKliPUDqysU0ArSuiYgzNdws
+E3PYJ/HQcu51OyLemGhmW/HGY0dVHLqlCFF1pkgl
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R2 O=Google Trust Services LLC
+# Subject: CN=GTS Root R2 O=Google Trust Services LLC
+# Label: "GTS Root R2"
+# Serial: 146587176055767053814479386953112547951
+# MD5 Fingerprint: 44:ed:9a:0e:a4:09:3b:00:f2:ae:4c:a3:c6:61:b0:8b
+# SHA1 Fingerprint: d2:73:96:2a:2a:5e:39:9f:73:3f:e1:c7:1e:64:3f:03:38:34:fc:4d
+# SHA256 Fingerprint: c4:5d:7b:b0:8e:6d:67:e6:2e:42:35:11:0b:56:4e:5f:78:fd:92:ef:05:8c:84:0a:ea:4e:64:55:d7:58:5c:60
+-----BEGIN CERTIFICATE-----
+MIIFWjCCA0KgAwIBAgIQbkepxlqz5yDFMJo/aFLybzANBgkqhkiG9w0BAQwFADBH
+MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM
+QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy
+MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl
+cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3Lv
+CvptnfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3Kg
+GjSY6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9Bu
+XvAuMC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOd
+re7kRXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXu
+PuWgf9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1
+mKPV+3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K
+8YzodDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqj
+x5RWIr9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsR
+nTKaG73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0
+kzCqgc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9Ok
+twIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBALZp
+8KZ3/p7uC4Gt4cCpx/k1HUCCq+YEtN/L9x0Pg/B+E02NjO7jMyLDOfxA325BS0JT
+vhaI8dI4XsRomRyYUpOM52jtG2pzegVATX9lO9ZY8c6DR2Dj/5epnGB3GFW1fgiT
+z9D2PGcDFWEJ+YF59exTpJ/JjwGLc8R3dtyDovUMSRqodt6Sm2T4syzFJ9MHwAiA
+pJiS4wGWAqoC7o87xdFtCjMwc3i5T1QWvwsHoaRc5svJXISPD+AVdyx+Jn7axEvb
+pxZ3B7DNdehyQtaVhJ2Gg/LkkM0JR9SLA3DaWsYDQvTtN6LwG1BUSw7YhN4ZKJmB
+R64JGz9I0cNv4rBgF/XuIwKl2gBbbZCr7qLpGzvpx0QnRY5rn/WkhLx3+WuXrD5R
+RaIRpsyF7gpo8j5QOHokYh4XIDdtak23CZvJ/KRY9bb7nE4Yu5UC56GtmwfuNmsk
+0jmGwZODUNKBRqhfYlcsu2xkiAhu7xNUX90txGdj08+JN7+dIPT7eoOboB6BAFDC
+5AwiWVIQ7UNWhwD4FFKnHYuTjKJNRn8nxnGbJN7k2oaLDX5rIMHAnuFl2GqjpuiF
+izoHCBy69Y9Vmhh1fuXsgWbRIXOhNUQLgD1bnF5vKheW0YMjiGZt5obicDIvUiLn
+yOd/xCxgXS/Dr55FBcOEArf9LAhST4Ldo/DUhgkC
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R3 O=Google Trust Services LLC
+# Subject: CN=GTS Root R3 O=Google Trust Services LLC
+# Label: "GTS Root R3"
+# Serial: 146587176140553309517047991083707763997
+# MD5 Fingerprint: 1a:79:5b:6b:04:52:9c:5d:c7:74:33:1b:25:9a:f9:25
+# SHA1 Fingerprint: 30:d4:24:6f:07:ff:db:91:89:8a:0b:e9:49:66:11:eb:8c:5e:46:e5
+# SHA256 Fingerprint: 15:d5:b8:77:46:19:ea:7d:54:ce:1c:a6:d0:b0:c4:03:e0:37:a9:17:f1:31:e8:a0:4e:1e:6b:7a:71:ba:bc:e5
+-----BEGIN CERTIFICATE-----
+MIICDDCCAZGgAwIBAgIQbkepx2ypcyRAiQ8DVd2NHTAKBggqhkjOPQQDAzBHMQsw
+CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
+MBIGA1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
+MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
+Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQA
+IgNiAAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout
+736GjOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2A
+DDL24CejQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud
+DgQWBBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEAgFuk
+fCPAlaUs3L6JbyO5o91lAFJekazInXJ0glMLfalAvWhgxeG4VDvBNhcl2MG9AjEA
+njWSdIUlUfUk7GRSJFClH9voy8l27OyCbvWFGFPouOOaKaqW04MjyaR7YbPMAuhd
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R4 O=Google Trust Services LLC
+# Subject: CN=GTS Root R4 O=Google Trust Services LLC
+# Label: "GTS Root R4"
+# Serial: 146587176229350439916519468929765261721
+# MD5 Fingerprint: 5d:b6:6a:c4:60:17:24:6a:1a:99:a8:4b:ee:5e:b4:26
+# SHA1 Fingerprint: 2a:1d:60:27:d9:4a:b1:0a:1c:4d:91:5c:cd:33:a0:cb:3e:2d:54:cb
+# SHA256 Fingerprint: 71:cc:a5:39:1f:9e:79:4b:04:80:25:30:b3:63:e1:21:da:8a:30:43:bb:26:66:2f:ea:4d:ca:7f:c9:51:a4:bd
+-----BEGIN CERTIFICATE-----
+MIICCjCCAZGgAwIBAgIQbkepyIuUtui7OyrYorLBmTAKBggqhkjOPQQDAzBHMQsw
+CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
+MBIGA1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
+MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
+Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQA
+IgNiAATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzu
+hXyiQHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/l
+xKvRHYqjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud
+DgQWBBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNnADBkAjBqUFJ0
+CMRw3J5QdCHojXohw0+WbhXRIjVhLfoIN+4Zba3bssx9BzT1YBkstTTZbyACMANx
+sbqjYAuG7ZoIapVon+Kz4ZNkfF6Tpt95LY2F45TPI11xzPKwTdb+mciUqXWi4w==
+-----END CERTIFICATE-----
+
+# Issuer: CN=UCA Global G2 Root O=UniTrust
+# Subject: CN=UCA Global G2 Root O=UniTrust
+# Label: "UCA Global G2 Root"
+# Serial: 124779693093741543919145257850076631279
+# MD5 Fingerprint: 80:fe:f0:c4:4a:f0:5c:62:32:9f:1c:ba:78:a9:50:f8
+# SHA1 Fingerprint: 28:f9:78:16:19:7a:ff:18:25:18:aa:44:fe:c1:a0:ce:5c:b6:4c:8a
+# SHA256 Fingerprint: 9b:ea:11:c9:76:fe:01:47:64:c1:be:56:a6:f9:14:b5:a5:60:31:7a:bd:99:88:39:33:82:e5:16:1a:a0:49:3c
+-----BEGIN CERTIFICATE-----
+MIIFRjCCAy6gAwIBAgIQXd+x2lqj7V2+WmUgZQOQ7zANBgkqhkiG9w0BAQsFADA9
+MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxGzAZBgNVBAMMElVDQSBH
+bG9iYWwgRzIgUm9vdDAeFw0xNjAzMTEwMDAwMDBaFw00MDEyMzEwMDAwMDBaMD0x
+CzAJBgNVBAYTAkNOMREwDwYDVQQKDAhVbmlUcnVzdDEbMBkGA1UEAwwSVUNBIEds
+b2JhbCBHMiBSb290MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxeYr
+b3zvJgUno4Ek2m/LAfmZmqkywiKHYUGRO8vDaBsGxUypK8FnFyIdK+35KYmToni9
+kmugow2ifsqTs6bRjDXVdfkX9s9FxeV67HeToI8jrg4aA3++1NDtLnurRiNb/yzm
+VHqUwCoV8MmNsHo7JOHXaOIxPAYzRrZUEaalLyJUKlgNAQLx+hVRZ2zA+te2G3/R
+VogvGjqNO7uCEeBHANBSh6v7hn4PJGtAnTRnvI3HLYZveT6OqTwXS3+wmeOwcWDc
+C/Vkw85DvG1xudLeJ1uK6NjGruFZfc8oLTW4lVYa8bJYS7cSN8h8s+1LgOGN+jIj
+tm+3SJUIsUROhYw6AlQgL9+/V087OpAh18EmNVQg7Mc/R+zvWr9LesGtOxdQXGLY
+D0tK3Cv6brxzks3sx1DoQZbXqX5t2Okdj4q1uViSukqSKwxW/YDrCPBeKW4bHAyv
+j5OJrdu9o54hyokZ7N+1wxrrFv54NkzWbtA+FxyQF2smuvt6L78RHBgOLXMDj6Dl
+NaBa4kx1HXHhOThTeEDMg5PXCp6dW4+K5OXgSORIskfNTip1KnvyIvbJvgmRlld6
+iIis7nCs+dwp4wwcOxJORNanTrAmyPPZGpeRaOrvjUYG0lZFWJo8DA+DuAUlwznP
+O6Q0ibd5Ei9Hxeepl2n8pndntd978XplFeRhVmUCAwEAAaNCMEAwDgYDVR0PAQH/
+BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFIHEjMz15DD/pQwIX4wV
+ZyF0Ad/fMA0GCSqGSIb3DQEBCwUAA4ICAQATZSL1jiutROTL/7lo5sOASD0Ee/oj
+L3rtNtqyzm325p7lX1iPyzcyochltq44PTUbPrw7tgTQvPlJ9Zv3hcU2tsu8+Mg5
+1eRfB70VVJd0ysrtT7q6ZHafgbiERUlMjW+i67HM0cOU2kTC5uLqGOiiHycFutfl
+1qnN3e92mI0ADs0b+gO3joBYDic/UvuUospeZcnWhNq5NXHzJsBPd+aBJ9J3O5oU
+b3n09tDh05S60FdRvScFDcH9yBIw7m+NESsIndTUv4BFFJqIRNow6rSn4+7vW4LV
+PtateJLbXDzz2K36uGt/xDYotgIVilQsnLAXc47QN6MUPJiVAAwpBVueSUmxX8fj
+y88nZY41F7dXyDDZQVu5FLbowg+UMaeUmMxq67XhJ/UQqAHojhJi6IjMtX9Gl8Cb
+EGY4GjZGXyJoPd/JxhMnq1MGrKI8hgZlb7F+sSlEmqO6SWkoaY/X5V+tBIZkbxqg
+DMUIYs6Ao9Dz7GjevjPHF1t/gMRMTLGmhIrDO7gJzRSBuhjjVFc2/tsvfEehOjPI
++Vg7RE+xygKJBJYoaMVLuCaJu9YzL1DV/pqJuhgyklTGW+Cd+V7lDSKb9triyCGy
+YiGqhkCyLmTTX8jjfhFnRR8F/uOi77Oos/N9j/gMHyIfLXC0uAE0djAA5SN4p1bX
+UB+K+wb1whnw0A==
+-----END CERTIFICATE-----
+
+# Issuer: CN=UCA Extended Validation Root O=UniTrust
+# Subject: CN=UCA Extended Validation Root O=UniTrust
+# Label: "UCA Extended Validation Root"
+# Serial: 106100277556486529736699587978573607008
+# MD5 Fingerprint: a1:f3:5f:43:c6:34:9b:da:bf:8c:7e:05:53:ad:96:e2
+# SHA1 Fingerprint: a3:a1:b0:6f:24:61:23:4a:e3:36:a5:c2:37:fc:a6:ff:dd:f0:d7:3a
+# SHA256 Fingerprint: d4:3a:f9:b3:54:73:75:5c:96:84:fc:06:d7:d8:cb:70:ee:5c:28:e7:73:fb:29:4e:b4:1e:e7:17:22:92:4d:24
+-----BEGIN CERTIFICATE-----
+MIIFWjCCA0KgAwIBAgIQT9Irj/VkyDOeTzRYZiNwYDANBgkqhkiG9w0BAQsFADBH
+MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNVBAMMHFVDQSBF
+eHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwHhcNMTUwMzEzMDAwMDAwWhcNMzgxMjMx
+MDAwMDAwWjBHMQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNV
+BAMMHFVDQSBFeHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQCpCQcoEwKwmeBkqh5DFnpzsZGgdT6o+uM4AHrsiWog
+D4vFsJszA1qGxliG1cGFu0/GnEBNyr7uaZa4rYEwmnySBesFK5pI0Lh2PpbIILvS
+sPGP2KxFRv+qZ2C0d35qHzwaUnoEPQc8hQ2E0B92CvdqFN9y4zR8V05WAT558aop
+O2z6+I9tTcg1367r3CTueUWnhbYFiN6IXSV8l2RnCdm/WhUFhvMJHuxYMjMR83dk
+sHYf5BA1FxvyDrFspCqjc/wJHx4yGVMR59mzLC52LqGj3n5qiAno8geK+LLNEOfi
+c0CTuwjRP+H8C5SzJe98ptfRr5//lpr1kXuYC3fUfugH0mK1lTnj8/FtDw5lhIpj
+VMWAtuCeS31HJqcBCF3RiJ7XwzJE+oJKCmhUfzhTA8ykADNkUVkLo4KRel7sFsLz
+KuZi2irbWWIQJUoqgQtHB0MGcIfS+pMRKXpITeuUx3BNr2fVUbGAIAEBtHoIppB/
+TuDvB0GHr2qlXov7z1CymlSvw4m6WC31MJixNnI5fkkE/SmnTHnkBVfblLkWU41G
+sx2VYVdWf6/wFlthWG82UBEL2KwrlRYaDh8IzTY0ZRBiZtWAXxQgXy0MoHgKaNYs
+1+lvK9JKBZP8nm9rZ/+I8U6laUpSNwXqxhaN0sSZ0YIrO7o1dfdRUVjzyAfd5LQD
+fwIDAQABo0IwQDAdBgNVHQ4EFgQU2XQ65DA9DfcS3H5aBZ8eNJr34RQwDwYDVR0T
+AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBADaN
+l8xCFWQpN5smLNb7rhVpLGsaGvdftvkHTFnq88nIua7Mui563MD1sC3AO6+fcAUR
+ap8lTwEpcOPlDOHqWnzcSbvBHiqB9RZLcpHIojG5qtr8nR/zXUACE/xOHAbKsxSQ
+VBcZEhrxH9cMaVr2cXj0lH2RC47skFSOvG+hTKv8dGT9cZr4QQehzZHkPJrgmzI5
+c6sq1WnIeJEmMX3ixzDx/BR4dxIOE/TdFpS/S2d7cFOFyrC78zhNLJA5wA3CXWvp
+4uXViI3WLL+rG761KIcSF3Ru/H38j9CHJrAb+7lsq+KePRXBOy5nAliRn+/4Qh8s
+t2j1da3Ptfb/EX3C8CSlrdP6oDyp+l3cpaDvRKS+1ujl5BOWF3sGPjLtx7dCvHaj
+2GU4Kzg1USEODm8uNBNA4StnDG1KQTAYI1oyVZnJF+A83vbsea0rWBmirSwiGpWO
+vpaQXUJXxPkUAzUrHC1RVwinOt4/5Mi0A3PCwSaAuwtCH60NryZy2sy+s6ODWA2C
+xR9GUeOcGMyNm43sSet1UNWMKFnKdDTajAshqx7qG+XH/RU+wBeq+yNuJkbL+vmx
+cmtpzyKEC2IPrNkZAJSidjzULZrtBJ4tBmIQN1IchXIbJ+XMxjHsN+xjWZsLHXbM
+fjKaiJUINlK73nZfdklJrX+9ZSCyycErdhh2n1ax
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036
+# Subject: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036
+# Label: "Certigna Root CA"
+# Serial: 269714418870597844693661054334862075617
+# MD5 Fingerprint: 0e:5c:30:62:27:eb:5b:bc:d7:ae:62:ba:e9:d5:df:77
+# SHA1 Fingerprint: 2d:0d:52:14:ff:9e:ad:99:24:01:74:20:47:6e:6c:85:27:27:f5:43
+# SHA256 Fingerprint: d4:8d:3d:23:ee:db:50:a4:59:e5:51:97:60:1c:27:77:4b:9d:7b:18:c9:4d:5a:05:95:11:a1:02:50:b9:31:68
+-----BEGIN CERTIFICATE-----
+MIIGWzCCBEOgAwIBAgIRAMrpG4nxVQMNo+ZBbcTjpuEwDQYJKoZIhvcNAQELBQAw
+WjELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczEcMBoGA1UECwwTMDAw
+MiA0ODE0NjMwODEwMDAzNjEZMBcGA1UEAwwQQ2VydGlnbmEgUm9vdCBDQTAeFw0x
+MzEwMDEwODMyMjdaFw0zMzEwMDEwODMyMjdaMFoxCzAJBgNVBAYTAkZSMRIwEAYD
+VQQKDAlEaGlteW90aXMxHDAaBgNVBAsMEzAwMDIgNDgxNDYzMDgxMDAwMzYxGTAX
+BgNVBAMMEENlcnRpZ25hIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw
+ggIKAoICAQDNGDllGlmx6mQWDoyUJJV8g9PFOSbcDO8WV43X2KyjQn+Cyu3NW9sO
+ty3tRQgXstmzy9YXUnIo245Onoq2C/mehJpNdt4iKVzSs9IGPjA5qXSjklYcoW9M
+CiBtnyN6tMbaLOQdLNyzKNAT8kxOAkmhVECe5uUFoC2EyP+YbNDrihqECB63aCPu
+I9Vwzm1RaRDuoXrC0SIxwoKF0vJVdlB8JXrJhFwLrN1CTivngqIkicuQstDuI7pm
+TLtipPlTWmR7fJj6o0ieD5Wupxj0auwuA0Wv8HT4Ks16XdG+RCYyKfHx9WzMfgIh
+C59vpD++nVPiz32pLHxYGpfhPTc3GGYo0kDFUYqMwy3OU4gkWGQwFsWq4NYKpkDf
+ePb1BHxpE4S80dGnBs8B92jAqFe7OmGtBIyT46388NtEbVncSVmurJqZNjBBe3Yz
+IoejwpKGbvlw7q6Hh5UbxHq9MfPU0uWZ/75I7HX1eBYdpnDBfzwboZL7z8g81sWT
+Co/1VTp2lc5ZmIoJlXcymoO6LAQ6l73UL77XbJuiyn1tJslV1c/DeVIICZkHJC1k
+JWumIWmbat10TWuXekG9qxf5kBdIjzb5LdXF2+6qhUVB+s06RbFo5jZMm5BX7CO5
+hwjCxAnxl4YqKE3idMDaxIzb3+KhF1nOJFl0Mdp//TBt2dzhauH8XwIDAQABo4IB
+GjCCARYwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
+FBiHVuBud+4kNTxOc5of1uHieX4rMB8GA1UdIwQYMBaAFBiHVuBud+4kNTxOc5of
+1uHieX4rMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8GCCsGAQUFBwIBFiNodHRwczov
+L3d3d3cuY2VydGlnbmEuZnIvYXV0b3JpdGVzLzBtBgNVHR8EZjBkMC+gLaArhilo
+dHRwOi8vY3JsLmNlcnRpZ25hLmZyL2NlcnRpZ25hcm9vdGNhLmNybDAxoC+gLYYr
+aHR0cDovL2NybC5kaGlteW90aXMuY29tL2NlcnRpZ25hcm9vdGNhLmNybDANBgkq
+hkiG9w0BAQsFAAOCAgEAlLieT/DjlQgi581oQfccVdV8AOItOoldaDgvUSILSo3L
+6btdPrtcPbEo/uRTVRPPoZAbAh1fZkYJMyjhDSSXcNMQH+pkV5a7XdrnxIxPTGRG
+HVyH41neQtGbqH6mid2PHMkwgu07nM3A6RngatgCdTer9zQoKJHyBApPNeNgJgH6
+0BGM+RFq7q89w1DTj18zeTyGqHNFkIwgtnJzFyO+B2XleJINugHA64wcZr+shncB
+lA2c5uk5jR+mUYyZDDl34bSb+hxnV29qao6pK0xXeXpXIs/NX2NGjVxZOob4Mkdi
+o2cNGJHc+6Zr9UhhcyNZjgKnvETq9Emd8VRY+WCv2hikLyhF3HqgiIZd8zvn/yk1
+gPxkQ5Tm4xxvvq0OKmOZK8l+hfZx6AYDlf7ej0gcWtSS6Cvu5zHbugRqh5jnxV/v
+faci9wHYTfmJ0A6aBVmknpjZbyvKcL5kwlWj9Omvw5Ip3IgWJJk8jSaYtlu3zM63
+Nwf9JtmYhST/WSMDmu2dnajkXjjO11INb9I/bbEFa0nOipFGc/T2L/Coc3cOZayh
+jWZSaX5LaAzHHjcng6WMxwLkFM1JAbBzs/3GkDpv0mztO+7skb6iQ12LAEpmJURw
+3kAP+HwV96LOPNdeE4yBFxgX0b3xdxA61GU5wSesVywlVP+i2k+KYTlerj1KjL0=
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI
+# Subject: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI
+# Label: "emSign Root CA - G1"
+# Serial: 235931866688319308814040
+# MD5 Fingerprint: 9c:42:84:57:dd:cb:0b:a7:2e:95:ad:b6:f3:da:bc:ac
+# SHA1 Fingerprint: 8a:c7:ad:8f:73:ac:4e:c1:b5:75:4d:a5:40:f4:fc:cf:7c:b5:8e:8c
+# SHA256 Fingerprint: 40:f6:af:03:46:a9:9a:a1:cd:1d:55:5a:4e:9c:ce:62:c7:f9:63:46:03:ee:40:66:15:83:3d:c8:c8:d0:03:67
+-----BEGIN CERTIFICATE-----
+MIIDlDCCAnygAwIBAgIKMfXkYgxsWO3W2DANBgkqhkiG9w0BAQsFADBnMQswCQYD
+VQQGEwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBU
+ZWNobm9sb2dpZXMgTGltaXRlZDEcMBoGA1UEAxMTZW1TaWduIFJvb3QgQ0EgLSBH
+MTAeFw0xODAyMTgxODMwMDBaFw00MzAyMTgxODMwMDBaMGcxCzAJBgNVBAYTAklO
+MRMwEQYDVQQLEwplbVNpZ24gUEtJMSUwIwYDVQQKExxlTXVkaHJhIFRlY2hub2xv
+Z2llcyBMaW1pdGVkMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEcxMIIBIjAN
+BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAk0u76WaK7p1b1TST0Bsew+eeuGQz
+f2N4aLTNLnF115sgxk0pvLZoYIr3IZpWNVrzdr3YzZr/k1ZLpVkGoZM0Kd0WNHVO
+8oG0x5ZOrRkVUkr+PHB1cM2vK6sVmjM8qrOLqs1D/fXqcP/tzxE7lM5OMhbTI0Aq
+d7OvPAEsbO2ZLIvZTmmYsvePQbAyeGHWDV/D+qJAkh1cF+ZwPjXnorfCYuKrpDhM
+tTk1b+oDafo6VGiFbdbyL0NVHpENDtjVaqSW0RM8LHhQ6DqS0hdW5TUaQBw+jSzt
+Od9C4INBdN+jzcKGYEho42kLVACL5HZpIQ15TjQIXhTCzLG3rdd8cIrHhQIDAQAB
+o0IwQDAdBgNVHQ4EFgQU++8Nhp6w492pufEhF38+/PB3KxowDgYDVR0PAQH/BAQD
+AgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAFn/8oz1h31x
+PaOfG1vR2vjTnGs2vZupYeveFix0PZ7mddrXuqe8QhfnPZHr5X3dPpzxz5KsbEjM
+wiI/aTvFthUvozXGaCocV685743QNcMYDHsAVhzNixl03r4PEuDQqqE/AjSxcM6d
+GNYIAwlG7mDgfrbESQRRfXBgvKqy/3lyeqYdPV8q+Mri/Tm3R7nrft8EI6/6nAYH
+6ftjk4BAtcZsCjEozgyfz7MjNYBBjWzEN3uBL4ChQEKF6dk4jeihU80Bv2noWgby
+RQuQ+q7hv53yrlc8pa6yVvSLZUDp/TGBLPQ5Cdjua6e0ph0VpZj3AYHYhX3zUVxx
+iN66zB+Afko=
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI
+# Subject: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI
+# Label: "emSign ECC Root CA - G3"
+# Serial: 287880440101571086945156
+# MD5 Fingerprint: ce:0b:72:d1:9f:88:8e:d0:50:03:e8:e3:b8:8b:67:40
+# SHA1 Fingerprint: 30:43:fa:4f:f2:57:dc:a0:c3:80:ee:2e:58:ea:78:b2:3f:e6:bb:c1
+# SHA256 Fingerprint: 86:a1:ec:ba:08:9c:4a:8d:3b:be:27:34:c6:12:ba:34:1d:81:3e:04:3c:f9:e8:a8:62:cd:5c:57:a3:6b:be:6b
+-----BEGIN CERTIFICATE-----
+MIICTjCCAdOgAwIBAgIKPPYHqWhwDtqLhDAKBggqhkjOPQQDAzBrMQswCQYDVQQG
+EwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNo
+bm9sb2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0g
+RzMwHhcNMTgwMjE4MTgzMDAwWhcNNDMwMjE4MTgzMDAwWjBrMQswCQYDVQQGEwJJ
+TjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNobm9s
+b2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0gRzMw
+djAQBgcqhkjOPQIBBgUrgQQAIgNiAAQjpQy4LRL1KPOxst3iAhKAnjlfSU2fySU0
+WXTsuwYc58Byr+iuL+FBVIcUqEqy6HyC5ltqtdyzdc6LBtCGI79G1Y4PPwT01xyS
+fvalY8L1X44uT6EYGQIrMgqCZH0Wk9GjQjBAMB0GA1UdDgQWBBR8XQKEE9TMipuB
+zhccLikenEhjQjAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggq
+hkjOPQQDAwNpADBmAjEAvvNhzwIQHWSVB7gYboiFBS+DCBeQyh+KTOgNG3qxrdWB
+CUfvO6wIBHxcmbHtRwfSAjEAnbpV/KlK6O3t5nYBQnvI+GDZjVGLVTv7jHvrZQnD
++JbNR6iC8hZVdyR+EhCVBCyj
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI
+# Subject: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI
+# Label: "emSign Root CA - C1"
+# Serial: 825510296613316004955058
+# MD5 Fingerprint: d8:e3:5d:01:21:fa:78:5a:b0:df:ba:d2:ee:2a:5f:68
+# SHA1 Fingerprint: e7:2e:f1:df:fc:b2:09:28:cf:5d:d4:d5:67:37:b1:51:cb:86:4f:01
+# SHA256 Fingerprint: 12:56:09:aa:30:1d:a0:a2:49:b9:7a:82:39:cb:6a:34:21:6f:44:dc:ac:9f:39:54:b1:42:92:f2:e8:c8:60:8f
+-----BEGIN CERTIFICATE-----
+MIIDczCCAlugAwIBAgILAK7PALrEzzL4Q7IwDQYJKoZIhvcNAQELBQAwVjELMAkG
+A1UEBhMCVVMxEzARBgNVBAsTCmVtU2lnbiBQS0kxFDASBgNVBAoTC2VNdWRocmEg
+SW5jMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEMxMB4XDTE4MDIxODE4MzAw
+MFoXDTQzMDIxODE4MzAwMFowVjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln
+biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMRwwGgYDVQQDExNlbVNpZ24gUm9v
+dCBDQSAtIEMxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAz+upufGZ
+BczYKCFK83M0UYRWEPWgTywS4/oTmifQz/l5GnRfHXk5/Fv4cI7gklL35CX5VIPZ
+HdPIWoU/Xse2B+4+wM6ar6xWQio5JXDWv7V7Nq2s9nPczdcdioOl+yuQFTdrHCZH
+3DspVpNqs8FqOp099cGXOFgFixwR4+S0uF2FHYP+eF8LRWgYSKVGczQ7/g/IdrvH
+GPMF0Ybzhe3nudkyrVWIzqa2kbBPrH4VI5b2P/AgNBbeCsbEBEV5f6f9vtKppa+c
+xSMq9zwhbL2vj07FOrLzNBL834AaSaTUqZX3noleoomslMuoaJuvimUnzYnu3Yy1
+aylwQ6BpC+S5DwIDAQABo0IwQDAdBgNVHQ4EFgQU/qHgcB4qAzlSWkK+XJGFehiq
+TbUwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL
+BQADggEBAMJKVvoVIXsoounlHfv4LcQ5lkFMOycsxGwYFYDGrK9HWS8mC+M2sO87
+/kOXSTKZEhVb3xEp/6tT+LvBeA+snFOvV71ojD1pM/CjoCNjO2RnIkSt1XHLVip4
+kqNPEjE2NuLe/gDEo2APJ62gsIq1NnpSob0n9CAnYuhNlCQT5AoE6TyrLshDCUrG
+YQTlSTR+08TI9Q/Aqum6VF7zYytPT1DU/rl7mYw9wC68AivTxEDkigcxHpvOJpkT
++xHqmiIMERnHXhuBUDDIlhJu58tBf5E7oke3VIAb3ADMmpDqw8NQBmIMMMAVSKeo
+WXzhriKi4gp6D/piq1JM4fHfyr6DDUI=
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI
+# Subject: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI
+# Label: "emSign ECC Root CA - C3"
+# Serial: 582948710642506000014504
+# MD5 Fingerprint: 3e:53:b3:a3:81:ee:d7:10:f8:d3:b0:1d:17:92:f5:d5
+# SHA1 Fingerprint: b6:af:43:c2:9b:81:53:7d:f6:ef:6b:c3:1f:1f:60:15:0c:ee:48:66
+# SHA256 Fingerprint: bc:4d:80:9b:15:18:9d:78:db:3e:1d:8c:f4:f9:72:6a:79:5d:a1:64:3c:a5:f1:35:8e:1d:db:0e:dc:0d:7e:b3
+-----BEGIN CERTIFICATE-----
+MIICKzCCAbGgAwIBAgIKe3G2gla4EnycqDAKBggqhkjOPQQDAzBaMQswCQYDVQQG
+EwJVUzETMBEGA1UECxMKZW1TaWduIFBLSTEUMBIGA1UEChMLZU11ZGhyYSBJbmMx
+IDAeBgNVBAMTF2VtU2lnbiBFQ0MgUm9vdCBDQSAtIEMzMB4XDTE4MDIxODE4MzAw
+MFoXDTQzMDIxODE4MzAwMFowWjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln
+biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMSAwHgYDVQQDExdlbVNpZ24gRUND
+IFJvb3QgQ0EgLSBDMzB2MBAGByqGSM49AgEGBSuBBAAiA2IABP2lYa57JhAd6bci
+MK4G9IGzsUJxlTm801Ljr6/58pc1kjZGDoeVjbk5Wum739D+yAdBPLtVb4Ojavti
+sIGJAnB9SMVK4+kiVCJNk7tCDK93nCOmfddhEc5lx/h//vXyqaNCMEAwHQYDVR0O
+BBYEFPtaSNCAIEDyqOkAB2kZd6fmw/TPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB
+Af8EBTADAQH/MAoGCCqGSM49BAMDA2gAMGUCMQC02C8Cif22TGK6Q04ThHK1rt0c
+3ta13FaPWEBaLd4gTCKDypOofu4SQMfWh0/434UCMBwUZOR8loMRnLDRWmFLpg9J
+0wD8ofzkpf9/rdcw0Md3f76BB1UwUCAU9Vc4CqgxUQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hongkong Post Root CA 3 O=Hongkong Post
+# Subject: CN=Hongkong Post Root CA 3 O=Hongkong Post
+# Label: "Hongkong Post Root CA 3"
+# Serial: 46170865288971385588281144162979347873371282084
+# MD5 Fingerprint: 11:fc:9f:bd:73:30:02:8a:fd:3f:f3:58:b9:cb:20:f0
+# SHA1 Fingerprint: 58:a2:d0:ec:20:52:81:5b:c1:f3:f8:64:02:24:4e:c2:8e:02:4b:02
+# SHA256 Fingerprint: 5a:2f:c0:3f:0c:83:b0:90:bb:fa:40:60:4b:09:88:44:6c:76:36:18:3d:f9:84:6e:17:10:1a:44:7f:b8:ef:d6
+-----BEGIN CERTIFICATE-----
+MIIFzzCCA7egAwIBAgIUCBZfikyl7ADJk0DfxMauI7gcWqQwDQYJKoZIhvcNAQEL
+BQAwbzELMAkGA1UEBhMCSEsxEjAQBgNVBAgTCUhvbmcgS29uZzESMBAGA1UEBxMJ
+SG9uZyBLb25nMRYwFAYDVQQKEw1Ib25na29uZyBQb3N0MSAwHgYDVQQDExdIb25n
+a29uZyBQb3N0IFJvb3QgQ0EgMzAeFw0xNzA2MDMwMjI5NDZaFw00MjA2MDMwMjI5
+NDZaMG8xCzAJBgNVBAYTAkhLMRIwEAYDVQQIEwlIb25nIEtvbmcxEjAQBgNVBAcT
+CUhvbmcgS29uZzEWMBQGA1UEChMNSG9uZ2tvbmcgUG9zdDEgMB4GA1UEAxMXSG9u
+Z2tvbmcgUG9zdCBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+AoICAQCziNfqzg8gTr7m1gNt7ln8wlffKWihgw4+aMdoWJwcYEuJQwy51BWy7sFO
+dem1p+/l6TWZ5Mwc50tfjTMwIDNT2aa71T4Tjukfh0mtUC1Qyhi+AViiE3CWu4mI
+VoBc+L0sPOFMV4i707mV78vH9toxdCim5lSJ9UExyuUmGs2C4HDaOym71QP1mbpV
+9WTRYA6ziUm4ii8F0oRFKHyPaFASePwLtVPLwpgchKOesL4jpNrcyCse2m5FHomY
+2vkALgbpDDtw1VAliJnLzXNg99X/NWfFobxeq81KuEXryGgeDQ0URhLj0mRiikKY
+vLTGCAj4/ahMZJx2Ab0vqWwzD9g/KLg8aQFChn5pwckGyuV6RmXpwtZQQS4/t+Tt
+bNe/JgERohYpSms0BpDsE9K2+2p20jzt8NYt3eEV7KObLyzJPivkaTv/ciWxNoZb
+x39ri1UbSsUgYT2uy1DhCDq+sI9jQVMwCFk8mB13umOResoQUGC/8Ne8lYePl8X+
+l2oBlKN8W4UdKjk60FSh0Tlxnf0h+bV78OLgAo9uliQlLKAeLKjEiafv7ZkGL7YK
+TE/bosw3Gq9HhS2KX8Q0NEwA/RiTZxPRN+ZItIsGxVd7GYYKecsAyVKvQv83j+Gj
+Hno9UKtjBucVtT+2RTeUN7F+8kjDf8V1/peNRY8apxpyKBpADwIDAQABo2MwYTAP
+BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQXnc0e
+i9Y5K3DTXNSguB+wAPzFYTAdBgNVHQ4EFgQUF53NHovWOStw01zUoLgfsAD8xWEw
+DQYJKoZIhvcNAQELBQADggIBAFbVe27mIgHSQpsY1Q7XZiNc4/6gx5LS6ZStS6LG
+7BJ8dNVI0lkUmcDrudHr9EgwW62nV3OZqdPlt9EuWSRY3GguLmLYauRwCy0gUCCk
+MpXRAJi70/33MvJJrsZ64Ee+bs7Lo3I6LWldy8joRTnU+kLBEUx3XZL7av9YROXr
+gZ6voJmtvqkBZss4HTzfQx/0TW60uhdG/H39h4F5ag0zD/ov+BS5gLNdTaqX4fnk
+GMX41TiMJjz98iji7lpJiCzfeT2OnpA8vUFKOt1b9pq0zj8lMH8yfaIDlNDceqFS
+3m6TjRgm/VWsvY+b0s+v54Ysyx8Jb6NvqYTUc79NoXQbTiNg8swOqn+knEwlqLJm
+Ozj/2ZQw9nKEvmhVEA/GcywWaZMH/rFF7buiVWqw2rVKAiUnhde3t4ZEFolsgCs+
+l6mc1X5VTMbeRRAc6uk7nwNT7u56AQIWeNTowr5GdogTPyK7SBIdUgC0An4hGh6c
+JfTzPV4e0hz5sy229zdcxsshTrD3mUcYhcErulWuBurQB7Lcq9CClnXO0lD+mefP
+L5/ndtFhKvshuzHQqp9HpLIiyhY6UFfEW0NnxWViA0kB60PZ2Pierc+xYw5F9KBa
+LJstxabArahH9CdMOA0uG0k7UvToiIMrVCjU8jVStDKDYmlkDJGcn5fqdBb9HxEG
+mpv0
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - G4 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2015 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - G4 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2015 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - G4"
+# Serial: 289383649854506086828220374796556676440
+# MD5 Fingerprint: 89:53:f1:83:23:b7:7c:8e:05:f1:8c:71:38:4e:1f:88
+# SHA1 Fingerprint: 14:88:4e:86:26:37:b0:26:af:59:62:5c:40:77:ec:35:29:ba:96:01
+# SHA256 Fingerprint: db:35:17:d1:f6:73:2a:2d:5a:b9:7c:53:3e:c7:07:79:ee:32:70:a6:2f:b4:ac:42:38:37:24:60:e6:f0:1e:88
+-----BEGIN CERTIFICATE-----
+MIIGSzCCBDOgAwIBAgIRANm1Q3+vqTkPAAAAAFVlrVgwDQYJKoZIhvcNAQELBQAw
+gb4xCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQL
+Ex9TZWUgd3d3LmVudHJ1c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykg
+MjAxNSBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMjAw
+BgNVBAMTKUVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEc0
+MB4XDTE1MDUyNzExMTExNloXDTM3MTIyNzExNDExNlowgb4xCzAJBgNVBAYTAlVT
+MRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1
+c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxNSBFbnRydXN0LCBJ
+bmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMjAwBgNVBAMTKUVudHJ1c3Qg
+Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEc0MIICIjANBgkqhkiG9w0B
+AQEFAAOCAg8AMIICCgKCAgEAsewsQu7i0TD/pZJH4i3DumSXbcr3DbVZwbPLqGgZ
+2K+EbTBwXX7zLtJTmeH+H17ZSK9dE43b/2MzTdMAArzE+NEGCJR5WIoV3imz/f3E
+T+iq4qA7ec2/a0My3dl0ELn39GjUu9CH1apLiipvKgS1sqbHoHrmSKvS0VnM1n4j
+5pds8ELl3FFLFUHtSUrJ3hCX1nbB76W1NhSXNdh4IjVS70O92yfbYVaCNNzLiGAM
+C1rlLAHGVK/XqsEQe9IFWrhAnoanw5CGAlZSCXqc0ieCU0plUmr1POeo8pyvi73T
+DtTUXm6Hnmo9RR3RXRv06QqsYJn7ibT/mCzPfB3pAqoEmh643IhuJbNsZvc8kPNX
+wbMv9W3y+8qh+CmdRouzavbmZwe+LGcKKh9asj5XxNMhIWNlUpEbsZmOeX7m640A
+2Vqq6nPopIICR5b+W45UYaPrL0swsIsjdXJ8ITzI9vF01Bx7owVV7rtNOzK+mndm
+nqxpkCIHH2E6lr7lmk/MBTwoWdPBDFSoWWG9yHJM6Nyfh3+9nEg2XpWjDrk4JFX8
+dWbrAuMINClKxuMrLzOg2qOGpRKX/YAr2hRC45K9PvJdXmd0LhyIRyk0X+IyqJwl
+N4y6mACXi0mWHv0liqzc2thddG5msP9E36EYxr5ILzeUePiVSj9/E15dWf10hkNj
+c0kCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD
+VR0OBBYEFJ84xFYjwznooHFs6FRM5Og6sb9nMA0GCSqGSIb3DQEBCwUAA4ICAQAS
+5UKme4sPDORGpbZgQIeMJX6tuGguW8ZAdjwD+MlZ9POrYs4QjbRaZIxowLByQzTS
+Gwv2LFPSypBLhmb8qoMi9IsabyZIrHZ3CL/FmFz0Jomee8O5ZDIBf9PD3Vht7LGr
+hFV0d4QEJ1JrhkzO3bll/9bGXp+aEJlLdWr+aumXIOTkdnrG0CSqkM0gkLpHZPt/
+B7NTeLUKYvJzQ85BK4FqLoUWlFPUa19yIqtRLULVAJyZv967lDtX/Zr1hstWO1uI
+AeV8KEsD+UmDfLJ/fOPtjqF/YFOOVZ1QNBIPt5d7bIdKROf1beyAN/BYGW5KaHbw
+H5Lk6rWS02FREAutp9lfx1/cH6NcjKF+m7ee01ZvZl4HliDtC3T7Zk6LERXpgUl+
+b7DUUH8i119lAg2m9IUe2K4GS0qn0jFmwvjO5QimpAKWRGhXxNUzzxkvFMSUHHuk
+2fCfDrGA4tGeEWSpiBE6doLlYsKA2KSD7ZPvfC+QsDJMlhVoSFLUmQjAJOgc47Ol
+IQ6SwJAfzyBfyjs4x7dtOvPmRLgOMWuIjnDrnBdSqEGULoe256YSxXXfW8AKbnuk
+5F6G+TaU33fD6Q3AOfF5u0aOq0NZJ7cguyPpVkAh7DE9ZapD8j3fcEThuk0mEDuY
+n/PIjhs4ViFqUZPTkcpG2om3PVODLAgfi49T3f+sHw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation
+# Subject: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation
+# Label: "Microsoft ECC Root Certificate Authority 2017"
+# Serial: 136839042543790627607696632466672567020
+# MD5 Fingerprint: dd:a1:03:e6:4a:93:10:d1:bf:f0:19:42:cb:fe:ed:67
+# SHA1 Fingerprint: 99:9a:64:c3:7f:f4:7d:9f:ab:95:f1:47:69:89:14:60:ee:c4:c3:c5
+# SHA256 Fingerprint: 35:8d:f3:9d:76:4a:f9:e1:b7:66:e9:c9:72:df:35:2e:e1:5c:fa:c2:27:af:6a:d1:d7:0e:8e:4a:6e:dc:ba:02
+-----BEGIN CERTIFICATE-----
+MIICWTCCAd+gAwIBAgIQZvI9r4fei7FK6gxXMQHC7DAKBggqhkjOPQQDAzBlMQsw
+CQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYD
+VQQDEy1NaWNyb3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIw
+MTcwHhcNMTkxMjE4MjMwNjQ1WhcNNDIwNzE4MjMxNjA0WjBlMQswCQYDVQQGEwJV
+UzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1NaWNy
+b3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwdjAQBgcq
+hkjOPQIBBgUrgQQAIgNiAATUvD0CQnVBEyPNgASGAlEvaqiBYgtlzPbKnR5vSmZR
+ogPZnZH6thaxjG7efM3beaYvzrvOcS/lpaso7GMEZpn4+vKTEAXhgShC48Zo9OYb
+hGBKia/teQ87zvH2RPUBeMCjVDBSMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8E
+BTADAQH/MB0GA1UdDgQWBBTIy5lycFIM+Oa+sgRXKSrPQhDtNTAQBgkrBgEEAYI3
+FQEEAwIBADAKBggqhkjOPQQDAwNoADBlAjBY8k3qDPlfXu5gKcs68tvWMoQZP3zV
+L8KxzJOuULsJMsbG7X7JNpQS5GiFBqIb0C8CMQCZ6Ra0DvpWSNSkMBaReNtUjGUB
+iudQZsIxtzm6uBoiB078a1QWIP8rtedMDE2mT3M=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation
+# Subject: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation
+# Label: "Microsoft RSA Root Certificate Authority 2017"
+# Serial: 40975477897264996090493496164228220339
+# MD5 Fingerprint: 10:ff:00:ff:cf:c9:f8:c7:7a:c0:ee:35:8e:c9:0f:47
+# SHA1 Fingerprint: 73:a5:e6:4a:3b:ff:83:16:ff:0e:dc:cc:61:8a:90:6e:4e:ae:4d:74
+# SHA256 Fingerprint: c7:41:f7:0f:4b:2a:8d:88:bf:2e:71:c1:41:22:ef:53:ef:10:eb:a0:cf:a5:e6:4c:fa:20:f4:18:85:30:73:e0
+-----BEGIN CERTIFICATE-----
+MIIFqDCCA5CgAwIBAgIQHtOXCV/YtLNHcB6qvn9FszANBgkqhkiG9w0BAQwFADBl
+MQswCQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYw
+NAYDVQQDEy1NaWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5
+IDIwMTcwHhcNMTkxMjE4MjI1MTIyWhcNNDIwNzE4MjMwMDIzWjBlMQswCQYDVQQG
+EwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1N
+aWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKW76UM4wplZEWCpW9R2LBifOZ
+Nt9GkMml7Xhqb0eRaPgnZ1AzHaGm++DlQ6OEAlcBXZxIQIJTELy/xztokLaCLeX0
+ZdDMbRnMlfl7rEqUrQ7eS0MdhweSE5CAg2Q1OQT85elss7YfUJQ4ZVBcF0a5toW1
+HLUX6NZFndiyJrDKxHBKrmCk3bPZ7Pw71VdyvD/IybLeS2v4I2wDwAW9lcfNcztm
+gGTjGqwu+UcF8ga2m3P1eDNbx6H7JyqhtJqRjJHTOoI+dkC0zVJhUXAoP8XFWvLJ
+jEm7FFtNyP9nTUwSlq31/niol4fX/V4ggNyhSyL71Imtus5Hl0dVe49FyGcohJUc
+aDDv70ngNXtk55iwlNpNhTs+VcQor1fznhPbRiefHqJeRIOkpcrVE7NLP8TjwuaG
+YaRSMLl6IE9vDzhTyzMMEyuP1pq9KsgtsRx9S1HKR9FIJ3Jdh+vVReZIZZ2vUpC6
+W6IYZVcSn2i51BVrlMRpIpj0M+Dt+VGOQVDJNE92kKz8OMHY4Xu54+OU4UZpyw4K
+UGsTuqwPN1q3ErWQgR5WrlcihtnJ0tHXUeOrO8ZV/R4O03QK0dqq6mm4lyiPSMQH
++FJDOvTKVTUssKZqwJz58oHhEmrARdlns87/I6KJClTUFLkqqNfs+avNJVgyeY+Q
+W5g5xAgGwax/Dj0ApQIDAQABo1QwUjAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/
+BAUwAwEB/zAdBgNVHQ4EFgQUCctZf4aycI8awznjwNnpv7tNsiMwEAYJKwYBBAGC
+NxUBBAMCAQAwDQYJKoZIhvcNAQEMBQADggIBAKyvPl3CEZaJjqPnktaXFbgToqZC
+LgLNFgVZJ8og6Lq46BrsTaiXVq5lQ7GPAJtSzVXNUzltYkyLDVt8LkS/gxCP81OC
+gMNPOsduET/m4xaRhPtthH80dK2Jp86519efhGSSvpWhrQlTM93uCupKUY5vVau6
+tZRGrox/2KJQJWVggEbbMwSubLWYdFQl3JPk+ONVFT24bcMKpBLBaYVu32TxU5nh
+SnUgnZUP5NbcA/FZGOhHibJXWpS2qdgXKxdJ5XbLwVaZOjex/2kskZGT4d9Mozd2
+TaGf+G0eHdP67Pv0RR0Tbc/3WeUiJ3IrhvNXuzDtJE3cfVa7o7P4NHmJweDyAmH3
+pvwPuxwXC65B2Xy9J6P9LjrRk5Sxcx0ki69bIImtt2dmefU6xqaWM/5TkshGsRGR
+xpl/j8nWZjEgQRCHLQzWwa80mMpkg/sTV9HB8Dx6jKXB/ZUhoHHBk2dxEuqPiApp
+GWSZI1b7rCoucL5mxAyE7+WL85MB+GqQk2dLsmijtWKP6T+MejteD+eMuMZ87zf9
+dOLITzNy4ZQ5bb0Sr74MTnB8G2+NszKTc0QWbej09+CVgI+WXTik9KveCjCHk9hN
+AHFiRSdLOkKEW39lt2c0Ui2cFmuqqNh7o0JMcccMyj6D5KbvtwEwXlGjefVwaaZB
+RA+GsCyRxj3qrg+E
+-----END CERTIFICATE-----
+
+# Issuer: CN=e-Szigno Root CA 2017 O=Microsec Ltd.
+# Subject: CN=e-Szigno Root CA 2017 O=Microsec Ltd.
+# Label: "e-Szigno Root CA 2017"
+# Serial: 411379200276854331539784714
+# MD5 Fingerprint: de:1f:f6:9e:84:ae:a7:b4:21:ce:1e:58:7d:d1:84:98
+# SHA1 Fingerprint: 89:d4:83:03:4f:9e:9a:48:80:5f:72:37:d4:a9:a6:ef:cb:7c:1f:d1
+# SHA256 Fingerprint: be:b0:0b:30:83:9b:9b:c3:2c:32:e4:44:79:05:95:06:41:f2:64:21:b1:5e:d0:89:19:8b:51:8a:e2:ea:1b:99
+-----BEGIN CERTIFICATE-----
+MIICQDCCAeWgAwIBAgIMAVRI7yH9l1kN9QQKMAoGCCqGSM49BAMCMHExCzAJBgNV
+BAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMgTHRk
+LjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25vIFJv
+b3QgQ0EgMjAxNzAeFw0xNzA4MjIxMjA3MDZaFw00MjA4MjIxMjA3MDZaMHExCzAJ
+BgNVBAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMg
+THRkLjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25v
+IFJvb3QgQ0EgMjAxNzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABJbcPYrYsHtv
+xie+RJCxs1YVe45DJH0ahFnuY2iyxl6H0BVIHqiQrb1TotreOpCmYF9oMrWGQd+H
+Wyx7xf58etqjYzBhMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G
+A1UdDgQWBBSHERUI0arBeAyxr87GyZDvvzAEwDAfBgNVHSMEGDAWgBSHERUI0arB
+eAyxr87GyZDvvzAEwDAKBggqhkjOPQQDAgNJADBGAiEAtVfd14pVCzbhhkT61Nlo
+jbjcI4qKDdQvfepz7L9NbKgCIQDLpbQS+ue16M9+k/zzNY9vTlp8tLxOsvxyqltZ
++efcMQ==
+-----END CERTIFICATE-----
+
+# Issuer: O=CERTSIGN SA OU=certSIGN ROOT CA G2
+# Subject: O=CERTSIGN SA OU=certSIGN ROOT CA G2
+# Label: "certSIGN Root CA G2"
+# Serial: 313609486401300475190
+# MD5 Fingerprint: 8c:f1:75:8a:c6:19:cf:94:b7:f7:65:20:87:c3:97:c7
+# SHA1 Fingerprint: 26:f9:93:b4:ed:3d:28:27:b0:b9:4b:a7:e9:15:1d:a3:8d:92:e5:32
+# SHA256 Fingerprint: 65:7c:fe:2f:a7:3f:aa:38:46:25:71:f3:32:a2:36:3a:46:fc:e7:02:09:51:71:07:02:cd:fb:b6:ee:da:33:05
+-----BEGIN CERTIFICATE-----
+MIIFRzCCAy+gAwIBAgIJEQA0tk7GNi02MA0GCSqGSIb3DQEBCwUAMEExCzAJBgNV
+BAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJR04g
+Uk9PVCBDQSBHMjAeFw0xNzAyMDYwOTI3MzVaFw00MjAyMDYwOTI3MzVaMEExCzAJ
+BgNVBAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJ
+R04gUk9PVCBDQSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDF
+dRmRfUR0dIf+DjuW3NgBFszuY5HnC2/OOwppGnzC46+CjobXXo9X69MhWf05N0Iw
+vlDqtg+piNguLWkh59E3GE59kdUWX2tbAMI5Qw02hVK5U2UPHULlj88F0+7cDBrZ
+uIt4ImfkabBoxTzkbFpG583H+u/E7Eu9aqSs/cwoUe+StCmrqzWaTOTECMYmzPhp
+n+Sc8CnTXPnGFiWeI8MgwT0PPzhAsP6CRDiqWhqKa2NYOLQV07YRaXseVO6MGiKs
+cpc/I1mbySKEwQdPzH/iV8oScLumZfNpdWO9lfsbl83kqK/20U6o2YpxJM02PbyW
+xPFsqa7lzw1uKA2wDrXKUXt4FMMgL3/7FFXhEZn91QqhngLjYl/rNUssuHLoPj1P
+rCy7Lobio3aP5ZMqz6WryFyNSwb/EkaseMsUBzXgqd+L6a8VTxaJW732jcZZroiF
+DsGJ6x9nxUWO/203Nit4ZoORUSs9/1F3dmKh7Gc+PoGD4FapUB8fepmrY7+EF3fx
+DTvf95xhszWYijqy7DwaNz9+j5LP2RIUZNoQAhVB/0/E6xyjyfqZ90bp4RjZsbgy
+LcsUDFDYg2WD7rlcz8sFWkz6GZdr1l0T08JcVLwyc6B49fFtHsufpaafItzRUZ6C
+eWRgKRM+o/1Pcmqr4tTluCRVLERLiohEnMqE0yo7AgMBAAGjQjBAMA8GA1UdEwEB
+/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSCIS1mxteg4BXrzkwJ
+d8RgnlRuAzANBgkqhkiG9w0BAQsFAAOCAgEAYN4auOfyYILVAzOBywaK8SJJ6ejq
+kX/GM15oGQOGO0MBzwdw5AgeZYWR5hEit/UCI46uuR59H35s5r0l1ZUa8gWmr4UC
+b6741jH/JclKyMeKqdmfS0mbEVeZkkMR3rYzpMzXjWR91M08KCy0mpbqTfXERMQl
+qiCA2ClV9+BB/AYm/7k29UMUA2Z44RGx2iBfRgB4ACGlHgAoYXhvqAEBj500mv/0
+OJD7uNGzcgbJceaBxXntC6Z58hMLnPddDnskk7RI24Zf3lCGeOdA5jGokHZwYa+c
+NywRtYK3qq4kNFtyDGkNzVmf9nGvnAvRCjj5BiKDUyUM/FHE5r7iOZULJK2v0ZXk
+ltd0ZGtxTgI8qoXzIKNDOXZbbFD+mpwUHmUUihW9o4JFWklWatKcsWMy5WHgUyIO
+pwpJ6st+H6jiYoD2EEVSmAYY3qXNL3+q1Ok+CHLsIwMCPKaq2LxndD0UF/tUSxfj
+03k9bWtJySgOLnRQvwzZRjoQhsmnP+mg7H/rpXdYaXHmgwo38oZJar55CJD2AhZk
+PuXaTH4MNMn5X7azKFGnpyuqSfqNZSlO42sTp5SjLVFteAxEy9/eCG/Oo2Sr05WE
+1LlSVHJ7liXMvGnjSG4N0MedJ5qq+BOS3R7fY581qRY27Iy4g/Q9iY/NtBde17MX
+QRBdJ3NghVdJIgc=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc.
+# Subject: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc.
+# Label: "Trustwave Global Certification Authority"
+# Serial: 1846098327275375458322922162
+# MD5 Fingerprint: f8:1c:18:2d:2f:ba:5f:6d:a1:6c:bc:c7:ab:91:c7:0e
+# SHA1 Fingerprint: 2f:8f:36:4f:e1:58:97:44:21:59:87:a5:2a:9a:d0:69:95:26:7f:b5
+# SHA256 Fingerprint: 97:55:20:15:f5:dd:fc:3c:87:88:c0:06:94:45:55:40:88:94:45:00:84:f1:00:86:70:86:bc:1a:2b:b5:8d:c8
+-----BEGIN CERTIFICATE-----
+MIIF2jCCA8KgAwIBAgIMBfcOhtpJ80Y1LrqyMA0GCSqGSIb3DQEBCwUAMIGIMQsw
+CQYDVQQGEwJVUzERMA8GA1UECAwISWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28x
+ITAfBgNVBAoMGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1
+c3R3YXZlIEdsb2JhbCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0xNzA4MjMx
+OTM0MTJaFw00MjA4MjMxOTM0MTJaMIGIMQswCQYDVQQGEwJVUzERMA8GA1UECAwI
+SWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28xITAfBgNVBAoMGFRydXN0d2F2ZSBI
+b2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1c3R3YXZlIEdsb2JhbCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
+ALldUShLPDeS0YLOvR29zd24q88KPuFd5dyqCblXAj7mY2Hf8g+CY66j96xz0Xzn
+swuvCAAJWX/NKSqIk4cXGIDtiLK0thAfLdZfVaITXdHG6wZWiYj+rDKd/VzDBcdu
+7oaJuogDnXIhhpCujwOl3J+IKMujkkkP7NAP4m1ET4BqstTnoApTAbqOl5F2brz8
+1Ws25kCI1nsvXwXoLG0R8+eyvpJETNKXpP7ScoFDB5zpET71ixpZfR9oWN0EACyW
+80OzfpgZdNmcc9kYvkHHNHnZ9GLCQ7mzJ7Aiy/k9UscwR7PJPrhq4ufogXBeQotP
+JqX+OsIgbrv4Fo7NDKm0G2x2EOFYeUY+VM6AqFcJNykbmROPDMjWLBz7BegIlT1l
+RtzuzWniTY+HKE40Cz7PFNm73bZQmq131BnW2hqIyE4bJ3XYsgjxroMwuREOzYfw
+hI0Vcnyh78zyiGG69Gm7DIwLdVcEuE4qFC49DxweMqZiNu5m4iK4BUBjECLzMx10
+coos9TkpoNPnG4CELcU9402x/RpvumUHO1jsQkUm+9jaJXLE9gCxInm943xZYkqc
+BW89zubWR2OZxiRvchLIrH+QtAuRcOi35hYQcRfO3gZPSEF9NUqjifLJS3tBEW1n
+twiYTOURGa5CgNz7kAXU+FDKvuStx8KU1xad5hePrzb7AgMBAAGjQjBAMA8GA1Ud
+EwEB/wQFMAMBAf8wHQYDVR0OBBYEFJngGWcNYtt2s9o9uFvo/ULSMQ6HMA4GA1Ud
+DwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAmHNw4rDT7TnsTGDZqRKGFx6W
+0OhUKDtkLSGm+J1WE2pIPU/HPinbbViDVD2HfSMF1OQc3Og4ZYbFdada2zUFvXfe
+uyk3QAUHw5RSn8pk3fEbK9xGChACMf1KaA0HZJDmHvUqoai7PF35owgLEQzxPy0Q
+lG/+4jSHg9bP5Rs1bdID4bANqKCqRieCNqcVtgimQlRXtpla4gt5kNdXElE1GYhB
+aCXUNxeEFfsBctyV3lImIJgm4nb1J2/6ADtKYdkNy1GTKv0WBpanI5ojSP5RvbbE
+sLFUzt5sQa0WZ37b/TjNuThOssFgy50X31ieemKyJo90lZvkWx3SD92YHJtZuSPT
+MaCm/zjdzyBP6VhWOmfD0faZmZ26NraAL4hHT4a/RDqA5Dccprrql5gR0IRiR2Qe
+qu5AvzSxnI9O4fKSTx+O856X3vOmeWqJcU9LJxdI/uz0UA9PSX3MReO9ekDFQdxh
+VicGaeVyQYHTtgGJoC86cnn+OjC/QezHYj6RS8fZMXZC+fc8Y+wmjHMMfRod6qh8
+h6jCJ3zhM0EPz8/8AKAigJ5Kp28AsEFFtyLKaEjFQqKu3R3y4G5OBVixwJAWKqQ9
+EEC+j2Jjg6mcgn0tAumDMHzLJ8n9HmYAsC7TIS+OMxZsmO0QqAfWzJPP29FpHOTK
+yeC2nOnOcXHebD8WpHk=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc.
+# Subject: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc.
+# Label: "Trustwave Global ECC P256 Certification Authority"
+# Serial: 4151900041497450638097112925
+# MD5 Fingerprint: 5b:44:e3:8d:5d:36:86:26:e8:0d:05:d2:59:a7:83:54
+# SHA1 Fingerprint: b4:90:82:dd:45:0c:be:8b:5b:b1:66:d3:e2:a4:08:26:cd:ed:42:cf
+# SHA256 Fingerprint: 94:5b:bc:82:5e:a5:54:f4:89:d1:fd:51:a7:3d:df:2e:a6:24:ac:70:19:a0:52:05:22:5c:22:a7:8c:cf:a8:b4
+-----BEGIN CERTIFICATE-----
+MIICYDCCAgegAwIBAgIMDWpfCD8oXD5Rld9dMAoGCCqGSM49BAMCMIGRMQswCQYD
+VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf
+BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3
+YXZlIEdsb2JhbCBFQ0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x
+NzA4MjMxOTM1MTBaFw00MjA4MjMxOTM1MTBaMIGRMQswCQYDVQQGEwJVUzERMA8G
+A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0
+d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF
+Q0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTBZMBMGByqGSM49AgEGCCqG
+SM49AwEHA0IABH77bOYj43MyCMpg5lOcunSNGLB4kFKA3TjASh3RqMyTpJcGOMoN
+FWLGjgEqZZ2q3zSRLoHB5DOSMcT9CTqmP62jQzBBMA8GA1UdEwEB/wQFMAMBAf8w
+DwYDVR0PAQH/BAUDAwcGADAdBgNVHQ4EFgQUo0EGrJBt0UrrdaVKEJmzsaGLSvcw
+CgYIKoZIzj0EAwIDRwAwRAIgB+ZU2g6gWrKuEZ+Hxbb/ad4lvvigtwjzRM4q3wgh
+DDcCIC0mA6AFvWvR9lz4ZcyGbbOcNEhjhAnFjXca4syc4XR7
+-----END CERTIFICATE-----
+
+# Issuer: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc.
+# Subject: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc.
+# Label: "Trustwave Global ECC P384 Certification Authority"
+# Serial: 2704997926503831671788816187
+# MD5 Fingerprint: ea:cf:60:c4:3b:b9:15:29:40:a1:97:ed:78:27:93:d6
+# SHA1 Fingerprint: e7:f3:a3:c8:cf:6f:c3:04:2e:6d:0e:67:32:c5:9e:68:95:0d:5e:d2
+# SHA256 Fingerprint: 55:90:38:59:c8:c0:c3:eb:b8:75:9e:ce:4e:25:57:22:5f:f5:75:8b:bd:38:eb:d4:82:76:60:1e:1b:d5:80:97
+-----BEGIN CERTIFICATE-----
+MIICnTCCAiSgAwIBAgIMCL2Fl2yZJ6SAaEc7MAoGCCqGSM49BAMDMIGRMQswCQYD
+VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf
+BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3
+YXZlIEdsb2JhbCBFQ0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x
+NzA4MjMxOTM2NDNaFw00MjA4MjMxOTM2NDNaMIGRMQswCQYDVQQGEwJVUzERMA8G
+A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0
+d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF
+Q0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABGvaDXU1CDFHBa5FmVXxERMuSvgQMSOjfoPTfygIOiYaOs+Xgh+AtycJ
+j9GOMMQKmw6sWASr9zZ9lCOkmwqKi6vr/TklZvFe/oyujUF5nQlgziip04pt89ZF
+1PKYhDhloKNDMEEwDwYDVR0TAQH/BAUwAwEB/zAPBgNVHQ8BAf8EBQMDBwYAMB0G
+A1UdDgQWBBRVqYSJ0sEyvRjLbKYHTsjnnb6CkDAKBggqhkjOPQQDAwNnADBkAjA3
+AZKXRRJ+oPM+rRk6ct30UJMDEr5E0k9BpIycnR+j9sKS50gU/k6bpZFXrsY3crsC
+MGclCrEMXu6pY5Jv5ZAL/mYiykf9ijH3g/56vxC+GCsej/YpHpRZ744hN8tRmKVu
+Sw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp.
+# Subject: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp.
+# Label: "NAVER Global Root Certification Authority"
+# Serial: 9013692873798656336226253319739695165984492813
+# MD5 Fingerprint: c8:7e:41:f6:25:3b:f5:09:b3:17:e8:46:3d:bf:d0:9b
+# SHA1 Fingerprint: 8f:6b:f2:a9:27:4a:da:14:a0:c4:f4:8e:61:27:f9:c0:1e:78:5d:d1
+# SHA256 Fingerprint: 88:f4:38:dc:f8:ff:d1:fa:8f:42:91:15:ff:e5:f8:2a:e1:e0:6e:0c:70:c3:75:fa:ad:71:7b:34:a4:9e:72:65
+-----BEGIN CERTIFICATE-----
+MIIFojCCA4qgAwIBAgIUAZQwHqIL3fXFMyqxQ0Rx+NZQTQ0wDQYJKoZIhvcNAQEM
+BQAwaTELMAkGA1UEBhMCS1IxJjAkBgNVBAoMHU5BVkVSIEJVU0lORVNTIFBMQVRG
+T1JNIENvcnAuMTIwMAYDVQQDDClOQVZFUiBHbG9iYWwgUm9vdCBDZXJ0aWZpY2F0
+aW9uIEF1dGhvcml0eTAeFw0xNzA4MTgwODU4NDJaFw0zNzA4MTgyMzU5NTlaMGkx
+CzAJBgNVBAYTAktSMSYwJAYDVQQKDB1OQVZFUiBCVVNJTkVTUyBQTEFURk9STSBD
+b3JwLjEyMDAGA1UEAwwpTkFWRVIgR2xvYmFsIFJvb3QgQ2VydGlmaWNhdGlvbiBB
+dXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC21PGTXLVA
+iQqrDZBbUGOukJR0F0Vy1ntlWilLp1agS7gvQnXp2XskWjFlqxcX0TM62RHcQDaH
+38dq6SZeWYp34+hInDEW+j6RscrJo+KfziFTowI2MMtSAuXaMl3Dxeb57hHHi8lE
+HoSTGEq0n+USZGnQJoViAbbJAh2+g1G7XNr4rRVqmfeSVPc0W+m/6imBEtRTkZaz
+kVrd/pBzKPswRrXKCAfHcXLJZtM0l/aM9BhK4dA9WkW2aacp+yPOiNgSnABIqKYP
+szuSjXEOdMWLyEz59JuOuDxp7W87UC9Y7cSw0BwbagzivESq2M0UXZR4Yb8Obtoq
+vC8MC3GmsxY/nOb5zJ9TNeIDoKAYv7vxvvTWjIcNQvcGufFt7QSUqP620wbGQGHf
+nZ3zVHbOUzoBppJB7ASjjw2i1QnK1sua8e9DXcCrpUHPXFNwcMmIpi3Ua2FzUCaG
+YQ5fG8Ir4ozVu53BA0K6lNpfqbDKzE0K70dpAy8i+/Eozr9dUGWokG2zdLAIx6yo
+0es+nPxdGoMuK8u180SdOqcXYZaicdNwlhVNt0xz7hlcxVs+Qf6sdWA7G2POAN3a
+CJBitOUt7kinaxeZVL6HSuOpXgRM6xBtVNbv8ejyYhbLgGvtPe31HzClrkvJE+2K
+AQHJuFFYwGY6sWZLxNUxAmLpdIQM201GLQIDAQABo0IwQDAdBgNVHQ4EFgQU0p+I
+36HNLL3s9TsBAZMzJ7LrYEswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMB
+Af8wDQYJKoZIhvcNAQEMBQADggIBADLKgLOdPVQG3dLSLvCkASELZ0jKbY7gyKoN
+qo0hV4/GPnrK21HUUrPUloSlWGB/5QuOH/XcChWB5Tu2tyIvCZwTFrFsDDUIbatj
+cu3cvuzHV+YwIHHW1xDBE1UBjCpD5EHxzzp6U5LOogMFDTjfArsQLtk70pt6wKGm
++LUx5vR1yblTmXVHIloUFcd4G7ad6Qz4G3bxhYTeodoS76TiEJd6eN4MUZeoIUCL
+hr0N8F5OSza7OyAfikJW4Qsav3vQIkMsRIz75Sq0bBwcupTgE34h5prCy8VCZLQe
+lHsIJchxzIdFV4XTnyliIoNRlwAYl3dqmJLJfGBs32x9SuRwTMKeuB330DTHD8z7
+p/8Dvq1wkNoL3chtl1+afwkyQf3NosxabUzyqkn+Zvjp2DXrDige7kgvOtB5CTh8
+piKCk5XQA76+AqAF3SAi428diDRgxuYKuQl1C/AH6GmWNcf7I4GOODm4RStDeKLR
+LBT/DShycpWbXgnbiUSYqqFJu3FS8r/2/yehNq+4tneI3TqkbZs0kNwUXTC/t+sX
+5Ie3cdCh13cV1ELX8vMxmV2b3RZtP+oGI/hGoiLtk/bdmuYqh7GYVPEi92tF4+KO
+dh2ajcQGjTa3FPOdVGm3jjzVpG2Tgbet9r1ke8LJaDmgkpzNNIaRkPpkUZ3+/uul
+9XXeifdy
+-----END CERTIFICATE-----
+
+# Issuer: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres
+# Subject: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres
+# Label: "AC RAIZ FNMT-RCM SERVIDORES SEGUROS"
+# Serial: 131542671362353147877283741781055151509
+# MD5 Fingerprint: 19:36:9c:52:03:2f:d2:d1:bb:23:cc:dd:1e:12:55:bb
+# SHA1 Fingerprint: 62:ff:d9:9e:c0:65:0d:03:ce:75:93:d2:ed:3f:2d:32:c9:e3:e5:4a
+# SHA256 Fingerprint: 55:41:53:b1:3d:2c:f9:dd:b7:53:bf:be:1a:4e:0a:e0:8d:0a:a4:18:70:58:fe:60:a2:b8:62:b2:e4:b8:7b:cb
+-----BEGIN CERTIFICATE-----
+MIICbjCCAfOgAwIBAgIQYvYybOXE42hcG2LdnC6dlTAKBggqhkjOPQQDAzB4MQsw
+CQYDVQQGEwJFUzERMA8GA1UECgwIRk5NVC1SQ00xDjAMBgNVBAsMBUNlcmVzMRgw
+FgYDVQRhDA9WQVRFUy1RMjgyNjAwNEoxLDAqBgNVBAMMI0FDIFJBSVogRk5NVC1S
+Q00gU0VSVklET1JFUyBTRUdVUk9TMB4XDTE4MTIyMDA5MzczM1oXDTQzMTIyMDA5
+MzczM1oweDELMAkGA1UEBhMCRVMxETAPBgNVBAoMCEZOTVQtUkNNMQ4wDAYDVQQL
+DAVDZXJlczEYMBYGA1UEYQwPVkFURVMtUTI4MjYwMDRKMSwwKgYDVQQDDCNBQyBS
+QUlaIEZOTVQtUkNNIFNFUlZJRE9SRVMgU0VHVVJPUzB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABPa6V1PIyqvfNkpSIeSX0oNnnvBlUdBeh8dHsVnyV0ebAAKTRBdp20LH
+sbI6GA60XYyzZl2hNPk2LEnb80b8s0RpRBNm/dfF/a82Tc4DTQdxz69qBdKiQ1oK
+Um8BA06Oi6NCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD
+VR0OBBYEFAG5L++/EYZg8k/QQW6rcx/n0m5JMAoGCCqGSM49BAMDA2kAMGYCMQCu
+SuMrQMN0EfKVrRYj3k4MGuZdpSRea0R7/DjiT8ucRRcRTBQnJlU5dUoDzBOQn5IC
+MQD6SmxgiHPz7riYYqnOK8LZiqZwMR2vsJRM60/G49HzYqc8/5MuB1xJAWdpEgJy
+v+c=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign Root R46 O=GlobalSign nv-sa
+# Subject: CN=GlobalSign Root R46 O=GlobalSign nv-sa
+# Label: "GlobalSign Root R46"
+# Serial: 1552617688466950547958867513931858518042577
+# MD5 Fingerprint: c4:14:30:e4:fa:66:43:94:2a:6a:1b:24:5f:19:d0:ef
+# SHA1 Fingerprint: 53:a2:b0:4b:ca:6b:d6:45:e6:39:8a:8e:c4:0d:d2:bf:77:c3:a2:90
+# SHA256 Fingerprint: 4f:a3:12:6d:8d:3a:11:d1:c4:85:5a:4f:80:7c:ba:d6:cf:91:9d:3a:5a:88:b0:3b:ea:2c:63:72:d9:3c:40:c9
+-----BEGIN CERTIFICATE-----
+MIIFWjCCA0KgAwIBAgISEdK7udcjGJ5AXwqdLdDfJWfRMA0GCSqGSIb3DQEBDAUA
+MEYxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYD
+VQQDExNHbG9iYWxTaWduIFJvb3QgUjQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMy
+MDAwMDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYt
+c2ExHDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBSNDYwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQCsrHQy6LNl5brtQyYdpokNRbopiLKkHWPd08EsCVeJ
+OaFV6Wc0dwxu5FUdUiXSE2te4R2pt32JMl8Nnp8semNgQB+msLZ4j5lUlghYruQG
+vGIFAha/r6gjA7aUD7xubMLL1aa7DOn2wQL7Id5m3RerdELv8HQvJfTqa1VbkNud
+316HCkD7rRlr+/fKYIje2sGP1q7Vf9Q8g+7XFkyDRTNrJ9CG0Bwta/OrffGFqfUo
+0q3v84RLHIf8E6M6cqJaESvWJ3En7YEtbWaBkoe0G1h6zD8K+kZPTXhc+CtI4wSE
+y132tGqzZfxCnlEmIyDLPRT5ge1lFgBPGmSXZgjPjHvjK8Cd+RTyG/FWaha/LIWF
+zXg4mutCagI0GIMXTpRW+LaCtfOW3T3zvn8gdz57GSNrLNRyc0NXfeD412lPFzYE
++cCQYDdF3uYM2HSNrpyibXRdQr4G9dlkbgIQrImwTDsHTUB+JMWKmIJ5jqSngiCN
+I/onccnfxkF0oE32kRbcRoxfKWMxWXEM2G/CtjJ9++ZdU6Z+Ffy7dXxd7Pj2Fxzs
+x2sZy/N78CsHpdlseVR2bJ0cpm4O6XkMqCNqo98bMDGfsVR7/mrLZqrcZdCinkqa
+ByFrgY/bxFn63iLABJzjqls2k+g9vXqhnQt2sQvHnf3PmKgGwvgqo6GDoLclcqUC
+4wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQUA1yrc4GHqMywptWU4jaWSf8FmSwwDQYJKoZIhvcNAQEMBQADggIBAHx4
+7PYCLLtbfpIrXTncvtgdokIzTfnvpCo7RGkerNlFo048p9gkUbJUHJNOxO97k4Vg
+JuoJSOD1u8fpaNK7ajFxzHmuEajwmf3lH7wvqMxX63bEIaZHU1VNaL8FpO7XJqti
+2kM3S+LGteWygxk6x9PbTZ4IevPuzz5i+6zoYMzRx6Fcg0XERczzF2sUyQQCPtIk
+pnnpHs6i58FZFZ8d4kuaPp92CC1r2LpXFNqD6v6MVenQTqnMdzGxRBF6XLE+0xRF
+FRhiJBPSy03OXIPBNvIQtQ6IbbjhVp+J3pZmOUdkLG5NrmJ7v2B0GbhWrJKsFjLt
+rWhV/pi60zTe9Mlhww6G9kuEYO4Ne7UyWHmRVSyBQ7N0H3qqJZ4d16GLuc1CLgSk
+ZoNNiTW2bKg2SnkheCLQQrzRQDGQob4Ez8pn7fXwgNNgyYMqIgXQBztSvwyeqiv5
+u+YfjyW6hY0XHgL+XVAEV8/+LbzvXMAaq7afJMbfc2hIkCwU9D9SGuTSyxTDYWnP
+4vkYxboznxSjBF25cfe1lNj2M8FawTSLfJvdkzrnE6JwYZ+vj+vYxXX4M2bUdGc6
+N3ec592kD3ZDZopD8p/7DEJ4Y9HiD2971KE9dJeFt0g5QdYg/NA6s/rob8SKunE3
+vouXsXgxT7PntgMTzlSdriVZzH81Xwj3QEUxeCp6
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign Root E46 O=GlobalSign nv-sa
+# Subject: CN=GlobalSign Root E46 O=GlobalSign nv-sa
+# Label: "GlobalSign Root E46"
+# Serial: 1552617690338932563915843282459653771421763
+# MD5 Fingerprint: b5:b8:66:ed:de:08:83:e3:c9:e2:01:34:06:ac:51:6f
+# SHA1 Fingerprint: 39:b4:6c:d5:fe:80:06:eb:e2:2f:4a:bb:08:33:a0:af:db:b9:dd:84
+# SHA256 Fingerprint: cb:b9:c4:4d:84:b8:04:3e:10:50:ea:31:a6:9f:51:49:55:d7:bf:d2:e2:c6:b4:93:01:01:9a:d6:1d:9f:50:58
+-----BEGIN CERTIFICATE-----
+MIICCzCCAZGgAwIBAgISEdK7ujNu1LzmJGjFDYQdmOhDMAoGCCqGSM49BAMDMEYx
+CzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYDVQQD
+ExNHbG9iYWxTaWduIFJvb3QgRTQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMyMDAw
+MDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2Ex
+HDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBFNDYwdjAQBgcqhkjOPQIBBgUrgQQA
+IgNiAAScDrHPt+ieUnd1NPqlRqetMhkytAepJ8qUuwzSChDH2omwlwxwEwkBjtjq
+R+q+soArzfwoDdusvKSGN+1wCAB16pMLey5SnCNoIwZD7JIvU4Tb+0cUB+hflGdd
+yXqBPCCjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud
+DgQWBBQxCpCPtsad0kRLgLWi5h+xEk8blTAKBggqhkjOPQQDAwNoADBlAjEA31SQ
+7Zvvi5QCkxeCmb6zniz2C5GMn0oUsfZkvLtoURMMA/cVi4RguYv/Uo7njLwcAjA8
++RHUjE7AwWHCFUyqqx0LMV87HOIAl0Qx5v5zli/altP+CAezNIm8BZ/3Hobui3A=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH
+# Subject: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH
+# Label: "GLOBALTRUST 2020"
+# Serial: 109160994242082918454945253
+# MD5 Fingerprint: 8a:c7:6f:cb:6d:e3:cc:a2:f1:7c:83:fa:0e:78:d7:e8
+# SHA1 Fingerprint: d0:67:c1:13:51:01:0c:aa:d0:c7:6a:65:37:31:16:26:4f:53:71:a2
+# SHA256 Fingerprint: 9a:29:6a:51:82:d1:d4:51:a2:e3:7f:43:9b:74:da:af:a2:67:52:33:29:f9:0f:9a:0d:20:07:c3:34:e2:3c:9a
+-----BEGIN CERTIFICATE-----
+MIIFgjCCA2qgAwIBAgILWku9WvtPilv6ZeUwDQYJKoZIhvcNAQELBQAwTTELMAkG
+A1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9uaXRvcmluZyBHbWJIMRkw
+FwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMB4XDTIwMDIxMDAwMDAwMFoXDTQwMDYx
+MDAwMDAwMFowTTELMAkGA1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9u
+aXRvcmluZyBHbWJIMRkwFwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMIICIjANBgkq
+hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAri5WrRsc7/aVj6B3GyvTY4+ETUWiD59b
+RatZe1E0+eyLinjF3WuvvcTfk0Uev5E4C64OFudBc/jbu9G4UeDLgztzOG53ig9Z
+YybNpyrOVPu44sB8R85gfD+yc/LAGbaKkoc1DZAoouQVBGM+uq/ufF7MpotQsjj3
+QWPKzv9pj2gOlTblzLmMCcpL3TGQlsjMH/1WljTbjhzqLL6FLmPdqqmV0/0plRPw
+yJiT2S0WR5ARg6I6IqIoV6Lr/sCMKKCmfecqQjuCgGOlYx8ZzHyyZqjC0203b+J+
+BlHZRYQfEs4kUmSFC0iAToexIiIwquuuvuAC4EDosEKAA1GqtH6qRNdDYfOiaxaJ
+SaSjpCuKAsR49GiKweR6NrFvG5Ybd0mN1MkGco/PU+PcF4UgStyYJ9ORJitHHmkH
+r96i5OTUawuzXnzUJIBHKWk7buis/UDr2O1xcSvy6Fgd60GXIsUf1DnQJ4+H4xj0
+4KlGDfV0OoIu0G4skaMxXDtG6nsEEFZegB31pWXogvziB4xiRfUg3kZwhqG8k9Me
+dKZssCz3AwyIDMvUclOGvGBG85hqwvG/Q/lwIHfKN0F5VVJjjVsSn8VoxIidrPIw
+q7ejMZdnrY8XD2zHc+0klGvIg5rQmjdJBKuxFshsSUktq6HQjJLyQUp5ISXbY9e2
+nKd+Qmn7OmMCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AQYwHQYDVR0OBBYEFNwuH9FhN3nkq9XVsxJxaD1qaJwiMB8GA1UdIwQYMBaAFNwu
+H9FhN3nkq9XVsxJxaD1qaJwiMA0GCSqGSIb3DQEBCwUAA4ICAQCR8EICaEDuw2jA
+VC/f7GLDw56KoDEoqoOOpFaWEhCGVrqXctJUMHytGdUdaG/7FELYjQ7ztdGl4wJC
+XtzoRlgHNQIw4Lx0SsFDKv/bGtCwr2zD/cuz9X9tAy5ZVp0tLTWMstZDFyySCstd
+6IwPS3BD0IL/qMy/pJTAvoe9iuOTe8aPmxadJ2W8esVCgmxcB9CpwYhgROmYhRZf
++I/KARDOJcP5YBugxZfD0yyIMaK9MOzQ0MAS8cE54+X1+NZK3TTN+2/BT+MAi1bi
+kvcoskJ3ciNnxz8RFbLEAwW+uxF7Cr+obuf/WEPPm2eggAe2HcqtbepBEX4tdJP7
+wry+UUTF72glJ4DjyKDUEuzZpTcdN3y0kcra1LGWge9oXHYQSa9+pTeAsRxSvTOB
+TI/53WXZFM2KJVj04sWDpQmQ1GwUY7VA3+vA/MRYfg0UFodUJ25W5HCEuGwyEn6C
+MUO+1918oa2u1qsgEu8KwxCMSZY13At1XrFP1U80DhEgB3VDRemjEdqso5nCtnkn
+4rnvyOL2NSl6dPrFf4IFYqYK6miyeUcGbvJXqBUzxvd4Sj1Ce2t+/vdG6tHrju+I
+aFvowdlxfv1k7/9nR4hYJS8+hge9+6jlgqispdNpQ80xiEmEU5LAsTkbOYMBMMTy
+qfrQA71yN2BWHzZ8vTmR9W0Nv3vXkg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz
+# Subject: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz
+# Label: "ANF Secure Server Root CA"
+# Serial: 996390341000653745
+# MD5 Fingerprint: 26:a6:44:5a:d9:af:4e:2f:b2:1d:b6:65:b0:4e:e8:96
+# SHA1 Fingerprint: 5b:6e:68:d0:cc:15:b6:a0:5f:1e:c1:5f:ae:02:fc:6b:2f:5d:6f:74
+# SHA256 Fingerprint: fb:8f:ec:75:91:69:b9:10:6b:1e:51:16:44:c6:18:c5:13:04:37:3f:6c:06:43:08:8d:8b:ef:fd:1b:99:75:99
+-----BEGIN CERTIFICATE-----
+MIIF7zCCA9egAwIBAgIIDdPjvGz5a7EwDQYJKoZIhvcNAQELBQAwgYQxEjAQBgNV
+BAUTCUc2MzI4NzUxMDELMAkGA1UEBhMCRVMxJzAlBgNVBAoTHkFORiBBdXRvcmlk
+YWQgZGUgQ2VydGlmaWNhY2lvbjEUMBIGA1UECxMLQU5GIENBIFJhaXoxIjAgBgNV
+BAMTGUFORiBTZWN1cmUgU2VydmVyIFJvb3QgQ0EwHhcNMTkwOTA0MTAwMDM4WhcN
+MzkwODMwMTAwMDM4WjCBhDESMBAGA1UEBRMJRzYzMjg3NTEwMQswCQYDVQQGEwJF
+UzEnMCUGA1UEChMeQU5GIEF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uMRQwEgYD
+VQQLEwtBTkYgQ0EgUmFpejEiMCAGA1UEAxMZQU5GIFNlY3VyZSBTZXJ2ZXIgUm9v
+dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANvrayvmZFSVgpCj
+cqQZAZ2cC4Ffc0m6p6zzBE57lgvsEeBbphzOG9INgxwruJ4dfkUyYA8H6XdYfp9q
+yGFOtibBTI3/TO80sh9l2Ll49a2pcbnvT1gdpd50IJeh7WhM3pIXS7yr/2WanvtH
+2Vdy8wmhrnZEE26cLUQ5vPnHO6RYPUG9tMJJo8gN0pcvB2VSAKduyK9o7PQUlrZX
+H1bDOZ8rbeTzPvY1ZNoMHKGESy9LS+IsJJ1tk0DrtSOOMspvRdOoiXsezx76W0OL
+zc2oD2rKDF65nkeP8Nm2CgtYZRczuSPkdxl9y0oukntPLxB3sY0vaJxizOBQ+OyR
+p1RMVwnVdmPF6GUe7m1qzwmd+nxPrWAI/VaZDxUse6mAq4xhj0oHdkLePfTdsiQz
+W7i1o0TJrH93PB0j7IKppuLIBkwC/qxcmZkLLxCKpvR/1Yd0DVlJRfbwcVw5Kda/
+SiOL9V8BY9KHcyi1Swr1+KuCLH5zJTIdC2MKF4EA/7Z2Xue0sUDKIbvVgFHlSFJn
+LNJhiQcND85Cd8BEc5xEUKDbEAotlRyBr+Qc5RQe8TZBAQIvfXOn3kLMTOmJDVb3
+n5HUA8ZsyY/b2BzgQJhdZpmYgG4t/wHFzstGH6wCxkPmrqKEPMVOHj1tyRRM4y5B
+u8o5vzY8KhmqQYdOpc5LMnndkEl/AgMBAAGjYzBhMB8GA1UdIwQYMBaAFJxf0Gxj
+o1+TypOYCK2Mh6UsXME3MB0GA1UdDgQWBBScX9BsY6Nfk8qTmAitjIelLFzBNzAO
+BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC
+AgEATh65isagmD9uw2nAalxJUqzLK114OMHVVISfk/CHGT0sZonrDUL8zPB1hT+L
+9IBdeeUXZ701guLyPI59WzbLWoAAKfLOKyzxj6ptBZNscsdW699QIyjlRRA96Gej
+rw5VD5AJYu9LWaL2U/HANeQvwSS9eS9OICI7/RogsKQOLHDtdD+4E5UGUcjohybK
+pFtqFiGS3XNgnhAY3jyB6ugYw3yJ8otQPr0R4hUDqDZ9MwFsSBXXiJCZBMXM5gf0
+vPSQ7RPi6ovDj6MzD8EpTBNO2hVWcXNyglD2mjN8orGoGjR0ZVzO0eurU+AagNjq
+OknkJjCb5RyKqKkVMoaZkgoQI1YS4PbOTOK7vtuNknMBZi9iPrJyJ0U27U1W45eZ
+/zo1PqVUSlJZS2Db7v54EX9K3BR5YLZrZAPbFYPhor72I5dQ8AkzNqdxliXzuUJ9
+2zg/LFis6ELhDtjTO0wugumDLmsx2d1Hhk9tl5EuT+IocTUW0fJz/iUrB0ckYyfI
++PbZa/wSMVYIwFNCr5zQM378BvAxRAMU8Vjq8moNqRGyg77FGr8H6lnco4g175x2
+MjxNBiLOFeXdntiP2t7SxDnlF4HPOEfrf4htWRvfn0IUrn7PqLBmZdo3r5+qPeoo
+tt7VMVgWglvquxl1AnMaykgaIZOQCo6ThKd9OyMYkomgjaw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum EC-384 CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
+# Subject: CN=Certum EC-384 CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
+# Label: "Certum EC-384 CA"
+# Serial: 160250656287871593594747141429395092468
+# MD5 Fingerprint: b6:65:b3:96:60:97:12:a1:ec:4e:e1:3d:a3:c6:c9:f1
+# SHA1 Fingerprint: f3:3e:78:3c:ac:df:f4:a2:cc:ac:67:55:69:56:d7:e5:16:3c:e1:ed
+# SHA256 Fingerprint: 6b:32:80:85:62:53:18:aa:50:d1:73:c9:8d:8b:da:09:d5:7e:27:41:3d:11:4c:f7:87:a0:f5:d0:6c:03:0c:f6
+-----BEGIN CERTIFICATE-----
+MIICZTCCAeugAwIBAgIQeI8nXIESUiClBNAt3bpz9DAKBggqhkjOPQQDAzB0MQsw
+CQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEuMScw
+JQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAXBgNVBAMT
+EENlcnR1bSBFQy0zODQgQ0EwHhcNMTgwMzI2MDcyNDU0WhcNNDMwMzI2MDcyNDU0
+WjB0MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBT
+LkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAX
+BgNVBAMTEENlcnR1bSBFQy0zODQgQ0EwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAATE
+KI6rGFtqvm5kN2PkzeyrOvfMobgOgknXhimfoZTy42B4mIF4Bk3y7JoOV2CDn7Tm
+Fy8as10CW4kjPMIRBSqniBMY81CE1700LCeJVf/OTOffph8oxPBUw7l8t1Ot68Kj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI0GZnQkdjrzife81r1HfS+8
+EF9LMA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNoADBlAjADVS2m5hjEfO/J
+UG7BJw+ch69u1RsIGL2SKcHvlJF40jocVYli5RsJHrpka/F2tNQCMQC0QoSZ/6vn
+nvuRlydd3LBbMHHOXjgaatkl5+r3YZJW+OraNsKHZZYuciUvf9/DE8k=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
+# Subject: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
+# Label: "Certum Trusted Root CA"
+# Serial: 40870380103424195783807378461123655149
+# MD5 Fingerprint: 51:e1:c2:e7:fe:4c:84:af:59:0e:2f:f4:54:6f:ea:29
+# SHA1 Fingerprint: c8:83:44:c0:18:ae:9f:cc:f1:87:b7:8f:22:d1:c5:d7:45:84:ba:e5
+# SHA256 Fingerprint: fe:76:96:57:38:55:77:3e:37:a9:5e:7a:d4:d9:cc:96:c3:01:57:c1:5d:31:76:5b:a9:b1:57:04:e1:ae:78:fd
+-----BEGIN CERTIFICATE-----
+MIIFwDCCA6igAwIBAgIQHr9ZULjJgDdMBvfrVU+17TANBgkqhkiG9w0BAQ0FADB6
+MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEu
+MScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxHzAdBgNV
+BAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwHhcNMTgwMzE2MTIxMDEzWhcNNDMw
+MzE2MTIxMDEzWjB6MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEg
+U3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRo
+b3JpdHkxHzAdBgNVBAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQDRLY67tzbqbTeRn06TpwXkKQMlzhyC93yZ
+n0EGze2jusDbCSzBfN8pfktlL5On1AFrAygYo9idBcEq2EXxkd7fO9CAAozPOA/q
+p1x4EaTByIVcJdPTsuclzxFUl6s1wB52HO8AU5853BSlLCIls3Jy/I2z5T4IHhQq
+NwuIPMqw9MjCoa68wb4pZ1Xi/K1ZXP69VyywkI3C7Te2fJmItdUDmj0VDT06qKhF
+8JVOJVkdzZhpu9PMMsmN74H+rX2Ju7pgE8pllWeg8xn2A1bUatMn4qGtg/BKEiJ3
+HAVz4hlxQsDsdUaakFjgao4rpUYwBI4Zshfjvqm6f1bxJAPXsiEodg42MEx51UGa
+mqi4NboMOvJEGyCI98Ul1z3G4z5D3Yf+xOr1Uz5MZf87Sst4WmsXXw3Hw09Omiqi
+7VdNIuJGmj8PkTQkfVXjjJU30xrwCSss0smNtA0Aq2cpKNgB9RkEth2+dv5yXMSF
+ytKAQd8FqKPVhJBPC/PgP5sZ0jeJP/J7UhyM9uH3PAeXjA6iWYEMspA90+NZRu0P
+qafegGtaqge2Gcu8V/OXIXoMsSt0Puvap2ctTMSYnjYJdmZm/Bo/6khUHL4wvYBQ
+v3y1zgD2DGHZ5yQD4OMBgQ692IU0iL2yNqh7XAjlRICMb/gv1SHKHRzQ+8S1h9E6
+Tsd2tTVItQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSM+xx1
+vALTn04uSNn5YFSqxLNP+jAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQENBQAD
+ggIBAEii1QALLtA/vBzVtVRJHlpr9OTy4EA34MwUe7nJ+jW1dReTagVphZzNTxl4
+WxmB82M+w85bj/UvXgF2Ez8sALnNllI5SW0ETsXpD4YN4fqzX4IS8TrOZgYkNCvo
+zMrnadyHncI013nR03e4qllY/p0m+jiGPp2Kh2RX5Rc64vmNueMzeMGQ2Ljdt4NR
+5MTMI9UGfOZR0800McD2RrsLrfw9EAUqO0qRJe6M1ISHgCq8CYyqOhNf6DR5UMEQ
+GfnTKB7U0VEwKbOukGfWHwpjscWpxkIxYxeU72nLL/qMFH3EQxiJ2fAyQOaA4kZf
+5ePBAFmo+eggvIksDkc0C+pXwlM2/KfUrzHN/gLldfq5Jwn58/U7yn2fqSLLiMmq
+0Uc9NneoWWRrJ8/vJ8HjJLWG965+Mk2weWjROeiQWMODvA8s1pfrzgzhIMfatz7D
+P78v3DSk+yshzWePS/Tj6tQ/50+6uaWTRRxmHyH6ZF5v4HaUMst19W7l9o/HuKTM
+qJZ9ZPskWkoDbGs4xugDQ5r3V7mzKWmTOPQD8rv7gmsHINFSH5pkAnuYZttcTVoP
+0ISVoDwUQwbKytu4QTbaakRnh6+v40URFWkIsr4WOZckbxJF0WddCajJFdr60qZf
+E2Efv4WstK2tBZQIgx51F9NxO5NQI1mg7TyRVJ12AMXDuDjb
+-----END CERTIFICATE-----
diff --git a/packages/certifi/core.py b/packages/certifi/core.py
new file mode 100644
index 000000000..5d2b8cd32
--- /dev/null
+++ b/packages/certifi/core.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+"""
+certifi.py
+~~~~~~~~~~
+
+This module returns the installation location of cacert.pem or its contents.
+"""
+import os
+
+try:
+ from importlib.resources import path as get_path, read_text
+
+ _CACERT_CTX = None
+ _CACERT_PATH = None
+
+ def where():
+ # This is slightly terrible, but we want to delay extracting the file
+ # in cases where we're inside of a zipimport situation until someone
+ # actually calls where(), but we don't want to re-extract the file
+ # on every call of where(), so we'll do it once then store it in a
+ # global variable.
+ global _CACERT_CTX
+ global _CACERT_PATH
+ if _CACERT_PATH is None:
+ # This is slightly janky, the importlib.resources API wants you to
+ # manage the cleanup of this file, so it doesn't actually return a
+ # path, it returns a context manager that will give you the path
+ # when you enter it and will do any cleanup when you leave it. In
+ # the common case of not needing a temporary file, it will just
+ # return the file system location and the __exit__() is a no-op.
+ #
+ # We also have to hold onto the actual context manager, because
+ # it will do the cleanup whenever it gets garbage collected, so
+ # we will also store that at the global level as well.
+ _CACERT_CTX = get_path("certifi", "cacert.pem")
+ _CACERT_PATH = str(_CACERT_CTX.__enter__())
+
+ return _CACERT_PATH
+
+
+except ImportError:
+ # This fallback will work for Python versions prior to 3.7 that lack the
+ # importlib.resources module but relies on the existing `where` function
+ # so won't address issues with environments like PyOxidizer that don't set
+ # __file__ on modules.
+ def read_text(_module, _path, encoding="ascii"):
+ with open(where(), "r", encoding=encoding) as data:
+ return data.read()
+
+ # If we don't have importlib.resources, then we will just do the old logic
+ # of assuming we're on the filesystem and munge the path directly.
+ def where():
+ f = os.path.dirname(__file__)
+
+ return os.path.join(f, "cacert.pem")
+
+
+def contents():
+ return read_text("certifi", "cacert.pem", encoding="ascii")
diff --git a/packages/h11/__init__.py b/packages/h11/__init__.py
new file mode 100644
index 000000000..ae39e0120
--- /dev/null
+++ b/packages/h11/__init__.py
@@ -0,0 +1,21 @@
+# A highish-level implementation of the HTTP/1.1 wire protocol (RFC 7230),
+# containing no networking code at all, loosely modelled on hyper-h2's generic
+# implementation of HTTP/2 (and in particular the h2.connection.H2Connection
+# class). There's still a bunch of subtle details you need to get right if you
+# want to make this actually useful, because it doesn't implement all the
+# semantics to check that what you're asking to write to the wire is sensible,
+# but at least it gets you out of dealing with the wire itself.
+
+from ._connection import *
+from ._events import *
+from ._state import *
+from ._util import LocalProtocolError, ProtocolError, RemoteProtocolError
+from ._version import __version__
+
+PRODUCT_ID = "python-h11/" + __version__
+
+
+__all__ = ["ProtocolError", "LocalProtocolError", "RemoteProtocolError"]
+__all__ += _events.__all__
+__all__ += _connection.__all__
+__all__ += _state.__all__
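# --- Editor's sketch, not part of the diff: the sans-I/O style described in
# the header comment above, assuming the vendored h11 package is importable.
# send() only returns the bytes that would go on the wire; the caller owns the
# transport, so no sockets are involved here.
import h11

conn = h11.Connection(our_role=h11.CLIENT)
request = h11.Request(
    method="GET",
    target="/",
    headers=[("Host", "example.com"), ("Connection", "close")],
)
# Serialize the request plus its end-of-message marker to raw HTTP/1.1 bytes.
wire_bytes = conn.send(request) + conn.send(h11.EndOfMessage())
print(wire_bytes.decode("ascii"))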
diff --git a/packages/h11/_abnf.py b/packages/h11/_abnf.py
new file mode 100644
index 000000000..e6d49e1ea
--- /dev/null
+++ b/packages/h11/_abnf.py
@@ -0,0 +1,129 @@
+# We use native strings for all the re patterns, to take advantage of string
+# formatting, and then convert to bytestrings when compiling the final re
+# objects.
+
+# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#whitespace
+# OWS = *( SP / HTAB )
+# ; optional whitespace
+OWS = r"[ \t]*"
+
+# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#rule.token.separators
+# token = 1*tchar
+#
+# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
+# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
+# / DIGIT / ALPHA
+# ; any VCHAR, except delimiters
+token = r"[-!#$%&'*+.^_`|~0-9a-zA-Z]+"
+
+# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#header.fields
+# field-name = token
+field_name = token
+
+# The standard says:
+#
+# field-value = *( field-content / obs-fold )
+# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
+# field-vchar = VCHAR / obs-text
+# obs-fold = CRLF 1*( SP / HTAB )
+# ; obsolete line folding
+# ; see Section 3.2.4
+#
+# https://tools.ietf.org/html/rfc5234#appendix-B.1
+#
+# VCHAR = %x21-7E
+# ; visible (printing) characters
+#
+# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#rule.quoted-string
+# obs-text = %x80-FF
+#
+# However, the standard definition of field-content is WRONG! It disallows
+# fields containing a single visible character surrounded by whitespace,
+# e.g. "foo a bar".
+#
+# See: https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4189
+#
+# So our definition of field_content attempts to fix it up...
+#
+# Also, we allow lots of control characters, because apparently people assume
+# that they're legal in practice (e.g., google analytics makes cookies with
+# \x01 in them!):
+# https://github.com/python-hyper/h11/issues/57
+# We still don't allow NUL or whitespace, because those are often treated as
+# meta-characters and letting them through can lead to nasty issues like SSRF.
+vchar = r"[\x21-\x7e]"
+vchar_or_obs_text = r"[^\x00\s]"
+field_vchar = vchar_or_obs_text
+field_content = r"{field_vchar}+(?:[ \t]+{field_vchar}+)*".format(**globals())
+
+# We handle obs-fold at a different level, and our fixed-up field_content
+# already grows to swallow the whole value, so ? instead of *
+field_value = r"({field_content})?".format(**globals())
+
+# header-field = field-name ":" OWS field-value OWS
+header_field = (
+ r"(?P{field_name})"
+ r":"
+ r"{OWS}"
+ r"(?P{field_value})"
+ r"{OWS}".format(**globals())
+)
+
+# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#request.line
+#
+# request-line = method SP request-target SP HTTP-version CRLF
+# method = token
+# HTTP-version = HTTP-name "/" DIGIT "." DIGIT
+# HTTP-name = %x48.54.54.50 ; "HTTP", case-sensitive
+#
+# request-target is complicated (see RFC 7230 sec 5.3) -- could be path, full
+# URL, host+port (for connect), or even "*", but in any case we are guaranteed
+# that it consists of the visible printing characters.
+method = token
+request_target = r"{vchar}+".format(**globals())
+http_version = r"HTTP/(?P[0-9]\.[0-9])"
+request_line = (
+ r"(?P{method})"
+ r" "
+ r"(?P{request_target})"
+ r" "
+ r"{http_version}".format(**globals())
+)
+
+# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#status.line
+#
+# status-line = HTTP-version SP status-code SP reason-phrase CRLF
+# status-code = 3DIGIT
+# reason-phrase = *( HTAB / SP / VCHAR / obs-text )
+status_code = r"[0-9]{3}"
+reason_phrase = r"([ \t]|{vchar_or_obs_text})*".format(**globals())
+status_line = (
+ r"{http_version}"
+ r" "
+ r"(?P{status_code})"
+ # However, there are apparently a few too many servers out there that just
+ # leave out the reason phrase:
+ # https://github.com/scrapy/scrapy/issues/345#issuecomment-281756036
+ # https://github.com/seanmonstar/httparse/issues/29
+ # so make it optional. ?: is a non-capturing group.
+ r"(?: (?P{reason_phrase}))?".format(**globals())
+)
+
+HEXDIG = r"[0-9A-Fa-f]"
+# Actually
+#
+# chunk-size = 1*HEXDIG
+#
+# but we impose an upper-limit to avoid ridiculosity. len(str(2**64)) == 20
+chunk_size = r"({HEXDIG}){{1,20}}".format(**globals())
+# Actually
+#
+# chunk-ext = *( ";" chunk-ext-name [ "=" chunk-ext-val ] )
+#
+# but we aren't parsing the things so we don't really care.
+chunk_ext = r";.*"
+chunk_header = (
+ r"(?P{chunk_size})"
+ r"(?P{chunk_ext})?"
+ r"\r\n".format(**globals())
+)
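# --- Editor's sketch, not part of the diff: how the native-string patterns
# above are meant to be used, assuming the vendored h11 package is importable.
# They are encoded to ASCII, compiled to bytes patterns, and the named groups
# are what the rest of h11 pulls request components out of.
import re

from h11._abnf import request_line

request_line_re = re.compile(request_line.encode("ascii"))
match = request_line_re.fullmatch(b"GET /index.html HTTP/1.1")
print(match.group("method"), match.group("target"), match.group("http_version"))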
diff --git a/packages/h11/_connection.py b/packages/h11/_connection.py
new file mode 100644
index 000000000..6f796ef51
--- /dev/null
+++ b/packages/h11/_connection.py
@@ -0,0 +1,585 @@
+# This contains the main Connection class. Everything in h11 revolves around
+# this.
+
+from ._events import * # Import all event types
+from ._headers import get_comma_header, has_expect_100_continue, set_comma_header
+from ._readers import READERS
+from ._receivebuffer import ReceiveBuffer
+from ._state import * # Import all state sentinels
+from ._state import _SWITCH_CONNECT, _SWITCH_UPGRADE, ConnectionState
+from ._util import ( # Import the internal things we need
+ LocalProtocolError,
+ make_sentinel,
+ RemoteProtocolError,
+)
+from ._writers import WRITERS
+
+# Everything in __all__ gets re-exported as part of the h11 public API.
+__all__ = ["Connection", "NEED_DATA", "PAUSED"]
+
+NEED_DATA = make_sentinel("NEED_DATA")
+PAUSED = make_sentinel("PAUSED")
+
+# If we ever have this much buffered without it making a complete parseable
+# event, we error out. The only time we really buffer is when reading the
+# request/response line + headers together, so this is effectively the limit on
+# the size of that.
+#
+# Some precedents for defaults:
+# - node.js: 80 * 1024
+# - tomcat: 8 * 1024
+# - IIS: 16 * 1024
+# - Apache: <8 KiB per line>
+DEFAULT_MAX_INCOMPLETE_EVENT_SIZE = 16 * 1024
+
+# RFC 7230's rules for connection lifecycles:
+# - If either side says they want to close the connection, then the connection
+# must close.
+# - HTTP/1.1 defaults to keep-alive unless someone says Connection: close
+# - HTTP/1.0 defaults to close unless both sides say Connection: keep-alive
+# (and even this is a mess -- e.g. if you're implementing a proxy then
+# sending Connection: keep-alive is forbidden).
+#
+# We simplify life by simply not supporting keep-alive with HTTP/1.0 peers. So
+# our rule is:
+# - If someone says Connection: close, we will close
+# - If someone uses HTTP/1.0, we will close.
+def _keep_alive(event):
+ connection = get_comma_header(event.headers, b"connection")
+ if b"close" in connection:
+ return False
+ if getattr(event, "http_version", b"1.1") < b"1.1":
+ return False
+ return True
+
+
+def _body_framing(request_method, event):
+ # Called when we enter SEND_BODY to figure out framing information for
+ # this body.
+ #
+ # These are the only two events that can trigger a SEND_BODY state:
+ assert type(event) in (Request, Response)
+ # Returns one of:
+ #
+ # ("content-length", count)
+ # ("chunked", ())
+ # ("http/1.0", ())
+ #
+ # which are (lookup key, *args) for constructing body reader/writer
+ # objects.
+ #
+ # Reference: https://tools.ietf.org/html/rfc7230#section-3.3.3
+ #
+ # Step 1: some responses always have an empty body, regardless of what the
+ # headers say.
+ if type(event) is Response:
+ if (
+ event.status_code in (204, 304)
+ or request_method == b"HEAD"
+ or (request_method == b"CONNECT" and 200 <= event.status_code < 300)
+ ):
+ return ("content-length", (0,))
+ # Section 3.3.3 also lists another case -- responses with status_code
+ # < 200. For us these are InformationalResponses, not Responses, so
+ # they can't get into this function in the first place.
+ assert event.status_code >= 200
+
+ # Step 2: check for Transfer-Encoding (T-E beats C-L):
+ transfer_encodings = get_comma_header(event.headers, b"transfer-encoding")
+ if transfer_encodings:
+ assert transfer_encodings == [b"chunked"]
+ return ("chunked", ())
+
+ # Step 3: check for Content-Length
+ content_lengths = get_comma_header(event.headers, b"content-length")
+ if content_lengths:
+ return ("content-length", (int(content_lengths[0]),))
+
+ # Step 4: no applicable headers; fallback/default depends on type
+ if type(event) is Request:
+ return ("content-length", (0,))
+ else:
+ return ("http/1.0", ())
+
+
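# --- Editor's sketch, not part of the diff: what _body_framing() above returns
# for a few representative cases, following the RFC 7230 section 3.3.3 steps,
# assuming the vendored h11 package is importable.
import h11
from h11._connection import _body_framing

# Step 1: a response to HEAD never has a body, whatever the headers claim.
head_resp = h11.Response(status_code=200, headers=[("Content-Length", "10")])
print(_body_framing(b"HEAD", head_resp))    # ('content-length', (0,))

# Step 2: Transfer-Encoding: chunked wins over Content-Length.
chunked_resp = h11.Response(status_code=200, headers=[("Transfer-Encoding", "chunked")])
print(_body_framing(b"GET", chunked_resp))  # ('chunked', ())

# Step 4: a response with no framing headers falls back to read-until-EOF.
plain_resp = h11.Response(status_code=200, headers=[])
print(_body_framing(b"GET", plain_resp))    # ('http/1.0', ())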
+################################################################
+#
+# The main Connection class
+#
+################################################################
+
+
+class Connection:
+ """An object encapsulating the state of an HTTP connection.
+
+ Args:
+ our_role: If you're implementing a client, pass :data:`h11.CLIENT`. If
+ you're implementing a server, pass :data:`h11.SERVER`.
+
+ max_incomplete_event_size (int):
+ The maximum number of bytes we're willing to buffer of an
+ incomplete event. In practice this mostly sets a limit on the
+ maximum size of the request/response line + headers. If this is
+ exceeded, then :meth:`next_event` will raise
+ :exc:`RemoteProtocolError`.
+
+ """
+
+ def __init__(
+ self, our_role, max_incomplete_event_size=DEFAULT_MAX_INCOMPLETE_EVENT_SIZE
+ ):
+ self._max_incomplete_event_size = max_incomplete_event_size
+ # State and role tracking
+ if our_role not in (CLIENT, SERVER):
+ raise ValueError("expected CLIENT or SERVER, not {!r}".format(our_role))
+ self.our_role = our_role
+ if our_role is CLIENT:
+ self.their_role = SERVER
+ else:
+ self.their_role = CLIENT
+ self._cstate = ConnectionState()
+
+ # Callables for converting data->events or vice-versa given the
+ # current state
+ self._writer = self._get_io_object(self.our_role, None, WRITERS)
+ self._reader = self._get_io_object(self.their_role, None, READERS)
+
+ # Holds any unprocessed received data
+ self._receive_buffer = ReceiveBuffer()
+ # If this is true, then it indicates that the incoming connection was
+ # closed *after* the end of whatever's in self._receive_buffer:
+ self._receive_buffer_closed = False
+
+ # Extra bits of state that don't fit into the state machine.
+ #
+ # These two are only used to interpret framing headers for figuring
+ # out how to read/write response bodies. their_http_version is also
+ # made available as a convenient public API.
+ self.their_http_version = None
+ self._request_method = None
+ # This is pure flow-control and doesn't at all affect the set of legal
+ # transitions, so no need to bother ConnectionState with it:
+ self.client_is_waiting_for_100_continue = False
+
+ @property
+ def states(self):
+ """A dictionary like::
+
+ {CLIENT: <client state>, SERVER: <server state>}
+
+ See :ref:`state-machine` for details.
+
+ """
+ return dict(self._cstate.states)
+
+ @property
+ def our_state(self):
+ """The current state of whichever role we are playing. See
+ :ref:`state-machine` for details.
+ """
+ return self._cstate.states[self.our_role]
+
+ @property
+ def their_state(self):
+ """The current state of whichever role we are NOT playing. See
+ :ref:`state-machine` for details.
+ """
+ return self._cstate.states[self.their_role]
+
+ @property
+ def they_are_waiting_for_100_continue(self):
+ return self.their_role is CLIENT and self.client_is_waiting_for_100_continue
+
+ def start_next_cycle(self):
+ """Attempt to reset our connection state for a new request/response
+ cycle.
+
+ If both client and server are in :data:`DONE` state, then resets them
+ both to :data:`IDLE` state in preparation for a new request/response
+ cycle on this same connection. Otherwise, raises a
+ :exc:`LocalProtocolError`.
+
+ See :ref:`keepalive-and-pipelining`.
+
+ """
+ old_states = dict(self._cstate.states)
+ self._cstate.start_next_cycle()
+ self._request_method = None
+ # self.their_http_version gets left alone, since it presumably lasts
+ # beyond a single request/response cycle
+ assert not self.client_is_waiting_for_100_continue
+ self._respond_to_state_changes(old_states)
+
+ def _process_error(self, role):
+ old_states = dict(self._cstate.states)
+ self._cstate.process_error(role)
+ self._respond_to_state_changes(old_states)
+
+ def _server_switch_event(self, event):
+ if type(event) is InformationalResponse and event.status_code == 101:
+ return _SWITCH_UPGRADE
+ if type(event) is Response:
+ if (
+ _SWITCH_CONNECT in self._cstate.pending_switch_proposals
+ and 200 <= event.status_code < 300
+ ):
+ return _SWITCH_CONNECT
+ return None
+
+ # All events go through here
+ def _process_event(self, role, event):
+ # First, pass the event through the state machine to make sure it
+ # succeeds.
+ old_states = dict(self._cstate.states)
+ if role is CLIENT and type(event) is Request:
+ if event.method == b"CONNECT":
+ self._cstate.process_client_switch_proposal(_SWITCH_CONNECT)
+ if get_comma_header(event.headers, b"upgrade"):
+ self._cstate.process_client_switch_proposal(_SWITCH_UPGRADE)
+ server_switch_event = None
+ if role is SERVER:
+ server_switch_event = self._server_switch_event(event)
+ self._cstate.process_event(role, type(event), server_switch_event)
+
+ # Then perform the updates triggered by it.
+
+ # self._request_method
+ if type(event) is Request:
+ self._request_method = event.method
+
+ # self.their_http_version
+ if role is self.their_role and type(event) in (
+ Request,
+ Response,
+ InformationalResponse,
+ ):
+ self.their_http_version = event.http_version
+
+ # Keep alive handling
+ #
+ # RFC 7230 doesn't really say what one should do if Connection: close
+ # shows up on a 1xx InformationalResponse. I think the idea is that
+ # this is not supposed to happen. In any case, if it does happen, we
+ # ignore it.
+ if type(event) in (Request, Response) and not _keep_alive(event):
+ self._cstate.process_keep_alive_disabled()
+
+ # 100-continue
+ if type(event) is Request and has_expect_100_continue(event):
+ self.client_is_waiting_for_100_continue = True
+ if type(event) in (InformationalResponse, Response):
+ self.client_is_waiting_for_100_continue = False
+ if role is CLIENT and type(event) in (Data, EndOfMessage):
+ self.client_is_waiting_for_100_continue = False
+
+ self._respond_to_state_changes(old_states, event)
+
+ def _get_io_object(self, role, event, io_dict):
+ # event may be None; it's only used when entering SEND_BODY
+ state = self._cstate.states[role]
+ if state is SEND_BODY:
+ # Special case: the io_dict has a dict of reader/writer factories
+ # that depend on the request/response framing.
+ framing_type, args = _body_framing(self._request_method, event)
+ return io_dict[SEND_BODY][framing_type](*args)
+ else:
+ # General case: the io_dict just has the appropriate reader/writer
+ # for this state
+ return io_dict.get((role, state))
+
+ # This must be called after any action that might have caused
+ # self._cstate.states to change.
+ def _respond_to_state_changes(self, old_states, event=None):
+ # Update reader/writer
+ if self.our_state != old_states[self.our_role]:
+ self._writer = self._get_io_object(self.our_role, event, WRITERS)
+ if self.their_state != old_states[self.their_role]:
+ self._reader = self._get_io_object(self.their_role, event, READERS)
+
+ @property
+ def trailing_data(self):
+ """Data that has been received, but not yet processed, represented as
+ a tuple with two elements, where the first is a byte-string containing
+ the unprocessed data itself, and the second is a bool that is True if
+ the receive connection was closed.
+
+ See :ref:`switching-protocols` for discussion of why you'd want this.
+ """
+ return (bytes(self._receive_buffer), self._receive_buffer_closed)
+
+ def receive_data(self, data):
+ """Add data to our internal receive buffer.
+
+ This does not actually do any processing on the data, just stores
+ it. To trigger processing, you have to call :meth:`next_event`.
+
+ Args:
+ data (:term:`bytes-like object`):
+ The new data that was just received.
+
+ Special case: If *data* is an empty byte-string like ``b""``,
+ then this indicates that the remote side has closed the
+ connection (end of file). Normally this is convenient, because
+ standard Python APIs like :meth:`file.read` or
+ :meth:`socket.recv` use ``b""`` to indicate end-of-file, while
+ other failures to read are indicated using other mechanisms
+ like raising :exc:`TimeoutError`. When using such an API you
+ can just blindly pass through whatever you get from ``read``
+ to :meth:`receive_data`, and everything will work.
+
+ But, if you have an API where reading an empty string is a
+ valid non-EOF condition, then you need to be aware of this and
+ make sure to check for such strings and avoid passing them to
+ :meth:`receive_data`.
+
+ Returns:
+ Nothing, but after calling this you should call :meth:`next_event`
+ to parse the newly received data.
+
+ Raises:
+ RuntimeError:
+ Raised if you pass an empty *data*, indicating EOF, and then
+ pass a non-empty *data*, indicating more data that somehow
+ arrived after the EOF.
+
+ (Calling ``receive_data(b"")`` multiple times is fine,
+ and equivalent to calling it once.)
+
+ """
+ if data:
+ if self._receive_buffer_closed:
+ raise RuntimeError("received close, then received more data?")
+ self._receive_buffer += data
+ else:
+ self._receive_buffer_closed = True
+
+ def _extract_next_receive_event(self):
+ state = self.their_state
+ # We don't pause immediately when they enter DONE, because even in
+ # DONE state we can still process a ConnectionClosed() event. But
+ # if we have data in our buffer, then we definitely aren't getting
+ # a ConnectionClosed() immediately and we need to pause.
+ if state is DONE and self._receive_buffer:
+ return PAUSED
+ if state is MIGHT_SWITCH_PROTOCOL or state is SWITCHED_PROTOCOL:
+ return PAUSED
+ assert self._reader is not None
+ event = self._reader(self._receive_buffer)
+ if event is None:
+ if not self._receive_buffer and self._receive_buffer_closed:
+ # In some unusual cases (basically just HTTP/1.0 bodies), EOF
+ # triggers an actual protocol event; in that case, we want to
+ # return that event, and then the state will change and we'll
+ # get called again to generate the actual ConnectionClosed().
+ if hasattr(self._reader, "read_eof"):
+ event = self._reader.read_eof()
+ else:
+ event = ConnectionClosed()
+ if event is None:
+ event = NEED_DATA
+ return event
+
+ def next_event(self):
+ """Parse the next event out of our receive buffer, update our internal
+ state, and return it.
+
+ This is a mutating operation -- think of it like calling :func:`next`
+ on an iterator.
+
+ Returns:
+ : One of three things:
+
+ 1) An event object -- see :ref:`events`.
+
+ 2) The special constant :data:`NEED_DATA`, which indicates that
+ you need to read more data from your socket and pass it to
+ :meth:`receive_data` before this method will be able to return
+ any more events.
+
+ 3) The special constant :data:`PAUSED`, which indicates that we
+ are not in a state where we can process incoming data (usually
+ because the peer has finished their part of the current
+ request/response cycle, and you have not yet called
+ :meth:`start_next_cycle`). See :ref:`flow-control` for details.
+
+ Raises:
+ RemoteProtocolError:
+ The peer has misbehaved. You should close the connection
+ (possibly after sending some kind of 4xx response).
+
+ Once this method returns :class:`ConnectionClosed` once, then all
+ subsequent calls will also return :class:`ConnectionClosed`.
+
+ If this method raises any exception besides :exc:`RemoteProtocolError`
+ then that's a bug -- if it happens please file a bug report!
+
+ If this method raises any exception then it also sets
+ :attr:`Connection.their_state` to :data:`ERROR` -- see
+ :ref:`error-handling` for discussion.
+
+ """
+
+ if self.their_state is ERROR:
+ raise RemoteProtocolError("Can't receive data when peer state is ERROR")
+ try:
+ event = self._extract_next_receive_event()
+ if event not in [NEED_DATA, PAUSED]:
+ self._process_event(self.their_role, event)
+ if event is NEED_DATA:
+ if len(self._receive_buffer) > self._max_incomplete_event_size:
+ # 431 is "Request header fields too large" which is pretty
+ # much the only situation where we can get here
+ raise RemoteProtocolError(
+ "Receive buffer too long", error_status_hint=431
+ )
+ if self._receive_buffer_closed:
+ # We're still trying to complete some event, but that's
+ # never going to happen because no more data is coming
+ raise RemoteProtocolError("peer unexpectedly closed connection")
+ return event
+ except BaseException as exc:
+ self._process_error(self.their_role)
+ if isinstance(exc, LocalProtocolError):
+ exc._reraise_as_remote_protocol_error()
+ else:
+ raise
+
+ def send(self, event):
+ """Convert a high-level event into bytes that can be sent to the peer,
+ while updating our internal state machine.
+
+ Args:
+ event: The :ref:`event <events>` to send.
+
+ Returns:
+ If ``type(event) is ConnectionClosed``, then returns
+ ``None``. Otherwise, returns a :term:`bytes-like object`.
+
+ Raises:
+ LocalProtocolError:
+ Sending this event at this time would violate our
+ understanding of the HTTP/1.1 protocol.
+
+ If this method raises any exception then it also sets
+ :attr:`Connection.our_state` to :data:`ERROR` -- see
+ :ref:`error-handling` for discussion.
+
+ """
+ data_list = self.send_with_data_passthrough(event)
+ if data_list is None:
+ return None
+ else:
+ return b"".join(data_list)
+
+ def send_with_data_passthrough(self, event):
+ """Identical to :meth:`send`, except that in situations where
+ :meth:`send` returns a single :term:`bytes-like object`, this instead
+ returns a list of them -- and when sending a :class:`Data` event, this
+ list is guaranteed to contain the exact object you passed in as
+ :attr:`Data.data`. See :ref:`sendfile` for discussion.
+
+ """
+ if self.our_state is ERROR:
+ raise LocalProtocolError("Can't send data when our state is ERROR")
+ try:
+ if type(event) is Response:
+ self._clean_up_response_headers_for_sending(event)
+ # We want to call _process_event before calling the writer,
+ # because if someone tries to do something invalid then this will
+ # give a sensible error message, while our writers all just assume
+ # they will only receive valid events. But, _process_event might
+ # change self._writer. So we have to do a little dance:
+ writer = self._writer
+ self._process_event(self.our_role, event)
+ if type(event) is ConnectionClosed:
+ return None
+ else:
+ # In any situation where writer is None, process_event should
+ # have raised ProtocolError
+ assert writer is not None
+ data_list = []
+ writer(event, data_list.append)
+ return data_list
+ except:
+ self._process_error(self.our_role)
+ raise
+
+ def send_failed(self):
+ """Notify the state machine that we failed to send the data it gave
+ us.
+
+ This causes :attr:`Connection.our_state` to immediately become
+ :data:`ERROR` -- see :ref:`error-handling` for discussion.
+
+ """
+ self._process_error(self.our_role)
+
+ # When sending a Response, we take responsibility for a few things:
+ #
+ # - Sometimes you MUST set Connection: close. We take care of those
+ # times. (You can also set it yourself if you want, and if you do then
+ # we'll respect that and close the connection at the right time. But you
+ # don't have to worry about that unless you want to.)
+ #
+ # - The user has to set Content-Length if they want it. Otherwise, for
+ # responses that have bodies (e.g. not HEAD), then we will automatically
+ # select the right mechanism for streaming a body of unknown length,
+ # which depends on the peer's HTTP version.
+ #
+ # This function's *only* responsibility is making sure headers are set up
+ # right -- everything downstream just looks at the headers. There are no
+ # side channels. It mutates the response event in-place (but not the
+ # response.headers list object).
+ def _clean_up_response_headers_for_sending(self, response):
+ assert type(response) is Response
+
+ headers = response.headers
+ need_close = False
+
+ # HEAD requests need some special handling: they always act like they
+ # have Content-Length: 0, and that's how _body_framing treats
+ # them. But their headers are supposed to match what we would send if
+ # the request was a GET. (Technically there is one deviation allowed:
+ # we're allowed to leave out the framing headers -- see
+ # https://tools.ietf.org/html/rfc7231#section-4.3.2 . But it's just as
+ # easy to get them right.)
+ method_for_choosing_headers = self._request_method
+ if method_for_choosing_headers == b"HEAD":
+ method_for_choosing_headers = b"GET"
+ framing_type, _ = _body_framing(method_for_choosing_headers, response)
+ if framing_type in ("chunked", "http/1.0"):
+ # This response has a body of unknown length.
+ # If our peer is HTTP/1.1, we use Transfer-Encoding: chunked
+ # If our peer is HTTP/1.0, we use no framing headers, and close the
+ # connection afterwards.
+ #
+ # Make sure to clear Content-Length (in principle user could have
+ # set both and then we ignored Content-Length b/c
+ # Transfer-Encoding overwrote it -- this would be naughty of them,
+ # but the HTTP spec says that if our peer does this then we have
+ # to fix it instead of erroring out, so we'll accord the user the
+ # same respect).
+ headers = set_comma_header(headers, b"content-length", [])
+ if self.their_http_version is None or self.their_http_version < b"1.1":
+ # Either we never got a valid request and are sending back an
+ # error (their_http_version is None), so we assume the worst;
+ # or else we did get a valid HTTP/1.0 request, so we know that
+ # they don't understand chunked encoding.
+ headers = set_comma_header(headers, b"transfer-encoding", [])
+ # This is actually redundant ATM, since currently we
+ # unconditionally disable keep-alive when talking to HTTP/1.0
+ # peers. But let's be defensive just in case we add
+ # Connection: keep-alive support later:
+ if self._request_method != b"HEAD":
+ need_close = True
+ else:
+ headers = set_comma_header(headers, b"transfer-encoding", ["chunked"])
+
+ if not self._cstate.keep_alive or need_close:
+ # Make sure Connection: close is set
+ connection = set(get_comma_header(headers, b"connection"))
+ connection.discard(b"keep-alive")
+ connection.add(b"close")
+ headers = set_comma_header(headers, b"connection", sorted(connection))
+
+ response.headers = headers
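+
+
+# ---------------------------------------------------------------------------
+# Illustrative sketch (not part of h11 itself): one way a blocking server
+# loop might drive the Connection API documented above. The socket object
+# `sock` and the 4096-byte read size are hypothetical choices.
+#
+#     conn = Connection(our_role=SERVER)
+#     while True:
+#         event = conn.next_event()
+#         if event is NEED_DATA:
+#             # b"" from recv() means EOF, which receive_data() understands.
+#             conn.receive_data(sock.recv(4096))
+#         elif type(event) is Request:
+#             sock.sendall(conn.send(Response(status_code=200, headers=[])))
+#             sock.sendall(conn.send(EndOfMessage()))
+#         elif type(event) in (EndOfMessage, ConnectionClosed):
+#             break
+# ---------------------------------------------------------------------------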
diff --git a/packages/h11/_events.py b/packages/h11/_events.py
new file mode 100644
index 000000000..182793011
--- /dev/null
+++ b/packages/h11/_events.py
@@ -0,0 +1,302 @@
+# High level events that make up HTTP/1.1 conversations. Loosely inspired by
+# the corresponding events in hyper-h2:
+#
+# http://python-hyper.org/h2/en/stable/api.html#events
+#
+# Don't subclass these. Stuff will break.
+
+import re
+
+from . import _headers
+from ._abnf import request_target
+from ._util import bytesify, LocalProtocolError, validate
+
+# Everything in __all__ gets re-exported as part of the h11 public API.
+__all__ = [
+ "Request",
+ "InformationalResponse",
+ "Response",
+ "Data",
+ "EndOfMessage",
+ "ConnectionClosed",
+]
+
+request_target_re = re.compile(request_target.encode("ascii"))
+
+
+class _EventBundle:
+ _fields = []
+ _defaults = {}
+
+ def __init__(self, **kwargs):
+ _parsed = kwargs.pop("_parsed", False)
+ allowed = set(self._fields)
+ for kwarg in kwargs:
+ if kwarg not in allowed:
+ raise TypeError(
+ "unrecognized kwarg {} for {}".format(
+ kwarg, self.__class__.__name__
+ )
+ )
+ required = allowed.difference(self._defaults)
+ for field in required:
+ if field not in kwargs:
+ raise TypeError(
+ "missing required kwarg {} for {}".format(
+ field, self.__class__.__name__
+ )
+ )
+ self.__dict__.update(self._defaults)
+ self.__dict__.update(kwargs)
+
+ # Special handling for some fields
+
+ if "headers" in self.__dict__:
+ self.headers = _headers.normalize_and_validate(
+ self.headers, _parsed=_parsed
+ )
+
+ if not _parsed:
+ for field in ["method", "target", "http_version", "reason"]:
+ if field in self.__dict__:
+ self.__dict__[field] = bytesify(self.__dict__[field])
+
+ if "status_code" in self.__dict__:
+ if not isinstance(self.status_code, int):
+ raise LocalProtocolError("status code must be integer")
+ # Because IntEnum objects are instances of int, but aren't
+ # duck-compatible (sigh), see gh-72.
+ self.status_code = int(self.status_code)
+
+ self._validate()
+
+ def _validate(self):
+ pass
+
+ def __repr__(self):
+ name = self.__class__.__name__
+ kwarg_strs = [
+ "{}={}".format(field, self.__dict__[field]) for field in self._fields
+ ]
+ kwarg_str = ", ".join(kwarg_strs)
+ return "{}({})".format(name, kwarg_str)
+
+ # Useful for tests
+ def __eq__(self, other):
+ return self.__class__ == other.__class__ and self.__dict__ == other.__dict__
+
+ # This is an unhashable type.
+ __hash__ = None
+
+
+class Request(_EventBundle):
+ """The beginning of an HTTP request.
+
+ Fields:
+
+ .. attribute:: method
+
+ An HTTP method, e.g. ``b"GET"`` or ``b"POST"``. Always a byte
+ string. :term:`Bytes-like objects <bytes-like object>` and native
+ strings containing only ascii characters will be automatically
+ converted to byte strings.
+
+ .. attribute:: target
+
+ The target of an HTTP request, e.g. ``b"/index.html"``, or one of the
+ more exotic formats described in `RFC 7230, section 5.3
+ <https://tools.ietf.org/html/rfc7230#section-5.3>`_. Always a byte
+ string. :term:`Bytes-like objects <bytes-like object>` and native
+ strings containing only ascii characters will be automatically
+ converted to byte strings.
+
+ .. attribute:: headers
+
+ Request headers, represented as a list of (name, value) pairs. See
+ :ref:`the header normalization rules <headers-format>` for details.
+
+ .. attribute:: http_version
+
+ The HTTP protocol version, represented as a byte string like
+ ``b"1.1"``. See :ref:`the HTTP version normalization rules
+ <http_version-format>` for details.
+
+ """
+
+ _fields = ["method", "target", "headers", "http_version"]
+ _defaults = {"http_version": b"1.1"}
+
+ def _validate(self):
+ # "A server MUST respond with a 400 (Bad Request) status code to any
+ # HTTP/1.1 request message that lacks a Host header field and to any
+ # request message that contains more than one Host header field or a
+ # Host header field with an invalid field-value."
+ # -- https://tools.ietf.org/html/rfc7230#section-5.4
+ host_count = 0
+ for name, value in self.headers:
+ if name == b"host":
+ host_count += 1
+ if self.http_version == b"1.1" and host_count == 0:
+ raise LocalProtocolError("Missing mandatory Host: header")
+ if host_count > 1:
+ raise LocalProtocolError("Found multiple Host: headers")
+
+ validate(request_target_re, self.target, "Illegal target characters")
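+
+ # An illustrative sketch of the Host rule above (hypothetical usage, not
+ # part of h11 itself):
+ #
+ # Request(method="GET", target="/", headers=[("Host", "example.com")])
+ # # -> accepted; headers normalize to [(b"host", b"example.com")]
+ #
+ # Request(method="GET", target="/", headers=[])
+ # # -> raises LocalProtocolError("Missing mandatory Host: header")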
+
+
+class _ResponseBase(_EventBundle):
+ _fields = ["status_code", "headers", "http_version", "reason"]
+ _defaults = {"http_version": b"1.1", "reason": b""}
+
+
+class InformationalResponse(_ResponseBase):
+ """An HTTP informational response.
+
+ Fields:
+
+ .. attribute:: status_code
+
+ The status code of this response, as an integer. For an
+ :class:`InformationalResponse`, this is always in the range [100,
+ 200).
+
+ .. attribute:: headers
+
+ Request headers, represented as a list of (name, value) pairs. See
+ :ref:`the header normalization rules <headers-format>` for
+ details.
+
+ .. attribute:: http_version
+
+ The HTTP protocol version, represented as a byte string like
+ ``b"1.1"``. See :ref:`the HTTP version normalization rules
+ <http_version-format>` for details.
+
+ .. attribute:: reason
+
+ The reason phrase of this response, as a byte string. For example:
+ ``b"OK"``, or ``b"Not Found"``.
+
+ """
+
+ def _validate(self):
+ if not (100 <= self.status_code < 200):
+ raise LocalProtocolError(
+ "InformationalResponse status_code should be in range "
+ "[100, 200), not {}".format(self.status_code)
+ )
+
+
+class Response(_ResponseBase):
+ """The beginning of an HTTP response.
+
+ Fields:
+
+ .. attribute:: status_code
+
+ The status code of this response, as an integer. For a
+ :class:`Response`, this is always in the range [200,
+ 600).
+
+ .. attribute:: headers
+
+ Request headers, represented as a list of (name, value) pairs. See
+ :ref:`the header normalization rules <headers-format>` for details.
+
+ .. attribute:: http_version
+
+ The HTTP protocol version, represented as a byte string like
+ ``b"1.1"``. See :ref:`the HTTP version normalization rules
+ <http_version-format>` for details.
+
+ .. attribute:: reason
+
+ The reason phrase of this response, as a byte string. For example:
+ ``b"OK"``, or ``b"Not Found"``.
+
+ """
+
+ def _validate(self):
+ if not (200 <= self.status_code < 600):
+ raise LocalProtocolError(
+ "Response status_code should be in range [200, 600), not {}".format(
+ self.status_code
+ )
+ )
+
+
+class Data(_EventBundle):
+ """Part of an HTTP message body.
+
+ Fields:
+
+ .. attribute:: data
+
+ A :term:`bytes-like object` containing part of a message body. Or, if
+ using the ``combine=False`` argument to :meth:`Connection.send`, then
+ any object that your socket writing code knows what to do with, and for
+ which calling :func:`len` returns the number of bytes that will be
+ written -- see :ref:`sendfile` for details.
+
+ .. attribute:: chunk_start
+
+ A marker that indicates whether this data object is from the start of a
+ chunked transfer encoding chunk. This field is ignored when a Data
+ event is provided to :meth:`Connection.send`: it is only valid on
+ events emitted from :meth:`Connection.next_event`. You probably
+ shouldn't use this attribute at all; see
+ :ref:`chunk-delimiters-are-bad` for details.
+
+ .. attribute:: chunk_end
+
+ A marker that indicates whether this data object is the last for a
+ given chunked transfer encoding chunk. This field is ignored when
+ a Data event is provided to :meth:`Connection.send`: it is only valid
+ on events emitted from :meth:`Connection.next_event`. You probably
+ shouldn't use this attribute at all; see
+ :ref:`chunk-delimiters-are-bad` for details.
+
+ """
+
+ _fields = ["data", "chunk_start", "chunk_end"]
+ _defaults = {"chunk_start": False, "chunk_end": False}
+
+
+# XX FIXME: "A recipient MUST ignore (or consider as an error) any fields that
+# are forbidden to be sent in a trailer, since processing them as if they were
+# present in the header section might bypass external security filters."
+# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#chunked.trailer.part
+# Unfortunately, the list of forbidden fields is long and vague :-/
+class EndOfMessage(_EventBundle):
+ """The end of an HTTP message.
+
+ Fields:
+
+ .. attribute:: headers
+
+ Default value: ``[]``
+
+ Any trailing headers attached to this message, represented as a list of
+ (name, value) pairs. See :ref:`the header normalization rules
+ <headers-format>` for details.
+
+ Must be empty unless ``Transfer-Encoding: chunked`` is in use.
+
+ """
+
+ _fields = ["headers"]
+ _defaults = {"headers": []}
+
+
+class ConnectionClosed(_EventBundle):
+ """This event indicates that the sender has closed their outgoing
+ connection.
+
+ Note that this does not necessarily mean that they can't *receive* further
+ data, because TCP connections are composed of two one-way channels which
+ can be closed independently. See :ref:`closing` for details.
+
+ No fields.
+ """
+
+ pass
diff --git a/packages/h11/_headers.py b/packages/h11/_headers.py
new file mode 100644
index 000000000..7ed39bc12
--- /dev/null
+++ b/packages/h11/_headers.py
@@ -0,0 +1,242 @@
+import re
+
+from ._abnf import field_name, field_value
+from ._util import bytesify, LocalProtocolError, validate
+
+# Facts
+# -----
+#
+# Headers are:
+# keys: case-insensitive ascii
+# values: mixture of ascii and raw bytes
+#
+# "Historically, HTTP has allowed field content with text in the ISO-8859-1
+# charset [ISO-8859-1], supporting other charsets only through use of
+# [RFC2047] encoding. In practice, most HTTP header field values use only a
+# subset of the US-ASCII charset [USASCII]. Newly defined header fields SHOULD
+# limit their field values to US-ASCII octets. A recipient SHOULD treat other
+# octets in field content (obs-text) as opaque data."
+# And it deprecates all non-ascii values
+#
+# Leading/trailing whitespace in header names is forbidden
+#
+# Values get leading/trailing whitespace stripped
+#
+# Content-Disposition actually needs to contain unicode semantically; to
+# accomplish this it has a terrifically weird way of encoding the filename
+# itself as ascii (and even this still has lots of cross-browser
+# incompatibilities)
+#
+# Order is important:
+# "a proxy MUST NOT change the order of these field values when forwarding a
+# message"
+# (and there are several headers where the order indicates a preference)
+#
+# Multiple occurrences of the same header:
+# "A sender MUST NOT generate multiple header fields with the same field name
+# in a message unless either the entire field value for that header field is
+# defined as a comma-separated list [or the header is Set-Cookie which gets a
+# special exception]" - RFC 7230. (cookies are in RFC 6265)
+#
+# So every header aside from Set-Cookie can be merged by b", ".join if it
+# occurs repeatedly. But, of course, they can't necessarily be split by
+# .split(b","), because quoting.
+#
+# Given all this mess (case insensitive, duplicates allowed, order is
+# important, ...), there doesn't appear to be any standard way to handle
+# headers in Python -- they're almost like dicts, but... actually just
+# aren't. For now we punt and just use a super simple representation: headers
+# are a list of pairs
+#
+# [(name1, value1), (name2, value2), ...]
+#
+# where all entries are bytestrings, names are lowercase and have no
+# leading/trailing whitespace, and values are bytestrings with no
+# leading/trailing whitespace. Searching and updating are done via naive O(n)
+# methods.
+#
+# Maybe a dict-of-lists would be better?
+
+_content_length_re = re.compile(br"[0-9]+")
+_field_name_re = re.compile(field_name.encode("ascii"))
+_field_value_re = re.compile(field_value.encode("ascii"))
+
+
+class Headers:
+ """
+ A list-like interface that allows iterating over headers as byte-pairs
+ of (lowercased-name, value).
+
+ Internally we actually store the representation as three-tuples,
+ including both the raw original casing, in order to preserve casing
+ over-the-wire, and the lowercased name, for case-insensitive comparisons.
+
+ r = Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "example.org"), ("Connection", "keep-alive")],
+ http_version="1.1",
+ )
+ assert r.headers == [
+ (b"host", b"example.org"),
+ (b"connection", b"keep-alive")
+ ]
+ assert r.headers.raw_items() == [
+ (b"Host", b"example.org"),
+ (b"Connection", b"keep-alive")
+ ]
+ """
+
+ __slots__ = "_full_items"
+
+ def __init__(self, full_items):
+ self._full_items = full_items
+
+ def __iter__(self):
+ for _, name, value in self._full_items:
+ yield name, value
+
+ def __bool__(self):
+ return bool(self._full_items)
+
+ def __eq__(self, other):
+ return list(self) == list(other)
+
+ def __len__(self):
+ return len(self._full_items)
+
+ def __repr__(self):
+ return "" % repr(list(self))
+
+ def __getitem__(self, idx):
+ _, name, value = self._full_items[idx]
+ return (name, value)
+
+ def raw_items(self):
+ return [(raw_name, value) for raw_name, _, value in self._full_items]
+
+
+def normalize_and_validate(headers, _parsed=False):
+ new_headers = []
+ seen_content_length = None
+ saw_transfer_encoding = False
+ for name, value in headers:
+ # For headers coming out of the parser, we can safely skip some steps,
+ # because it always returns bytes and has already run these regexes
+ # over the data:
+ if not _parsed:
+ name = bytesify(name)
+ value = bytesify(value)
+ validate(_field_name_re, name, "Illegal header name {!r}", name)
+ validate(_field_value_re, value, "Illegal header value {!r}", value)
+ raw_name = name
+ name = name.lower()
+ if name == b"content-length":
+ lengths = {length.strip() for length in value.split(b",")}
+ if len(lengths) != 1:
+ raise LocalProtocolError("conflicting Content-Length headers")
+ value = lengths.pop()
+ validate(_content_length_re, value, "bad Content-Length")
+ if seen_content_length is None:
+ seen_content_length = value
+ new_headers.append((raw_name, name, value))
+ elif seen_content_length != value:
+ raise LocalProtocolError("conflicting Content-Length headers")
+ elif name == b"transfer-encoding":
+ # "A server that receives a request message with a transfer coding
+ # it does not understand SHOULD respond with 501 (Not
+ # Implemented)."
+ # https://tools.ietf.org/html/rfc7230#section-3.3.1
+ if saw_transfer_encoding:
+ raise LocalProtocolError(
+ "multiple Transfer-Encoding headers", error_status_hint=501
+ )
+ # "All transfer-coding names are case-insensitive"
+ # -- https://tools.ietf.org/html/rfc7230#section-4
+ value = value.lower()
+ if value != b"chunked":
+ raise LocalProtocolError(
+ "Only Transfer-Encoding: chunked is supported",
+ error_status_hint=501,
+ )
+ saw_transfer_encoding = True
+ new_headers.append((raw_name, name, value))
+ else:
+ new_headers.append((raw_name, name, value))
+ return Headers(new_headers)
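+
+# An illustrative sketch of normalize_and_validate() (hypothetical inputs,
+# not part of h11 itself):
+#
+# normalize_and_validate([("Content-Length", "100"), ("Host", "example.com")])
+# # -> Headers yielding [(b"content-length", b"100"), (b"host", b"example.com")],
+# #    while raw_items() still reports the original casing.
+#
+# normalize_and_validate([("Content-Length", "100"), ("Content-Length", "42")])
+# # -> raises LocalProtocolError("conflicting Content-Length headers")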
+
+
+def get_comma_header(headers, name):
+ # Should only be used for headers whose value is a list of
+ # comma-separated, case-insensitive values.
+ #
+ # The header name `name` is expected to be lower-case bytes.
+ #
+ # Connection: meets these criteria (including case insensitivity).
+ #
+ # Content-Length: technically is just a single value (1*DIGIT), but the
+ # standard makes reference to implementations that do multiple values, and
+ # using this doesn't hurt. Ditto, case insensitivity doesn't matter either
+ # way.
+ #
+ # Transfer-Encoding: is more complex (allows for quoted strings), so
+ # splitting on , is actually wrong. For example, this is legal:
+ #
+ # Transfer-Encoding: foo; options="1,2", chunked
+ #
+ # and should be parsed as
+ #
+ # foo; options="1,2"
+ # chunked
+ #
+ # but this naive function will parse it as
+ #
+ # foo; options="1
+ # 2"
+ # chunked
+ #
+ # However, this is okay because the only thing we are going to do with
+ # any Transfer-Encoding is reject ones that aren't just "chunked", so
+ # both of these will be treated the same anyway.
+ #
+ # Expect: the only legal value is the literal string
+ # "100-continue". Splitting on commas is harmless. Case insensitive.
+ #
+ out = []
+ for _, found_name, found_raw_value in headers._full_items:
+ if found_name == name:
+ found_raw_value = found_raw_value.lower()
+ for found_split_value in found_raw_value.split(b","):
+ found_split_value = found_split_value.strip()
+ if found_split_value:
+ out.append(found_split_value)
+ return out
+
+
+def set_comma_header(headers, name, new_values):
+ # The header name `name` is expected to be lower-case bytes.
+ #
+ # Note that when we store the header we use title casing for the header
+ # names, in order to match the conventional HTTP header style.
+ #
+ # Simply calling `.title()` is a blunt approach, but it's correct
+ # here given the cases where we're using `set_comma_header`...
+ #
+ # Connection, Content-Length, Transfer-Encoding.
+ new_headers = []
+ for found_raw_name, found_name, found_raw_value in headers._full_items:
+ if found_name != name:
+ new_headers.append((found_raw_name, found_raw_value))
+ for new_value in new_values:
+ new_headers.append((name.title(), new_value))
+ return normalize_and_validate(new_headers)
+
+
+def has_expect_100_continue(request):
+ # https://tools.ietf.org/html/rfc7231#section-5.1.1
+ # "A server that receives a 100-continue expectation in an HTTP/1.0 request
+ # MUST ignore that expectation."
+ if request.http_version < b"1.1":
+ return False
+ expect = get_comma_header(request.headers, b"expect")
+ return b"100-continue" in expect
diff --git a/packages/h11/_readers.py b/packages/h11/_readers.py
new file mode 100644
index 000000000..0ead0bec3
--- /dev/null
+++ b/packages/h11/_readers.py
@@ -0,0 +1,222 @@
+# Code to read HTTP data
+#
+# Strategy: each reader is a callable which takes a ReceiveBuffer object, and
+# either:
+# 1) consumes some of it and returns an Event
+# 2) raises a LocalProtocolError (for consistency -- e.g. we call validate()
+# and it might raise a LocalProtocolError, so simpler just to always use
+# this)
+# 3) returns None, meaning "I need more data"
+#
+# If they have a .read_eof attribute, then this will be called if an EOF is
+# received -- but this is optional. Either way, the actual ConnectionClosed
+# event will be generated afterwards.
+#
+# READERS is a dict describing how to pick a reader. It maps states to either:
+# - a reader
+# - or, for body readers, a dict of per-framing reader factories
+
+import re
+
+from ._abnf import chunk_header, header_field, request_line, status_line
+from ._events import *
+from ._state import *
+from ._util import LocalProtocolError, RemoteProtocolError, validate
+
+__all__ = ["READERS"]
+
+header_field_re = re.compile(header_field.encode("ascii"))
+
+# Remember that this has to run in O(n) time -- so e.g. the bytearray cast is
+# critical.
+obs_fold_re = re.compile(br"[ \t]+")
+
+
+def _obsolete_line_fold(lines):
+ it = iter(lines)
+ last = None
+ for line in it:
+ match = obs_fold_re.match(line)
+ if match:
+ if last is None:
+ raise LocalProtocolError("continuation line at start of headers")
+ if not isinstance(last, bytearray):
+ last = bytearray(last)
+ last += b" "
+ last += line[match.end() :]
+ else:
+ if last is not None:
+ yield last
+ last = line
+ if last is not None:
+ yield last
+
+
+def _decode_header_lines(lines):
+ for line in _obsolete_line_fold(lines):
+ matches = validate(header_field_re, line, "illegal header line: {!r}", line)
+ yield (matches["field_name"], matches["field_value"])
+
+
+request_line_re = re.compile(request_line.encode("ascii"))
+
+
+def maybe_read_from_IDLE_client(buf):
+ lines = buf.maybe_extract_lines()
+ if lines is None:
+ if buf.is_next_line_obviously_invalid_request_line():
+ raise LocalProtocolError("illegal request line")
+ return None
+ if not lines:
+ raise LocalProtocolError("no request line received")
+ matches = validate(
+ request_line_re, lines[0], "illegal request line: {!r}", lines[0]
+ )
+ return Request(
+ headers=list(_decode_header_lines(lines[1:])), _parsed=True, **matches
+ )
+
+
+status_line_re = re.compile(status_line.encode("ascii"))
+
+
+def maybe_read_from_SEND_RESPONSE_server(buf):
+ lines = buf.maybe_extract_lines()
+ if lines is None:
+ if buf.is_next_line_obviously_invalid_request_line():
+ raise LocalProtocolError("illegal request line")
+ return None
+ if not lines:
+ raise LocalProtocolError("no response line received")
+ matches = validate(status_line_re, lines[0], "illegal status line: {!r}", lines[0])
+ # Tolerate missing reason phrases
+ if matches["reason"] is None:
+ matches["reason"] = b""
+ status_code = matches["status_code"] = int(matches["status_code"])
+ class_ = InformationalResponse if status_code < 200 else Response
+ return class_(
+ headers=list(_decode_header_lines(lines[1:])), _parsed=True, **matches
+ )
+
+
+class ContentLengthReader:
+ def __init__(self, length):
+ self._length = length
+ self._remaining = length
+
+ def __call__(self, buf):
+ if self._remaining == 0:
+ return EndOfMessage()
+ data = buf.maybe_extract_at_most(self._remaining)
+ if data is None:
+ return None
+ self._remaining -= len(data)
+ return Data(data=data)
+
+ def read_eof(self):
+ raise RemoteProtocolError(
+ "peer closed connection without sending complete message body "
+ "(received {} bytes, expected {})".format(
+ self._length - self._remaining, self._length
+ )
+ )
+
+
+chunk_header_re = re.compile(chunk_header.encode("ascii"))
+
+
+class ChunkedReader:
+ def __init__(self):
+ self._bytes_in_chunk = 0
+ # After reading a chunk, we have to throw away the trailing \r\n; if
+ # this is >0 then we discard that many bytes before resuming regular
+ # de-chunkification.
+ self._bytes_to_discard = 0
+ self._reading_trailer = False
+
+ def __call__(self, buf):
+ if self._reading_trailer:
+ lines = buf.maybe_extract_lines()
+ if lines is None:
+ return None
+ return EndOfMessage(headers=list(_decode_header_lines(lines)))
+ if self._bytes_to_discard > 0:
+ data = buf.maybe_extract_at_most(self._bytes_to_discard)
+ if data is None:
+ return None
+ self._bytes_to_discard -= len(data)
+ if self._bytes_to_discard > 0:
+ return None
+ # else, fall through and read some more
+ assert self._bytes_to_discard == 0
+ if self._bytes_in_chunk == 0:
+ # We need to refill our chunk count
+ chunk_header = buf.maybe_extract_next_line()
+ if chunk_header is None:
+ return None
+ matches = validate(
+ chunk_header_re,
+ chunk_header,
+ "illegal chunk header: {!r}",
+ chunk_header,
+ )
+ # XX FIXME: we discard chunk extensions. Does anyone care?
+ self._bytes_in_chunk = int(matches["chunk_size"], base=16)
+ if self._bytes_in_chunk == 0:
+ self._reading_trailer = True
+ return self(buf)
+ chunk_start = True
+ else:
+ chunk_start = False
+ assert self._bytes_in_chunk > 0
+ data = buf.maybe_extract_at_most(self._bytes_in_chunk)
+ if data is None:
+ return None
+ self._bytes_in_chunk -= len(data)
+ if self._bytes_in_chunk == 0:
+ self._bytes_to_discard = 2
+ chunk_end = True
+ else:
+ chunk_end = False
+ return Data(data=data, chunk_start=chunk_start, chunk_end=chunk_end)
+
+ def read_eof(self):
+ raise RemoteProtocolError(
+ "peer closed connection without sending complete message body "
+ "(incomplete chunked read)"
+ )
+
+
+class Http10Reader:
+ def __call__(self, buf):
+ data = buf.maybe_extract_at_most(999999999)
+ if data is None:
+ return None
+ return Data(data=data)
+
+ def read_eof(self):
+ return EndOfMessage()
+
+
+def expect_nothing(buf):
+ if buf:
+ raise LocalProtocolError("Got data when expecting EOF")
+ return None
+
+
+READERS = {
+ (CLIENT, IDLE): maybe_read_from_IDLE_client,
+ (SERVER, IDLE): maybe_read_from_SEND_RESPONSE_server,
+ (SERVER, SEND_RESPONSE): maybe_read_from_SEND_RESPONSE_server,
+ (CLIENT, DONE): expect_nothing,
+ (CLIENT, MUST_CLOSE): expect_nothing,
+ (CLIENT, CLOSED): expect_nothing,
+ (SERVER, DONE): expect_nothing,
+ (SERVER, MUST_CLOSE): expect_nothing,
+ (SERVER, CLOSED): expect_nothing,
+ SEND_BODY: {
+ "chunked": ChunkedReader,
+ "content-length": ContentLengthReader,
+ "http/1.0": Http10Reader,
+ },
+}
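+
+
+# An illustrative sketch of chunked decoding (not part of h11 itself): for
+# the wire bytes b"5\r\nhello\r\n0\r\n\r\n", ChunkedReader produces
+#
+#     Data(data=b"hello", chunk_start=True, chunk_end=True)
+#     EndOfMessage(headers=[])
+#
+# A trailer section after the final "0\r\n" chunk would appear as headers on
+# the EndOfMessage event instead of an empty list.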
diff --git a/packages/h11/_receivebuffer.py b/packages/h11/_receivebuffer.py
new file mode 100644
index 000000000..a3737f351
--- /dev/null
+++ b/packages/h11/_receivebuffer.py
@@ -0,0 +1,152 @@
+import re
+import sys
+
+__all__ = ["ReceiveBuffer"]
+
+
+# Operations we want to support:
+# - find next \r\n or \r\n\r\n (\n or \n\n are also acceptable),
+# or wait until there is one
+# - read at-most-N bytes
+# Goals:
+# - on average, do this fast
+# - worst case, do this in O(n) where n is the number of bytes processed
+# Plan:
+# - store bytearray, offset, how far we've searched for a separator token
+# - use the how-far-we've-searched data to avoid rescanning
+# - while doing a stream of uninterrupted processing, advance offset instead
+# of constantly copying
+# WARNING:
+# - I haven't benchmarked or profiled any of this yet.
+#
+# Note that starting in Python 3.4, deleting the initial n bytes from a
+# bytearray is amortized O(n), thanks to some excellent work by Antoine
+# Martin:
+#
+# https://bugs.python.org/issue19087
+#
+# This means that if we only supported 3.4+, we could get rid of the code here
+# involving self._start and self.compress, because it's doing exactly the same
+# thing that bytearray now does internally.
+#
+# BUT unfortunately, we still support 2.7, and reading short segments out of a
+# long buffer MUST be O(bytes read) to avoid DoS issues, so we can't actually
+# delete this code. Yet:
+#
+# https://pythonclock.org/
+#
+# (Two things to double-check first though: make sure PyPy also has the
+# optimization, and benchmark to make sure it's a win, since we do have a
+# slightly clever thing where we delay calling compress() until we've
+# processed a whole event, which could in theory be slightly more efficient
+# than the internal bytearray support.)
+blank_line_regex = re.compile(b"\n\r?\n", re.MULTILINE)
+
+
+class ReceiveBuffer:
+ def __init__(self):
+ self._data = bytearray()
+ self._next_line_search = 0
+ self._multiple_lines_search = 0
+
+ def __iadd__(self, byteslike):
+ self._data += byteslike
+ return self
+
+ def __bool__(self):
+ return bool(len(self))
+
+ def __len__(self):
+ return len(self._data)
+
+ # for @property unprocessed_data
+ def __bytes__(self):
+ return bytes(self._data)
+
+ def _extract(self, count):
+ # Extract an initial slice of the data buffer and return it.
+ out = self._data[:count]
+ del self._data[:count]
+
+ self._next_line_search = 0
+ self._multiple_lines_search = 0
+
+ return out
+
+ def maybe_extract_at_most(self, count):
+ """
+ Extract a fixed number of bytes from the buffer.
+ """
+ out = self._data[:count]
+ if not out:
+ return None
+
+ return self._extract(count)
+
+ def maybe_extract_next_line(self):
+ """
+ Extract the first line, if it is completed in the buffer.
+ """
+ # Only search in buffer space that we've not already looked at.
+ search_start_index = max(0, self._next_line_search - 1)
+ partial_idx = self._data.find(b"\r\n", search_start_index)
+
+ if partial_idx == -1:
+ self._next_line_search = len(self._data)
+ return None
+
+ # + 2 is to compensate for len(b"\r\n")
+ idx = partial_idx + 2
+
+ return self._extract(idx)
+
+ def maybe_extract_lines(self):
+ """
+ Extract everything up to the first blank line, and return a list of lines.
+ """
+ # Handle the case where we have an immediate empty line.
+ if self._data[:1] == b"\n":
+ self._extract(1)
+ return []
+
+ if self._data[:2] == b"\r\n":
+ self._extract(2)
+ return []
+
+ # Only search in buffer space that we've not already looked at.
+ match = blank_line_regex.search(self._data, self._multiple_lines_search)
+ if match is None:
+ self._multiple_lines_search = max(0, len(self._data) - 2)
+ return None
+
+ # Truncate the buffer and return it.
+ idx = match.span(0)[-1]
+ out = self._extract(idx)
+ lines = out.split(b"\n")
+
+ for line in lines:
+ if line.endswith(b"\r"):
+ del line[-1]
+
+ assert lines[-2] == lines[-1] == b""
+
+ del lines[-2:]
+
+ return lines
+
+ # In theory we should wait until `\r\n` before starting to validate
+ # incoming data. However it's interesting to detect (very) invalid data
+ # early, given that it might not even contain `\r\n` at all (in which case
+ # only a timeout would get rid of it).
+ # This is not a 100% effective detection but more of a cheap sanity check
+ # allowing for early abort in some useful cases.
+ # This is especially useful when the peer is speaking TLS where we were
+ # expecting plain HTTP, given that all versions of TLS so far start the
+ # handshake with a 0x16 message type code.
+ def is_next_line_obviously_invalid_request_line(self):
+ try:
+ # HTTP header line must not contain non-printable characters
+ # and should not start with a space
+ return self._data[0] < 0x21
+ except IndexError:
+ return False
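+
+
+# An illustrative sketch of ReceiveBuffer in use (not part of h11 itself):
+#
+# buf = ReceiveBuffer()
+# buf += b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n"
+# buf.maybe_extract_lines()  # -> None, the blank line has not arrived yet
+# buf += b"\r\n"
+# buf.maybe_extract_lines()  # -> [b"HTTP/1.1 200 OK", b"Content-Length: 0"]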
diff --git a/packages/h11/_state.py b/packages/h11/_state.py
new file mode 100644
index 000000000..0f08a090c
--- /dev/null
+++ b/packages/h11/_state.py
@@ -0,0 +1,307 @@
+################################################################
+# The core state machine
+################################################################
+#
+# Rule 1: everything that affects the state machine and state transitions must
+# live here in this file. As much as possible goes into the table-based
+# representation, but for the bits that don't quite fit, the actual code and
+# state must nonetheless live here.
+#
+# Rule 2: this file does not know about what role we're playing; it only knows
+# about HTTP request/response cycles in the abstract. This ensures that we
+# don't cheat and apply different rules to local and remote parties.
+#
+#
+# Theory of operation
+# ===================
+#
+# Possibly the simplest way to think about this is that we actually have 5
+# different state machines here. Yes, 5. These are:
+#
+# 1) The client state, with its complicated automaton (see the docs)
+# 2) The server state, with its complicated automaton (see the docs)
+# 3) The keep-alive state, with possible states {True, False}
+# 4) The SWITCH_CONNECT state, with possible states {False, True}
+# 5) The SWITCH_UPGRADE state, with possible states {False, True}
+#
+# For (3)-(5), the first state listed is the initial state.
+#
+# (1)-(3) are stored explicitly in member variables. The last
+# two are stored implicitly in the pending_switch_proposals set as:
+# (state of 4) == (_SWITCH_CONNECT in pending_switch_proposals)
+# (state of 5) == (_SWITCH_UPGRADE in pending_switch_proposals)
+#
+# And each of these machines has two different kinds of transitions:
+#
+# a) Event-triggered
+# b) State-triggered
+#
+# Event triggered is the obvious thing that you'd think it is: some event
+# happens, and if it's the right event at the right time then a transition
+# happens. But there are somewhat complicated rules for which machines can
+# "see" which events. (As a rule of thumb, if a machine "sees" an event, this
+# means two things: the event can affect the machine, and if the machine is
+# not in a state where it expects that event then it's an error.) These rules
+# are:
+#
+# 1) The client machine sees all h11.events objects emitted by the client.
+#
+# 2) The server machine sees all h11.events objects emitted by the server.
+#
+# It also sees the client's Request event.
+#
+# And sometimes, server events are annotated with a _SWITCH_* event. For
+# example, we can have a (Response, _SWITCH_CONNECT) event, which is
+# different from a regular Response event.
+#
+# 3) The keep-alive machine sees the process_keep_alive_disabled() event
+# (which is derived from Request/Response events), and this event
+# transitions it from True -> False, or from False -> False. There's no way
+# to transition back.
+#
+# 4&5) The _SWITCH_* machines transition from False->True when we get a
+# Request that proposes the relevant type of switch (via
+# process_client_switch_proposals), and they go from True->False when we
+# get a Response that has no _SWITCH_* annotation.
+#
+# So that's event-triggered transitions.
+#
+# State-triggered transitions are less standard. What they do here is couple
+# the machines together. The way this works is, when certain *joint*
+# configurations of states are achieved, then we automatically transition to a
+# new *joint* state. So, for example, if we're ever in a joint state with
+#
+# client: DONE
+# keep-alive: False
+#
+# then the client state immediately transitions to:
+#
+# client: MUST_CLOSE
+#
+# This is fundamentally different from an event-based transition, because it
+# doesn't matter how we arrived at the {client: DONE, keep-alive: False} state
+# -- maybe the client transitioned SEND_BODY -> DONE, or keep-alive
+# transitioned True -> False. Either way, once this precondition is satisfied,
+# this transition is immediately triggered.
+#
+# What if two conflicting state-based transitions get enabled at the same
+# time? In practice there's only one case where this arises (client DONE ->
+# MIGHT_SWITCH_PROTOCOL versus DONE -> MUST_CLOSE), and we resolve it by
+# explicitly prioritizing the DONE -> MIGHT_SWITCH_PROTOCOL transition.
+#
+# Implementation
+# --------------
+#
+# The event-triggered transitions for the server and client machines are all
+# stored explicitly in a table. Ditto for the state-triggered transitions that
+# involve just the server and client state.
+#
+# The transitions for the other machines, and the state-triggered transitions
+# that involve the other machines, are written out as explicit Python code.
+#
+# It'd be nice if there were some cleaner way to do all this. This isn't
+# *too* terrible, but I feel like it could probably be better.
+#
+# WARNING
+# -------
+#
+# The script that generates the state machine diagrams for the docs knows how
+# to read out the EVENT_TRIGGERED_TRANSITIONS and STATE_TRIGGERED_TRANSITIONS
+# tables. But it can't automatically read the transitions that are written
+# directly in Python code. So if you touch those, you need to also update the
+# script to keep it in sync!
+
+from ._events import *
+from ._util import LocalProtocolError, make_sentinel
+
+# Everything in __all__ gets re-exported as part of the h11 public API.
+__all__ = [
+ "CLIENT",
+ "SERVER",
+ "IDLE",
+ "SEND_RESPONSE",
+ "SEND_BODY",
+ "DONE",
+ "MUST_CLOSE",
+ "CLOSED",
+ "MIGHT_SWITCH_PROTOCOL",
+ "SWITCHED_PROTOCOL",
+ "ERROR",
+]
+
+CLIENT = make_sentinel("CLIENT")
+SERVER = make_sentinel("SERVER")
+
+# States
+IDLE = make_sentinel("IDLE")
+SEND_RESPONSE = make_sentinel("SEND_RESPONSE")
+SEND_BODY = make_sentinel("SEND_BODY")
+DONE = make_sentinel("DONE")
+MUST_CLOSE = make_sentinel("MUST_CLOSE")
+CLOSED = make_sentinel("CLOSED")
+ERROR = make_sentinel("ERROR")
+
+# Switch types
+MIGHT_SWITCH_PROTOCOL = make_sentinel("MIGHT_SWITCH_PROTOCOL")
+SWITCHED_PROTOCOL = make_sentinel("SWITCHED_PROTOCOL")
+
+_SWITCH_UPGRADE = make_sentinel("_SWITCH_UPGRADE")
+_SWITCH_CONNECT = make_sentinel("_SWITCH_CONNECT")
+
+EVENT_TRIGGERED_TRANSITIONS = {
+ CLIENT: {
+ IDLE: {Request: SEND_BODY, ConnectionClosed: CLOSED},
+ SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE},
+ DONE: {ConnectionClosed: CLOSED},
+ MUST_CLOSE: {ConnectionClosed: CLOSED},
+ CLOSED: {ConnectionClosed: CLOSED},
+ MIGHT_SWITCH_PROTOCOL: {},
+ SWITCHED_PROTOCOL: {},
+ ERROR: {},
+ },
+ SERVER: {
+ IDLE: {
+ ConnectionClosed: CLOSED,
+ Response: SEND_BODY,
+ # Special case: server sees client Request events, in this form
+ (Request, CLIENT): SEND_RESPONSE,
+ },
+ SEND_RESPONSE: {
+ InformationalResponse: SEND_RESPONSE,
+ Response: SEND_BODY,
+ (InformationalResponse, _SWITCH_UPGRADE): SWITCHED_PROTOCOL,
+ (Response, _SWITCH_CONNECT): SWITCHED_PROTOCOL,
+ },
+ SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE},
+ DONE: {ConnectionClosed: CLOSED},
+ MUST_CLOSE: {ConnectionClosed: CLOSED},
+ CLOSED: {ConnectionClosed: CLOSED},
+ SWITCHED_PROTOCOL: {},
+ ERROR: {},
+ },
+}
+
+# NB: there are also some special-case state-triggered transitions hard-coded
+# into _fire_state_triggered_transitions below.
+STATE_TRIGGERED_TRANSITIONS = {
+ # (Client state, Server state) -> new states
+ # Protocol negotiation
+ (MIGHT_SWITCH_PROTOCOL, SWITCHED_PROTOCOL): {CLIENT: SWITCHED_PROTOCOL},
+ # Socket shutdown
+ (CLOSED, DONE): {SERVER: MUST_CLOSE},
+ (CLOSED, IDLE): {SERVER: MUST_CLOSE},
+ (ERROR, DONE): {SERVER: MUST_CLOSE},
+ (DONE, CLOSED): {CLIENT: MUST_CLOSE},
+ (IDLE, CLOSED): {CLIENT: MUST_CLOSE},
+ (DONE, ERROR): {CLIENT: MUST_CLOSE},
+}
+
+
+class ConnectionState:
+ def __init__(self):
+ # Extra bits of state that don't quite fit into the state model.
+
+ # If this is False then it enables the automatic DONE -> MUST_CLOSE
+ # transition. Don't set this directly; call .process_keep_alive_disabled()
+ self.keep_alive = True
+
+ # This is a subset of {UPGRADE, CONNECT}, containing the proposals
+ # made by the client for switching protocols.
+ self.pending_switch_proposals = set()
+
+ self.states = {CLIENT: IDLE, SERVER: IDLE}
+
+ def process_error(self, role):
+ self.states[role] = ERROR
+ self._fire_state_triggered_transitions()
+
+ def process_keep_alive_disabled(self):
+ self.keep_alive = False
+ self._fire_state_triggered_transitions()
+
+ def process_client_switch_proposal(self, switch_event):
+ self.pending_switch_proposals.add(switch_event)
+ self._fire_state_triggered_transitions()
+
+ def process_event(self, role, event_type, server_switch_event=None):
+ if server_switch_event is not None:
+ assert role is SERVER
+ if server_switch_event not in self.pending_switch_proposals:
+ raise LocalProtocolError(
+ "Received server {} event without a pending proposal".format(
+ server_switch_event
+ )
+ )
+ event_type = (event_type, server_switch_event)
+ if server_switch_event is None and event_type is Response:
+ self.pending_switch_proposals = set()
+ self._fire_event_triggered_transitions(role, event_type)
+ # Special case: the server state does get to see Request
+ # events.
+ if event_type is Request:
+ assert role is CLIENT
+ self._fire_event_triggered_transitions(SERVER, (Request, CLIENT))
+ self._fire_state_triggered_transitions()
+
+ def _fire_event_triggered_transitions(self, role, event_type):
+ state = self.states[role]
+ try:
+ new_state = EVENT_TRIGGERED_TRANSITIONS[role][state][event_type]
+ except KeyError:
+ raise LocalProtocolError(
+ "can't handle event type {} when role={} and state={}".format(
+ event_type.__name__, role, self.states[role]
+ )
+ )
+ self.states[role] = new_state
+
+ def _fire_state_triggered_transitions(self):
+ # We apply these rules repeatedly until converging on a fixed point
+ while True:
+ start_states = dict(self.states)
+
+ # It could happen that both these special-case transitions are
+ # enabled at the same time:
+ #
+ # DONE -> MIGHT_SWITCH_PROTOCOL
+ # DONE -> MUST_CLOSE
+ #
+ # For example, this will always be true of an HTTP/1.0 client
+ # requesting CONNECT. If this happens, the protocol switch takes
+ # priority. From there the client will either go to
+ # SWITCHED_PROTOCOL, in which case it's none of our business when
+ # they close the connection, or else the server will deny the
+ # request, in which case the client will go back to DONE and then
+ # from there to MUST_CLOSE.
+ if self.pending_switch_proposals:
+ if self.states[CLIENT] is DONE:
+ self.states[CLIENT] = MIGHT_SWITCH_PROTOCOL
+
+ if not self.pending_switch_proposals:
+ if self.states[CLIENT] is MIGHT_SWITCH_PROTOCOL:
+ self.states[CLIENT] = DONE
+
+ if not self.keep_alive:
+ for role in (CLIENT, SERVER):
+ if self.states[role] is DONE:
+ self.states[role] = MUST_CLOSE
+
+ # Tabular state-triggered transitions
+ joint_state = (self.states[CLIENT], self.states[SERVER])
+ changes = STATE_TRIGGERED_TRANSITIONS.get(joint_state, {})
+ self.states.update(changes)
+
+ if self.states == start_states:
+ # Fixed point reached
+ return
+
+ def start_next_cycle(self):
+ if self.states != {CLIENT: DONE, SERVER: DONE}:
+ raise LocalProtocolError(
+ "not in a reusable state. self.states={}".format(self.states)
+ )
+ # Can't reach DONE/DONE with any of these active, but still, let's be
+ # sure.
+ assert self.keep_alive
+ assert not self.pending_switch_proposals
+ self.states = {CLIENT: IDLE, SERVER: IDLE}
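+
+
+# An illustrative sketch of a plain keep-alive request/response cycle walked
+# through ConnectionState (not part of h11 itself):
+#
+# cs = ConnectionState()
+# cs.process_event(CLIENT, Request)       # client IDLE -> SEND_BODY,
+#                                         # server IDLE -> SEND_RESPONSE
+# cs.process_event(CLIENT, EndOfMessage)  # client -> DONE
+# cs.process_event(SERVER, Response)      # server -> SEND_BODY
+# cs.process_event(SERVER, EndOfMessage)  # server -> DONE
+# cs.start_next_cycle()                   # both roles back to IDLE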
diff --git a/packages/h11/_util.py b/packages/h11/_util.py
new file mode 100644
index 000000000..eb1a5cd9e
--- /dev/null
+++ b/packages/h11/_util.py
@@ -0,0 +1,122 @@
+__all__ = [
+ "ProtocolError",
+ "LocalProtocolError",
+ "RemoteProtocolError",
+ "validate",
+ "make_sentinel",
+ "bytesify",
+]
+
+
+class ProtocolError(Exception):
+ """Exception indicating a violation of the HTTP/1.1 protocol.
+
+ This is an abstract base class, with two concrete subclasses:
+ :exc:`LocalProtocolError`, which indicates that you tried to do something
+ that HTTP/1.1 says is illegal, and :exc:`RemoteProtocolError`, which
+ indicates that the remote peer tried to do something that HTTP/1.1 says is
+ illegal. See :ref:`error-handling` for details.
+
+ In addition to the normal :exc:`Exception` features, it has one attribute:
+
+ .. attribute:: error_status_hint
+
+ This gives a suggestion as to what status code a server might use if
+ this error occurred as part of a request.
+
+ For a :exc:`RemoteProtocolError`, this is useful as a suggestion for
+ how you might want to respond to a misbehaving peer, if you're
+ implementing a server.
+
+ For a :exc:`LocalProtocolError`, this can be taken as a suggestion for
+ how your peer might have responded to *you* if h11 had allowed you to
+ continue.
+
+ The default is 400 Bad Request, a generic catch-all for protocol
+ violations.
+
+ """
+
+ def __init__(self, msg, error_status_hint=400):
+ if type(self) is ProtocolError:
+ raise TypeError("tried to directly instantiate ProtocolError")
+ Exception.__init__(self, msg)
+ self.error_status_hint = error_status_hint
+
+
+# Strategy: there are a number of public APIs where a LocalProtocolError can
+# be raised (send(), all the different event constructors, ...), and only one
+# public API where RemoteProtocolError can be raised
+# (receive_data()). Therefore we always raise LocalProtocolError internally,
+# and then receive_data will translate this into a RemoteProtocolError.
+#
+# Internally:
+# LocalProtocolError is the generic "ProtocolError".
+# Externally:
+# LocalProtocolError is for local errors and RemoteProtocolError is for
+# remote errors.
+class LocalProtocolError(ProtocolError):
+ def _reraise_as_remote_protocol_error(self):
+ # After catching a LocalProtocolError, use this method to re-raise it
+ # as a RemoteProtocolError. This method must be called from inside an
+ # except: block.
+ #
+ # An easy way to get an equivalent RemoteProtocolError is just to
+ # modify 'self' in place.
+ self.__class__ = RemoteProtocolError
+ # But the re-raising is somewhat non-trivial -- you might think that
+ # now that we've modified the in-flight exception object, that just
+ # doing 'raise' to re-raise it would be enough. But it turns out that
+ # this doesn't work, because Python tracks the exception type
+ # (exc_info[0]) separately from the exception object (exc_info[1]),
+ # and we only modified the latter. So we really do need to re-raise
+ # the new type explicitly.
+ # On py3, the traceback is part of the exception object, so our
+ # in-place modification preserved it and we can just re-raise:
+ raise self
+
+
+class RemoteProtocolError(ProtocolError):
+ pass
+
+
+def validate(regex, data, msg="malformed data", *format_args):
+ match = regex.fullmatch(data)
+ if not match:
+ if format_args:
+ msg = msg.format(*format_args)
+ raise LocalProtocolError(msg)
+ return match.groupdict()
+
+
+# Sentinel values
+#
+# - Inherit identity-based comparison and hashing from object
+# - Have a nice repr
+# - Have a *bonus property*: type(sentinel) is sentinel
+#
+# The bonus property is useful if you want to take the return value from
+# next_event() and do some sort of dispatch based on type(event).
+class _SentinelBase(type):
+ def __repr__(self):
+ return self.__name__
+
+
+def make_sentinel(name):
+ cls = _SentinelBase(name, (_SentinelBase,), {})
+ cls.__class__ = cls
+ return cls
+
+
+# Used for methods, request targets, HTTP versions, header names, and header
+# values. Accepts ascii-strings, or bytes/bytearray/memoryview/..., and always
+# returns bytes.
+def bytesify(s):
+ # Fast-path:
+ if type(s) is bytes:
+ return s
+ if isinstance(s, str):
+ s = s.encode("ascii")
+ if isinstance(s, int):
+ raise TypeError("expected bytes-like object, not int")
+ return bytes(s)
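+
+
+# An illustrative sketch of the helpers above (not part of h11 itself):
+#
+# S = make_sentinel("S")
+# repr(S)        # -> "S"
+# type(S) is S   # -> True, which makes type(event)-style dispatch work
+#
+# bytesify("GET")            # -> b"GET"
+# bytesify(bytearray(b"/"))  # -> b"/"
+# bytesify(5)                # -> raises TypeError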
diff --git a/packages/h11/_version.py b/packages/h11/_version.py
new file mode 100644
index 000000000..cb5c2c322
--- /dev/null
+++ b/packages/h11/_version.py
@@ -0,0 +1,16 @@
+# This file must be kept very simple, because it is consumed from several
+# places -- it is imported by h11/__init__.py, execfile'd by setup.py, etc.
+
+# We use a simple scheme:
+# 1.0.0 -> 1.0.0+dev -> 1.1.0 -> 1.1.0+dev
+# where the +dev versions are never released into the wild, they're just what
+# we stick into the VCS in between releases.
+#
+# This is compatible with PEP 440:
+# http://legacy.python.org/dev/peps/pep-0440/
+# via the use of the "local suffix" "+dev", which is disallowed on index
+# servers and causes 1.0.0+dev to sort after plain 1.0.0, which is what we
+# want. (Contrast with the special suffix 1.0.0.dev, which sorts *before*
+# 1.0.0.)
+
+__version__ = "0.12.0"
diff --git a/packages/h11/_writers.py b/packages/h11/_writers.py
new file mode 100644
index 000000000..cb5e8a8c5
--- /dev/null
+++ b/packages/h11/_writers.py
@@ -0,0 +1,123 @@
+# Code to write HTTP data
+#
+# Strategy: each writer takes an event + a write-some-bytes function, which it
+# calls.
+#
+# WRITERS is a dict describing how to pick a writer. It maps states to either:
+# - a writer
+# - or, for body writers, a dict of framing-dependent writer factories
+
+from ._events import Data, EndOfMessage
+from ._state import CLIENT, IDLE, SEND_BODY, SEND_RESPONSE, SERVER
+from ._util import LocalProtocolError
+
+__all__ = ["WRITERS"]
+
+
+def write_headers(headers, write):
+ # "Since the Host field-value is critical information for handling a
+ # request, a user agent SHOULD generate Host as the first header field
+ # following the request-line." - RFC 7230
+ raw_items = headers._full_items
+ for raw_name, name, value in raw_items:
+ if name == b"host":
+ write(b"%s: %s\r\n" % (raw_name, value))
+ for raw_name, name, value in raw_items:
+ if name != b"host":
+ write(b"%s: %s\r\n" % (raw_name, value))
+ write(b"\r\n")
+
+
+def write_request(request, write):
+ if request.http_version != b"1.1":
+ raise LocalProtocolError("I only send HTTP/1.1")
+ write(b"%s %s HTTP/1.1\r\n" % (request.method, request.target))
+ write_headers(request.headers, write)
+
+
+# Shared between InformationalResponse and Response
+def write_any_response(response, write):
+ if response.http_version != b"1.1":
+ raise LocalProtocolError("I only send HTTP/1.1")
+ status_bytes = str(response.status_code).encode("ascii")
+ # We don't bother sending ascii status messages like "OK"; they're
+ # optional and ignored by the protocol. (But the space after the numeric
+ # status code is mandatory.)
+ #
+ # XX FIXME: could at least make an effort to pull out the status message
+ # from stdlib's http.HTTPStatus table. Or maybe just steal their enums
+ # (either by import or copy/paste). We already accept them as status codes
+ # since they're of type IntEnum < int.
+ write(b"HTTP/1.1 %s %s\r\n" % (status_bytes, response.reason))
+ write_headers(response.headers, write)
+
+
+class BodyWriter:
+ def __call__(self, event, write):
+ if type(event) is Data:
+ self.send_data(event.data, write)
+ elif type(event) is EndOfMessage:
+ self.send_eom(event.headers, write)
+ else: # pragma: no cover
+ assert False
+
+
+#
+# These are all careful not to do anything to 'data' except call len(data) and
+# write(data). This allows us to transparently pass-through funny objects,
+# like placeholder objects referring to files on disk that will be sent via
+# sendfile(2).
+#
+class ContentLengthWriter(BodyWriter):
+ def __init__(self, length):
+ self._length = length
+
+ def send_data(self, data, write):
+ self._length -= len(data)
+ if self._length < 0:
+ raise LocalProtocolError("Too much data for declared Content-Length")
+ write(data)
+
+ def send_eom(self, headers, write):
+ if self._length != 0:
+ raise LocalProtocolError("Too little data for declared Content-Length")
+ if headers:
+ raise LocalProtocolError("Content-Length and trailers don't mix")
+
+
+class ChunkedWriter(BodyWriter):
+ def send_data(self, data, write):
+ # if we encoded 0-length data in the naive way, it would look like an
+ # end-of-message.
+ if not data:
+ return
+ write(b"%x\r\n" % len(data))
+ write(data)
+ write(b"\r\n")
+
+ def send_eom(self, headers, write):
+ write(b"0\r\n")
+ write_headers(headers, write)
+
+
+class Http10Writer(BodyWriter):
+ def send_data(self, data, write):
+ write(data)
+
+ def send_eom(self, headers, write):
+ if headers:
+ raise LocalProtocolError("can't send trailers to HTTP/1.0 client")
+ # no need to close the socket ourselves, that will be taken care of by
+ # Connection: close machinery
+
+
+WRITERS = {
+ (CLIENT, IDLE): write_request,
+ (SERVER, IDLE): write_any_response,
+ (SERVER, SEND_RESPONSE): write_any_response,
+ SEND_BODY: {
+ "chunked": ChunkedWriter,
+ "content-length": ContentLengthWriter,
+ "http/1.0": Http10Writer,
+ },
+}
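
The WRITERS table above is consumed by _connection.py, but it can also be exercised directly. A minimal sketch (assuming the vendored package is importable as packages.h11): the (role, state) key picks a head writer, while the SEND_BODY entry maps framing names to body-writer factories.

from packages.h11._events import Data, EndOfMessage, Request
from packages.h11._state import CLIENT, IDLE, SEND_BODY
from packages.h11._writers import WRITERS

chunks = []
# Head writer, picked by (role, state):
WRITERS[CLIENT, IDLE](
    Request(method="GET", target="/", headers=[("Host", "example.com")]),
    chunks.append,
)
# Body writer, picked by framing; the content-length factory takes the declared length:
body_writer = WRITERS[SEND_BODY]["content-length"](5)
body_writer(Data(data=b"hello"), chunks.append)
body_writer(EndOfMessage(), chunks.append)

print(b"".join(chunks))
# b'GET / HTTP/1.1\r\nHost: example.com\r\n\r\nhello'
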
diff --git a/packages/h11/tests/__init__.py b/packages/h11/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/packages/h11/tests/data/test-file b/packages/h11/tests/data/test-file
new file mode 100644
index 000000000..d0be0a6c5
--- /dev/null
+++ b/packages/h11/tests/data/test-file
@@ -0,0 +1 @@
+92b12bc045050b55b848d37167a1a63947c364579889ce1d39788e45e9fac9e5
diff --git a/packages/h11/tests/helpers.py b/packages/h11/tests/helpers.py
new file mode 100644
index 000000000..9d2cf3801
--- /dev/null
+++ b/packages/h11/tests/helpers.py
@@ -0,0 +1,77 @@
+from .._connection import *
+from .._events import *
+from .._state import *
+
+
+def get_all_events(conn):
+ got_events = []
+ while True:
+ event = conn.next_event()
+ if event in (NEED_DATA, PAUSED):
+ break
+ got_events.append(event)
+ if type(event) is ConnectionClosed:
+ break
+ return got_events
+
+
+def receive_and_get(conn, data):
+ conn.receive_data(data)
+ return get_all_events(conn)
+
+
+# Merges adjacent Data events, converts payloads to bytestrings, and removes
+# chunk boundaries.
+def normalize_data_events(in_events):
+ out_events = []
+ for event in in_events:
+ if type(event) is Data:
+ event.data = bytes(event.data)
+ event.chunk_start = False
+ event.chunk_end = False
+ if out_events and type(out_events[-1]) is type(event) is Data:
+ out_events[-1].data += event.data
+ else:
+ out_events.append(event)
+ return out_events
+
+
+# Given that we want to write tests that push some events through a Connection
+# and check that its state updates appropriately... we might as well make a
+# habit of pushing them through two Connections with a fake network link in
+# between.
+class ConnectionPair:
+ def __init__(self):
+ self.conn = {CLIENT: Connection(CLIENT), SERVER: Connection(SERVER)}
+ self.other = {CLIENT: SERVER, SERVER: CLIENT}
+
+ @property
+ def conns(self):
+ return self.conn.values()
+
+ # expect="match" if expect=send_events; expect=[...] to say what expected
+ def send(self, role, send_events, expect="match"):
+ if not isinstance(send_events, list):
+ send_events = [send_events]
+ data = b""
+ closed = False
+ for send_event in send_events:
+ new_data = self.conn[role].send(send_event)
+ if new_data is None:
+ closed = True
+ else:
+ data += new_data
+ # send uses b"" to mean b"", and None to mean closed
+ # receive uses b"" to mean closed, and None to mean "try again"
+ # so we have to translate between the two conventions
+ if data:
+ self.conn[self.other[role]].receive_data(data)
+ if closed:
+ self.conn[self.other[role]].receive_data(b"")
+ got_events = get_all_events(self.conn[self.other[role]])
+ if expect == "match":
+ expect = send_events
+ if not isinstance(expect, list):
+ expect = [expect]
+ assert got_events == expect
+ return data
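
For readers skimming the tests that follow, this is roughly how the ConnectionPair habit described above plays out. A hedged sketch using h11's public names (Request, EndOfMessage, CLIENT, SERVER, DONE) and the helper defined just above; the import paths assume the vendored layout.

from packages.h11 import CLIENT, DONE, SERVER, EndOfMessage, Request
from packages.h11.tests.helpers import ConnectionPair

p = ConnectionPair()
wire = p.send(
    CLIENT,
    [
        Request(method="GET", target="/", headers=[("Host", "example.com")]),
        EndOfMessage(),
    ],
)
# send() pushed the serialized bytes into the server-side Connection and
# asserted that the same events came back out; both sides now agree that the
# client is DONE.
assert wire.startswith(b"GET / HTTP/1.1\r\n")
assert p.conn[CLIENT].our_state is DONE
assert p.conn[SERVER].their_state is DONE
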
diff --git a/packages/h11/tests/test_against_stdlib_http.py b/packages/h11/tests/test_against_stdlib_http.py
new file mode 100644
index 000000000..e6c5db444
--- /dev/null
+++ b/packages/h11/tests/test_against_stdlib_http.py
@@ -0,0 +1,111 @@
+import json
+import os.path
+import socket
+import socketserver
+import threading
+from contextlib import closing, contextmanager
+from http.server import SimpleHTTPRequestHandler
+from urllib.request import urlopen
+
+import h11
+
+
+@contextmanager
+def socket_server(handler):
+ httpd = socketserver.TCPServer(("127.0.0.1", 0), handler)
+ thread = threading.Thread(
+ target=httpd.serve_forever, kwargs={"poll_interval": 0.01}
+ )
+ thread.daemon = True
+ try:
+ thread.start()
+ yield httpd
+ finally:
+ httpd.shutdown()
+
+
+test_file_path = os.path.join(os.path.dirname(__file__), "data/test-file")
+with open(test_file_path, "rb") as f:
+ test_file_data = f.read()
+
+
+class SingleMindedRequestHandler(SimpleHTTPRequestHandler):
+ def translate_path(self, path):
+ return test_file_path
+
+
+def test_h11_as_client():
+ with socket_server(SingleMindedRequestHandler) as httpd:
+ with closing(socket.create_connection(httpd.server_address)) as s:
+ c = h11.Connection(h11.CLIENT)
+
+ s.sendall(
+ c.send(
+ h11.Request(
+ method="GET", target="/foo", headers=[("Host", "localhost")]
+ )
+ )
+ )
+ s.sendall(c.send(h11.EndOfMessage()))
+
+ data = bytearray()
+ while True:
+ event = c.next_event()
+ print(event)
+ if event is h11.NEED_DATA:
+ # Use a small read buffer to make things more challenging
+ # and exercise more paths :-)
+ c.receive_data(s.recv(10))
+ continue
+ if type(event) is h11.Response:
+ assert event.status_code == 200
+ if type(event) is h11.Data:
+ data += event.data
+ if type(event) is h11.EndOfMessage:
+ break
+ assert bytes(data) == test_file_data
+
+
+class H11RequestHandler(socketserver.BaseRequestHandler):
+ def handle(self):
+ with closing(self.request) as s:
+ c = h11.Connection(h11.SERVER)
+ request = None
+ while True:
+ event = c.next_event()
+ if event is h11.NEED_DATA:
+ # Use a small read buffer to make things more challenging
+ # and exercise more paths :-)
+ c.receive_data(s.recv(10))
+ continue
+ if type(event) is h11.Request:
+ request = event
+ if type(event) is h11.EndOfMessage:
+ break
+ info = json.dumps(
+ {
+ "method": request.method.decode("ascii"),
+ "target": request.target.decode("ascii"),
+ "headers": {
+ name.decode("ascii"): value.decode("ascii")
+ for (name, value) in request.headers
+ },
+ }
+ )
+ s.sendall(c.send(h11.Response(status_code=200, headers=[])))
+ s.sendall(c.send(h11.Data(data=info.encode("ascii"))))
+ s.sendall(c.send(h11.EndOfMessage()))
+
+
+def test_h11_as_server():
+ with socket_server(H11RequestHandler) as httpd:
+ host, port = httpd.server_address
+ url = "http://{}:{}/some-path".format(host, port)
+ with closing(urlopen(url)) as f:
+ assert f.getcode() == 200
+ data = f.read()
+ info = json.loads(data.decode("ascii"))
+ print(info)
+ assert info["method"] == "GET"
+ assert info["target"] == "/some-path"
+ assert "urllib" in info["headers"]["user-agent"]
diff --git a/packages/h11/tests/test_connection.py b/packages/h11/tests/test_connection.py
new file mode 100644
index 000000000..baadec8d5
--- /dev/null
+++ b/packages/h11/tests/test_connection.py
@@ -0,0 +1,1078 @@
+import pytest
+
+from .._connection import _body_framing, _keep_alive, Connection, NEED_DATA, PAUSED
+from .._events import *
+from .._state import *
+from .._util import LocalProtocolError, RemoteProtocolError
+from .helpers import ConnectionPair, get_all_events, receive_and_get
+
+
+def test__keep_alive():
+ assert _keep_alive(
+ Request(method="GET", target="/", headers=[("Host", "Example.com")])
+ )
+ assert not _keep_alive(
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "Example.com"), ("Connection", "close")],
+ )
+ )
+ assert not _keep_alive(
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "Example.com"), ("Connection", "a, b, cLOse, foo")],
+ )
+ )
+ assert not _keep_alive(
+ Request(method="GET", target="/", headers=[], http_version="1.0")
+ )
+
+ assert _keep_alive(Response(status_code=200, headers=[]))
+ assert not _keep_alive(Response(status_code=200, headers=[("Connection", "close")]))
+ assert not _keep_alive(
+ Response(status_code=200, headers=[("Connection", "a, b, cLOse, foo")])
+ )
+ assert not _keep_alive(Response(status_code=200, headers=[], http_version="1.0"))
+
+
+def test__body_framing():
+ def headers(cl, te):
+ headers = []
+ if cl is not None:
+ headers.append(("Content-Length", str(cl)))
+ if te:
+ headers.append(("Transfer-Encoding", "chunked"))
+ return headers
+
+ def resp(status_code=200, cl=None, te=False):
+ return Response(status_code=status_code, headers=headers(cl, te))
+
+ def req(cl=None, te=False):
+ h = headers(cl, te)
+ h += [("Host", "example.com")]
+ return Request(method="GET", target="/", headers=h)
+
+ # Special cases where the headers are ignored:
+ for kwargs in [{}, {"cl": 100}, {"te": True}, {"cl": 100, "te": True}]:
+ for meth, r in [
+ (b"HEAD", resp(**kwargs)),
+ (b"GET", resp(status_code=204, **kwargs)),
+ (b"GET", resp(status_code=304, **kwargs)),
+ ]:
+ assert _body_framing(meth, r) == ("content-length", (0,))
+
+ # Transfer-encoding
+ for kwargs in [{"te": True}, {"cl": 100, "te": True}]:
+ for meth, r in [(None, req(**kwargs)), (b"GET", resp(**kwargs))]:
+ assert _body_framing(meth, r) == ("chunked", ())
+
+ # Content-Length
+ for meth, r in [(None, req(cl=100)), (b"GET", resp(cl=100))]:
+ assert _body_framing(meth, r) == ("content-length", (100,))
+
+ # No headers
+ assert _body_framing(None, req()) == ("content-length", (0,))
+ assert _body_framing(b"GET", resp()) == ("http/1.0", ())
+
+
+def test_Connection_basics_and_content_length():
+ with pytest.raises(ValueError):
+ Connection("CLIENT")
+
+ p = ConnectionPair()
+ assert p.conn[CLIENT].our_role is CLIENT
+ assert p.conn[CLIENT].their_role is SERVER
+ assert p.conn[SERVER].our_role is SERVER
+ assert p.conn[SERVER].their_role is CLIENT
+
+ data = p.send(
+ CLIENT,
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "example.com"), ("Content-Length", "10")],
+ ),
+ )
+ assert data == (
+ b"GET / HTTP/1.1\r\n" b"Host: example.com\r\n" b"Content-Length: 10\r\n\r\n"
+ )
+
+ for conn in p.conns:
+ assert conn.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
+ assert p.conn[CLIENT].our_state is SEND_BODY
+ assert p.conn[CLIENT].their_state is SEND_RESPONSE
+ assert p.conn[SERVER].our_state is SEND_RESPONSE
+ assert p.conn[SERVER].their_state is SEND_BODY
+
+ assert p.conn[CLIENT].their_http_version is None
+ assert p.conn[SERVER].their_http_version == b"1.1"
+
+ data = p.send(SERVER, InformationalResponse(status_code=100, headers=[]))
+ assert data == b"HTTP/1.1 100 \r\n\r\n"
+
+ data = p.send(SERVER, Response(status_code=200, headers=[("Content-Length", "11")]))
+ assert data == b"HTTP/1.1 200 \r\nContent-Length: 11\r\n\r\n"
+
+ for conn in p.conns:
+ assert conn.states == {CLIENT: SEND_BODY, SERVER: SEND_BODY}
+
+ assert p.conn[CLIENT].their_http_version == b"1.1"
+ assert p.conn[SERVER].their_http_version == b"1.1"
+
+ data = p.send(CLIENT, Data(data=b"12345"))
+ assert data == b"12345"
+ data = p.send(
+ CLIENT, Data(data=b"67890"), expect=[Data(data=b"67890"), EndOfMessage()]
+ )
+ assert data == b"67890"
+ data = p.send(CLIENT, EndOfMessage(), expect=[])
+ assert data == b""
+
+ for conn in p.conns:
+ assert conn.states == {CLIENT: DONE, SERVER: SEND_BODY}
+
+ data = p.send(SERVER, Data(data=b"1234567890"))
+ assert data == b"1234567890"
+ data = p.send(SERVER, Data(data=b"1"), expect=[Data(data=b"1"), EndOfMessage()])
+ assert data == b"1"
+ data = p.send(SERVER, EndOfMessage(), expect=[])
+ assert data == b""
+
+ for conn in p.conns:
+ assert conn.states == {CLIENT: DONE, SERVER: DONE}
+
+
+def test_chunked():
+ p = ConnectionPair()
+
+ p.send(
+ CLIENT,
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "example.com"), ("Transfer-Encoding", "chunked")],
+ ),
+ )
+ data = p.send(CLIENT, Data(data=b"1234567890", chunk_start=True, chunk_end=True))
+ assert data == b"a\r\n1234567890\r\n"
+ data = p.send(CLIENT, Data(data=b"abcde", chunk_start=True, chunk_end=True))
+ assert data == b"5\r\nabcde\r\n"
+ data = p.send(CLIENT, Data(data=b""), expect=[])
+ assert data == b""
+ data = p.send(CLIENT, EndOfMessage(headers=[("hello", "there")]))
+ assert data == b"0\r\nhello: there\r\n\r\n"
+
+ p.send(
+ SERVER, Response(status_code=200, headers=[("Transfer-Encoding", "chunked")])
+ )
+ p.send(SERVER, Data(data=b"54321", chunk_start=True, chunk_end=True))
+ p.send(SERVER, Data(data=b"12345", chunk_start=True, chunk_end=True))
+ p.send(SERVER, EndOfMessage())
+
+ for conn in p.conns:
+ assert conn.states == {CLIENT: DONE, SERVER: DONE}
+
+
+def test_chunk_boundaries():
+ conn = Connection(our_role=SERVER)
+
+ request = (
+ b"POST / HTTP/1.1\r\n"
+ b"Host: example.com\r\n"
+ b"Transfer-Encoding: chunked\r\n"
+ b"\r\n"
+ )
+ conn.receive_data(request)
+ assert conn.next_event() == Request(
+ method="POST",
+ target="/",
+ headers=[("Host", "example.com"), ("Transfer-Encoding", "chunked")],
+ )
+ assert conn.next_event() is NEED_DATA
+
+ conn.receive_data(b"5\r\nhello\r\n")
+ assert conn.next_event() == Data(data=b"hello", chunk_start=True, chunk_end=True)
+
+ conn.receive_data(b"5\r\nhel")
+ assert conn.next_event() == Data(data=b"hel", chunk_start=True, chunk_end=False)
+
+ conn.receive_data(b"l")
+ assert conn.next_event() == Data(data=b"l", chunk_start=False, chunk_end=False)
+
+ conn.receive_data(b"o\r\n")
+ assert conn.next_event() == Data(data=b"o", chunk_start=False, chunk_end=True)
+
+ conn.receive_data(b"5\r\nhello")
+ assert conn.next_event() == Data(data=b"hello", chunk_start=True, chunk_end=True)
+
+ conn.receive_data(b"\r\n")
+ assert conn.next_event() == NEED_DATA
+
+ conn.receive_data(b"0\r\n\r\n")
+ assert conn.next_event() == EndOfMessage()
+
+
+def test_client_talking_to_http10_server():
+ c = Connection(CLIENT)
+ c.send(Request(method="GET", target="/", headers=[("Host", "example.com")]))
+ c.send(EndOfMessage())
+ assert c.our_state is DONE
+ # No content-length, so Http10 framing for body
+ assert receive_and_get(c, b"HTTP/1.0 200 OK\r\n\r\n") == [
+ Response(status_code=200, headers=[], http_version="1.0", reason=b"OK")
+ ]
+ assert c.our_state is MUST_CLOSE
+ assert receive_and_get(c, b"12345") == [Data(data=b"12345")]
+ assert receive_and_get(c, b"67890") == [Data(data=b"67890")]
+ assert receive_and_get(c, b"") == [EndOfMessage(), ConnectionClosed()]
+ assert c.their_state is CLOSED
+
+
+def test_server_talking_to_http10_client():
+ c = Connection(SERVER)
+ # No content-length, so no body
+ # NB: no host header
+ assert receive_and_get(c, b"GET / HTTP/1.0\r\n\r\n") == [
+ Request(method="GET", target="/", headers=[], http_version="1.0"),
+ EndOfMessage(),
+ ]
+ assert c.their_state is MUST_CLOSE
+
+ # We automatically Connection: close back at them
+ assert (
+ c.send(Response(status_code=200, headers=[]))
+ == b"HTTP/1.1 200 \r\nConnection: close\r\n\r\n"
+ )
+
+ assert c.send(Data(data=b"12345")) == b"12345"
+ assert c.send(EndOfMessage()) == b""
+ assert c.our_state is MUST_CLOSE
+
+ # Check that it works if they do send Content-Length
+ c = Connection(SERVER)
+ # NB: no host header
+ assert receive_and_get(c, b"POST / HTTP/1.0\r\nContent-Length: 10\r\n\r\n1") == [
+ Request(
+ method="POST",
+ target="/",
+ headers=[("Content-Length", "10")],
+ http_version="1.0",
+ ),
+ Data(data=b"1"),
+ ]
+ assert receive_and_get(c, b"234567890") == [Data(data=b"234567890"), EndOfMessage()]
+ assert c.their_state is MUST_CLOSE
+ assert receive_and_get(c, b"") == [ConnectionClosed()]
+
+
+def test_automatic_transfer_encoding_in_response():
+ # Check that in responses, the user can specify either Transfer-Encoding:
+ # chunked or no framing at all, and in both cases we automatically select
+ # the right option depending on whether the peer speaks HTTP/1.0 or
+ # HTTP/1.1
+ for user_headers in [
+ [("Transfer-Encoding", "chunked")],
+ [],
+ # In fact, this even works if Content-Length is set,
+ # because if both are set then Transfer-Encoding wins
+ [("Transfer-Encoding", "chunked"), ("Content-Length", "100")],
+ ]:
+ p = ConnectionPair()
+ p.send(
+ CLIENT,
+ [
+ Request(method="GET", target="/", headers=[("Host", "example.com")]),
+ EndOfMessage(),
+ ],
+ )
+ # When speaking to HTTP/1.1 client, all of the above cases get
+ # normalized to Transfer-Encoding: chunked
+ p.send(
+ SERVER,
+ Response(status_code=200, headers=user_headers),
+ expect=Response(
+ status_code=200, headers=[("Transfer-Encoding", "chunked")]
+ ),
+ )
+
+ # When speaking to HTTP/1.0 client, all of the above cases get
+ # normalized to no-framing-headers
+ c = Connection(SERVER)
+ receive_and_get(c, b"GET / HTTP/1.0\r\n\r\n")
+ assert (
+ c.send(Response(status_code=200, headers=user_headers))
+ == b"HTTP/1.1 200 \r\nConnection: close\r\n\r\n"
+ )
+ assert c.send(Data(data=b"12345")) == b"12345"
+
+
+def test_automagic_connection_close_handling():
+ p = ConnectionPair()
+ # If the user explicitly sets Connection: close, then we notice and
+ # respect it
+ p.send(
+ CLIENT,
+ [
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "example.com"), ("Connection", "close")],
+ ),
+ EndOfMessage(),
+ ],
+ )
+ for conn in p.conns:
+ assert conn.states[CLIENT] is MUST_CLOSE
+ # And if the client sets it, the server automatically echoes it back
+ p.send(
+ SERVER,
+ # no header here...
+ [Response(status_code=204, headers=[]), EndOfMessage()],
+ # ...but oh look, it arrived anyway
+ expect=[
+ Response(status_code=204, headers=[("connection", "close")]),
+ EndOfMessage(),
+ ],
+ )
+ for conn in p.conns:
+ assert conn.states == {CLIENT: MUST_CLOSE, SERVER: MUST_CLOSE}
+
+
+def test_100_continue():
+ def setup():
+ p = ConnectionPair()
+ p.send(
+ CLIENT,
+ Request(
+ method="GET",
+ target="/",
+ headers=[
+ ("Host", "example.com"),
+ ("Content-Length", "100"),
+ ("Expect", "100-continue"),
+ ],
+ ),
+ )
+ for conn in p.conns:
+ assert conn.client_is_waiting_for_100_continue
+ assert not p.conn[CLIENT].they_are_waiting_for_100_continue
+ assert p.conn[SERVER].they_are_waiting_for_100_continue
+ return p
+
+ # Disabled by 100 Continue
+ p = setup()
+ p.send(SERVER, InformationalResponse(status_code=100, headers=[]))
+ for conn in p.conns:
+ assert not conn.client_is_waiting_for_100_continue
+ assert not conn.they_are_waiting_for_100_continue
+
+ # Disabled by a real response
+ p = setup()
+ p.send(
+ SERVER, Response(status_code=200, headers=[("Transfer-Encoding", "chunked")])
+ )
+ for conn in p.conns:
+ assert not conn.client_is_waiting_for_100_continue
+ assert not conn.they_are_waiting_for_100_continue
+
+ # Disabled by the client going ahead and sending stuff anyway
+ p = setup()
+ p.send(CLIENT, Data(data=b"12345"))
+ for conn in p.conns:
+ assert not conn.client_is_waiting_for_100_continue
+ assert not conn.they_are_waiting_for_100_continue
+
+
+def test_max_incomplete_event_size_countermeasure():
+ # Infinitely long headers are definitely not okay
+ c = Connection(SERVER)
+ c.receive_data(b"GET / HTTP/1.0\r\nEndless: ")
+ assert c.next_event() is NEED_DATA
+ with pytest.raises(RemoteProtocolError):
+ while True:
+ c.receive_data(b"a" * 1024)
+ c.next_event()
+
+ # Checking that the same header is accepted / rejected depending on the
+ # max_incomplete_event_size setting:
+ c = Connection(SERVER, max_incomplete_event_size=5000)
+ c.receive_data(b"GET / HTTP/1.0\r\nBig: ")
+ c.receive_data(b"a" * 4000)
+ c.receive_data(b"\r\n\r\n")
+ assert get_all_events(c) == [
+ Request(
+ method="GET", target="/", http_version="1.0", headers=[("big", "a" * 4000)]
+ ),
+ EndOfMessage(),
+ ]
+
+ c = Connection(SERVER, max_incomplete_event_size=4000)
+ c.receive_data(b"GET / HTTP/1.0\r\nBig: ")
+ c.receive_data(b"a" * 4000)
+ with pytest.raises(RemoteProtocolError):
+ c.next_event()
+
+ # Temporarily exceeding the size limit is fine, as long as it's done with
+ # complete events:
+ c = Connection(SERVER, max_incomplete_event_size=5000)
+ c.receive_data(b"GET / HTTP/1.0\r\nContent-Length: 10000")
+ c.receive_data(b"\r\n\r\n" + b"a" * 10000)
+ assert get_all_events(c) == [
+ Request(
+ method="GET",
+ target="/",
+ http_version="1.0",
+ headers=[("Content-Length", "10000")],
+ ),
+ Data(data=b"a" * 10000),
+ EndOfMessage(),
+ ]
+
+ c = Connection(SERVER, max_incomplete_event_size=100)
+ # Two pipelined requests to create a way-too-big receive buffer... but
+ # it's fine because we're not checking
+ c.receive_data(
+ b"GET /1 HTTP/1.1\r\nHost: a\r\n\r\n"
+ b"GET /2 HTTP/1.1\r\nHost: b\r\n\r\n" + b"X" * 1000
+ )
+ assert get_all_events(c) == [
+ Request(method="GET", target="/1", headers=[("host", "a")]),
+ EndOfMessage(),
+ ]
+ # Even more data comes in, still no problem
+ c.receive_data(b"X" * 1000)
+ # We can respond and reuse to get the second pipelined request
+ c.send(Response(status_code=200, headers=[]))
+ c.send(EndOfMessage())
+ c.start_next_cycle()
+ assert get_all_events(c) == [
+ Request(method="GET", target="/2", headers=[("host", "b")]),
+ EndOfMessage(),
+ ]
+ # But once we unpause and try to read the next message, and find that it's
+ # incomplete and the buffer is *still* way too large, then *that's* a
+ # problem:
+ c.send(Response(status_code=200, headers=[]))
+ c.send(EndOfMessage())
+ c.start_next_cycle()
+ with pytest.raises(RemoteProtocolError):
+ c.next_event()
+
+
+def test_reuse_simple():
+ p = ConnectionPair()
+ p.send(
+ CLIENT,
+ [Request(method="GET", target="/", headers=[("Host", "a")]), EndOfMessage()],
+ )
+ p.send(SERVER, [Response(status_code=200, headers=[]), EndOfMessage()])
+ for conn in p.conns:
+ assert conn.states == {CLIENT: DONE, SERVER: DONE}
+ conn.start_next_cycle()
+
+ p.send(
+ CLIENT,
+ [
+ Request(method="DELETE", target="/foo", headers=[("Host", "a")]),
+ EndOfMessage(),
+ ],
+ )
+ p.send(SERVER, [Response(status_code=404, headers=[]), EndOfMessage()])
+
+
+def test_pipelining():
+ # Client doesn't support pipelining, so we have to do this by hand
+ c = Connection(SERVER)
+ assert c.next_event() is NEED_DATA
+ # 3 requests all bunched up
+ c.receive_data(
+ b"GET /1 HTTP/1.1\r\nHost: a.com\r\nContent-Length: 5\r\n\r\n"
+ b"12345"
+ b"GET /2 HTTP/1.1\r\nHost: a.com\r\nContent-Length: 5\r\n\r\n"
+ b"67890"
+ b"GET /3 HTTP/1.1\r\nHost: a.com\r\n\r\n"
+ )
+ assert get_all_events(c) == [
+ Request(
+ method="GET",
+ target="/1",
+ headers=[("Host", "a.com"), ("Content-Length", "5")],
+ ),
+ Data(data=b"12345"),
+ EndOfMessage(),
+ ]
+ assert c.their_state is DONE
+ assert c.our_state is SEND_RESPONSE
+
+ assert c.next_event() is PAUSED
+
+ c.send(Response(status_code=200, headers=[]))
+ c.send(EndOfMessage())
+ assert c.their_state is DONE
+ assert c.our_state is DONE
+
+ c.start_next_cycle()
+
+ assert get_all_events(c) == [
+ Request(
+ method="GET",
+ target="/2",
+ headers=[("Host", "a.com"), ("Content-Length", "5")],
+ ),
+ Data(data=b"67890"),
+ EndOfMessage(),
+ ]
+ assert c.next_event() is PAUSED
+ c.send(Response(status_code=200, headers=[]))
+ c.send(EndOfMessage())
+ c.start_next_cycle()
+
+ assert get_all_events(c) == [
+ Request(method="GET", target="/3", headers=[("Host", "a.com")]),
+ EndOfMessage(),
+ ]
+ # Doesn't pause this time, no trailing data
+ assert c.next_event() is NEED_DATA
+ c.send(Response(status_code=200, headers=[]))
+ c.send(EndOfMessage())
+
+ # Arrival of more data triggers pause
+ assert c.next_event() is NEED_DATA
+ c.receive_data(b"SADF")
+ assert c.next_event() is PAUSED
+ assert c.trailing_data == (b"SADF", False)
+ # If EOF arrives while paused, we don't see that either:
+ c.receive_data(b"")
+ assert c.trailing_data == (b"SADF", True)
+ assert c.next_event() is PAUSED
+ c.receive_data(b"")
+ assert c.next_event() is PAUSED
+ # Can't call receive_data with non-empty buf after closing it
+ with pytest.raises(RuntimeError):
+ c.receive_data(b"FDSA")
+
+
+def test_protocol_switch():
+ for (req, deny, accept) in [
+ (
+ Request(
+ method="CONNECT",
+ target="example.com:443",
+ headers=[("Host", "foo"), ("Content-Length", "1")],
+ ),
+ Response(status_code=404, headers=[]),
+ Response(status_code=200, headers=[]),
+ ),
+ (
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "foo"), ("Content-Length", "1"), ("Upgrade", "a, b")],
+ ),
+ Response(status_code=200, headers=[]),
+ InformationalResponse(status_code=101, headers=[("Upgrade", "a")]),
+ ),
+ (
+ Request(
+ method="CONNECT",
+ target="example.com:443",
+ headers=[("Host", "foo"), ("Content-Length", "1"), ("Upgrade", "a, b")],
+ ),
+ Response(status_code=404, headers=[]),
+ # Accept CONNECT, not upgrade
+ Response(status_code=200, headers=[]),
+ ),
+ (
+ Request(
+ method="CONNECT",
+ target="example.com:443",
+ headers=[("Host", "foo"), ("Content-Length", "1"), ("Upgrade", "a, b")],
+ ),
+ Response(status_code=404, headers=[]),
+ # Accept Upgrade, not CONNECT
+ InformationalResponse(status_code=101, headers=[("Upgrade", "b")]),
+ ),
+ ]:
+
+ def setup():
+ p = ConnectionPair()
+ p.send(CLIENT, req)
+ # No switch-related state change stuff yet; the client has to
+ # finish the request before that kicks in
+ for conn in p.conns:
+ assert conn.states[CLIENT] is SEND_BODY
+ p.send(CLIENT, [Data(data=b"1"), EndOfMessage()])
+ for conn in p.conns:
+ assert conn.states[CLIENT] is MIGHT_SWITCH_PROTOCOL
+ assert p.conn[SERVER].next_event() is PAUSED
+ return p
+
+ # Test deny case
+ p = setup()
+ p.send(SERVER, deny)
+ for conn in p.conns:
+ assert conn.states == {CLIENT: DONE, SERVER: SEND_BODY}
+ p.send(SERVER, EndOfMessage())
+ # Check that re-use is still allowed after a denial
+ for conn in p.conns:
+ conn.start_next_cycle()
+
+ # Test accept case
+ p = setup()
+ p.send(SERVER, accept)
+ for conn in p.conns:
+ assert conn.states == {CLIENT: SWITCHED_PROTOCOL, SERVER: SWITCHED_PROTOCOL}
+ conn.receive_data(b"123")
+ assert conn.next_event() is PAUSED
+ conn.receive_data(b"456")
+ assert conn.next_event() is PAUSED
+ assert conn.trailing_data == (b"123456", False)
+
+ # Pausing in might-switch, then recovery
+ # (weird artificial case where the trailing data actually is valid
+ # HTTP for some reason, because this makes it easier to test the state
+ # logic)
+ p = setup()
+ sc = p.conn[SERVER]
+ sc.receive_data(b"GET / HTTP/1.0\r\n\r\n")
+ assert sc.next_event() is PAUSED
+ assert sc.trailing_data == (b"GET / HTTP/1.0\r\n\r\n", False)
+ sc.send(deny)
+ assert sc.next_event() is PAUSED
+ sc.send(EndOfMessage())
+ sc.start_next_cycle()
+ assert get_all_events(sc) == [
+ Request(method="GET", target="/", headers=[], http_version="1.0"),
+ EndOfMessage(),
+ ]
+
+ # When we're DONE, have no trailing data, and the connection gets
+ # closed, we report ConnectionClosed(). When we're in might-switch or
+ # switched, we don't.
+ p = setup()
+ sc = p.conn[SERVER]
+ sc.receive_data(b"")
+ assert sc.next_event() is PAUSED
+ assert sc.trailing_data == (b"", True)
+ p.send(SERVER, accept)
+ assert sc.next_event() is PAUSED
+
+ p = setup()
+ sc = p.conn[SERVER]
+ sc.receive_data(b"") == []
+ assert sc.next_event() is PAUSED
+ sc.send(deny)
+ assert sc.next_event() == ConnectionClosed()
+
+ # You can't send after switching protocols, or while waiting for a
+ # protocol switch
+ p = setup()
+ with pytest.raises(LocalProtocolError):
+ p.conn[CLIENT].send(
+ Request(method="GET", target="/", headers=[("Host", "a")])
+ )
+ p = setup()
+ p.send(SERVER, accept)
+ with pytest.raises(LocalProtocolError):
+ p.conn[SERVER].send(Data(data=b"123"))
+
+
+def test_close_simple():
+ # Just immediately closing a new connection without anything having
+ # happened yet.
+ for (who_shot_first, who_shot_second) in [(CLIENT, SERVER), (SERVER, CLIENT)]:
+
+ def setup():
+ p = ConnectionPair()
+ p.send(who_shot_first, ConnectionClosed())
+ for conn in p.conns:
+ assert conn.states == {
+ who_shot_first: CLOSED,
+ who_shot_second: MUST_CLOSE,
+ }
+ return p
+
+ # You can keep putting b"" into a closed connection, and you keep
+ # getting ConnectionClosed() out:
+ p = setup()
+ assert p.conn[who_shot_second].next_event() == ConnectionClosed()
+ assert p.conn[who_shot_second].next_event() == ConnectionClosed()
+ p.conn[who_shot_second].receive_data(b"")
+ assert p.conn[who_shot_second].next_event() == ConnectionClosed()
+ # Second party can close...
+ p = setup()
+ p.send(who_shot_second, ConnectionClosed())
+ for conn in p.conns:
+ assert conn.our_state is CLOSED
+ assert conn.their_state is CLOSED
+ # But trying to receive new data on a closed connection is a
+ # RuntimeError (not ProtocolError, because the problem here isn't
+ # violation of HTTP, it's violation of physics)
+ p = setup()
+ with pytest.raises(RuntimeError):
+ p.conn[who_shot_second].receive_data(b"123")
+ # And receiving new data on a MUST_CLOSE connection is a ProtocolError
+ p = setup()
+ p.conn[who_shot_first].receive_data(b"GET")
+ with pytest.raises(RemoteProtocolError):
+ p.conn[who_shot_first].next_event()
+
+
+def test_close_different_states():
+ req = [
+ Request(method="GET", target="/foo", headers=[("Host", "a")]),
+ EndOfMessage(),
+ ]
+ resp = [Response(status_code=200, headers=[]), EndOfMessage()]
+
+ # Client before request
+ p = ConnectionPair()
+ p.send(CLIENT, ConnectionClosed())
+ for conn in p.conns:
+ assert conn.states == {CLIENT: CLOSED, SERVER: MUST_CLOSE}
+
+ # Client after request
+ p = ConnectionPair()
+ p.send(CLIENT, req)
+ p.send(CLIENT, ConnectionClosed())
+ for conn in p.conns:
+ assert conn.states == {CLIENT: CLOSED, SERVER: SEND_RESPONSE}
+
+ # Server after request -> not allowed
+ p = ConnectionPair()
+ p.send(CLIENT, req)
+ with pytest.raises(LocalProtocolError):
+ p.conn[SERVER].send(ConnectionClosed())
+ p.conn[CLIENT].receive_data(b"")
+ with pytest.raises(RemoteProtocolError):
+ p.conn[CLIENT].next_event()
+
+ # Server after response
+ p = ConnectionPair()
+ p.send(CLIENT, req)
+ p.send(SERVER, resp)
+ p.send(SERVER, ConnectionClosed())
+ for conn in p.conns:
+ assert conn.states == {CLIENT: MUST_CLOSE, SERVER: CLOSED}
+
+ # Both after closing (ConnectionClosed() is idempotent)
+ p = ConnectionPair()
+ p.send(CLIENT, req)
+ p.send(SERVER, resp)
+ p.send(CLIENT, ConnectionClosed())
+ p.send(SERVER, ConnectionClosed())
+ p.send(CLIENT, ConnectionClosed())
+ p.send(SERVER, ConnectionClosed())
+
+ # In the middle of sending -> not allowed
+ p = ConnectionPair()
+ p.send(
+ CLIENT,
+ Request(
+ method="GET", target="/", headers=[("Host", "a"), ("Content-Length", "10")]
+ ),
+ )
+ with pytest.raises(LocalProtocolError):
+ p.conn[CLIENT].send(ConnectionClosed())
+ p.conn[SERVER].receive_data(b"")
+ with pytest.raises(RemoteProtocolError):
+ p.conn[SERVER].next_event()
+
+
+# Receive several requests and then client shuts down their side of the
+# connection; we can respond to each
+def test_pipelined_close():
+ c = Connection(SERVER)
+ # 2 requests then a close
+ c.receive_data(
+ b"GET /1 HTTP/1.1\r\nHost: a.com\r\nContent-Length: 5\r\n\r\n"
+ b"12345"
+ b"GET /2 HTTP/1.1\r\nHost: a.com\r\nContent-Length: 5\r\n\r\n"
+ b"67890"
+ )
+ c.receive_data(b"")
+ assert get_all_events(c) == [
+ Request(
+ method="GET",
+ target="/1",
+ headers=[("host", "a.com"), ("content-length", "5")],
+ ),
+ Data(data=b"12345"),
+ EndOfMessage(),
+ ]
+ assert c.states[CLIENT] is DONE
+ c.send(Response(status_code=200, headers=[]))
+ c.send(EndOfMessage())
+ assert c.states[SERVER] is DONE
+ c.start_next_cycle()
+ assert get_all_events(c) == [
+ Request(
+ method="GET",
+ target="/2",
+ headers=[("host", "a.com"), ("content-length", "5")],
+ ),
+ Data(data=b"67890"),
+ EndOfMessage(),
+ ConnectionClosed(),
+ ]
+ assert c.states == {CLIENT: CLOSED, SERVER: SEND_RESPONSE}
+ c.send(Response(status_code=200, headers=[]))
+ c.send(EndOfMessage())
+ assert c.states == {CLIENT: CLOSED, SERVER: MUST_CLOSE}
+ c.send(ConnectionClosed())
+ assert c.states == {CLIENT: CLOSED, SERVER: CLOSED}
+
+
+def test_sendfile():
+ class SendfilePlaceholder:
+ def __len__(self):
+ return 10
+
+ placeholder = SendfilePlaceholder()
+
+ def setup(header, http_version):
+ c = Connection(SERVER)
+ receive_and_get(
+ c, "GET / HTTP/{}\r\nHost: a\r\n\r\n".format(http_version).encode("ascii")
+ )
+ headers = []
+ if header:
+ headers.append(header)
+ c.send(Response(status_code=200, headers=headers))
+ return c, c.send_with_data_passthrough(Data(data=placeholder))
+
+ c, data = setup(("Content-Length", "10"), "1.1")
+ assert data == [placeholder]
+ # Raises an error if the connection object doesn't think we've sent
+ # exactly 10 bytes
+ c.send(EndOfMessage())
+
+ _, data = setup(("Transfer-Encoding", "chunked"), "1.1")
+ assert placeholder in data
+ data[data.index(placeholder)] = b"x" * 10
+ assert b"".join(data) == b"a\r\nxxxxxxxxxx\r\n"
+
+ c, data = setup(None, "1.0")
+ assert data == [placeholder]
+ assert c.our_state is SEND_BODY
+
+
+def test_errors():
+ # After a receive error, you can't receive
+ for role in [CLIENT, SERVER]:
+ c = Connection(our_role=role)
+ c.receive_data(b"gibberish\r\n\r\n")
+ with pytest.raises(RemoteProtocolError):
+ c.next_event()
+ # Now any attempt to receive continues to raise
+ assert c.their_state is ERROR
+ assert c.our_state is not ERROR
+ print(c._cstate.states)
+ with pytest.raises(RemoteProtocolError):
+ c.next_event()
+ # But we can still yell at the client for sending us gibberish
+ if role is SERVER:
+ assert (
+ c.send(Response(status_code=400, headers=[]))
+ == b"HTTP/1.1 400 \r\nConnection: close\r\n\r\n"
+ )
+
+ # After an error sending, you can no longer send
+ # (This is especially important for things like content-length errors,
+ # where there's complex internal state being modified)
+ def conn(role):
+ c = Connection(our_role=role)
+ if role is SERVER:
+ # Put it into the state where it *could* send a response...
+ receive_and_get(c, b"GET / HTTP/1.0\r\n\r\n")
+ assert c.our_state is SEND_RESPONSE
+ return c
+
+ for role in [CLIENT, SERVER]:
+ if role is CLIENT:
+ # This HTTP/1.0 request won't be detected as bad until after we go
+ # through the state machine and hit the writing code
+ good = Request(method="GET", target="/", headers=[("Host", "example.com")])
+ bad = Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "example.com")],
+ http_version="1.0",
+ )
+ elif role is SERVER:
+ good = Response(status_code=200, headers=[])
+ bad = Response(status_code=200, headers=[], http_version="1.0")
+ # Make sure 'good' actually is good
+ c = conn(role)
+ c.send(good)
+ assert c.our_state is not ERROR
+ # Do that again, but this time sending 'bad' first
+ c = conn(role)
+ with pytest.raises(LocalProtocolError):
+ c.send(bad)
+ assert c.our_state is ERROR
+ assert c.their_state is not ERROR
+ # Now 'good' is not so good
+ with pytest.raises(LocalProtocolError):
+ c.send(good)
+
+ # And check send_failed() too
+ c = conn(role)
+ c.send_failed()
+ assert c.our_state is ERROR
+ assert c.their_state is not ERROR
+ # This is idempotent
+ c.send_failed()
+ assert c.our_state is ERROR
+ assert c.their_state is not ERROR
+
+
+def test_idle_receive_nothing():
+ # At one point this incorrectly raised an error
+ for role in [CLIENT, SERVER]:
+ c = Connection(role)
+ assert c.next_event() is NEED_DATA
+
+
+def test_connection_drop():
+ c = Connection(SERVER)
+ c.receive_data(b"GET /")
+ assert c.next_event() is NEED_DATA
+ c.receive_data(b"")
+ with pytest.raises(RemoteProtocolError):
+ c.next_event()
+
+
+def test_408_request_timeout():
+ # Should be able to send this spontaneously as a server without seeing
+ # anything from client
+ p = ConnectionPair()
+ p.send(SERVER, Response(status_code=408, headers=[]))
+
+
+# This used to raise IndexError
+def test_empty_request():
+ c = Connection(SERVER)
+ c.receive_data(b"\r\n")
+ with pytest.raises(RemoteProtocolError):
+ c.next_event()
+
+
+# This used to raise IndexError
+def test_empty_response():
+ c = Connection(CLIENT)
+ c.send(Request(method="GET", target="/", headers=[("Host", "a")]))
+ c.receive_data(b"\r\n")
+ with pytest.raises(RemoteProtocolError):
+ c.next_event()
+
+
+@pytest.mark.parametrize(
+ "data",
+ [
+ b"\x00",
+ b"\x20",
+ b"\x16\x03\x01\x00\xa5", # Typical start of a TLS Client Hello
+ ],
+)
+def test_early_detection_of_invalid_request(data):
+ c = Connection(SERVER)
+ # Early detection should occur before even receiving a `\r\n`
+ c.receive_data(data)
+ with pytest.raises(RemoteProtocolError):
+ c.next_event()
+
+
+@pytest.mark.parametrize(
+ "data",
+ [
+ b"\x00",
+ b"\x20",
+ b"\x16\x03\x03\x00\x31", # Typical start of a TLS Server Hello
+ ],
+)
+def test_early_detection_of_invalid_response(data):
+ c = Connection(CLIENT)
+ # Early detection should occur before even receiving a `\r\n`
+ c.receive_data(data)
+ with pytest.raises(RemoteProtocolError):
+ c.next_event()
+
+
+# This used to give different headers for HEAD and GET.
+# The correct way to handle HEAD is to put whatever headers we *would* have
+# put if it were a GET -- even though we know that for HEAD, those headers
+# will be ignored.
+def test_HEAD_framing_headers():
+ def setup(method, http_version):
+ c = Connection(SERVER)
+ c.receive_data(
+ method + b" / HTTP/" + http_version + b"\r\n" + b"Host: example.com\r\n\r\n"
+ )
+ assert type(c.next_event()) is Request
+ assert type(c.next_event()) is EndOfMessage
+ return c
+
+ for method in [b"GET", b"HEAD"]:
+ # No Content-Length, HTTP/1.1 peer, should use chunked
+ c = setup(method, b"1.1")
+ assert (
+ c.send(Response(status_code=200, headers=[])) == b"HTTP/1.1 200 \r\n"
+ b"Transfer-Encoding: chunked\r\n\r\n"
+ )
+
+ # No Content-Length, HTTP/1.0 peer, frame with connection: close
+ c = setup(method, b"1.0")
+ assert (
+ c.send(Response(status_code=200, headers=[])) == b"HTTP/1.1 200 \r\n"
+ b"Connection: close\r\n\r\n"
+ )
+
+ # Content-Length + Transfer-Encoding, TE wins
+ c = setup(method, b"1.1")
+ assert (
+ c.send(
+ Response(
+ status_code=200,
+ headers=[
+ ("Content-Length", "100"),
+ ("Transfer-Encoding", "chunked"),
+ ],
+ )
+ )
+ == b"HTTP/1.1 200 \r\n"
+ b"Transfer-Encoding: chunked\r\n\r\n"
+ )
+
+
+def test_special_exceptions_for_lost_connection_in_message_body():
+ c = Connection(SERVER)
+ c.receive_data(
+ b"POST / HTTP/1.1\r\n" b"Host: example.com\r\n" b"Content-Length: 100\r\n\r\n"
+ )
+ assert type(c.next_event()) is Request
+ assert c.next_event() is NEED_DATA
+ c.receive_data(b"12345")
+ assert c.next_event() == Data(data=b"12345")
+ c.receive_data(b"")
+ with pytest.raises(RemoteProtocolError) as excinfo:
+ c.next_event()
+ assert "received 5 bytes" in str(excinfo.value)
+ assert "expected 100" in str(excinfo.value)
+
+ c = Connection(SERVER)
+ c.receive_data(
+ b"POST / HTTP/1.1\r\n"
+ b"Host: example.com\r\n"
+ b"Transfer-Encoding: chunked\r\n\r\n"
+ )
+ assert type(c.next_event()) is Request
+ assert c.next_event() is NEED_DATA
+ c.receive_data(b"8\r\n012345")
+ assert c.next_event().data == b"012345"
+ c.receive_data(b"")
+ with pytest.raises(RemoteProtocolError) as excinfo:
+ c.next_event()
+ assert "incomplete chunked read" in str(excinfo.value)
diff --git a/packages/h11/tests/test_events.py b/packages/h11/tests/test_events.py
new file mode 100644
index 000000000..e20f741c5
--- /dev/null
+++ b/packages/h11/tests/test_events.py
@@ -0,0 +1,179 @@
+from http import HTTPStatus
+
+import pytest
+
+from .. import _events
+from .._events import *
+from .._util import LocalProtocolError
+
+
+def test_event_bundle():
+ class T(_events._EventBundle):
+ _fields = ["a", "b"]
+ _defaults = {"b": 1}
+
+ def _validate(self):
+ if self.a == 0:
+ raise ValueError
+
+ # basic construction and methods
+ t = T(a=1, b=0)
+ assert repr(t) == "T(a=1, b=0)"
+ assert t == T(a=1, b=0)
+ assert not (t == T(a=2, b=0))
+ assert not (t != T(a=1, b=0))
+ assert t != T(a=2, b=0)
+ with pytest.raises(TypeError):
+ hash(t)
+
+ # check defaults
+ t = T(a=10)
+ assert t.a == 10
+ assert t.b == 1
+
+ # no positional args
+ with pytest.raises(TypeError):
+ T(1)
+
+ with pytest.raises(TypeError):
+ T(1, a=1, b=0)
+
+ # unknown field
+ with pytest.raises(TypeError):
+ T(a=1, b=0, c=10)
+
+ # missing required field
+ with pytest.raises(TypeError) as exc:
+ T(b=0)
+ # make sure we error on the right missing kwarg
+ assert "kwarg a" in str(exc.value)
+
+ # _validate is called
+ with pytest.raises(ValueError):
+ T(a=0, b=0)
+
+
+def test_events():
+ with pytest.raises(LocalProtocolError):
+ # Missing Host:
+ req = Request(
+ method="GET", target="/", headers=[("a", "b")], http_version="1.1"
+ )
+ # But this is okay (HTTP/1.0)
+ req = Request(method="GET", target="/", headers=[("a", "b")], http_version="1.0")
+ # fields are normalized
+ assert req.method == b"GET"
+ assert req.target == b"/"
+ assert req.headers == [(b"a", b"b")]
+ assert req.http_version == b"1.0"
+
+ # This is also okay -- has a Host (with weird capitalization, which is ok)
+ req = Request(
+ method="GET",
+ target="/",
+ headers=[("a", "b"), ("hOSt", "example.com")],
+ http_version="1.1",
+ )
+ # we normalize header capitalization
+ assert req.headers == [(b"a", b"b"), (b"host", b"example.com")]
+
+ # Multiple host is bad too
+ with pytest.raises(LocalProtocolError):
+ req = Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "a"), ("Host", "a")],
+ http_version="1.1",
+ )
+ # Even for HTTP/1.0
+ with pytest.raises(LocalProtocolError):
+ req = Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "a"), ("Host", "a")],
+ http_version="1.0",
+ )
+
+ # Header values are validated
+ for bad_char in "\x00\r\n\f\v":
+ with pytest.raises(LocalProtocolError):
+ req = Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "a"), ("Foo", "asd" + bad_char)],
+ http_version="1.0",
+ )
+
+ # But for compatibility we allow non-whitespace control characters, even
+ # though they're forbidden by the spec.
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "a"), ("Foo", "asd\x01\x02\x7f")],
+ http_version="1.0",
+ )
+
+ # Request target is validated
+ for bad_char in b"\x00\x20\x7f\xee":
+ target = bytearray(b"/")
+ target.append(bad_char)
+ with pytest.raises(LocalProtocolError):
+ Request(
+ method="GET", target=target, headers=[("Host", "a")], http_version="1.1"
+ )
+
+ ir = InformationalResponse(status_code=100, headers=[("Host", "a")])
+ assert ir.status_code == 100
+ assert ir.headers == [(b"host", b"a")]
+ assert ir.http_version == b"1.1"
+
+ with pytest.raises(LocalProtocolError):
+ InformationalResponse(status_code=200, headers=[("Host", "a")])
+
+ resp = Response(status_code=204, headers=[], http_version="1.0")
+ assert resp.status_code == 204
+ assert resp.headers == []
+ assert resp.http_version == b"1.0"
+
+ with pytest.raises(LocalProtocolError):
+ resp = Response(status_code=100, headers=[], http_version="1.0")
+
+ with pytest.raises(LocalProtocolError):
+ Response(status_code="100", headers=[], http_version="1.0")
+
+ with pytest.raises(LocalProtocolError):
+ InformationalResponse(status_code=b"100", headers=[], http_version="1.0")
+
+ d = Data(data=b"asdf")
+ assert d.data == b"asdf"
+
+ eom = EndOfMessage()
+ assert eom.headers == []
+
+ cc = ConnectionClosed()
+ assert repr(cc) == "ConnectionClosed()"
+
+
+def test_intenum_status_code():
+ # https://github.com/python-hyper/h11/issues/72
+
+ r = Response(status_code=HTTPStatus.OK, headers=[], http_version="1.0")
+ assert r.status_code == HTTPStatus.OK
+ assert type(r.status_code) is not type(HTTPStatus.OK)
+ assert type(r.status_code) is int
+
+
+def test_header_casing():
+ r = Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "example.org"), ("Connection", "keep-alive")],
+ http_version="1.1",
+ )
+ assert len(r.headers) == 2
+ assert r.headers[0] == (b"host", b"example.org")
+ assert r.headers == [(b"host", b"example.org"), (b"connection", b"keep-alive")]
+ assert r.headers.raw_items() == [
+ (b"Host", b"example.org"),
+ (b"Connection", b"keep-alive"),
+ ]
diff --git a/packages/h11/tests/test_headers.py b/packages/h11/tests/test_headers.py
new file mode 100644
index 000000000..ff3dc8d75
--- /dev/null
+++ b/packages/h11/tests/test_headers.py
@@ -0,0 +1,151 @@
+import pytest
+
+from .._headers import *
+
+
+def test_normalize_and_validate():
+ assert normalize_and_validate([("foo", "bar")]) == [(b"foo", b"bar")]
+ assert normalize_and_validate([(b"foo", b"bar")]) == [(b"foo", b"bar")]
+
+ # no leading/trailing whitespace in names
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([(b"foo ", "bar")])
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([(b" foo", "bar")])
+
+ # no weird characters in names
+ with pytest.raises(LocalProtocolError) as excinfo:
+ normalize_and_validate([(b"foo bar", b"baz")])
+ assert "foo bar" in str(excinfo.value)
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([(b"foo\x00bar", b"baz")])
+ # Not even 8-bit characters:
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([(b"foo\xffbar", b"baz")])
+ # And not even the control characters we allow in values:
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([(b"foo\x01bar", b"baz")])
+
+ # no return or NUL characters in values
+ with pytest.raises(LocalProtocolError) as excinfo:
+ normalize_and_validate([("foo", "bar\rbaz")])
+ assert "bar\\rbaz" in str(excinfo.value)
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([("foo", "bar\nbaz")])
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([("foo", "bar\x00baz")])
+ # no leading/trailing whitespace
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([("foo", "barbaz ")])
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([("foo", " barbaz")])
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([("foo", "barbaz\t")])
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([("foo", "\tbarbaz")])
+
+ # content-length
+ assert normalize_and_validate([("Content-Length", "1")]) == [
+ (b"content-length", b"1")
+ ]
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([("Content-Length", "asdf")])
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([("Content-Length", "1x")])
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([("Content-Length", "1"), ("Content-Length", "2")])
+ assert normalize_and_validate(
+ [("Content-Length", "0"), ("Content-Length", "0")]
+ ) == [(b"content-length", b"0")]
+ assert normalize_and_validate([("Content-Length", "0 , 0")]) == [
+ (b"content-length", b"0")
+ ]
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate(
+ [("Content-Length", "1"), ("Content-Length", "1"), ("Content-Length", "2")]
+ )
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([("Content-Length", "1 , 1,2")])
+
+ # transfer-encoding
+ assert normalize_and_validate([("Transfer-Encoding", "chunked")]) == [
+ (b"transfer-encoding", b"chunked")
+ ]
+ assert normalize_and_validate([("Transfer-Encoding", "cHuNkEd")]) == [
+ (b"transfer-encoding", b"chunked")
+ ]
+ with pytest.raises(LocalProtocolError) as excinfo:
+ normalize_and_validate([("Transfer-Encoding", "gzip")])
+ assert excinfo.value.error_status_hint == 501 # Not Implemented
+ with pytest.raises(LocalProtocolError) as excinfo:
+ normalize_and_validate(
+ [("Transfer-Encoding", "chunked"), ("Transfer-Encoding", "gzip")]
+ )
+ assert excinfo.value.error_status_hint == 501 # Not Implemented
+
+
+def test_get_set_comma_header():
+ headers = normalize_and_validate(
+ [
+ ("Connection", "close"),
+ ("whatever", "something"),
+ ("connectiON", "fOo,, , BAR"),
+ ]
+ )
+
+ assert get_comma_header(headers, b"connection") == [b"close", b"foo", b"bar"]
+
+ headers = set_comma_header(headers, b"newthing", ["a", "b"])
+
+ with pytest.raises(LocalProtocolError):
+ set_comma_header(headers, b"newthing", [" a", "b"])
+
+ assert headers == [
+ (b"connection", b"close"),
+ (b"whatever", b"something"),
+ (b"connection", b"fOo,, , BAR"),
+ (b"newthing", b"a"),
+ (b"newthing", b"b"),
+ ]
+
+ headers = set_comma_header(headers, b"whatever", ["different thing"])
+
+ assert headers == [
+ (b"connection", b"close"),
+ (b"connection", b"fOo,, , BAR"),
+ (b"newthing", b"a"),
+ (b"newthing", b"b"),
+ (b"whatever", b"different thing"),
+ ]
+
+
+def test_has_100_continue():
+ from .._events import Request
+
+ assert has_expect_100_continue(
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "example.com"), ("Expect", "100-continue")],
+ )
+ )
+ assert not has_expect_100_continue(
+ Request(method="GET", target="/", headers=[("Host", "example.com")])
+ )
+ # Case insensitive
+ assert has_expect_100_continue(
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "example.com"), ("Expect", "100-Continue")],
+ )
+ )
+ # Doesn't work in HTTP/1.0
+ assert not has_expect_100_continue(
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "example.com"), ("Expect", "100-continue")],
+ http_version="1.0",
+ )
+ )
diff --git a/packages/h11/tests/test_helpers.py b/packages/h11/tests/test_helpers.py
new file mode 100644
index 000000000..1477947af
--- /dev/null
+++ b/packages/h11/tests/test_helpers.py
@@ -0,0 +1,23 @@
+from .helpers import *
+
+
+def test_normalize_data_events():
+ assert normalize_data_events(
+ [
+ Data(data=bytearray(b"1")),
+ Data(data=b"2"),
+ Response(status_code=200, headers=[]),
+ Data(data=b"3"),
+ Data(data=b"4"),
+ EndOfMessage(),
+ Data(data=b"5"),
+ Data(data=b"6"),
+ Data(data=b"7"),
+ ]
+ ) == [
+ Data(data=b"12"),
+ Response(status_code=200, headers=[]),
+ Data(data=b"34"),
+ EndOfMessage(),
+ Data(data=b"567"),
+ ]
diff --git a/packages/h11/tests/test_io.py b/packages/h11/tests/test_io.py
new file mode 100644
index 000000000..459a627d2
--- /dev/null
+++ b/packages/h11/tests/test_io.py
@@ -0,0 +1,544 @@
+import pytest
+
+from .._events import *
+from .._headers import Headers, normalize_and_validate
+from .._readers import (
+ _obsolete_line_fold,
+ ChunkedReader,
+ ContentLengthReader,
+ Http10Reader,
+ READERS,
+)
+from .._receivebuffer import ReceiveBuffer
+from .._state import *
+from .._util import LocalProtocolError
+from .._writers import (
+ ChunkedWriter,
+ ContentLengthWriter,
+ Http10Writer,
+ write_any_response,
+ write_headers,
+ write_request,
+ WRITERS,
+)
+from .helpers import normalize_data_events
+
+SIMPLE_CASES = [
+ (
+ (CLIENT, IDLE),
+ Request(
+ method="GET",
+ target="/a",
+ headers=[("Host", "foo"), ("Connection", "close")],
+ ),
+ b"GET /a HTTP/1.1\r\nHost: foo\r\nConnection: close\r\n\r\n",
+ ),
+ (
+ (SERVER, SEND_RESPONSE),
+ Response(status_code=200, headers=[("Connection", "close")], reason=b"OK"),
+ b"HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n",
+ ),
+ (
+ (SERVER, SEND_RESPONSE),
+ Response(status_code=200, headers=[], reason=b"OK"),
+ b"HTTP/1.1 200 OK\r\n\r\n",
+ ),
+ (
+ (SERVER, SEND_RESPONSE),
+ InformationalResponse(
+ status_code=101, headers=[("Upgrade", "websocket")], reason=b"Upgrade"
+ ),
+ b"HTTP/1.1 101 Upgrade\r\nUpgrade: websocket\r\n\r\n",
+ ),
+ (
+ (SERVER, SEND_RESPONSE),
+ InformationalResponse(status_code=101, headers=[], reason=b"Upgrade"),
+ b"HTTP/1.1 101 Upgrade\r\n\r\n",
+ ),
+]
+
+
+def dowrite(writer, obj):
+ got_list = []
+ writer(obj, got_list.append)
+ return b"".join(got_list)
+
+
+def tw(writer, obj, expected):
+ got = dowrite(writer, obj)
+ assert got == expected
+
+
+def makebuf(data):
+ buf = ReceiveBuffer()
+ buf += data
+ return buf
+
+
+def tr(reader, data, expected):
+ def check(got):
+ assert got == expected
+ # Headers should always be returned as bytes, not e.g. bytearray
+ # https://github.com/python-hyper/wsproto/pull/54#issuecomment-377709478
+ for name, value in getattr(got, "headers", []):
+ print(name, value)
+ assert type(name) is bytes
+ assert type(value) is bytes
+
+ # Simple: consume whole thing
+ buf = makebuf(data)
+ check(reader(buf))
+ assert not buf
+
+ # Incrementally growing buffer
+ buf = ReceiveBuffer()
+ for i in range(len(data)):
+ assert reader(buf) is None
+ buf += data[i : i + 1]
+ check(reader(buf))
+
+ # Trailing data
+ buf = makebuf(data)
+ buf += b"trailing"
+ check(reader(buf))
+ assert bytes(buf) == b"trailing"
+
+
+def test_writers_simple():
+ for ((role, state), event, binary) in SIMPLE_CASES:
+ tw(WRITERS[role, state], event, binary)
+
+
+def test_readers_simple():
+ for ((role, state), event, binary) in SIMPLE_CASES:
+ tr(READERS[role, state], binary, event)
+
+
+def test_writers_unusual():
+ # Simple test of the write_headers utility routine
+ tw(
+ write_headers,
+ normalize_and_validate([("foo", "bar"), ("baz", "quux")]),
+ b"foo: bar\r\nbaz: quux\r\n\r\n",
+ )
+ tw(write_headers, Headers([]), b"\r\n")
+
+ # We understand HTTP/1.0, but we don't speak it
+ with pytest.raises(LocalProtocolError):
+ tw(
+ write_request,
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "foo"), ("Connection", "close")],
+ http_version="1.0",
+ ),
+ None,
+ )
+ with pytest.raises(LocalProtocolError):
+ tw(
+ write_any_response,
+ Response(
+ status_code=200, headers=[("Connection", "close")], http_version="1.0"
+ ),
+ None,
+ )
+
+
+def test_readers_unusual():
+ # Reading HTTP/1.0
+ tr(
+ READERS[CLIENT, IDLE],
+ b"HEAD /foo HTTP/1.0\r\nSome: header\r\n\r\n",
+ Request(
+ method="HEAD",
+ target="/foo",
+ headers=[("Some", "header")],
+ http_version="1.0",
+ ),
+ )
+
+ # check no-headers, since it's only legal with HTTP/1.0
+ tr(
+ READERS[CLIENT, IDLE],
+ b"HEAD /foo HTTP/1.0\r\n\r\n",
+ Request(method="HEAD", target="/foo", headers=[], http_version="1.0"),
+ )
+
+ tr(
+ READERS[SERVER, SEND_RESPONSE],
+ b"HTTP/1.0 200 OK\r\nSome: header\r\n\r\n",
+ Response(
+ status_code=200,
+ headers=[("Some", "header")],
+ http_version="1.0",
+ reason=b"OK",
+ ),
+ )
+
+ # single-character header values (actually disallowed by the ABNF in RFC
+ # 7230 -- this is a bug in the standard that we originally copied...)
+ tr(
+ READERS[SERVER, SEND_RESPONSE],
+ b"HTTP/1.0 200 OK\r\n" b"Foo: a a a a a \r\n\r\n",
+ Response(
+ status_code=200,
+ headers=[("Foo", "a a a a a")],
+ http_version="1.0",
+ reason=b"OK",
+ ),
+ )
+
+ # Empty headers -- also legal
+ tr(
+ READERS[SERVER, SEND_RESPONSE],
+ b"HTTP/1.0 200 OK\r\n" b"Foo:\r\n\r\n",
+ Response(
+ status_code=200, headers=[("Foo", "")], http_version="1.0", reason=b"OK"
+ ),
+ )
+
+ tr(
+ READERS[SERVER, SEND_RESPONSE],
+ b"HTTP/1.0 200 OK\r\n" b"Foo: \t \t \r\n\r\n",
+ Response(
+ status_code=200, headers=[("Foo", "")], http_version="1.0", reason=b"OK"
+ ),
+ )
+
+ # Tolerate broken servers that leave off the reason phrase
+ tr(
+ READERS[SERVER, SEND_RESPONSE],
+ b"HTTP/1.0 200\r\n" b"Foo: bar\r\n\r\n",
+ Response(
+ status_code=200, headers=[("Foo", "bar")], http_version="1.0", reason=b""
+ ),
+ )
+
+ # Tolerate both header line endings (\r\n and \n)
+ # \n\r\n between headers and body
+ tr(
+ READERS[SERVER, SEND_RESPONSE],
+ b"HTTP/1.1 200 OK\r\nSomeHeader: val\n\r\n",
+ Response(
+ status_code=200,
+ headers=[("SomeHeader", "val")],
+ http_version="1.1",
+ reason="OK",
+ ),
+ )
+
+ # delimited only with \n
+ tr(
+ READERS[SERVER, SEND_RESPONSE],
+ b"HTTP/1.1 200 OK\nSomeHeader1: val1\nSomeHeader2: val2\n\n",
+ Response(
+ status_code=200,
+ headers=[("SomeHeader1", "val1"), ("SomeHeader2", "val2")],
+ http_version="1.1",
+ reason="OK",
+ ),
+ )
+
+ # mixed \r\n and \n
+ tr(
+ READERS[SERVER, SEND_RESPONSE],
+ b"HTTP/1.1 200 OK\r\nSomeHeader1: val1\nSomeHeader2: val2\n\r\n",
+ Response(
+ status_code=200,
+ headers=[("SomeHeader1", "val1"), ("SomeHeader2", "val2")],
+ http_version="1.1",
+ reason="OK",
+ ),
+ )
+
+ # obsolete line folding
+ tr(
+ READERS[CLIENT, IDLE],
+ b"HEAD /foo HTTP/1.1\r\n"
+ b"Host: example.com\r\n"
+ b"Some: multi-line\r\n"
+ b" header\r\n"
+ b"\tnonsense\r\n"
+ b" \t \t\tI guess\r\n"
+ b"Connection: close\r\n"
+ b"More-nonsense: in the\r\n"
+ b" last header \r\n\r\n",
+ Request(
+ method="HEAD",
+ target="/foo",
+ headers=[
+ ("Host", "example.com"),
+ ("Some", "multi-line header nonsense I guess"),
+ ("Connection", "close"),
+ ("More-nonsense", "in the last header"),
+ ],
+ ),
+ )
+
+ with pytest.raises(LocalProtocolError):
+ tr(
+ READERS[CLIENT, IDLE],
+ b"HEAD /foo HTTP/1.1\r\n" b" folded: line\r\n\r\n",
+ None,
+ )
+
+ with pytest.raises(LocalProtocolError):
+ tr(
+ READERS[CLIENT, IDLE],
+ b"HEAD /foo HTTP/1.1\r\n" b"foo : line\r\n\r\n",
+ None,
+ )
+ with pytest.raises(LocalProtocolError):
+ tr(
+ READERS[CLIENT, IDLE],
+ b"HEAD /foo HTTP/1.1\r\n" b"foo\t: line\r\n\r\n",
+ None,
+ )
+ with pytest.raises(LocalProtocolError):
+ tr(
+ READERS[CLIENT, IDLE],
+ b"HEAD /foo HTTP/1.1\r\n" b"foo\t: line\r\n\r\n",
+ None,
+ )
+ with pytest.raises(LocalProtocolError):
+ tr(READERS[CLIENT, IDLE], b"HEAD /foo HTTP/1.1\r\n" b": line\r\n\r\n", None)
+
+
+def test__obsolete_line_fold_bytes():
+ # _obsolete_line_fold has a defensive cast to bytearray, which is
+ # necessary to protect against O(n^2) behavior in case anyone ever passes
+ # in regular bytestrings... but right now we never pass in regular
+ # bytestrings. so this test just exists to get some coverage on that
+ # defensive cast.
+ assert list(_obsolete_line_fold([b"aaa", b"bbb", b" ccc", b"ddd"])) == [
+ b"aaa",
+ bytearray(b"bbb ccc"),
+ b"ddd",
+ ]
+
+
+def _run_reader_iter(reader, buf, do_eof):
+ while True:
+ event = reader(buf)
+ if event is None:
+ break
+ yield event
+ # body readers have undefined behavior after returning EndOfMessage,
+ # because this changes the state so they don't get called again
+ if type(event) is EndOfMessage:
+ break
+ if do_eof:
+ assert not buf
+ yield reader.read_eof()
+
+
+def _run_reader(*args):
+ events = list(_run_reader_iter(*args))
+ return normalize_data_events(events)
+
+
+def t_body_reader(thunk, data, expected, do_eof=False):
+ # Simple: consume whole thing
+ print("Test 1")
+ buf = makebuf(data)
+ assert _run_reader(thunk(), buf, do_eof) == expected
+
+ # Incrementally growing buffer
+ print("Test 2")
+ reader = thunk()
+ buf = ReceiveBuffer()
+ events = []
+ for i in range(len(data)):
+ events += _run_reader(reader, buf, False)
+ buf += data[i : i + 1]
+ events += _run_reader(reader, buf, do_eof)
+ assert normalize_data_events(events) == expected
+
+ is_complete = any(type(event) is EndOfMessage for event in expected)
+ if is_complete and not do_eof:
+ buf = makebuf(data + b"trailing")
+ assert _run_reader(thunk(), buf, False) == expected
+
+
+def test_ContentLengthReader():
+ t_body_reader(lambda: ContentLengthReader(0), b"", [EndOfMessage()])
+
+ t_body_reader(
+ lambda: ContentLengthReader(10),
+ b"0123456789",
+ [Data(data=b"0123456789"), EndOfMessage()],
+ )
+
+
+def test_Http10Reader():
+ t_body_reader(Http10Reader, b"", [EndOfMessage()], do_eof=True)
+ t_body_reader(Http10Reader, b"asdf", [Data(data=b"asdf")], do_eof=False)
+ t_body_reader(
+ Http10Reader, b"asdf", [Data(data=b"asdf"), EndOfMessage()], do_eof=True
+ )
+
+
+def test_ChunkedReader():
+ t_body_reader(ChunkedReader, b"0\r\n\r\n", [EndOfMessage()])
+
+ t_body_reader(
+ ChunkedReader,
+ b"0\r\nSome: header\r\n\r\n",
+ [EndOfMessage(headers=[("Some", "header")])],
+ )
+
+ t_body_reader(
+ ChunkedReader,
+ b"5\r\n01234\r\n"
+ + b"10\r\n0123456789abcdef\r\n"
+ + b"0\r\n"
+ + b"Some: header\r\n\r\n",
+ [
+ Data(data=b"012340123456789abcdef"),
+ EndOfMessage(headers=[("Some", "header")]),
+ ],
+ )
+
+ t_body_reader(
+ ChunkedReader,
+ b"5\r\n01234\r\n" + b"10\r\n0123456789abcdef\r\n" + b"0\r\n\r\n",
+ [Data(data=b"012340123456789abcdef"), EndOfMessage()],
+ )
+
+ # handles upper and lowercase hex
+ t_body_reader(
+ ChunkedReader,
+ b"aA\r\n" + b"x" * 0xAA + b"\r\n" + b"0\r\n\r\n",
+ [Data(data=b"x" * 0xAA), EndOfMessage()],
+ )
+
+ # refuses arbitrarily long chunk integers
+ with pytest.raises(LocalProtocolError):
+ # Technically this is legal HTTP/1.1, but we refuse to process chunk
+ # sizes that don't fit into 20 characters of hex
+ t_body_reader(ChunkedReader, b"9" * 100 + b"\r\nxxx", [Data(data=b"xxx")])
+
+ # refuses garbage in the chunk count
+ with pytest.raises(LocalProtocolError):
+ t_body_reader(ChunkedReader, b"10\x00\r\nxxx", None)
+
+ # handles (and discards) "chunk extensions" omg wtf
+ t_body_reader(
+ ChunkedReader,
+ b"5; hello=there\r\n"
+ + b"xxxxx"
+ + b"\r\n"
+ + b'0; random="junk"; some=more; canbe=lonnnnngg\r\n\r\n',
+ [Data(data=b"xxxxx"), EndOfMessage()],
+ )
+
+
+def test_ContentLengthWriter():
+ w = ContentLengthWriter(5)
+ assert dowrite(w, Data(data=b"123")) == b"123"
+ assert dowrite(w, Data(data=b"45")) == b"45"
+ assert dowrite(w, EndOfMessage()) == b""
+
+ w = ContentLengthWriter(5)
+ with pytest.raises(LocalProtocolError):
+ dowrite(w, Data(data=b"123456"))
+
+ w = ContentLengthWriter(5)
+ dowrite(w, Data(data=b"123"))
+ with pytest.raises(LocalProtocolError):
+ dowrite(w, Data(data=b"456"))
+
+ w = ContentLengthWriter(5)
+ dowrite(w, Data(data=b"123"))
+ with pytest.raises(LocalProtocolError):
+ dowrite(w, EndOfMessage())
+
+ w = ContentLengthWriter(5)
+ dowrite(w, Data(data=b"123")) == b"123"
+ dowrite(w, Data(data=b"45")) == b"45"
+ with pytest.raises(LocalProtocolError):
+ dowrite(w, EndOfMessage(headers=[("Etag", "asdf")]))
+
+
+def test_ChunkedWriter():
+ w = ChunkedWriter()
+ assert dowrite(w, Data(data=b"aaa")) == b"3\r\naaa\r\n"
+ assert dowrite(w, Data(data=b"a" * 20)) == b"14\r\n" + b"a" * 20 + b"\r\n"
+
+ assert dowrite(w, Data(data=b"")) == b""
+
+ assert dowrite(w, EndOfMessage()) == b"0\r\n\r\n"
+
+ assert (
+ dowrite(w, EndOfMessage(headers=[("Etag", "asdf"), ("a", "b")]))
+ == b"0\r\nEtag: asdf\r\na: b\r\n\r\n"
+ )
+
+
+def test_Http10Writer():
+ w = Http10Writer()
+ assert dowrite(w, Data(data=b"1234")) == b"1234"
+ assert dowrite(w, EndOfMessage()) == b""
+
+ with pytest.raises(LocalProtocolError):
+ dowrite(w, EndOfMessage(headers=[("Etag", "asdf")]))
+
+
+def test_reject_garbage_after_response_line():
+ with pytest.raises(LocalProtocolError):
+ tr(READERS[SERVER, SEND_RESPONSE], b"HTTP/1.0 200 OK\x00xxxx\r\n\r\n", None)
+
+
+def test_reject_garbage_after_request_line():
+ with pytest.raises(LocalProtocolError):
+ tr(
+ READERS[CLIENT, IDLE],
+ b"HEAD /foo HTTP/1.1 xxxxxx\r\n" b"Host: a\r\n\r\n",
+ None,
+ )
+
+
+def test_reject_garbage_in_header_line():
+ with pytest.raises(LocalProtocolError):
+ tr(
+ READERS[CLIENT, IDLE],
+ b"HEAD /foo HTTP/1.1\r\n" b"Host: foo\x00bar\r\n\r\n",
+ None,
+ )
+
+
+def test_reject_non_vchar_in_path():
+ for bad_char in b"\x00\x20\x7f\xee":
+ message = bytearray(b"HEAD /")
+ message.append(bad_char)
+ message.extend(b" HTTP/1.1\r\nHost: foobar\r\n\r\n")
+ with pytest.raises(LocalProtocolError):
+ tr(READERS[CLIENT, IDLE], message, None)
+
+
+# https://github.com/python-hyper/h11/issues/57
+def test_allow_some_garbage_in_cookies():
+ tr(
+ READERS[CLIENT, IDLE],
+ b"HEAD /foo HTTP/1.1\r\n"
+ b"Host: foo\r\n"
+ b"Set-Cookie: ___utmvafIumyLc=kUd\x01UpAt; path=/; Max-Age=900\r\n"
+ b"\r\n",
+ Request(
+ method="HEAD",
+ target="/foo",
+ headers=[
+ ("Host", "foo"),
+ ("Set-Cookie", "___utmvafIumyLc=kUd\x01UpAt; path=/; Max-Age=900"),
+ ],
+ ),
+ )
+
+
+def test_host_comes_first():
+ tw(
+ write_headers,
+ normalize_and_validate([("foo", "bar"), ("Host", "example.com")]),
+ b"Host: example.com\r\nfoo: bar\r\n\r\n",
+ )
diff --git a/packages/h11/tests/test_receivebuffer.py b/packages/h11/tests/test_receivebuffer.py
new file mode 100644
index 000000000..3a61f9dc5
--- /dev/null
+++ b/packages/h11/tests/test_receivebuffer.py
@@ -0,0 +1,134 @@
+import re
+
+import pytest
+
+from .._receivebuffer import ReceiveBuffer
+
+
+def test_receivebuffer():
+ b = ReceiveBuffer()
+ assert not b
+ assert len(b) == 0
+ assert bytes(b) == b""
+
+ b += b"123"
+ assert b
+ assert len(b) == 3
+ assert bytes(b) == b"123"
+
+ assert bytes(b) == b"123"
+
+ assert b.maybe_extract_at_most(2) == b"12"
+ assert b
+ assert len(b) == 1
+ assert bytes(b) == b"3"
+
+ assert bytes(b) == b"3"
+
+ assert b.maybe_extract_at_most(10) == b"3"
+ assert bytes(b) == b""
+
+ assert b.maybe_extract_at_most(10) is None
+ assert not b
+
+ ################################################################
+ # maybe_extract_until_next
+ ################################################################
+
+ b += b"123\n456\r\n789\r\n"
+
+ assert b.maybe_extract_next_line() == b"123\n456\r\n"
+ assert bytes(b) == b"789\r\n"
+
+ assert b.maybe_extract_next_line() == b"789\r\n"
+ assert bytes(b) == b""
+
+ b += b"12\r"
+ assert b.maybe_extract_next_line() is None
+ assert bytes(b) == b"12\r"
+
+ b += b"345\n\r"
+ assert b.maybe_extract_next_line() is None
+ assert bytes(b) == b"12\r345\n\r"
+
+ # here we stopped at the middle of b"\r\n" delimiter
+
+ b += b"\n6789aaa123\r\n"
+ assert b.maybe_extract_next_line() == b"12\r345\n\r\n"
+ assert b.maybe_extract_next_line() == b"6789aaa123\r\n"
+ assert b.maybe_extract_next_line() is None
+ assert bytes(b) == b""
+
+ ################################################################
+ # maybe_extract_lines
+ ################################################################
+
+ b += b"123\r\na: b\r\nfoo:bar\r\n\r\ntrailing"
+ lines = b.maybe_extract_lines()
+ assert lines == [b"123", b"a: b", b"foo:bar"]
+ assert bytes(b) == b"trailing"
+
+ assert b.maybe_extract_lines() is None
+
+ b += b"\r\n\r"
+ assert b.maybe_extract_lines() is None
+
+ assert b.maybe_extract_at_most(100) == b"trailing\r\n\r"
+ assert not b
+
+ # Empty body case (as happens at the end of chunked encoding if there are
+ # no trailing headers, e.g.)
+ b += b"\r\ntrailing"
+ assert b.maybe_extract_lines() == []
+ assert bytes(b) == b"trailing"
+
+
+@pytest.mark.parametrize(
+ "data",
+ [
+ pytest.param(
+ (
+ b"HTTP/1.1 200 OK\r\n",
+ b"Content-type: text/plain\r\n",
+ b"Connection: close\r\n",
+ b"\r\n",
+ b"Some body",
+ ),
+ id="with_crlf_delimiter",
+ ),
+ pytest.param(
+ (
+ b"HTTP/1.1 200 OK\n",
+ b"Content-type: text/plain\n",
+ b"Connection: close\n",
+ b"\n",
+ b"Some body",
+ ),
+ id="with_lf_only_delimiter",
+ ),
+ pytest.param(
+ (
+ b"HTTP/1.1 200 OK\n",
+ b"Content-type: text/plain\r\n",
+ b"Connection: close\n",
+ b"\n",
+ b"Some body",
+ ),
+ id="with_mixed_crlf_and_lf",
+ ),
+ ],
+)
+def test_receivebuffer_for_invalid_delimiter(data):
+ b = ReceiveBuffer()
+
+ for line in data:
+ b += line
+
+ lines = b.maybe_extract_lines()
+
+ assert lines == [
+ b"HTTP/1.1 200 OK",
+ b"Content-type: text/plain",
+ b"Connection: close",
+ ]
+ assert bytes(b) == b"Some body"
diff --git a/packages/h11/tests/test_state.py b/packages/h11/tests/test_state.py
new file mode 100644
index 000000000..efe83f0ad
--- /dev/null
+++ b/packages/h11/tests/test_state.py
@@ -0,0 +1,250 @@
+import pytest
+
+from .._events import *
+from .._state import *
+from .._state import _SWITCH_CONNECT, _SWITCH_UPGRADE, ConnectionState
+from .._util import LocalProtocolError
+
+
+def test_ConnectionState():
+ cs = ConnectionState()
+
+ # Basic event-triggered transitions
+
+ assert cs.states == {CLIENT: IDLE, SERVER: IDLE}
+
+ cs.process_event(CLIENT, Request)
+ # The SERVER-Request special case:
+ assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
+
+ # Illegal transitions raise an error and nothing happens
+ with pytest.raises(LocalProtocolError):
+ cs.process_event(CLIENT, Request)
+ assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
+
+ cs.process_event(SERVER, InformationalResponse)
+ assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
+
+ cs.process_event(SERVER, Response)
+ assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_BODY}
+
+ cs.process_event(CLIENT, EndOfMessage)
+ cs.process_event(SERVER, EndOfMessage)
+ assert cs.states == {CLIENT: DONE, SERVER: DONE}
+
+ # State-triggered transition
+
+ cs.process_event(SERVER, ConnectionClosed)
+ assert cs.states == {CLIENT: MUST_CLOSE, SERVER: CLOSED}
+
+
+def test_ConnectionState_keep_alive():
+ # keep_alive = False
+ cs = ConnectionState()
+ cs.process_event(CLIENT, Request)
+ cs.process_keep_alive_disabled()
+ cs.process_event(CLIENT, EndOfMessage)
+ assert cs.states == {CLIENT: MUST_CLOSE, SERVER: SEND_RESPONSE}
+
+ cs.process_event(SERVER, Response)
+ cs.process_event(SERVER, EndOfMessage)
+ assert cs.states == {CLIENT: MUST_CLOSE, SERVER: MUST_CLOSE}
+
+
+def test_ConnectionState_keep_alive_in_DONE():
+ # Check that if keep_alive is disabled when the CLIENT is already in DONE,
+ # then this is sufficient to immediately trigger the DONE -> MUST_CLOSE
+ # transition
+ cs = ConnectionState()
+ cs.process_event(CLIENT, Request)
+ cs.process_event(CLIENT, EndOfMessage)
+ assert cs.states[CLIENT] is DONE
+ cs.process_keep_alive_disabled()
+ assert cs.states[CLIENT] is MUST_CLOSE
+
+
+def test_ConnectionState_switch_denied():
+ for switch_type in (_SWITCH_CONNECT, _SWITCH_UPGRADE):
+ for deny_early in (True, False):
+ cs = ConnectionState()
+ cs.process_client_switch_proposal(switch_type)
+ cs.process_event(CLIENT, Request)
+ cs.process_event(CLIENT, Data)
+ assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
+
+ assert switch_type in cs.pending_switch_proposals
+
+ if deny_early:
+ # before client reaches DONE
+ cs.process_event(SERVER, Response)
+ assert not cs.pending_switch_proposals
+
+ cs.process_event(CLIENT, EndOfMessage)
+
+ if deny_early:
+ assert cs.states == {CLIENT: DONE, SERVER: SEND_BODY}
+ else:
+ assert cs.states == {
+ CLIENT: MIGHT_SWITCH_PROTOCOL,
+ SERVER: SEND_RESPONSE,
+ }
+
+ cs.process_event(SERVER, InformationalResponse)
+ assert cs.states == {
+ CLIENT: MIGHT_SWITCH_PROTOCOL,
+ SERVER: SEND_RESPONSE,
+ }
+
+ cs.process_event(SERVER, Response)
+ assert cs.states == {CLIENT: DONE, SERVER: SEND_BODY}
+ assert not cs.pending_switch_proposals
+
+
+_response_type_for_switch = {
+ _SWITCH_UPGRADE: InformationalResponse,
+ _SWITCH_CONNECT: Response,
+ None: Response,
+}
+
+
+def test_ConnectionState_protocol_switch_accepted():
+ for switch_event in [_SWITCH_UPGRADE, _SWITCH_CONNECT]:
+ cs = ConnectionState()
+ cs.process_client_switch_proposal(switch_event)
+ cs.process_event(CLIENT, Request)
+ cs.process_event(CLIENT, Data)
+ assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
+
+ cs.process_event(CLIENT, EndOfMessage)
+ assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE}
+
+ cs.process_event(SERVER, InformationalResponse)
+ assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE}
+
+ cs.process_event(SERVER, _response_type_for_switch[switch_event], switch_event)
+ assert cs.states == {CLIENT: SWITCHED_PROTOCOL, SERVER: SWITCHED_PROTOCOL}
+
+
+def test_ConnectionState_double_protocol_switch():
+ # CONNECT + Upgrade is legal! Very silly, but legal. So we support
+ # it. Because sometimes doing the silly thing is easier than not.
+ for server_switch in [None, _SWITCH_UPGRADE, _SWITCH_CONNECT]:
+ cs = ConnectionState()
+ cs.process_client_switch_proposal(_SWITCH_UPGRADE)
+ cs.process_client_switch_proposal(_SWITCH_CONNECT)
+ cs.process_event(CLIENT, Request)
+ cs.process_event(CLIENT, EndOfMessage)
+ assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE}
+ cs.process_event(
+ SERVER, _response_type_for_switch[server_switch], server_switch
+ )
+ if server_switch is None:
+ assert cs.states == {CLIENT: DONE, SERVER: SEND_BODY}
+ else:
+ assert cs.states == {CLIENT: SWITCHED_PROTOCOL, SERVER: SWITCHED_PROTOCOL}
+
+
+def test_ConnectionState_inconsistent_protocol_switch():
+ for client_switches, server_switch in [
+ ([], _SWITCH_CONNECT),
+ ([], _SWITCH_UPGRADE),
+ ([_SWITCH_UPGRADE], _SWITCH_CONNECT),
+ ([_SWITCH_CONNECT], _SWITCH_UPGRADE),
+ ]:
+ cs = ConnectionState()
+ for client_switch in client_switches:
+ cs.process_client_switch_proposal(client_switch)
+ cs.process_event(CLIENT, Request)
+ with pytest.raises(LocalProtocolError):
+ cs.process_event(SERVER, Response, server_switch)
+
+
+def test_ConnectionState_keepalive_protocol_switch_interaction():
+ # keep_alive=False + pending_switch_proposals
+ cs = ConnectionState()
+ cs.process_client_switch_proposal(_SWITCH_UPGRADE)
+ cs.process_event(CLIENT, Request)
+ cs.process_keep_alive_disabled()
+ cs.process_event(CLIENT, Data)
+ assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
+
+ # the protocol switch "wins"
+ cs.process_event(CLIENT, EndOfMessage)
+ assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE}
+
+ # but when the server denies the request, keep_alive comes back into play
+ cs.process_event(SERVER, Response)
+ assert cs.states == {CLIENT: MUST_CLOSE, SERVER: SEND_BODY}
+
+
+def test_ConnectionState_reuse():
+ cs = ConnectionState()
+
+ with pytest.raises(LocalProtocolError):
+ cs.start_next_cycle()
+
+ cs.process_event(CLIENT, Request)
+ cs.process_event(CLIENT, EndOfMessage)
+
+ with pytest.raises(LocalProtocolError):
+ cs.start_next_cycle()
+
+ cs.process_event(SERVER, Response)
+ cs.process_event(SERVER, EndOfMessage)
+
+ cs.start_next_cycle()
+ assert cs.states == {CLIENT: IDLE, SERVER: IDLE}
+
+ # No keepalive
+
+ cs.process_event(CLIENT, Request)
+ cs.process_keep_alive_disabled()
+ cs.process_event(CLIENT, EndOfMessage)
+ cs.process_event(SERVER, Response)
+ cs.process_event(SERVER, EndOfMessage)
+
+ with pytest.raises(LocalProtocolError):
+ cs.start_next_cycle()
+
+ # One side closed
+
+ cs = ConnectionState()
+ cs.process_event(CLIENT, Request)
+ cs.process_event(CLIENT, EndOfMessage)
+ cs.process_event(CLIENT, ConnectionClosed)
+ cs.process_event(SERVER, Response)
+ cs.process_event(SERVER, EndOfMessage)
+
+ with pytest.raises(LocalProtocolError):
+ cs.start_next_cycle()
+
+    # Successful protocol switch
+
+ cs = ConnectionState()
+ cs.process_client_switch_proposal(_SWITCH_UPGRADE)
+ cs.process_event(CLIENT, Request)
+ cs.process_event(CLIENT, EndOfMessage)
+ cs.process_event(SERVER, InformationalResponse, _SWITCH_UPGRADE)
+
+ with pytest.raises(LocalProtocolError):
+ cs.start_next_cycle()
+
+ # Failed protocol switch
+
+ cs = ConnectionState()
+ cs.process_client_switch_proposal(_SWITCH_UPGRADE)
+ cs.process_event(CLIENT, Request)
+ cs.process_event(CLIENT, EndOfMessage)
+ cs.process_event(SERVER, Response)
+ cs.process_event(SERVER, EndOfMessage)
+
+ cs.start_next_cycle()
+ assert cs.states == {CLIENT: IDLE, SERVER: IDLE}
+
+
+def test_server_request_is_illegal():
+ # There used to be a bug in how we handled the Request special case that
+ # made this allowed...
+ cs = ConnectionState()
+ with pytest.raises(LocalProtocolError):
+ cs.process_event(SERVER, Request)
diff --git a/packages/h11/tests/test_util.py b/packages/h11/tests/test_util.py
new file mode 100644
index 000000000..d851bdcb6
--- /dev/null
+++ b/packages/h11/tests/test_util.py
@@ -0,0 +1,99 @@
+import re
+import sys
+import traceback
+
+import pytest
+
+from .._util import *
+
+
+def test_ProtocolError():
+ with pytest.raises(TypeError):
+ ProtocolError("abstract base class")
+
+
+def test_LocalProtocolError():
+ try:
+ raise LocalProtocolError("foo")
+ except LocalProtocolError as e:
+ assert str(e) == "foo"
+ assert e.error_status_hint == 400
+
+ try:
+ raise LocalProtocolError("foo", error_status_hint=418)
+ except LocalProtocolError as e:
+ assert str(e) == "foo"
+ assert e.error_status_hint == 418
+
+ def thunk():
+ raise LocalProtocolError("a", error_status_hint=420)
+
+ try:
+ try:
+ thunk()
+ except LocalProtocolError as exc1:
+ orig_traceback = "".join(traceback.format_tb(sys.exc_info()[2]))
+ exc1._reraise_as_remote_protocol_error()
+ except RemoteProtocolError as exc2:
+ assert type(exc2) is RemoteProtocolError
+ assert exc2.args == ("a",)
+ assert exc2.error_status_hint == 420
+ new_traceback = "".join(traceback.format_tb(sys.exc_info()[2]))
+ assert new_traceback.endswith(orig_traceback)
+
+
+def test_validate():
+    my_re = re.compile(br"(?P<group1>[0-9]+)\.(?P<group2>[0-9]+)")
+ with pytest.raises(LocalProtocolError):
+ validate(my_re, b"0.")
+
+ groups = validate(my_re, b"0.1")
+ assert groups == {"group1": b"0", "group2": b"1"}
+
+ # successful partial matches are an error - must match whole string
+ with pytest.raises(LocalProtocolError):
+ validate(my_re, b"0.1xx")
+ with pytest.raises(LocalProtocolError):
+ validate(my_re, b"0.1\n")
+
+
+def test_validate_formatting():
+ my_re = re.compile(br"foo")
+
+ with pytest.raises(LocalProtocolError) as excinfo:
+ validate(my_re, b"", "oops")
+ assert "oops" in str(excinfo.value)
+
+ with pytest.raises(LocalProtocolError) as excinfo:
+ validate(my_re, b"", "oops {}")
+ assert "oops {}" in str(excinfo.value)
+
+ with pytest.raises(LocalProtocolError) as excinfo:
+ validate(my_re, b"", "oops {} xx", 10)
+ assert "oops 10 xx" in str(excinfo.value)
+
+
+def test_make_sentinel():
+ S = make_sentinel("S")
+ assert repr(S) == "S"
+ assert S == S
+ assert type(S).__name__ == "S"
+ assert S in {S}
+ assert type(S) is S
+ S2 = make_sentinel("S2")
+ assert repr(S2) == "S2"
+ assert S != S2
+ assert S not in {S2}
+ assert type(S) is not type(S2)
+
+
+def test_bytesify():
+ assert bytesify(b"123") == b"123"
+ assert bytesify(bytearray(b"123")) == b"123"
+ assert bytesify("123") == b"123"
+
+ with pytest.raises(UnicodeEncodeError):
+ bytesify("\u1234")
+
+ with pytest.raises(TypeError):
+ bytesify(10)
diff --git a/packages/h2/__init__.py b/packages/h2/__init__.py
new file mode 100644
index 000000000..6d9e28e51
--- /dev/null
+++ b/packages/h2/__init__.py
@@ -0,0 +1,8 @@
+# -*- coding: utf-8 -*-
+"""
+hyper-h2
+~~~~~~~~
+
+An HTTP/2 implementation.
+"""
+__version__ = '4.0.0'
diff --git a/packages/h2/config.py b/packages/h2/config.py
new file mode 100644
index 000000000..730b61124
--- /dev/null
+++ b/packages/h2/config.py
@@ -0,0 +1,170 @@
+# -*- coding: utf-8 -*-
+"""
+h2/config
+~~~~~~~~~
+
+Objects for controlling the configuration of the HTTP/2 stack.
+"""
+
+
+class _BooleanConfigOption:
+ """
+ Descriptor for handling a boolean config option. This will block
+ attempts to set boolean config options to non-bools.
+ """
+ def __init__(self, name):
+ self.name = name
+ self.attr_name = '_%s' % self.name
+
+ def __get__(self, instance, owner):
+ return getattr(instance, self.attr_name)
+
+ def __set__(self, instance, value):
+ if not isinstance(value, bool):
+ raise ValueError("%s must be a bool" % self.name)
+ setattr(instance, self.attr_name, value)
+
+
+class DummyLogger:
+ """
+    A Logger object that does no actual logging, hence a DummyLogger.
+
+    For this class, every logging operation is a no-op. The intent is to avoid
+ conditionals being sprinkled throughout the hyper-h2 code for calls to
+ logging functions when no logger is passed into the corresponding object.
+ """
+ def __init__(self, *vargs):
+ pass
+
+ def debug(self, *vargs, **kwargs):
+ """
+ No-op logging. Only level needed for now.
+ """
+ pass
+
+ def trace(self, *vargs, **kwargs):
+ """
+ No-op logging. Only level needed for now.
+ """
+ pass
+
+
+class H2Configuration:
+ """
+ An object that controls the way a single HTTP/2 connection behaves.
+
+ This object allows the users to customize behaviour. In particular, it
+ allows users to enable or disable optional features, or to otherwise handle
+ various unusual behaviours.
+
+ This object has very little behaviour of its own: it mostly just ensures
+ that configuration is self-consistent.
+
+ :param client_side: Whether this object is to be used on the client side of
+ a connection, or on the server side. Affects the logic used by the
+ state machine, the default settings values, the allowable stream IDs,
+ and several other properties. Defaults to ``True``.
+ :type client_side: ``bool``
+
+ :param header_encoding: Controls whether the headers emitted by this object
+ in events are transparently decoded to ``unicode`` strings, and what
+ encoding is used to do that decoding. This defaults to ``None``,
+ meaning that headers will be returned as bytes. To automatically
+ decode headers (that is, to return them as unicode strings), this can
+ be set to the string name of any encoding, e.g. ``'utf-8'``.
+
+ .. versionchanged:: 3.0.0
+ Changed default value from ``'utf-8'`` to ``None``
+
+ :type header_encoding: ``str``, ``False``, or ``None``
+
+ :param validate_outbound_headers: Controls whether the headers emitted
+ by this object are validated against the rules in RFC 7540.
+ Disabling this setting will cause outbound header validation to
+ be skipped, and allow the object to emit headers that may be illegal
+ according to RFC 7540. Defaults to ``True``.
+ :type validate_outbound_headers: ``bool``
+
+ :param normalize_outbound_headers: Controls whether the headers emitted
+ by this object are normalized before sending. Disabling this setting
+ will cause outbound header normalization to be skipped, and allow
+ the object to emit headers that may be illegal according to
+ RFC 7540. Defaults to ``True``.
+ :type normalize_outbound_headers: ``bool``
+
+ :param validate_inbound_headers: Controls whether the headers received
+ by this object are validated against the rules in RFC 7540.
+ Disabling this setting will cause inbound header validation to
+ be skipped, and allow the object to receive headers that may be illegal
+ according to RFC 7540. Defaults to ``True``.
+ :type validate_inbound_headers: ``bool``
+
+ :param normalize_inbound_headers: Controls whether the headers received by
+ this object are normalized according to the rules of RFC 7540.
+ Disabling this setting may lead to hyper-h2 emitting header blocks that
+ some RFCs forbid, e.g. with multiple cookie fields.
+
+ .. versionadded:: 3.0.0
+
+ :type normalize_inbound_headers: ``bool``
+
+ :param logger: A logger that conforms to the requirements for this module,
+ those being no I/O and no context switches, which is needed in order
+ to run in asynchronous operation.
+
+ .. versionadded:: 2.6.0
+
+ :type logger: ``logging.Logger``
+ """
+ client_side = _BooleanConfigOption('client_side')
+ validate_outbound_headers = _BooleanConfigOption(
+ 'validate_outbound_headers'
+ )
+ normalize_outbound_headers = _BooleanConfigOption(
+ 'normalize_outbound_headers'
+ )
+ validate_inbound_headers = _BooleanConfigOption(
+ 'validate_inbound_headers'
+ )
+ normalize_inbound_headers = _BooleanConfigOption(
+ 'normalize_inbound_headers'
+ )
+
+ def __init__(self,
+ client_side=True,
+ header_encoding=None,
+ validate_outbound_headers=True,
+ normalize_outbound_headers=True,
+ validate_inbound_headers=True,
+ normalize_inbound_headers=True,
+ logger=None):
+ self.client_side = client_side
+ self.header_encoding = header_encoding
+ self.validate_outbound_headers = validate_outbound_headers
+ self.normalize_outbound_headers = normalize_outbound_headers
+ self.validate_inbound_headers = validate_inbound_headers
+ self.normalize_inbound_headers = normalize_inbound_headers
+ self.logger = logger or DummyLogger(__name__)
+
+ @property
+ def header_encoding(self):
+ """
+ Controls whether the headers emitted by this object in events are
+ transparently decoded to ``unicode`` strings, and what encoding is used
+ to do that decoding. This defaults to ``None``, meaning that headers
+ will be returned as bytes. To automatically decode headers (that is, to
+ return them as unicode strings), this can be set to the string name of
+ any encoding, e.g. ``'utf-8'``.
+ """
+ return self._header_encoding
+
+ @header_encoding.setter
+ def header_encoding(self, value):
+ """
+ Enforces constraints on the value of header encoding.
+ """
+ if not isinstance(value, (bool, str, type(None))):
+ raise ValueError("header_encoding must be bool, string, or None")
+ if value is True:
+ raise ValueError("header_encoding cannot be True")
+ self._header_encoding = value
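+
+
+# A minimal usage sketch added for illustration (not part of upstream
+# hyper-h2): it exercises the constraints that H2Configuration documents
+# above. The specific values shown are assumptions, not requirements.
+if __name__ == "__main__":  # pragma: no cover
+    # A server-side configuration that decodes header values to str.
+    config = H2Configuration(client_side=False, header_encoding="utf-8")
+    assert config.header_encoding == "utf-8"
+
+    # header_encoding accepts a codec name, False, or None -- but never True.
+    try:
+        config.header_encoding = True
+    except ValueError as exc:
+        print("rejected as documented:", exc)
+
+    # Non-bool values for boolean options are rejected by _BooleanConfigOption.
+    try:
+        H2Configuration(validate_inbound_headers="yes")
+    except ValueError as exc:
+        print("rejected as documented:", exc)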
diff --git a/packages/h2/connection.py b/packages/h2/connection.py
new file mode 100644
index 000000000..aa3071144
--- /dev/null
+++ b/packages/h2/connection.py
@@ -0,0 +1,2047 @@
+# -*- coding: utf-8 -*-
+"""
+h2/connection
+~~~~~~~~~~~~~
+
+An implementation of an HTTP/2 connection.
+"""
+import base64
+
+from enum import Enum, IntEnum
+
+from hyperframe.exceptions import InvalidPaddingError
+from hyperframe.frame import (
+ GoAwayFrame, WindowUpdateFrame, HeadersFrame, DataFrame, PingFrame,
+ PushPromiseFrame, SettingsFrame, RstStreamFrame, PriorityFrame,
+ ContinuationFrame, AltSvcFrame, ExtensionFrame
+)
+from hpack.hpack import Encoder, Decoder
+from hpack.exceptions import HPACKError, OversizedHeaderListError
+
+from .config import H2Configuration
+from .errors import ErrorCodes, _error_code_from_int
+from .events import (
+ WindowUpdated, RemoteSettingsChanged, PingReceived, PingAckReceived,
+ SettingsAcknowledged, ConnectionTerminated, PriorityUpdated,
+ AlternativeServiceAvailable, UnknownFrameReceived
+)
+from .exceptions import (
+ ProtocolError, NoSuchStreamError, FlowControlError, FrameTooLargeError,
+ TooManyStreamsError, StreamClosedError, StreamIDTooLowError,
+ NoAvailableStreamIDError, RFC1122Error, DenialOfServiceError
+)
+from .frame_buffer import FrameBuffer
+from .settings import Settings, SettingCodes
+from .stream import H2Stream, StreamClosedBy
+from .utilities import SizeLimitDict, guard_increment_window
+from .windows import WindowManager
+
+
+class ConnectionState(Enum):
+ IDLE = 0
+ CLIENT_OPEN = 1
+ SERVER_OPEN = 2
+ CLOSED = 3
+
+
+class ConnectionInputs(Enum):
+ SEND_HEADERS = 0
+ SEND_PUSH_PROMISE = 1
+ SEND_DATA = 2
+ SEND_GOAWAY = 3
+ SEND_WINDOW_UPDATE = 4
+ SEND_PING = 5
+ SEND_SETTINGS = 6
+ SEND_RST_STREAM = 7
+ SEND_PRIORITY = 8
+ RECV_HEADERS = 9
+ RECV_PUSH_PROMISE = 10
+ RECV_DATA = 11
+ RECV_GOAWAY = 12
+ RECV_WINDOW_UPDATE = 13
+ RECV_PING = 14
+ RECV_SETTINGS = 15
+ RECV_RST_STREAM = 16
+ RECV_PRIORITY = 17
+ SEND_ALTERNATIVE_SERVICE = 18 # Added in 2.3.0
+ RECV_ALTERNATIVE_SERVICE = 19 # Added in 2.3.0
+
+
+class AllowedStreamIDs(IntEnum):
+ EVEN = 0
+ ODD = 1
+
+
+class H2ConnectionStateMachine:
+ """
+ A single HTTP/2 connection state machine.
+
+ This state machine, while defined in its own class, is logically part of
+ the H2Connection class also defined in this file. The state machine itself
+ maintains very little state directly, instead focusing entirely on managing
+ state transitions.
+ """
+ # For the purposes of this state machine we treat HEADERS and their
+ # associated CONTINUATION frames as a single jumbo frame. The protocol
+    # allows/requires this by preventing other frames from being interleaved in
+ # between HEADERS/CONTINUATION frames.
+ #
+ # The _transitions dictionary contains a mapping of tuples of
+ # (state, input) to tuples of (side_effect_function, end_state). This map
+ # contains all allowed transitions: anything not in this map is invalid
+ # and immediately causes a transition to ``closed``.
+
+ _transitions = {
+ # State: idle
+ (ConnectionState.IDLE, ConnectionInputs.SEND_HEADERS):
+ (None, ConnectionState.CLIENT_OPEN),
+ (ConnectionState.IDLE, ConnectionInputs.RECV_HEADERS):
+ (None, ConnectionState.SERVER_OPEN),
+ (ConnectionState.IDLE, ConnectionInputs.SEND_SETTINGS):
+ (None, ConnectionState.IDLE),
+ (ConnectionState.IDLE, ConnectionInputs.RECV_SETTINGS):
+ (None, ConnectionState.IDLE),
+ (ConnectionState.IDLE, ConnectionInputs.SEND_WINDOW_UPDATE):
+ (None, ConnectionState.IDLE),
+ (ConnectionState.IDLE, ConnectionInputs.RECV_WINDOW_UPDATE):
+ (None, ConnectionState.IDLE),
+ (ConnectionState.IDLE, ConnectionInputs.SEND_PING):
+ (None, ConnectionState.IDLE),
+ (ConnectionState.IDLE, ConnectionInputs.RECV_PING):
+ (None, ConnectionState.IDLE),
+ (ConnectionState.IDLE, ConnectionInputs.SEND_GOAWAY):
+ (None, ConnectionState.CLOSED),
+ (ConnectionState.IDLE, ConnectionInputs.RECV_GOAWAY):
+ (None, ConnectionState.CLOSED),
+ (ConnectionState.IDLE, ConnectionInputs.SEND_PRIORITY):
+ (None, ConnectionState.IDLE),
+ (ConnectionState.IDLE, ConnectionInputs.RECV_PRIORITY):
+ (None, ConnectionState.IDLE),
+ (ConnectionState.IDLE, ConnectionInputs.SEND_ALTERNATIVE_SERVICE):
+ (None, ConnectionState.SERVER_OPEN),
+ (ConnectionState.IDLE, ConnectionInputs.RECV_ALTERNATIVE_SERVICE):
+ (None, ConnectionState.CLIENT_OPEN),
+
+ # State: open, client side.
+ (ConnectionState.CLIENT_OPEN, ConnectionInputs.SEND_HEADERS):
+ (None, ConnectionState.CLIENT_OPEN),
+ (ConnectionState.CLIENT_OPEN, ConnectionInputs.SEND_DATA):
+ (None, ConnectionState.CLIENT_OPEN),
+ (ConnectionState.CLIENT_OPEN, ConnectionInputs.SEND_GOAWAY):
+ (None, ConnectionState.CLOSED),
+ (ConnectionState.CLIENT_OPEN, ConnectionInputs.SEND_WINDOW_UPDATE):
+ (None, ConnectionState.CLIENT_OPEN),
+ (ConnectionState.CLIENT_OPEN, ConnectionInputs.SEND_PING):
+ (None, ConnectionState.CLIENT_OPEN),
+ (ConnectionState.CLIENT_OPEN, ConnectionInputs.SEND_SETTINGS):
+ (None, ConnectionState.CLIENT_OPEN),
+ (ConnectionState.CLIENT_OPEN, ConnectionInputs.SEND_PRIORITY):
+ (None, ConnectionState.CLIENT_OPEN),
+ (ConnectionState.CLIENT_OPEN, ConnectionInputs.RECV_HEADERS):
+ (None, ConnectionState.CLIENT_OPEN),
+ (ConnectionState.CLIENT_OPEN, ConnectionInputs.RECV_PUSH_PROMISE):
+ (None, ConnectionState.CLIENT_OPEN),
+ (ConnectionState.CLIENT_OPEN, ConnectionInputs.RECV_DATA):
+ (None, ConnectionState.CLIENT_OPEN),
+ (ConnectionState.CLIENT_OPEN, ConnectionInputs.RECV_GOAWAY):
+ (None, ConnectionState.CLOSED),
+ (ConnectionState.CLIENT_OPEN, ConnectionInputs.RECV_WINDOW_UPDATE):
+ (None, ConnectionState.CLIENT_OPEN),
+ (ConnectionState.CLIENT_OPEN, ConnectionInputs.RECV_PING):
+ (None, ConnectionState.CLIENT_OPEN),
+ (ConnectionState.CLIENT_OPEN, ConnectionInputs.RECV_SETTINGS):
+ (None, ConnectionState.CLIENT_OPEN),
+ (ConnectionState.CLIENT_OPEN, ConnectionInputs.SEND_RST_STREAM):
+ (None, ConnectionState.CLIENT_OPEN),
+ (ConnectionState.CLIENT_OPEN, ConnectionInputs.RECV_RST_STREAM):
+ (None, ConnectionState.CLIENT_OPEN),
+ (ConnectionState.CLIENT_OPEN, ConnectionInputs.RECV_PRIORITY):
+ (None, ConnectionState.CLIENT_OPEN),
+ (ConnectionState.CLIENT_OPEN,
+ ConnectionInputs.RECV_ALTERNATIVE_SERVICE):
+ (None, ConnectionState.CLIENT_OPEN),
+
+ # State: open, server side.
+ (ConnectionState.SERVER_OPEN, ConnectionInputs.SEND_HEADERS):
+ (None, ConnectionState.SERVER_OPEN),
+ (ConnectionState.SERVER_OPEN, ConnectionInputs.SEND_PUSH_PROMISE):
+ (None, ConnectionState.SERVER_OPEN),
+ (ConnectionState.SERVER_OPEN, ConnectionInputs.SEND_DATA):
+ (None, ConnectionState.SERVER_OPEN),
+ (ConnectionState.SERVER_OPEN, ConnectionInputs.SEND_GOAWAY):
+ (None, ConnectionState.CLOSED),
+ (ConnectionState.SERVER_OPEN, ConnectionInputs.SEND_WINDOW_UPDATE):
+ (None, ConnectionState.SERVER_OPEN),
+ (ConnectionState.SERVER_OPEN, ConnectionInputs.SEND_PING):
+ (None, ConnectionState.SERVER_OPEN),
+ (ConnectionState.SERVER_OPEN, ConnectionInputs.SEND_SETTINGS):
+ (None, ConnectionState.SERVER_OPEN),
+ (ConnectionState.SERVER_OPEN, ConnectionInputs.SEND_PRIORITY):
+ (None, ConnectionState.SERVER_OPEN),
+ (ConnectionState.SERVER_OPEN, ConnectionInputs.RECV_HEADERS):
+ (None, ConnectionState.SERVER_OPEN),
+ (ConnectionState.SERVER_OPEN, ConnectionInputs.RECV_DATA):
+ (None, ConnectionState.SERVER_OPEN),
+ (ConnectionState.SERVER_OPEN, ConnectionInputs.RECV_GOAWAY):
+ (None, ConnectionState.CLOSED),
+ (ConnectionState.SERVER_OPEN, ConnectionInputs.RECV_WINDOW_UPDATE):
+ (None, ConnectionState.SERVER_OPEN),
+ (ConnectionState.SERVER_OPEN, ConnectionInputs.RECV_PING):
+ (None, ConnectionState.SERVER_OPEN),
+ (ConnectionState.SERVER_OPEN, ConnectionInputs.RECV_SETTINGS):
+ (None, ConnectionState.SERVER_OPEN),
+ (ConnectionState.SERVER_OPEN, ConnectionInputs.RECV_PRIORITY):
+ (None, ConnectionState.SERVER_OPEN),
+ (ConnectionState.SERVER_OPEN, ConnectionInputs.SEND_RST_STREAM):
+ (None, ConnectionState.SERVER_OPEN),
+ (ConnectionState.SERVER_OPEN, ConnectionInputs.RECV_RST_STREAM):
+ (None, ConnectionState.SERVER_OPEN),
+ (ConnectionState.SERVER_OPEN,
+ ConnectionInputs.SEND_ALTERNATIVE_SERVICE):
+ (None, ConnectionState.SERVER_OPEN),
+ (ConnectionState.SERVER_OPEN,
+ ConnectionInputs.RECV_ALTERNATIVE_SERVICE):
+ (None, ConnectionState.SERVER_OPEN),
+
+ # State: closed
+ (ConnectionState.CLOSED, ConnectionInputs.SEND_GOAWAY):
+ (None, ConnectionState.CLOSED),
+ (ConnectionState.CLOSED, ConnectionInputs.RECV_GOAWAY):
+ (None, ConnectionState.CLOSED),
+ }
+
+ def __init__(self):
+ self.state = ConnectionState.IDLE
+
+ def process_input(self, input_):
+ """
+ Process a specific input in the state machine.
+ """
+ if not isinstance(input_, ConnectionInputs):
+ raise ValueError("Input must be an instance of ConnectionInputs")
+
+ try:
+ func, target_state = self._transitions[(self.state, input_)]
+ except KeyError:
+ old_state = self.state
+ self.state = ConnectionState.CLOSED
+ raise ProtocolError(
+ "Invalid input %s in state %s" % (input_, old_state)
+ )
+ else:
+ self.state = target_state
+ if func is not None: # pragma: no cover
+ return func()
+
+ return []
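+
+
+# Editor's illustration (not part of upstream hyper-h2): the state machine
+# above can be driven directly, though applications normally interact only
+# with H2Connection below. Roughly:
+#
+#     fsm = H2ConnectionStateMachine()
+#     fsm.process_input(ConnectionInputs.SEND_HEADERS)   # returns []
+#     assert fsm.state is ConnectionState.CLIENT_OPEN
+#     try:
+#         # No (CLIENT_OPEN, SEND_PUSH_PROMISE) entry exists in _transitions,
+#         # so this closes the connection and raises.
+#         fsm.process_input(ConnectionInputs.SEND_PUSH_PROMISE)
+#     except ProtocolError:
+#         assert fsm.state is ConnectionState.CLOSED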
+
+
+class H2Connection:
+ """
+ A low-level HTTP/2 connection object. This handles building and receiving
+ frames and maintains both connection and per-stream state for all streams
+ on this connection.
+
+    This wraps an HTTP/2 connection state machine implementation, ensuring that
+ frames can only be sent/received when the connection is in a valid state.
+ It also builds stream state machines on demand to ensure that the
+ constraints of those state machines are met as well. Attempts to create
+ frames that cannot be sent will raise a ``ProtocolError``.
+
+ .. versionchanged:: 2.3.0
+ Added the ``header_encoding`` keyword argument.
+
+ .. versionchanged:: 2.5.0
+ Added the ``config`` keyword argument. Deprecated the ``client_side``
+ and ``header_encoding`` parameters.
+
+ .. versionchanged:: 3.0.0
+ Removed deprecated parameters and properties.
+
+ :param config: The configuration for the HTTP/2 connection.
+
+ .. versionadded:: 2.5.0
+
+    :type config: :class:`H2Configuration <h2.config.H2Configuration>`
+ """
+ # The initial maximum outbound frame size. This can be changed by receiving
+ # a settings frame.
+ DEFAULT_MAX_OUTBOUND_FRAME_SIZE = 65535
+
+ # The initial maximum inbound frame size. This is somewhat arbitrarily
+ # chosen.
+ DEFAULT_MAX_INBOUND_FRAME_SIZE = 2**24
+
+ # The highest acceptable stream ID.
+ HIGHEST_ALLOWED_STREAM_ID = 2**31 - 1
+
+ # The largest acceptable window increment.
+ MAX_WINDOW_INCREMENT = 2**31 - 1
+
+ # The initial default value of SETTINGS_MAX_HEADER_LIST_SIZE.
+ DEFAULT_MAX_HEADER_LIST_SIZE = 2**16
+
+ # Keep in memory limited amount of results for streams closes
+ MAX_CLOSED_STREAMS = 2**16
+
+ def __init__(self, config=None):
+ self.state_machine = H2ConnectionStateMachine()
+ self.streams = {}
+ self.highest_inbound_stream_id = 0
+ self.highest_outbound_stream_id = 0
+ self.encoder = Encoder()
+ self.decoder = Decoder()
+
+ # This won't always actually do anything: for versions of HPACK older
+ # than 2.3.0 it does nothing. However, we have to try!
+ self.decoder.max_header_list_size = self.DEFAULT_MAX_HEADER_LIST_SIZE
+
+ #: The configuration for this HTTP/2 connection object.
+ #:
+ #: .. versionadded:: 2.5.0
+ self.config = config
+ if self.config is None:
+ self.config = H2Configuration(
+ client_side=True,
+ )
+
+ # Objects that store settings, including defaults.
+ #
+ # We set the MAX_CONCURRENT_STREAMS value to 100 because its default is
+ # unbounded, and that's a dangerous default because it allows
+ # essentially unbounded resources to be allocated regardless of how
+ # they will be used. 100 should be suitable for the average
+ # application. This default obviously does not apply to the remote
+ # peer's settings: the remote peer controls them!
+ #
+ # We also set MAX_HEADER_LIST_SIZE to a reasonable value. This is to
+ # advertise our defence against CVE-2016-6581. However, not all
+ # versions of HPACK will let us do it. That's ok: we should at least
+ # suggest that we're not vulnerable.
+ self.local_settings = Settings(
+ client=self.config.client_side,
+ initial_values={
+ SettingCodes.MAX_CONCURRENT_STREAMS: 100,
+ SettingCodes.MAX_HEADER_LIST_SIZE:
+ self.DEFAULT_MAX_HEADER_LIST_SIZE,
+ }
+ )
+ self.remote_settings = Settings(client=not self.config.client_side)
+
+ # The current value of the connection flow control windows on the
+ # connection.
+ self.outbound_flow_control_window = (
+ self.remote_settings.initial_window_size
+ )
+
+ #: The maximum size of a frame that can be emitted by this peer, in
+ #: bytes.
+ self.max_outbound_frame_size = self.remote_settings.max_frame_size
+
+ #: The maximum size of a frame that can be received by this peer, in
+ #: bytes.
+ self.max_inbound_frame_size = self.local_settings.max_frame_size
+
+ # Buffer for incoming data.
+ self.incoming_buffer = FrameBuffer(server=not self.config.client_side)
+
+ # A private variable to store a sequence of received header frames
+ # until completion.
+ self._header_frames = []
+
+ # Data that needs to be sent.
+ self._data_to_send = bytearray()
+
+ # Keeps track of how streams are closed.
+ # Used to ensure that we don't blow up in the face of frames that were
+ # in flight when a RST_STREAM was sent.
+ # Also used to determine whether we should consider a frame received
+ # while a stream is closed as either a stream error or a connection
+ # error.
+ self._closed_streams = SizeLimitDict(
+ size_limit=self.MAX_CLOSED_STREAMS
+ )
+
+ # The flow control window manager for the connection.
+ self._inbound_flow_control_window_manager = WindowManager(
+ max_window_size=self.local_settings.initial_window_size
+ )
+
+ # When in doubt use dict-dispatch.
+ self._frame_dispatch_table = {
+ HeadersFrame: self._receive_headers_frame,
+ PushPromiseFrame: self._receive_push_promise_frame,
+ SettingsFrame: self._receive_settings_frame,
+ DataFrame: self._receive_data_frame,
+ WindowUpdateFrame: self._receive_window_update_frame,
+ PingFrame: self._receive_ping_frame,
+ RstStreamFrame: self._receive_rst_stream_frame,
+ PriorityFrame: self._receive_priority_frame,
+ GoAwayFrame: self._receive_goaway_frame,
+ ContinuationFrame: self._receive_naked_continuation,
+ AltSvcFrame: self._receive_alt_svc_frame,
+ ExtensionFrame: self._receive_unknown_frame
+ }
+
+ def _prepare_for_sending(self, frames):
+ if not frames:
+ return
+ self._data_to_send += b''.join(f.serialize() for f in frames)
+ assert all(f.body_len <= self.max_outbound_frame_size for f in frames)
+
+ def _open_streams(self, remainder):
+ """
+        A common method for counting the number of open streams. Returns the number
+ of streams that are open *and* that have (stream ID % 2) == remainder.
+ While it iterates, also deletes any closed streams.
+ """
+ count = 0
+ to_delete = []
+
+ for stream_id, stream in self.streams.items():
+ if stream.open and (stream_id % 2 == remainder):
+ count += 1
+ elif stream.closed:
+ to_delete.append(stream_id)
+
+ for stream_id in to_delete:
+ stream = self.streams.pop(stream_id)
+ self._closed_streams[stream_id] = stream.closed_by
+
+ return count
+
+ @property
+ def open_outbound_streams(self):
+ """
+ The current number of open outbound streams.
+ """
+ outbound_numbers = int(self.config.client_side)
+ return self._open_streams(outbound_numbers)
+
+ @property
+ def open_inbound_streams(self):
+ """
+ The current number of open inbound streams.
+ """
+ inbound_numbers = int(not self.config.client_side)
+ return self._open_streams(inbound_numbers)
+
+ @property
+ def inbound_flow_control_window(self):
+ """
+ The size of the inbound flow control window for the connection. This is
+ rarely publicly useful: instead, use :meth:`remote_flow_control_window
+        <h2.connection.H2Connection.remote_flow_control_window>`. This
+        property is largely present to provide a shortcut to this data.
+ """
+ return self._inbound_flow_control_window_manager.current_window_size
+
+ def _begin_new_stream(self, stream_id, allowed_ids):
+ """
+ Initiate a new stream.
+
+ .. versionchanged:: 2.0.0
+ Removed this function from the public API.
+
+ :param stream_id: The ID of the stream to open.
+ :param allowed_ids: What kind of stream ID is allowed.
+ """
+ self.config.logger.debug(
+ "Attempting to initiate stream ID %d", stream_id
+ )
+ outbound = self._stream_id_is_outbound(stream_id)
+ highest_stream_id = (
+ self.highest_outbound_stream_id if outbound else
+ self.highest_inbound_stream_id
+ )
+
+ if stream_id <= highest_stream_id:
+ raise StreamIDTooLowError(stream_id, highest_stream_id)
+
+ if (stream_id % 2) != int(allowed_ids):
+ raise ProtocolError(
+ "Invalid stream ID for peer."
+ )
+
+ s = H2Stream(
+ stream_id,
+ config=self.config,
+ inbound_window_size=self.local_settings.initial_window_size,
+ outbound_window_size=self.remote_settings.initial_window_size
+ )
+ self.config.logger.debug("Stream ID %d created", stream_id)
+ s.max_inbound_frame_size = self.max_inbound_frame_size
+ s.max_outbound_frame_size = self.max_outbound_frame_size
+
+ self.streams[stream_id] = s
+ self.config.logger.debug("Current streams: %s", self.streams.keys())
+
+ if outbound:
+ self.highest_outbound_stream_id = stream_id
+ else:
+ self.highest_inbound_stream_id = stream_id
+
+ return s
+
+ def initiate_connection(self):
+ """
+ Provides any data that needs to be sent at the start of the connection.
+ Must be called for both clients and servers.
+ """
+ self.config.logger.debug("Initializing connection")
+ self.state_machine.process_input(ConnectionInputs.SEND_SETTINGS)
+ if self.config.client_side:
+ preamble = b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n'
+ else:
+ preamble = b''
+
+ f = SettingsFrame(0)
+ for setting, value in self.local_settings.items():
+ f.settings[setting] = value
+ self.config.logger.debug(
+ "Send Settings frame: %s", self.local_settings
+ )
+
+ self._data_to_send += preamble + f.serialize()
+
+ def initiate_upgrade_connection(self, settings_header=None):
+ """
+ Call to initialise the connection object for use with an upgraded
+ HTTP/2 connection (i.e. a connection negotiated using the
+ ``Upgrade: h2c`` HTTP header).
+
+ This method differs from :meth:`initiate_connection
+        <h2.connection.H2Connection.initiate_connection>` in several ways.
+ Firstly, it handles the additional SETTINGS frame that is sent in the
+ ``HTTP2-Settings`` header field. When called on a client connection,
+ this method will return a bytestring that the caller can put in the
+ ``HTTP2-Settings`` field they send on their initial request. When
+ called on a server connection, the user **must** provide the value they
+ received from the client in the ``HTTP2-Settings`` header field to the
+ ``settings_header`` argument, which will be used appropriately.
+
+ Additionally, this method sets up stream 1 in a half-closed state
+ appropriate for this side of the connection, to reflect the fact that
+ the request is already complete.
+
+ Finally, this method also prepares the appropriate preamble to be sent
+ after the upgrade.
+
+ .. versionadded:: 2.3.0
+
+ :param settings_header: (optional, server-only): The value of the
+ ``HTTP2-Settings`` header field received from the client.
+ :type settings_header: ``bytes``
+
+ :returns: For clients, a bytestring to put in the ``HTTP2-Settings``.
+ For servers, returns nothing.
+ :rtype: ``bytes`` or ``None``
+ """
+ self.config.logger.debug(
+ "Upgrade connection. Current settings: %s", self.local_settings
+ )
+
+ frame_data = None
+ # Begin by getting the preamble in place.
+ self.initiate_connection()
+
+ if self.config.client_side:
+ f = SettingsFrame(0)
+ for setting, value in self.local_settings.items():
+ f.settings[setting] = value
+
+ frame_data = f.serialize_body()
+ frame_data = base64.urlsafe_b64encode(frame_data)
+ elif settings_header:
+ # We have a settings header from the client. This needs to be
+ # applied, but we want to throw away the ACK. We do this by
+ # inserting the data into a Settings frame and then passing it to
+ # the state machine, but ignoring the return value.
+ settings_header = base64.urlsafe_b64decode(settings_header)
+ f = SettingsFrame(0)
+ f.parse_body(settings_header)
+ self._receive_settings_frame(f)
+
+ # Set up appropriate state. Stream 1 in a half-closed state:
+ # half-closed(local) for clients, half-closed(remote) for servers.
+ # Additionally, we need to set up the Connection state machine.
+ connection_input = (
+ ConnectionInputs.SEND_HEADERS if self.config.client_side
+ else ConnectionInputs.RECV_HEADERS
+ )
+ self.config.logger.debug("Process input %s", connection_input)
+ self.state_machine.process_input(connection_input)
+
+ # Set up stream 1.
+ self._begin_new_stream(stream_id=1, allowed_ids=AllowedStreamIDs.ODD)
+ self.streams[1].upgrade(self.config.client_side)
+ return frame_data
+
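+    # Editor's illustration (not part of upstream hyper-h2): the client half of
+    # an h2c upgrade as described above. The header fields shown are the
+    # standard HTTP/1.1 upgrade fields, not new API.
+    #
+    #     conn = H2Connection()
+    #     settings_value = conn.initiate_upgrade_connection()
+    #     # Send an HTTP/1.1 request carrying:
+    #     #     Connection: Upgrade, HTTP2-Settings
+    #     #     Upgrade: h2c
+    #     #     HTTP2-Settings: <settings_value>
+    #     # ...then, on a "101 Switching Protocols" response, speak HTTP/2 and
+    #     # read the response to the original request from stream 1.
+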
+ def _get_or_create_stream(self, stream_id, allowed_ids):
+ """
+ Gets a stream by its stream ID. Will create one if one does not already
+ exist. Use allowed_ids to circumvent the usual stream ID rules for
+ clients and servers.
+
+ .. versionchanged:: 2.0.0
+ Removed this function from the public API.
+ """
+ try:
+ return self.streams[stream_id]
+ except KeyError:
+ return self._begin_new_stream(stream_id, allowed_ids)
+
+ def _get_stream_by_id(self, stream_id):
+ """
+ Gets a stream by its stream ID. Raises NoSuchStreamError if the stream
+ ID does not correspond to a known stream and is higher than the current
+        maximum; raises StreamClosedError if it is lower than the current maximum.
+
+ .. versionchanged:: 2.0.0
+ Removed this function from the public API.
+ """
+ try:
+ return self.streams[stream_id]
+ except KeyError:
+ outbound = self._stream_id_is_outbound(stream_id)
+ highest_stream_id = (
+ self.highest_outbound_stream_id if outbound else
+ self.highest_inbound_stream_id
+ )
+
+ if stream_id > highest_stream_id:
+ raise NoSuchStreamError(stream_id)
+ else:
+ raise StreamClosedError(stream_id)
+
+ def get_next_available_stream_id(self):
+ """
+ Returns an integer suitable for use as the stream ID for the next
+ stream created by this endpoint. For server endpoints, this stream ID
+ will be even. For client endpoints, this stream ID will be odd. If no
+ stream IDs are available, raises :class:`NoAvailableStreamIDError
+        <h2.exceptions.NoAvailableStreamIDError>`.
+
+ .. warning:: The return value from this function does not change until
+ the stream ID has actually been used by sending or pushing
+ headers on that stream. For that reason, it should be
+ called as close as possible to the actual use of the
+ stream ID.
+
+ .. versionadded:: 2.0.0
+
+ :raises: :class:`NoAvailableStreamIDError
+            <h2.exceptions.NoAvailableStreamIDError>`
+ :returns: The next free stream ID this peer can use to initiate a
+ stream.
+ :rtype: ``int``
+ """
+ # No streams have been opened yet, so return the lowest allowed stream
+ # ID.
+ if not self.highest_outbound_stream_id:
+ next_stream_id = 1 if self.config.client_side else 2
+ else:
+ next_stream_id = self.highest_outbound_stream_id + 2
+ self.config.logger.debug(
+ "Next available stream ID %d", next_stream_id
+ )
+ if next_stream_id > self.HIGHEST_ALLOWED_STREAM_ID:
+ raise NoAvailableStreamIDError("Exhausted allowed stream IDs")
+
+ return next_stream_id
+
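+    # Editor's illustration (not part of upstream hyper-h2): a typical
+    # client-side call sequence using this method together with send_headers()
+    # below. The header values are placeholders.
+    #
+    #     conn = H2Connection()                       # client_side defaults to True
+    #     conn.initiate_connection()
+    #     stream_id = conn.get_next_available_stream_id()   # 1, then 3, 5, ...
+    #     conn.send_headers(stream_id, [
+    #         (":method", "GET"), (":path", "/"),
+    #         (":scheme", "https"), (":authority", "example.com"),
+    #     ], end_stream=True)
+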
+ def send_headers(self, stream_id, headers, end_stream=False,
+ priority_weight=None, priority_depends_on=None,
+ priority_exclusive=None):
+ """
+ Send headers on a given stream.
+
+ This function can be used to send request or response headers: the kind
+ that are sent depends on whether this connection has been opened as a
+ client or server connection, and whether the stream was opened by the
+ remote peer or not.
+
+ If this is a client connection, calling ``send_headers`` will send the
+ headers as a request. It will also implicitly open the stream being
+ used. If this is a client connection and ``send_headers`` has *already*
+ been called, this will send trailers instead.
+
+ If this is a server connection, calling ``send_headers`` will send the
+ headers as a response. It is a protocol error for a server to open a
+ stream by sending headers. If this is a server connection and
+ ``send_headers`` has *already* been called, this will send trailers
+ instead.
+
+ When acting as a server, you may call ``send_headers`` any number of
+ times allowed by the following rules, in this order:
+
+ - zero or more times with ``(':status', '1XX')`` (where ``1XX`` is a
+ placeholder for any 100-level status code).
+ - once with any other status header.
+ - zero or one time for trailers.
+
+ That is, you are allowed to send as many informational responses as you
+ like, followed by one complete response and zero or one HTTP trailer
+ blocks.
+
+ Clients may send one or two header blocks: one request block, and
+ optionally one trailer block.
+
+ If it is important to send HPACK "never indexed" header fields (as
+        defined in `RFC 7541 Section 7.1.3
+        <https://tools.ietf.org/html/rfc7541#section-7.1.3>`_), the user may
+        instead provide headers using the HPACK library's :class:`HeaderTuple
+        <hpack:hpack.HeaderTuple>` and :class:`NeverIndexedHeaderTuple
+        <hpack:hpack.NeverIndexedHeaderTuple>` objects.
+
+ This method also allows users to prioritize the stream immediately,
+ by sending priority information on the HEADERS frame directly. To do
+ this, any one of ``priority_weight``, ``priority_depends_on``, or
+ ``priority_exclusive`` must be set to a value that is not ``None``. For
+ more information on the priority fields, see :meth:`prioritize
+        <h2.connection.H2Connection.prioritize>`.
+
+ .. warning:: In HTTP/2, it is mandatory that all the HTTP/2 special
+ headers (that is, ones whose header keys begin with ``:``) appear
+ at the start of the header block, before any normal headers.
+
+ .. versionchanged:: 2.3.0
+ Added support for using :class:`HeaderTuple
+            <hpack:hpack.HeaderTuple>` objects to store headers.
+
+ .. versionchanged:: 2.4.0
+ Added the ability to provide priority keyword arguments:
+ ``priority_weight``, ``priority_depends_on``, and
+ ``priority_exclusive``.
+
+ :param stream_id: The stream ID to send the headers on. If this stream
+ does not currently exist, it will be created.
+ :type stream_id: ``int``
+
+ :param headers: The request/response headers to send.
+        :type headers: An iterable of two-tuples of bytestrings or
+            :class:`HeaderTuple <hpack:hpack.HeaderTuple>` objects.
+
+ :param end_stream: Whether this headers frame should end the stream
+ immediately (that is, whether no more data will be sent after this
+ frame). Defaults to ``False``.
+ :type end_stream: ``bool``
+
+ :param priority_weight: Sets the priority weight of the stream. See
+            :meth:`prioritize <h2.connection.H2Connection.prioritize>` for more
+ about how this field works. Defaults to ``None``, which means that
+ no priority information will be sent.
+ :type priority_weight: ``int`` or ``None``
+
+ :param priority_depends_on: Sets which stream this one depends on for
+ priority purposes. See :meth:`prioritize
+            <h2.connection.H2Connection.prioritize>` for more about how this
+ field works. Defaults to ``None``, which means that no priority
+ information will be sent.
+ :type priority_depends_on: ``int`` or ``None``
+
+ :param priority_exclusive: Sets whether this stream exclusively depends
+ on the stream given in ``priority_depends_on`` for priority
+ purposes. See :meth:`prioritize
+            <h2.connection.H2Connection.prioritize>` for more about how this
+            field works. Defaults to ``None``, which means that no priority
+            information will be sent.
+        :type priority_exclusive: ``bool`` or ``None``
+
+ :returns: Nothing
+ """
+ self.config.logger.debug(
+ "Send headers on stream ID %d", stream_id
+ )
+
+ # Check we can open the stream.
+ if stream_id not in self.streams:
+ max_open_streams = self.remote_settings.max_concurrent_streams
+ if (self.open_outbound_streams + 1) > max_open_streams:
+ raise TooManyStreamsError(
+ "Max outbound streams is %d, %d open" %
+ (max_open_streams, self.open_outbound_streams)
+ )
+
+ self.state_machine.process_input(ConnectionInputs.SEND_HEADERS)
+ stream = self._get_or_create_stream(
+ stream_id, AllowedStreamIDs(self.config.client_side)
+ )
+ frames = stream.send_headers(
+ headers, self.encoder, end_stream
+ )
+
+ # We may need to send priority information.
+ priority_present = (
+ (priority_weight is not None) or
+ (priority_depends_on is not None) or
+ (priority_exclusive is not None)
+ )
+
+ if priority_present:
+ if not self.config.client_side:
+ raise RFC1122Error("Servers SHOULD NOT prioritize streams.")
+
+ headers_frame = frames[0]
+ headers_frame.flags.add('PRIORITY')
+ frames[0] = _add_frame_priority(
+ headers_frame,
+ priority_weight,
+ priority_depends_on,
+ priority_exclusive
+ )
+
+ self._prepare_for_sending(frames)
+
+ def send_data(self, stream_id, data, end_stream=False, pad_length=None):
+ """
+ Send data on a given stream.
+
+ This method does no breaking up of data: if the data is larger than the
+ value returned by :meth:`local_flow_control_window
+        <h2.connection.H2Connection.local_flow_control_window>` for this stream
+        then a :class:`FlowControlError <h2.exceptions.FlowControlError>` will
+        be raised. If the data is larger than :data:`max_outbound_frame_size
+        <h2.connection.H2Connection.max_outbound_frame_size>` then a
+        :class:`FrameTooLargeError <h2.exceptions.FrameTooLargeError>` will be
+ raised.
+
+ Hyper-h2 does this to avoid buffering the data internally. If the user
+ has more data to send than hyper-h2 will allow, consider breaking it up
+ and buffering it externally.
+
+ :param stream_id: The ID of the stream on which to send the data.
+ :type stream_id: ``int``
+ :param data: The data to send on the stream.
+ :type data: ``bytes``
+ :param end_stream: (optional) Whether this is the last data to be sent
+ on the stream. Defaults to ``False``.
+ :type end_stream: ``bool``
+ :param pad_length: (optional) Length of the padding to apply to the
+ data frame. Defaults to ``None`` for no use of padding. Note that
+ a value of ``0`` results in padding of length ``0``
+ (with the "padding" flag set on the frame).
+
+ .. versionadded:: 2.6.0
+
+ :type pad_length: ``int``
+ :returns: Nothing
+ """
+ self.config.logger.debug(
+ "Send data on stream ID %d with len %d", stream_id, len(data)
+ )
+ frame_size = len(data)
+ if pad_length is not None:
+ if not isinstance(pad_length, int):
+ raise TypeError("pad_length must be an int")
+ if pad_length < 0 or pad_length > 255:
+ raise ValueError("pad_length must be within range: [0, 255]")
+ # Account for padding bytes plus the 1-byte padding length field.
+ frame_size += pad_length + 1
+ self.config.logger.debug(
+ "Frame size on stream ID %d is %d", stream_id, frame_size
+ )
+
+ if frame_size > self.local_flow_control_window(stream_id):
+ raise FlowControlError(
+ "Cannot send %d bytes, flow control window is %d." %
+ (frame_size, self.local_flow_control_window(stream_id))
+ )
+ elif frame_size > self.max_outbound_frame_size:
+ raise FrameTooLargeError(
+ "Cannot send frame size %d, max frame size is %d" %
+ (frame_size, self.max_outbound_frame_size)
+ )
+
+ self.state_machine.process_input(ConnectionInputs.SEND_DATA)
+ frames = self.streams[stream_id].send_data(
+ data, end_stream, pad_length=pad_length
+ )
+
+ self._prepare_for_sending(frames)
+
+ self.outbound_flow_control_window -= frame_size
+ self.config.logger.debug(
+ "Outbound flow control window size is %d",
+ self.outbound_flow_control_window
+ )
+ assert self.outbound_flow_control_window >= 0
+
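+ # A rough sketch of the external chunking the docstring above suggests
+ # (assumes ``conn``, ``sock``, a ``stream_id`` and a ``payload`` bytestring;
+ # all names are illustrative). In real code, wait for a WindowUpdated event
+ # instead of spinning when the flow control window is 0.
+ #
+ # while payload:
+ #     chunk_size = min(
+ #         conn.local_flow_control_window(stream_id),
+ #         conn.max_outbound_frame_size,
+ #     )
+ #     chunk, payload = payload[:chunk_size], payload[chunk_size:]
+ #     conn.send_data(stream_id, chunk, end_stream=(not payload))
+ #     sock.sendall(conn.data_to_send())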
+ def end_stream(self, stream_id):
+ """
+ Cleanly end a given stream.
+
+ This method ends a stream by sending an empty DATA frame on that stream
+ with the ``END_STREAM`` flag set.
+
+ :param stream_id: The ID of the stream to end.
+ :type stream_id: ``int``
+ :returns: Nothing
+ """
+ self.config.logger.debug("End stream ID %d", stream_id)
+ self.state_machine.process_input(ConnectionInputs.SEND_DATA)
+ frames = self.streams[stream_id].end_stream()
+ self._prepare_for_sending(frames)
+
+ def increment_flow_control_window(self, increment, stream_id=None):
+ """
+ Increment a flow control window, optionally for a single stream. Allows
+ the remote peer to send more data.
+
+ .. versionchanged:: 2.0.0
+ Rejects attempts to increment the flow control window by out of
+ range values with a ``ValueError``.
+
+ :param increment: The amount to increment the flow control window by.
+ :type increment: ``int``
+ :param stream_id: (optional) The ID of the stream that should have its
+ flow control window opened. If not present or ``None``, the
+ connection flow control window will be opened instead.
+ :type stream_id: ``int`` or ``None``
+ :returns: Nothing
+ :raises: ``ValueError``
+ """
+ if not (1 <= increment <= self.MAX_WINDOW_INCREMENT):
+ raise ValueError(
+ "Flow control increment must be between 1 and %d" %
+ self.MAX_WINDOW_INCREMENT
+ )
+
+ self.state_machine.process_input(ConnectionInputs.SEND_WINDOW_UPDATE)
+
+ if stream_id is not None:
+ stream = self.streams[stream_id]
+ frames = stream.increase_flow_control_window(
+ increment
+ )
+
+ self.config.logger.debug(
+ "Increase stream ID %d flow control window by %d",
+ stream_id, increment
+ )
+ else:
+ self._inbound_flow_control_window_manager.window_opened(increment)
+ f = WindowUpdateFrame(0)
+ f.window_increment = increment
+ frames = [f]
+
+ self.config.logger.debug(
+ "Increase connection flow control window by %d", increment
+ )
+
+ self._prepare_for_sending(frames)
+
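+ # Illustrative only (assumes ``conn`` and ``sock``): reopening the
+ # connection window and a single stream's window after consuming data.
+ #
+ # conn.increment_flow_control_window(2 ** 16)               # connection
+ # conn.increment_flow_control_window(2 ** 16, stream_id=1)  # one stream
+ # sock.sendall(conn.data_to_send())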
+ def push_stream(self, stream_id, promised_stream_id, request_headers):
+ """
+ Push a response to the client by sending a PUSH_PROMISE frame.
+
+ If it is important to send HPACK "never indexed" header fields (as
+ defined in `RFC 7541 Section 7.1.3
+ <https://tools.ietf.org/html/rfc7541#section-7.1.3>`_), the user may
+ instead provide headers using the HPACK library's :class:`HeaderTuple`
+ and :class:`NeverIndexedHeaderTuple` objects.
+
+ :param stream_id: The ID of the stream that this push is a response to.
+ :type stream_id: ``int``
+ :param promised_stream_id: The ID of the stream that the pushed
+ response will be sent on.
+ :type promised_stream_id: ``int``
+ :param request_headers: The headers of the request that the pushed
+ response will be responding to.
+ :type request_headers: An iterable of two-tuples of bytestrings or
+ :class:`HeaderTuple` objects.
+ :returns: Nothing
+ """
+ self.config.logger.debug(
+ "Send Push Promise frame on stream ID %d", stream_id
+ )
+
+ if not self.remote_settings.enable_push:
+ raise ProtocolError("Remote peer has disabled stream push")
+
+ self.state_machine.process_input(ConnectionInputs.SEND_PUSH_PROMISE)
+ stream = self._get_stream_by_id(stream_id)
+
+ # We need to prevent users pushing streams in response to streams that
+ # they themselves have already pushed: see #163 and RFC 7540 § 6.6. The
+ # easiest way to do that is to assert that the stream_id is not even:
+ # this shortcut works because only servers can push and the state
+ # machine will enforce this.
+ if (stream_id % 2) == 0:
+ raise ProtocolError("Cannot recursively push streams.")
+
+ new_stream = self._begin_new_stream(
+ promised_stream_id, AllowedStreamIDs.EVEN
+ )
+ self.streams[promised_stream_id] = new_stream
+
+ frames = stream.push_stream_in_band(
+ promised_stream_id, request_headers, self.encoder
+ )
+ new_frames = new_stream.locally_pushed()
+ self._prepare_for_sending(frames + new_frames)
+
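+ # Server-side sketch (names and header values are illustrative): promising
+ # a pushed response on stream 2 for a request received on stream 1, then
+ # sending the pushed response itself.
+ #
+ # conn.push_stream(
+ #     stream_id=1,
+ #     promised_stream_id=2,
+ #     request_headers=[
+ #         (':method', 'GET'), (':path', '/style.css'),
+ #         (':scheme', 'https'), (':authority', 'example.com'),
+ #     ],
+ # )
+ # conn.send_headers(2, [(':status', '200')])
+ # conn.send_data(2, b'body { margin: 0 }', end_stream=True)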
+ def ping(self, opaque_data):
+ """
+ Send a PING frame.
+
+ :param opaque_data: A bytestring of length 8 that will be sent in the
+ PING frame.
+ :returns: Nothing
+ """
+ self.config.logger.debug("Send Ping frame")
+
+ if not isinstance(opaque_data, bytes) or len(opaque_data) != 8:
+ raise ValueError("Invalid value for ping data: %r" % opaque_data)
+
+ self.state_machine.process_input(ConnectionInputs.SEND_PING)
+ f = PingFrame(0)
+ f.opaque_data = opaque_data
+ self._prepare_for_sending([f])
+
+ def reset_stream(self, stream_id, error_code=0):
+ """
+ Reset a stream.
+
+ This method forcibly closes a stream by sending a RST_STREAM frame for
+ a given stream. This is not a graceful closure. To gracefully end a
+ stream, try the :meth:`end_stream` method.
+
+ :param stream_id: The ID of the stream to reset.
+ :type stream_id: ``int``
+ :param error_code: (optional) The error code to use to reset the
+ stream. Defaults to :data:`ErrorCodes.NO_ERROR`.
+ :type error_code: ``int``
+ :returns: Nothing
+ """
+ self.config.logger.debug("Reset stream ID %d", stream_id)
+ self.state_machine.process_input(ConnectionInputs.SEND_RST_STREAM)
+ stream = self._get_stream_by_id(stream_id)
+ frames = stream.reset_stream(error_code)
+ self._prepare_for_sending(frames)
+
+ def close_connection(self, error_code=0, additional_data=None,
+ last_stream_id=None):
+
+ """
+ Close a connection, emitting a GOAWAY frame.
+
+ .. versionchanged:: 2.4.0
+ Added ``additional_data`` and ``last_stream_id`` arguments.
+
+ :param error_code: (optional) The error code to send in the GOAWAY
+ frame.
+ :param additional_data: (optional) Additional debug data indicating
+ a reason for closing the connection. Must be a bytestring.
+ :param last_stream_id: (optional) The last stream which was processed
+ by the sender. Defaults to ``highest_inbound_stream_id``.
+ :returns: Nothing
+ """
+ self.config.logger.debug("Close connection")
+ self.state_machine.process_input(ConnectionInputs.SEND_GOAWAY)
+
+ # Additional_data must be bytes
+ if additional_data is not None:
+ assert isinstance(additional_data, bytes)
+
+ if last_stream_id is None:
+ last_stream_id = self.highest_inbound_stream_id
+
+ f = GoAwayFrame(
+ stream_id=0,
+ last_stream_id=last_stream_id,
+ error_code=error_code,
+ additional_data=(additional_data or b'')
+ )
+ self._prepare_for_sending([f])
+
+ def update_settings(self, new_settings):
+ """
+ Update the local settings. This will prepare and emit the appropriate
+ SETTINGS frame.
+
+ :param new_settings: A dictionary of {setting: new value}
+ """
+ self.config.logger.debug(
+ "Update connection settings to %s", new_settings
+ )
+ self.state_machine.process_input(ConnectionInputs.SEND_SETTINGS)
+ self.local_settings.update(new_settings)
+ s = SettingsFrame(0)
+ s.settings = new_settings
+ self._prepare_for_sending([s])
+
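+ # Sketch (assumes ``conn`` and ``sock``); SettingCodes is defined in
+ # h2/settings.py, vendored under ``packages/`` in this add-on.
+ #
+ # from h2.settings import SettingCodes
+ # conn.update_settings({
+ #     SettingCodes.MAX_CONCURRENT_STREAMS: 100,
+ #     SettingCodes.INITIAL_WINDOW_SIZE: 2 ** 20,
+ # })
+ # sock.sendall(conn.data_to_send())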
+ def advertise_alternative_service(self,
+ field_value,
+ origin=None,
+ stream_id=None):
+ """
+ Notify a client about an available Alternative Service.
+
+ An Alternative Service is defined in `RFC 7838
+ <https://tools.ietf.org/html/rfc7838>`_. An Alternative Service
+ notification informs a client that a given origin is also available
+ elsewhere.
+
+ Alternative Services can be advertised in two ways. Firstly, they can
+ be advertised explicitly: that is, a server can say "origin X is also
+ available at Y". To advertise like this, set the ``origin`` argument
+ and not the ``stream_id`` argument. Alternatively, they can be
+ advertised implicitly: that is, a server can say "the origin you're
+ contacting on stream X is also available at Y". To advertise like this,
+ set the ``stream_id`` argument and not the ``origin`` argument.
+
+ The explicit method of advertising can be done as long as the
+ connection is active. The implicit method can only be done after the
+ client has sent the request headers and before the server has sent the
+ response headers: outside of those points, Hyper-h2 will forbid sending
+ the Alternative Service advertisement by raising a ProtocolError.
+
+ The ``field_value`` parameter is specified in RFC 7838. Hyper-h2 does
+ not validate or introspect this argument: the user is required to
+ ensure that it's well-formed. ``field_value`` corresponds to RFC 7838's
+ "Alternative Service Field Value".
+
+ .. note:: It is strongly preferred to use the explicit method of
+ advertising Alternative Services. The implicit method of
+ advertising Alternative Services has a number of subtleties
+ and can lead to inconsistencies between the server and
+ client. Hyper-h2 allows both mechanisms, but caution is
+ strongly advised.
+
+ .. versionadded:: 2.3.0
+
+ :param field_value: The RFC 7838 Alternative Service Field Value. This
+ argument is not introspected by Hyper-h2: the user is responsible
+ for ensuring that it is well-formed.
+ :type field_value: ``bytes``
+
+ :param origin: The origin/authority to which the Alternative Service
+ being advertised applies. Must not be provided at the same time as
+ ``stream_id``.
+ :type origin: ``bytes`` or ``None``
+
+ :param stream_id: The ID of the stream which was sent to the authority
+ for which this Alternative Service advertisement applies. Must not
+ be provided at the same time as ``origin``.
+ :type stream_id: ``int`` or ``None``
+
+ :returns: Nothing.
+ """
+ if not isinstance(field_value, bytes):
+ raise ValueError("Field must be bytestring.")
+
+ if origin is not None and stream_id is not None:
+ raise ValueError("Must not provide both origin and stream_id")
+
+ self.state_machine.process_input(
+ ConnectionInputs.SEND_ALTERNATIVE_SERVICE
+ )
+
+ if origin is not None:
+ # This ALTSVC is sent on stream zero.
+ f = AltSvcFrame(stream_id=0)
+ f.origin = origin
+ f.field = field_value
+ frames = [f]
+ else:
+ stream = self._get_stream_by_id(stream_id)
+ frames = stream.advertise_alternative_service(field_value)
+
+ self._prepare_for_sending(frames)
+
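+ # Sketch of both advertisement styles described above (the field value and
+ # origin shown are illustrative only):
+ #
+ # # explicit: applies to the named origin
+ # conn.advertise_alternative_service(
+ #     field_value=b'h2=":8443"; ma=60', origin=b'example.com'
+ # )
+ # # implicit: applies to the origin of the request on stream 1
+ # conn.advertise_alternative_service(
+ #     field_value=b'h2=":8443"; ma=60', stream_id=1
+ # )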
+ def prioritize(self, stream_id, weight=None, depends_on=None,
+ exclusive=None):
+ """
+ Notify a server about the priority of a stream.
+
+ Stream priorities are a form of guidance to a remote server: they
+ inform the server about how important a given response is, so that the
+ server may allocate its resources (e.g. bandwidth, CPU time, etc.)
+ accordingly. This exists to allow clients to ensure that the most
+ important data arrives earlier, while less important data does not
+ starve out the more important data.
+
+ Stream priorities are explained in depth in `RFC 7540 Section 5.3
+ <https://tools.ietf.org/html/rfc7540#section-5.3>`_.
+
+ This method updates the priority information of a single stream. It may
+ be called well before a stream is actively in use, or well after a
+ stream is closed.
+
+ .. warning:: RFC 7540 allows for servers to change the priority of
+ streams. However, hyper-h2 **does not** allow server
+ stacks to do this. This is because most clients do not
+ adequately know how to respond when provided conflicting
+ priority information, and relatively little utility is
+ provided by making that functionality available.
+
+ .. note:: hyper-h2 **does not** maintain any information about the
+ RFC 7540 priority tree. That means that hyper-h2 does not
+ prevent incautious users from creating invalid priority
+ trees, particularly by creating priority loops. While some
+ basic error checking is provided by hyper-h2, users are
+ strongly recommended to understand their prioritisation
+ strategies before using the priority tools here.
+
+ .. note:: Priority information is strictly advisory. Servers are
+ allowed to disregard it entirely. Avoid relying on the idea
+ that your priority signaling will definitely be obeyed.
+
+ .. versionadded:: 2.4.0
+
+ :param stream_id: The ID of the stream to prioritize.
+ :type stream_id: ``int``
+
+ :param weight: The weight to give the stream. Defaults to ``16``, the
+ default weight of any stream. May be any value between ``1`` and
+ ``256`` inclusive. The relative weight of a stream indicates what
+ proportion of available resources will be allocated to that
+ stream.
+ :type weight: ``int``
+
+ :param depends_on: The ID of the stream on which this stream depends.
+ This stream will only be progressed if it is impossible to
+ progress the parent stream (the one on which this one depends).
+ Passing the value ``0`` means that this stream does not depend on
+ any other. Defaults to ``0``.
+ :type depends_on: ``int``
+
+ :param exclusive: Whether this stream is an exclusive dependency of its
+ "parent" stream (i.e. the stream given by ``depends_on``). If a
+ stream is an exclusive dependency of another, that means that all
+ previously-set children of the parent are moved to become children
+ of the new exclusively-dependent stream. Defaults to ``False``.
+ :type exclusive: ``bool``
+ """
+ if not self.config.client_side:
+ raise RFC1122Error("Servers SHOULD NOT prioritize streams.")
+
+ self.state_machine.process_input(
+ ConnectionInputs.SEND_PRIORITY
+ )
+
+ frame = PriorityFrame(stream_id)
+ frame = _add_frame_priority(frame, weight, depends_on, exclusive)
+
+ self._prepare_for_sending([frame])
+
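+ # Client-side sketch (values are illustrative): give stream 5 twice the
+ # default weight and make it depend on stream 3.
+ #
+ # conn.prioritize(stream_id=5, weight=32, depends_on=3, exclusive=False)
+ # sock.sendall(conn.data_to_send())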
+ def local_flow_control_window(self, stream_id):
+ """
+ Returns the maximum amount of data that can be sent on stream
+ ``stream_id``.
+
+ This value will never be larger than the total data that can be sent on
+ the connection: even if the given stream allows more data, the
+ connection window provides a logical maximum to the amount of data that
+ can be sent.
+
+ The maximum data that can be sent in a single data frame on a stream
+ is either this value, or the maximum frame size, whichever is
+ *smaller*.
+
+ :param stream_id: The ID of the stream whose flow control window is
+ being queried.
+ :type stream_id: ``int``
+ :returns: The amount of data in bytes that can be sent on the stream
+ before the flow control window is exhausted.
+ :rtype: ``int``
+ """
+ stream = self._get_stream_by_id(stream_id)
+ return min(
+ self.outbound_flow_control_window,
+ stream.outbound_flow_control_window
+ )
+
+ def remote_flow_control_window(self, stream_id):
+ """
+ Returns the maximum amount of data the remote peer can send on stream
+ ``stream_id``.
+
+ This value will never be larger than the total data that can be sent on
+ the connection: even if the given stream allows more data, the
+ connection window provides a logical maximum to the amount of data that
+ can be sent.
+
+ The maximum data that can be sent in a single data frame on a stream
+ is either this value, or the maximum frame size, whichever is
+ *smaller*.
+
+ :param stream_id: The ID of the stream whose flow control window is
+ being queried.
+ :type stream_id: ``int``
+ :returns: The amount of data in bytes that can be received on the
+ stream before the flow control window is exhausted.
+ :rtype: ``int``
+ """
+ stream = self._get_stream_by_id(stream_id)
+ return min(
+ self.inbound_flow_control_window,
+ stream.inbound_flow_control_window
+ )
+
+ def acknowledge_received_data(self, acknowledged_size, stream_id):
+ """
+ Inform the :class:`H2Connection ` that a
+ certain number of flow-controlled bytes have been processed, and that
+ the space should be handed back to the remote peer at an opportune
+ time.
+
+ .. versionadded:: 2.5.0
+
+ :param acknowledged_size: The total *flow-controlled size* of the data
+ that has been processed. Note that this must include the amount of
+ padding that was sent with that data.
+ :type acknowledged_size: ``int``
+ :param stream_id: The ID of the stream on which this data was received.
+ :type stream_id: ``int``
+ :returns: Nothing
+ :rtype: ``None``
+ """
+ self.config.logger.debug(
+ "Ack received data on stream ID %d with size %d",
+ stream_id, acknowledged_size
+ )
+ if stream_id <= 0:
+ raise ValueError(
+ "Stream ID %d is not valid for acknowledge_received_data" %
+ stream_id
+ )
+ if acknowledged_size < 0:
+ raise ValueError("Cannot acknowledge negative data")
+
+ frames = []
+
+ conn_manager = self._inbound_flow_control_window_manager
+ conn_increment = conn_manager.process_bytes(acknowledged_size)
+ if conn_increment:
+ f = WindowUpdateFrame(0)
+ f.window_increment = conn_increment
+ frames.append(f)
+
+ try:
+ stream = self._get_stream_by_id(stream_id)
+ except StreamClosedError:
+ # The stream is already gone. We're not worried about incrementing
+ # the window in this case.
+ pass
+ else:
+ # No point incrementing the windows of closed streams.
+ if stream.open:
+ frames.extend(
+ stream.acknowledge_received_data(acknowledged_size)
+ )
+
+ self._prepare_for_sending(frames)
+
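+ # Typical pattern (``process`` is a placeholder for application code):
+ # acknowledge the *flow-controlled* length of every DATA event so the
+ # peer's window reopens once the data has actually been handled.
+ #
+ # for event in conn.receive_data(sock.recv(65535)):
+ #     if isinstance(event, h2.events.DataReceived):
+ #         process(event.data)
+ #         conn.acknowledge_received_data(
+ #             event.flow_controlled_length, event.stream_id
+ #         )
+ # sock.sendall(conn.data_to_send())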
+ def data_to_send(self, amount=None):
+ """
+ Returns some data for sending out of the internal data buffer.
+
+ This method is analogous to ``read`` on a file-like object, but it
+ doesn't block. Instead, it returns as much data as the user asks for,
+ or less if that much data is not available. It does not perform any
+ I/O, and so uses a different name.
+
+ :param amount: (optional) The maximum amount of data to return. If not
+ set, or set to ``None``, will return as much data as possible.
+ :type amount: ``int``
+ :returns: A bytestring containing the data to send on the wire.
+ :rtype: ``bytes``
+ """
+ if amount is None:
+ data = bytes(self._data_to_send)
+ self._data_to_send = bytearray()
+ return data
+ else:
+ data = bytes(self._data_to_send[:amount])
+ self._data_to_send = self._data_to_send[amount:]
+ return data
+
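+ # Draining the buffer (assumes ``conn`` and ``sock``):
+ #
+ # sock.sendall(conn.data_to_send())      # everything that is queued
+ # first_kib = conn.data_to_send(1024)    # or at most 1024 bytes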
+ def clear_outbound_data_buffer(self):
+ """
+ Clears the outbound data buffer, such that if this call was immediately
+ followed by a call to
+ :meth:`data_to_send `, that
+ call would return no data.
+
+ This method should not normally be used, but is made available to avoid
+ exposing implementation details.
+ """
+ self._data_to_send = bytearray()
+
+ def _acknowledge_settings(self):
+ """
+ Acknowledge settings that have been received.
+
+ .. versionchanged:: 2.0.0
+ Removed from public API, removed useless ``event`` parameter, made
+ automatic.
+
+ :returns: Nothing
+ """
+ self.state_machine.process_input(ConnectionInputs.SEND_SETTINGS)
+
+ changes = self.remote_settings.acknowledge()
+
+ if SettingCodes.INITIAL_WINDOW_SIZE in changes:
+ setting = changes[SettingCodes.INITIAL_WINDOW_SIZE]
+ self._flow_control_change_from_settings(
+ setting.original_value,
+ setting.new_value,
+ )
+
+ # HEADER_TABLE_SIZE changes by the remote part affect our encoder: cf.
+ # RFC 7540 Section 6.5.2.
+ if SettingCodes.HEADER_TABLE_SIZE in changes:
+ setting = changes[SettingCodes.HEADER_TABLE_SIZE]
+ self.encoder.header_table_size = setting.new_value
+
+ if SettingCodes.MAX_FRAME_SIZE in changes:
+ setting = changes[SettingCodes.MAX_FRAME_SIZE]
+ self.max_outbound_frame_size = setting.new_value
+ for stream in self.streams.values():
+ stream.max_outbound_frame_size = setting.new_value
+
+ f = SettingsFrame(0)
+ f.flags.add('ACK')
+ return [f]
+
+ def _flow_control_change_from_settings(self, old_value, new_value):
+ """
+ Update flow control windows in response to a change in the value of
+ SETTINGS_INITIAL_WINDOW_SIZE.
+
+ When this setting is changed, it automatically updates all flow control
+ windows by the delta in the settings values. Note that it does not
+ increment the *connection* flow control window, per section 6.9.2 of
+ RFC 7540.
+ """
+ delta = new_value - old_value
+
+ for stream in self.streams.values():
+ stream.outbound_flow_control_window = guard_increment_window(
+ stream.outbound_flow_control_window,
+ delta
+ )
+
+ def _inbound_flow_control_change_from_settings(self, old_value, new_value):
+ """
+ Update remote flow control windows in response to a change in the value
+ of SETTINGS_INITIAL_WINDOW_SIZE.
+
+ When this setting is changed, it automatically updates all remote flow
+ control windows by the delta in the settings values.
+ """
+ delta = new_value - old_value
+
+ for stream in self.streams.values():
+ stream._inbound_flow_control_change_from_settings(delta)
+
+ def receive_data(self, data):
+ """
+ Pass some received HTTP/2 data to the connection for handling.
+
+ :param data: The data received from the remote peer on the network.
+ :type data: ``bytes``
+ :returns: A list of events that the remote peer triggered by sending
+ this data.
+ """
+ self.config.logger.trace(
+ "Process received data on connection. Received data: %r", data
+ )
+
+ events = []
+ self.incoming_buffer.add_data(data)
+ self.incoming_buffer.max_frame_size = self.max_inbound_frame_size
+
+ try:
+ for frame in self.incoming_buffer:
+ events.extend(self._receive_frame(frame))
+ except InvalidPaddingError:
+ self._terminate_connection(ErrorCodes.PROTOCOL_ERROR)
+ raise ProtocolError("Received frame with invalid padding.")
+ except ProtocolError as e:
+ # For whatever reason, receiving the frame caused a protocol error.
+ # We should prepare to emit a GoAway frame before throwing the
+ # exception up further. No need for an event: the exception will
+ # do fine.
+ self._terminate_connection(e.error_code)
+ raise
+
+ return events
+
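+ # A minimal receive-loop sketch (``handle_request`` is a placeholder for
+ # application code; ``conn`` and ``sock`` as above):
+ #
+ # data = sock.recv(65535)
+ # for event in conn.receive_data(data):
+ #     if isinstance(event, h2.events.RequestReceived):
+ #         handle_request(event.stream_id, event.headers)
+ # sock.sendall(conn.data_to_send())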
+ def _receive_frame(self, frame):
+ """
+ Handle a frame received on the connection.
+
+ .. versionchanged:: 2.0.0
+ Removed from the public API.
+ """
+ try:
+ # I don't love using __class__ here, maybe reconsider it.
+ frames, events = self._frame_dispatch_table[frame.__class__](frame)
+ except StreamClosedError as e:
+ # If the stream was closed by RST_STREAM, we just send a RST_STREAM
+ # to the remote peer. Otherwise, this is a connection error, and so
+ # we will re-raise to trigger one.
+ if self._stream_is_closed_by_reset(e.stream_id):
+ f = RstStreamFrame(e.stream_id)
+ f.error_code = e.error_code
+ self._prepare_for_sending([f])
+ events = e._events
+ else:
+ raise
+ except StreamIDTooLowError as e:
+ # The stream ID seems invalid. This may happen when the closed
+ # stream has been cleaned up, or when the remote peer has opened a
+ # new stream with a higher stream ID than this one, forcing it
+ # closed implicitly.
+ #
+ # Check how the stream was closed: depending on the mechanism, it
+ # is either a stream error or a connection error.
+ if self._stream_is_closed_by_reset(e.stream_id):
+ # Closed by RST_STREAM is a stream error.
+ f = RstStreamFrame(e.stream_id)
+ f.error_code = ErrorCodes.STREAM_CLOSED
+ self._prepare_for_sending([f])
+ events = []
+ elif self._stream_is_closed_by_end(e.stream_id):
+ # Closed by END_STREAM is a connection error.
+ raise StreamClosedError(e.stream_id)
+ else:
+ # Closed implicitly, also a connection error, but of type
+ # PROTOCOL_ERROR.
+ raise
+ else:
+ self._prepare_for_sending(frames)
+
+ return events
+
+ def _terminate_connection(self, error_code):
+ """
+ Terminate the connection early. Used in error handling blocks to send
+ GOAWAY frames.
+ """
+ f = GoAwayFrame(0)
+ f.last_stream_id = self.highest_inbound_stream_id
+ f.error_code = error_code
+ self.state_machine.process_input(ConnectionInputs.SEND_GOAWAY)
+ self._prepare_for_sending([f])
+
+ def _receive_headers_frame(self, frame):
+ """
+ Receive a headers frame on the connection.
+ """
+ # If necessary, check we can open the stream. Also validate that the
+ # stream ID is valid.
+ if frame.stream_id not in self.streams:
+ max_open_streams = self.local_settings.max_concurrent_streams
+ if (self.open_inbound_streams + 1) > max_open_streams:
+ raise TooManyStreamsError(
+ "Max outbound streams is %d, %d open" %
+ (max_open_streams, self.open_outbound_streams)
+ )
+
+ # Let's decode the headers. We handle headers as bytes internally up
+ # until we hang them off the event, at which point we may optionally
+ # convert them to unicode.
+ headers = _decode_headers(self.decoder, frame.data)
+
+ events = self.state_machine.process_input(
+ ConnectionInputs.RECV_HEADERS
+ )
+ stream = self._get_or_create_stream(
+ frame.stream_id, AllowedStreamIDs(not self.config.client_side)
+ )
+ frames, stream_events = stream.receive_headers(
+ headers,
+ 'END_STREAM' in frame.flags,
+ self.config.header_encoding
+ )
+
+ if 'PRIORITY' in frame.flags:
+ p_frames, p_events = self._receive_priority_frame(frame)
+ stream_events[0].priority_updated = p_events[0]
+ stream_events.extend(p_events)
+ assert not p_frames
+
+ return frames, events + stream_events
+
+ def _receive_push_promise_frame(self, frame):
+ """
+ Receive a push-promise frame on the connection.
+ """
+ if not self.local_settings.enable_push:
+ raise ProtocolError("Received pushed stream")
+
+ pushed_headers = _decode_headers(self.decoder, frame.data)
+
+ events = self.state_machine.process_input(
+ ConnectionInputs.RECV_PUSH_PROMISE
+ )
+
+ try:
+ stream = self._get_stream_by_id(frame.stream_id)
+ except NoSuchStreamError:
+ # We need to check if the parent stream was reset by us. If it was
+ # then we presume that the PUSH_PROMISE was in flight when we reset
+ # the parent stream. Rather than accept the new stream, just reset
+ # it.
+ #
+ # If this was closed naturally, however, we should call this a
+ # PROTOCOL_ERROR: pushing a stream on a naturally closed stream is
+ # a real problem because it creates a brand new stream that the
+ # remote peer now believes exists.
+ if (self._stream_closed_by(frame.stream_id) ==
+ StreamClosedBy.SEND_RST_STREAM):
+ f = RstStreamFrame(frame.promised_stream_id)
+ f.error_code = ErrorCodes.REFUSED_STREAM
+ return [f], events
+
+ raise ProtocolError("Attempted to push on closed stream.")
+
+ # We need to prevent peers pushing streams in response to streams that
+ # they themselves have already pushed: see #163 and RFC 7540 § 6.6. The
+ # easiest way to do that is to assert that the stream_id is not even:
+ # this shortcut works because only servers can push and the state
+ # machine will enforce this.
+ if (frame.stream_id % 2) == 0:
+ raise ProtocolError("Cannot recursively push streams.")
+
+ try:
+ frames, stream_events = stream.receive_push_promise_in_band(
+ frame.promised_stream_id,
+ pushed_headers,
+ self.config.header_encoding,
+ )
+ except StreamClosedError:
+ # The parent stream was reset by us, so we presume that
+ # PUSH_PROMISE was in flight when we reset the parent stream.
+ # So we just reset the new stream.
+ f = RstStreamFrame(frame.promised_stream_id)
+ f.error_code = ErrorCodes.REFUSED_STREAM
+ return [f], events
+
+ new_stream = self._begin_new_stream(
+ frame.promised_stream_id, AllowedStreamIDs.EVEN
+ )
+ self.streams[frame.promised_stream_id] = new_stream
+ new_stream.remotely_pushed(pushed_headers)
+
+ return frames, events + stream_events
+
+ def _handle_data_on_closed_stream(self, events, exc, frame):
+ # This stream is already closed - and yet we received a DATA frame.
+ # The received DATA frame counts towards the connection flow window.
+ # We need to manually to acknowledge the DATA frame to update the flow
+ # window of the connection. Otherwise the whole connection stalls due
+ # the inbound flow window being 0.
+ frames = []
+ conn_manager = self._inbound_flow_control_window_manager
+ conn_increment = conn_manager.process_bytes(
+ frame.flow_controlled_length
+ )
+ if conn_increment:
+ f = WindowUpdateFrame(0)
+ f.window_increment = conn_increment
+ frames.append(f)
+ self.config.logger.debug(
+ "Received DATA frame on closed stream %d - "
+ "auto-emitted a WINDOW_UPDATE by %d",
+ frame.stream_id, conn_increment
+ )
+ f = RstStreamFrame(exc.stream_id)
+ f.error_code = exc.error_code
+ frames.append(f)
+ self.config.logger.debug(
+ "Stream %d already CLOSED or cleaned up - "
+ "auto-emitted a RST_FRAME" % frame.stream_id
+ )
+ return frames, events + exc._events
+
+ def _receive_data_frame(self, frame):
+ """
+ Receive a data frame on the connection.
+ """
+ flow_controlled_length = frame.flow_controlled_length
+
+ events = self.state_machine.process_input(
+ ConnectionInputs.RECV_DATA
+ )
+ self._inbound_flow_control_window_manager.window_consumed(
+ flow_controlled_length
+ )
+
+ try:
+ stream = self._get_stream_by_id(frame.stream_id)
+ frames, stream_events = stream.receive_data(
+ frame.data,
+ 'END_STREAM' in frame.flags,
+ flow_controlled_length
+ )
+ except StreamClosedError as e:
+ # This stream is either marked as CLOSED or already gone from our
+ # internal state.
+ return self._handle_data_on_closed_stream(events, e, frame)
+
+ return frames, events + stream_events
+
+ def _receive_settings_frame(self, frame):
+ """
+ Receive a SETTINGS frame on the connection.
+ """
+ events = self.state_machine.process_input(
+ ConnectionInputs.RECV_SETTINGS
+ )
+
+ # This is an ack of the local settings.
+ if 'ACK' in frame.flags:
+ changed_settings = self._local_settings_acked()
+ ack_event = SettingsAcknowledged()
+ ack_event.changed_settings = changed_settings
+ events.append(ack_event)
+ return [], events
+
+ # Add the new settings.
+ self.remote_settings.update(frame.settings)
+ events.append(
+ RemoteSettingsChanged.from_settings(
+ self.remote_settings, frame.settings
+ )
+ )
+ frames = self._acknowledge_settings()
+
+ return frames, events
+
+ def _receive_window_update_frame(self, frame):
+ """
+ Receive a WINDOW_UPDATE frame on the connection.
+ """
+ # hyperframe will take care of validating the window_increment.
+ # If we reach in here, we can assume a valid value.
+
+ events = self.state_machine.process_input(
+ ConnectionInputs.RECV_WINDOW_UPDATE
+ )
+
+ if frame.stream_id:
+ try:
+ stream = self._get_stream_by_id(frame.stream_id)
+ frames, stream_events = stream.receive_window_update(
+ frame.window_increment
+ )
+ except StreamClosedError:
+ return [], events
+ else:
+ # Increment our local flow control window.
+ self.outbound_flow_control_window = guard_increment_window(
+ self.outbound_flow_control_window,
+ frame.window_increment
+ )
+
+ # FIXME: Should we split this into one event per active stream?
+ window_updated_event = WindowUpdated()
+ window_updated_event.stream_id = 0
+ window_updated_event.delta = frame.window_increment
+ stream_events = [window_updated_event]
+ frames = []
+
+ return frames, events + stream_events
+
+ def _receive_ping_frame(self, frame):
+ """
+ Receive a PING frame on the connection.
+ """
+ events = self.state_machine.process_input(
+ ConnectionInputs.RECV_PING
+ )
+ flags = []
+
+ if 'ACK' in frame.flags:
+ evt = PingAckReceived()
+ else:
+ evt = PingReceived()
+
+ # automatically ACK the PING with the same 'opaque data'
+ f = PingFrame(0)
+ f.flags = {'ACK'}
+ f.opaque_data = frame.opaque_data
+ flags.append(f)
+
+ evt.ping_data = frame.opaque_data
+ events.append(evt)
+
+ return flags, events
+
+ def _receive_rst_stream_frame(self, frame):
+ """
+ Receive a RST_STREAM frame on the connection.
+ """
+ events = self.state_machine.process_input(
+ ConnectionInputs.RECV_RST_STREAM
+ )
+ try:
+ stream = self._get_stream_by_id(frame.stream_id)
+ except NoSuchStreamError:
+ # The stream is missing. That's ok, we just do nothing here.
+ stream_frames = []
+ stream_events = []
+ else:
+ stream_frames, stream_events = stream.stream_reset(frame)
+
+ return stream_frames, events + stream_events
+
+ def _receive_priority_frame(self, frame):
+ """
+ Receive a PRIORITY frame on the connection.
+ """
+ events = self.state_machine.process_input(
+ ConnectionInputs.RECV_PRIORITY
+ )
+
+ event = PriorityUpdated()
+ event.stream_id = frame.stream_id
+ event.depends_on = frame.depends_on
+ event.exclusive = frame.exclusive
+
+ # Weight is an integer between 1 and 256, but the byte only allows
+ # 0 to 255: add one.
+ event.weight = frame.stream_weight + 1
+
+ # A stream may not depend on itself.
+ if event.depends_on == frame.stream_id:
+ raise ProtocolError(
+ "Stream %d may not depend on itself" % frame.stream_id
+ )
+ events.append(event)
+
+ return [], events
+
+ def _receive_goaway_frame(self, frame):
+ """
+ Receive a GOAWAY frame on the connection.
+ """
+ events = self.state_machine.process_input(
+ ConnectionInputs.RECV_GOAWAY
+ )
+
+ # Clear the outbound data buffer: we cannot send further data now.
+ self.clear_outbound_data_buffer()
+
+ # Fire an appropriate ConnectionTerminated event.
+ new_event = ConnectionTerminated()
+ new_event.error_code = _error_code_from_int(frame.error_code)
+ new_event.last_stream_id = frame.last_stream_id
+ new_event.additional_data = (frame.additional_data
+ if frame.additional_data else None)
+ events.append(new_event)
+
+ return [], events
+
+ def _receive_naked_continuation(self, frame):
+ """
+ A naked CONTINUATION frame has been received. This is always an error,
+ but the type of error it is depends on the state of the stream and must
+ transition the state of the stream, so we need to pass it to the
+ appropriate stream.
+ """
+ stream = self._get_stream_by_id(frame.stream_id)
+ stream.receive_continuation()
+ assert False, "Should not be reachable"
+
+ def _receive_alt_svc_frame(self, frame):
+ """
+ An ALTSVC frame has been received. This frame, specified in RFC 7838,
+ is used to advertise alternative places where the same service can be
+ reached.
+
+ This frame can optionally be received either on a stream or on stream
+ 0, and its semantics are different in each case.
+ """
+ events = self.state_machine.process_input(
+ ConnectionInputs.RECV_ALTERNATIVE_SERVICE
+ )
+ frames = []
+
+ if frame.stream_id:
+ # Given that it makes no sense to receive ALTSVC on a stream
+ # before that stream has been opened with a HEADERS frame, the
+ # ALTSVC frame cannot create a stream. If the stream is not
+ # present, we simply ignore the frame.
+ try:
+ stream = self._get_stream_by_id(frame.stream_id)
+ except (NoSuchStreamError, StreamClosedError):
+ pass
+ else:
+ stream_frames, stream_events = stream.receive_alt_svc(frame)
+ frames.extend(stream_frames)
+ events.extend(stream_events)
+ else:
+ # This frame is sent on stream 0. The origin field on the frame
+ # must be present, though if it isn't it's not a ProtocolError
+ # (annoyingly), we just need to ignore it.
+ if not frame.origin:
+ return frames, events
+
+ # If we're a server, we want to ignore this (RFC 7838 says so).
+ if not self.config.client_side:
+ return frames, events
+
+ event = AlternativeServiceAvailable()
+ event.origin = frame.origin
+ event.field_value = frame.field
+ events.append(event)
+
+ return frames, events
+
+ def _receive_unknown_frame(self, frame):
+ """
+ We have received a frame that we do not understand. This is almost
+ certainly an extension frame, though it's impossible to be entirely
+ sure.
+
+ RFC 7540 § 5.5 says that we MUST ignore unknown frame types: so we
+ do. We do notify the user that we received one, however.
+ """
+ # All we do here is log.
+ self.config.logger.debug(
+ "Received unknown extension frame (ID %d)", frame.stream_id
+ )
+ event = UnknownFrameReceived()
+ event.frame = frame
+ return [], [event]
+
+ def _local_settings_acked(self):
+ """
+ Handle the local settings being ACKed, update internal state.
+ """
+ changes = self.local_settings.acknowledge()
+
+ if SettingCodes.INITIAL_WINDOW_SIZE in changes:
+ setting = changes[SettingCodes.INITIAL_WINDOW_SIZE]
+ self._inbound_flow_control_change_from_settings(
+ setting.original_value,
+ setting.new_value,
+ )
+
+ if SettingCodes.MAX_HEADER_LIST_SIZE in changes:
+ setting = changes[SettingCodes.MAX_HEADER_LIST_SIZE]
+ self.decoder.max_header_list_size = setting.new_value
+
+ if SettingCodes.MAX_FRAME_SIZE in changes:
+ setting = changes[SettingCodes.MAX_FRAME_SIZE]
+ self.max_inbound_frame_size = setting.new_value
+
+ if SettingCodes.HEADER_TABLE_SIZE in changes:
+ setting = changes[SettingCodes.HEADER_TABLE_SIZE]
+ # This is safe across all hpack versions: some versions just won't
+ # respect it.
+ self.decoder.max_allowed_table_size = setting.new_value
+
+ return changes
+
+ def _stream_id_is_outbound(self, stream_id):
+ """
+ Returns ``True`` if the stream ID corresponds to an outbound stream
+ (one initiated by this peer), returns ``False`` otherwise.
+ """
+ return (stream_id % 2 == int(self.config.client_side))
+
+ def _stream_closed_by(self, stream_id):
+ """
+ Returns how the stream was closed.
+
+ The return value will be either a member of
+ ``h2.stream.StreamClosedBy`` or ``None``. If ``None``, the stream was
+ closed implicitly by the peer opening a stream with a higher stream ID
+ before opening this one.
+ """
+ if stream_id in self.streams:
+ return self.streams[stream_id].closed_by
+ if stream_id in self._closed_streams:
+ return self._closed_streams[stream_id]
+ return None
+
+ def _stream_is_closed_by_reset(self, stream_id):
+ """
+ Returns ``True`` if the stream was closed by sending or receiving a
+ RST_STREAM frame. Returns ``False`` otherwise.
+ """
+ return self._stream_closed_by(stream_id) in (
+ StreamClosedBy.RECV_RST_STREAM, StreamClosedBy.SEND_RST_STREAM
+ )
+
+ def _stream_is_closed_by_end(self, stream_id):
+ """
+ Returns ``True`` if the stream was closed by sending or receiving an
+ END_STREAM flag in a HEADERS or DATA frame. Returns ``False``
+ otherwise.
+ """
+ return self._stream_closed_by(stream_id) in (
+ StreamClosedBy.RECV_END_STREAM, StreamClosedBy.SEND_END_STREAM
+ )
+
+
+def _add_frame_priority(frame, weight=None, depends_on=None, exclusive=None):
+ """
+ Adds priority data to a given frame. Does not change any flags set on that
+ frame: if the caller is adding priority information to a HEADERS frame they
+ must set that themselves.
+
+ This method also deliberately sets defaults for anything missing.
+
+ This method validates the input values.
+ """
+ # A stream may not depend on itself.
+ if depends_on == frame.stream_id:
+ raise ProtocolError(
+ "Stream %d may not depend on itself" % frame.stream_id
+ )
+
+ # Weight must be between 1 and 256.
+ if weight is not None:
+ if weight > 256 or weight < 1:
+ raise ProtocolError(
+ "Weight must be between 1 and 256, not %d" % weight
+ )
+ else:
+ # Weight is an integer between 1 and 256, but the byte only allows
+ # 0 to 255: subtract one.
+ weight -= 1
+
+ # Set defaults for anything not provided.
+ weight = weight if weight is not None else 15
+ depends_on = depends_on if depends_on is not None else 0
+ exclusive = exclusive if exclusive is not None else False
+
+ frame.stream_weight = weight
+ frame.depends_on = depends_on
+ frame.exclusive = exclusive
+
+ return frame
+
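+ # Wire-encoding note, as a quick check of the off-by-one handled above: the
+ # API weight range is 1-256 while the frame byte holds 0-255, so the default
+ # API weight of 16 is written as 15 and read back as
+ # frame.stream_weight + 1 == 16 (see _receive_priority_frame).
+ #
+ # assert _add_frame_priority(PriorityFrame(1), weight=16).stream_weight == 15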
+
+def _decode_headers(decoder, encoded_header_block):
+ """
+ Decode a HPACK-encoded header block, translating HPACK exceptions into
+ sensible hyper-h2 errors.
+
+ This only ever returns bytestring headers: hyper-h2 may emit them as
+ unicode later, but internally it processes them as bytestrings only.
+ """
+ try:
+ return decoder.decode(encoded_header_block, raw=True)
+ except OversizedHeaderListError as e:
+ # This is a symptom of a HPACK bomb attack: the user has
+ # disregarded our requirements on how large a header block we'll
+ # accept.
+ raise DenialOfServiceError("Oversized header block: %s" % e)
+ except (HPACKError, IndexError, TypeError, UnicodeDecodeError) as e:
+ # We should only need HPACKError here, but versions of HPACK older
+ # than 2.1.0 throw all three others as well. For maximum
+ # compatibility, catch all of them.
+ raise ProtocolError("Error decoding header block: %s" % e)
diff --git a/packages/h2/errors.py b/packages/h2/errors.py
new file mode 100644
index 000000000..303df5976
--- /dev/null
+++ b/packages/h2/errors.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+"""
+h2/errors
+~~~~~~~~~
+
+Global error code registry containing the established HTTP/2 error codes.
+
+The current registry is available at:
+https://tools.ietf.org/html/rfc7540#section-11.4
+"""
+import enum
+
+
+class ErrorCodes(enum.IntEnum):
+ """
+ All known HTTP/2 error codes.
+
+ .. versionadded:: 2.5.0
+ """
+ #: Graceful shutdown.
+ NO_ERROR = 0x0
+
+ #: Protocol error detected.
+ PROTOCOL_ERROR = 0x1
+
+ #: Implementation fault.
+ INTERNAL_ERROR = 0x2
+
+ #: Flow-control limits exceeded.
+ FLOW_CONTROL_ERROR = 0x3
+
+ #: Settings not acknowledged.
+ SETTINGS_TIMEOUT = 0x4
+
+ #: Frame received for closed stream.
+ STREAM_CLOSED = 0x5
+
+ #: Frame size incorrect.
+ FRAME_SIZE_ERROR = 0x6
+
+ #: Stream not processed.
+ REFUSED_STREAM = 0x7
+
+ #: Stream cancelled.
+ CANCEL = 0x8
+
+ #: Compression state not updated.
+ COMPRESSION_ERROR = 0x9
+
+ #: TCP connection error for CONNECT method.
+ CONNECT_ERROR = 0xa
+
+ #: Processing capacity exceeded.
+ ENHANCE_YOUR_CALM = 0xb
+
+ #: Negotiated TLS parameters not acceptable.
+ INADEQUATE_SECURITY = 0xc
+
+ #: Use HTTP/1.1 for the request.
+ HTTP_1_1_REQUIRED = 0xd
+
+
+def _error_code_from_int(code):
+ """
+ Given an integer error code, returns either one of
+ :class:`ErrorCodes` or, if not present in the known set of codes,
+ returns the integer directly.
+ """
+ try:
+ return ErrorCodes(code)
+ except ValueError:
+ return code
+
+
+__all__ = ['ErrorCodes']
diff --git a/packages/h2/events.py b/packages/h2/events.py
new file mode 100644
index 000000000..08b318671
--- /dev/null
+++ b/packages/h2/events.py
@@ -0,0 +1,634 @@
+# -*- coding: utf-8 -*-
+"""
+h2/events
+~~~~~~~~~
+
+Defines Event types for HTTP/2.
+
+Events are returned by the H2 state machine to allow implementations to keep
+track of events triggered by receiving data. Each time data is provided to the
+H2 state machine it processes the data and returns a list of Event objects.
+"""
+import binascii
+
+from .settings import ChangedSetting, _setting_code_from_int
+
+
+class Event:
+ """
+ Base class for h2 events.
+ """
+ pass
+
+
+class RequestReceived(Event):
+ """
+ The RequestReceived event is fired whenever request headers are received.
+ This event carries the HTTP headers for the given request and the stream ID
+ of the new stream.
+
+ .. versionchanged:: 2.3.0
+ Changed the type of ``headers`` to :class:`HeaderTuple`. This has
+ no effect on current users.
+
+ .. versionchanged:: 2.4.0
+ Added ``stream_ended`` and ``priority_updated`` properties.
+ """
+ def __init__(self):
+ #: The Stream ID for the stream this request was made on.
+ self.stream_id = None
+
+ #: The request headers.
+ self.headers = None
+
+ #: If this request also ended the stream, the associated
+ #: :class:`StreamEnded ` event will be available
+ #: here.
+ #:
+ #: .. versionadded:: 2.4.0
+ self.stream_ended = None
+
+ #: If this request also had associated priority information, the
+ #: associated :class:`PriorityUpdated `
+ #: event will be available here.
+ #:
+ #: .. versionadded:: 2.4.0
+ self.priority_updated = None
+
+ def __repr__(self):
+ return "" % (
+ self.stream_id, self.headers
+ )
+
+
+class ResponseReceived(Event):
+ """
+ The ResponseReceived event is fired whenever response headers are received.
+ This event carries the HTTP headers for the given response and the stream
+ ID of the new stream.
+
+ .. versionchanged:: 2.3.0
+ Changed the type of ``headers`` to :class:`HeaderTuple`. This has
+ no effect on current users.
+
+ .. versionchanged:: 2.4.0
+ Added ``stream_ended`` and ``priority_updated`` properties.
+ """
+ def __init__(self):
+ #: The Stream ID for the stream this response was made on.
+ self.stream_id = None
+
+ #: The response headers.
+ self.headers = None
+
+ #: If this response also ended the stream, the associated
+ #: :class:`StreamEnded ` event will be available
+ #: here.
+ #:
+ #: .. versionadded:: 2.4.0
+ self.stream_ended = None
+
+ #: If this response also had associated priority information, the
+ #: associated :class:`PriorityUpdated `
+ #: event will be available here.
+ #:
+ #: .. versionadded:: 2.4.0
+ self.priority_updated = None
+
+ def __repr__(self):
+ return "" % (
+ self.stream_id, self.headers
+ )
+
+
+class TrailersReceived(Event):
+ """
+ The TrailersReceived event is fired whenever trailers are received on a
+ stream. Trailers are a set of headers sent after the body of the
+ request/response, and are used to provide information that wasn't known
+ ahead of time (e.g. content-length). This event carries the HTTP header
+ fields that form the trailers and the stream ID of the stream on which they
+ were received.
+
+ .. versionchanged:: 2.3.0
+ Changed the type of ``headers`` to :class:`HeaderTuple`. This has
+ no effect on current users.
+
+ .. versionchanged:: 2.4.0
+ Added ``stream_ended`` and ``priority_updated`` properties.
+ """
+ def __init__(self):
+ #: The Stream ID for the stream on which these trailers were received.
+ self.stream_id = None
+
+ #: The trailers themselves.
+ self.headers = None
+
+ #: Trailers always end streams. This property has the associated
+ #: :class:`StreamEnded ` in it.
+ #:
+ #: .. versionadded:: 2.4.0
+ self.stream_ended = None
+
+ #: If the trailers also set associated priority information, the
+ #: associated :class:`PriorityUpdated `
+ #: event will be available here.
+ #:
+ #: .. versionadded:: 2.4.0
+ self.priority_updated = None
+
+ def __repr__(self):
+ return "" % (
+ self.stream_id, self.headers
+ )
+
+
+class _HeadersSent(Event):
+ """
+ The _HeadersSent event is fired whenever headers are sent.
+
+ This is an internal event, used to determine validation steps on
+ outgoing header blocks.
+ """
+ pass
+
+
+class _ResponseSent(_HeadersSent):
+ """
+ The _ResponseSent event is fired whenever response headers are sent
+ on a stream.
+
+ This is an internal event, used to determine validation steps on
+ outgoing header blocks.
+ """
+ pass
+
+
+class _RequestSent(_HeadersSent):
+ """
+ The _RequestSent event is fired whenever request headers are sent
+ on a stream.
+
+ This is an internal event, used to determine validation steps on
+ outgoing header blocks.
+ """
+ pass
+
+
+class _TrailersSent(_HeadersSent):
+ """
+ The _TrailersSent event is fired whenever trailers are sent on a
+ stream. Trailers are a set of headers sent after the body of the
+ request/response, and are used to provide information that wasn't known
+ ahead of time (e.g. content-length).
+
+ This is an internal event, used to determine validation steps on
+ outgoing header blocks.
+ """
+ pass
+
+
+class _PushedRequestSent(_HeadersSent):
+ """
+ The _PushedRequestSent event is fired whenever pushed request headers are
+ sent.
+
+ This is an internal event, used to determine validation steps on outgoing
+ header blocks.
+ """
+ pass
+
+
+class InformationalResponseReceived(Event):
+ """
+ The InformationalResponseReceived event is fired when an informational
+ response (that is, one whose status code is a 1XX code) is received from
+ the remote peer.
+
+ The remote peer may send any number of these, from zero upwards. These
+ responses are most commonly sent in response to requests that have the
+ ``expect: 100-continue`` header field present. Most users can safely
+ ignore this event unless you are intending to use the
+ ``expect: 100-continue`` flow, or are for any reason expecting a different
+ 1XX status code.
+
+ .. versionadded:: 2.2.0
+
+ .. versionchanged:: 2.3.0
+ Changed the type of ``headers`` to :class:`HeaderTuple`. This has
+ no effect on current users.
+
+ .. versionchanged:: 2.4.0
+ Added ``priority_updated`` property.
+ """
+ def __init__(self):
+ #: The Stream ID for the stream this informational response was made
+ #: on.
+ self.stream_id = None
+
+ #: The headers for this informational response.
+ self.headers = None
+
+ #: If this response also had associated priority information, the
+ #: associated :class:`PriorityUpdated `
+ #: event will be available here.
+ #:
+ #: .. versionadded:: 2.4.0
+ self.priority_updated = None
+
+ def __repr__(self):
+ return "" % (
+ self.stream_id, self.headers
+ )
+
+
+class DataReceived(Event):
+ """
+ The DataReceived event is fired whenever data is received on a stream from
+ the remote peer. The event carries the data itself, and the stream ID on
+ which the data was received.
+
+ .. versionchanged:: 2.4.0
+ Added ``stream_ended`` property.
+ """
+ def __init__(self):
+ #: The Stream ID for the stream this data was received on.
+ self.stream_id = None
+
+ #: The data itself.
+ self.data = None
+
+ #: The amount of data received that counts against the flow control
+ #: window. Note that padding counts against the flow control window, so
+ #: when adjusting flow control you should always use this field rather
+ #: than ``len(data)``.
+ self.flow_controlled_length = None
+
+ #: If this data chunk also completed the stream, the associated
+ #: :class:`StreamEnded ` event will be available
+ #: here.
+ #:
+ #: .. versionadded:: 2.4.0
+ self.stream_ended = None
+
+ def __repr__(self):
+ return (
+ "" % (
+ self.stream_id,
+ self.flow_controlled_length,
+ _bytes_representation(self.data[:20]),
+ )
+ )
+
+
+class WindowUpdated(Event):
+ """
+ The WindowUpdated event is fired whenever a flow control window changes
+ size. HTTP/2 defines flow control windows for connections and streams: this
+ event fires for both connections and streams. The event carries the ID of
+ the stream to which it applies (set to zero if the window update applies to
+ the connection), and the delta in the window size.
+ """
+ def __init__(self):
+ #: The Stream ID of the stream whose flow control window was changed.
+ #: May be ``0`` if the connection window was changed.
+ self.stream_id = None
+
+ #: The window delta.
+ self.delta = None
+
+ def __repr__(self):
+ return "" % (
+ self.stream_id, self.delta
+ )
+
+
+class RemoteSettingsChanged(Event):
+ """
+ The RemoteSettingsChanged event is fired whenever the remote peer changes
+ its settings. It contains a complete inventory of changed settings,
+ including their previous values.
+
+ In HTTP/2, settings changes need to be acknowledged. hyper-h2 automatically
+ acknowledges settings changes for efficiency. However, it is possible that
+ the caller may not be happy with the changed setting.
+
+ When this event is received, the caller should confirm that the new
+ settings are acceptable. If they are not acceptable, the user should close
+ the connection with the error code :data:`PROTOCOL_ERROR`.
+
+ .. versionchanged:: 2.0.0
+ Prior to this version the user needed to acknowledge settings changes.
+ This is no longer the case: hyper-h2 now automatically acknowledges
+ them.
+ """
+ def __init__(self):
+ #: A dictionary of setting byte to
+ #: :class:`ChangedSetting `, representing
+ #: the changed settings.
+ self.changed_settings = {}
+
+ @classmethod
+ def from_settings(cls, old_settings, new_settings):
+ """
+ Build a RemoteSettingsChanged event from a set of changed settings.
+
+ :param old_settings: A complete collection of old settings, in the form
+ of a dictionary of ``{setting: value}``.
+ :param new_settings: All the changed settings and their new values, in
+ the form of a dictionary of ``{setting: value}``.
+ """
+ e = cls()
+ for setting, new_value in new_settings.items():
+ setting = _setting_code_from_int(setting)
+ original_value = old_settings.get(setting)
+ change = ChangedSetting(setting, original_value, new_value)
+ e.changed_settings[setting] = change
+
+ return e
+
+ def __repr__(self):
+ return "" % (
+ ", ".join(repr(cs) for cs in self.changed_settings.values()),
+ )
+
+
+class PingReceived(Event):
+ """
+ The PingReceived event is fired whenever a PING is received. It contains
+ the 'opaque data' of the PING frame. A ping acknowledgment with the same
+ 'opaque data' is automatically emitted after receiving a ping.
+
+ .. versionadded:: 3.1.0
+ """
+ def __init__(self):
+ #: The data included on the ping.
+ self.ping_data = None
+
+ def __repr__(self):
+ return "" % (
+ _bytes_representation(self.ping_data),
+ )
+
+
+class PingAckReceived(Event):
+ """
+ The PingAckReceived event is fired whenever a PING acknowledgment is
+ received. It contains the 'opaque data' of the PING+ACK frame, allowing the
+ user to correlate PINGs and calculate RTT.
+
+ .. versionadded:: 3.1.0
+
+ .. versionchanged:: 4.0.0
+ Removed deprecated but equivalent ``PingAcknowledged``.
+ """
+ def __init__(self):
+ #: The data included on the ping.
+ self.ping_data = None
+
+ def __repr__(self):
+ return "" % (
+ _bytes_representation(self.ping_data),
+ )
+
+
+class StreamEnded(Event):
+ """
+ The StreamEnded event is fired whenever a stream is ended by a remote
+ party. The stream may not be fully closed if it has not been closed
+ locally, but no further data or headers should be expected on that stream.
+ """
+ def __init__(self):
+ #: The Stream ID of the stream that was closed.
+ self.stream_id = None
+
+ def __repr__(self):
+ return "" % self.stream_id
+
+
+class StreamReset(Event):
+ """
+ The StreamReset event is fired in two situations. The first is when the
+ remote party forcefully resets the stream. The second is when the remote
+ party has made a protocol error which only affects a single stream. In this
+ case, Hyper-h2 will terminate the stream early and return this event.
+
+ .. versionchanged:: 2.0.0
+ This event is now fired when Hyper-h2 automatically resets a stream.
+ """
+ def __init__(self):
+ #: The Stream ID of the stream that was reset.
+ self.stream_id = None
+
+ #: The error code given. Either one of :class:`ErrorCodes` or
+ #: ``int``.
+ self.error_code = None
+
+ #: Whether the remote peer sent a RST_STREAM or we did.
+ self.remote_reset = True
+
+ def __repr__(self):
+ return "" % (
+ self.stream_id, self.error_code, self.remote_reset
+ )
+
+
+class PushedStreamReceived(Event):
+ """
+ The PushedStreamReceived event is fired whenever a pushed stream has been
+ received from a remote peer. The event carries on it the new stream ID, the
+ ID of the parent stream, and the request headers pushed by the remote peer.
+ """
+ def __init__(self):
+ #: The Stream ID of the stream created by the push.
+ self.pushed_stream_id = None
+
+ #: The Stream ID of the stream that the push is related to.
+ self.parent_stream_id = None
+
+ #: The request headers, sent by the remote party in the push.
+ self.headers = None
+
+ def __repr__(self):
+ return (
+ "" % (
+ self.pushed_stream_id,
+ self.parent_stream_id,
+ self.headers,
+ )
+ )
+
+
+class SettingsAcknowledged(Event):
+ """
+ The SettingsAcknowledged event is fired whenever a settings ACK is received
+ from the remote peer. The event carries on it the settings that were
+ acknowledged, in the same format as
+ :class:`h2.events.RemoteSettingsChanged`.
+ """
+ def __init__(self):
+ #: A dictionary of setting byte to
+ #: :class:`ChangedSetting `, representing
+ #: the changed settings.
+ self.changed_settings = {}
+
+ def __repr__(self):
+ return "" % (
+ ", ".join(repr(cs) for cs in self.changed_settings.values()),
+ )
+
+
+class PriorityUpdated(Event):
+ """
+ The PriorityUpdated event is fired whenever a stream sends updated priority
+ information. This can occur when the stream is opened, or at any time
+ during the stream lifetime.
+
+ This event is purely advisory, and does not need to be acted on.
+
+ .. versionadded:: 2.0.0
+ """
+ def __init__(self):
+ #: The ID of the stream whose priority information is being updated.
+ self.stream_id = None
+
+ #: The new stream weight. May be the same as the original stream
+ #: weight. An integer between 1 and 256.
+ self.weight = None
+
+ #: The stream ID this stream now depends on. May be ``0``.
+ self.depends_on = None
+
+ #: Whether the stream *exclusively* depends on the parent stream. If it
+ #: does, this stream should inherit the current children of its new
+ #: parent.
+ self.exclusive = None
+
+ def __repr__(self):
+ return (
+ "" % (
+ self.stream_id,
+ self.weight,
+ self.depends_on,
+ self.exclusive
+ )
+ )
+
+
+class ConnectionTerminated(Event):
+ """
+ The ConnectionTerminated event is fired when a connection is torn down by
+ the remote peer using a GOAWAY frame. Once received, no further action may
+ be taken on the connection: a new connection must be established.
+ """
+ def __init__(self):
+ #: The error code cited when tearing down the connection. Should be
+ #: one of :class:`ErrorCodes <h2.errors.ErrorCodes>`, but may not be if
+ #: unknown HTTP/2 extensions are being used.
+ self.error_code = None
+
+ #: The stream ID of the last stream the remote peer saw. This can
+ #: provide an indication of what data, if any, never reached the remote
+ #: peer and so can safely be resent.
+ self.last_stream_id = None
+
+ #: Additional debug data that can be appended to the GOAWAY frame.
+ self.additional_data = None
+
+ def __repr__(self):
+ return (
+ "" % (
+ self.error_code,
+ self.last_stream_id,
+ _bytes_representation(
+ self.additional_data[:20]
+ if self.additional_data else None)
+ )
+ )
+
+
+class AlternativeServiceAvailable(Event):
+ """
+ The AlternativeServiceAvailable event is fired when the remote peer
+ advertises an `RFC 7838 <https://tools.ietf.org/html/rfc7838>`_ Alternative
+ Service using an ALTSVC frame.
+
+ This event always carries the origin to which the ALTSVC information
+ applies. That origin is either supplied by the server directly, or inferred
+ by hyper-h2 from the ``:authority`` pseudo-header field that was sent by
+ the user when initiating a given stream.
+
+ This event also carries what RFC 7838 calls the "Alternative Service Field
+ Value", which is formatted like a HTTP header field and contains the
+ relevant alternative service information. Hyper-h2 does not parse or in any
+ way modify that information: the user is required to do that.
+
+ This event can only be fired on the client end of a connection.
+
+ .. versionadded:: 2.3.0
+ """
+ def __init__(self):
+ #: The origin to which the alternative service field value applies.
+ #: This field is either supplied by the server directly, or inferred by
+ #: hyper-h2 from the ``:authority`` pseudo-header field that was sent
+ #: by the user when initiating the stream on which the frame was
+ #: received.
+ self.origin = None
+
+ #: The ALTSVC field value. This contains information about the HTTP
+ #: alternative service being advertised by the server. Hyper-h2 does
+ #: not parse this field: it is left exactly as sent by the server. The
+ #: structure of the data in this field is given by `RFC 7838 Section 3
+ #: <https://tools.ietf.org/html/rfc7838#section-3>`_.
+ self.field_value = None
+
+ def __repr__(self):
+ return (
+ "" % (
+ self.origin.decode('utf-8', 'ignore'),
+ self.field_value.decode('utf-8', 'ignore'),
+ )
+ )
+
+
+class UnknownFrameReceived(Event):
+ """
+ The UnknownFrameReceived event is fired when the remote peer sends a frame
+ that hyper-h2 does not understand. This occurs primarily when the remote
+ peer is employing HTTP/2 extensions that hyper-h2 doesn't know anything
+ about.
+
+ RFC 7540 requires that HTTP/2 implementations ignore these frames. hyper-h2
+ does so. However, this event is fired to allow implementations to perform
+ special processing on those frames if needed (e.g. if the implementation
+ is capable of handling the frame itself).
+
+ .. versionadded:: 2.7.0
+ """
+ def __init__(self):
+ #: The hyperframe Frame object that encapsulates the received frame.
+ self.frame = None
+
+ def __repr__(self):
+ return ""
+
+
+def _bytes_representation(data):
+ """
+ Converts a bytestring into something that is safe to print on all Python
+ platforms.
+
+ This function is relatively expensive, so it should not be called on the
+ mainline of the code. It's safe to use in things like object repr methods
+ though.
+ """
+ if data is None:
+ return None
+
+ return binascii.hexlify(data).decode('ascii')
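+
+
+# Illustrative sketch, not part of upstream hyper-h2: _bytes_representation
+# simply hex-encodes the payload, so, for example, a PING's eight opaque
+# bytes render as a sixteen-character hex string.
+#
+#     _bytes_representation(b'\x00\x01\xff')   # -> '0001ff'
+#     _bytes_representation(None)              # -> None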
diff --git a/packages/h2/exceptions.py b/packages/h2/exceptions.py
new file mode 100644
index 000000000..e22bebc0b
--- /dev/null
+++ b/packages/h2/exceptions.py
@@ -0,0 +1,187 @@
+# -*- coding: utf-8 -*-
+"""
+h2/exceptions
+~~~~~~~~~~~~~
+
+Exceptions for the HTTP/2 module.
+"""
+import h2.errors
+
+
+class H2Error(Exception):
+ """
+ The base class for all exceptions for the HTTP/2 module.
+ """
+
+
+class ProtocolError(H2Error):
+ """
+ An action was attempted in violation of the HTTP/2 protocol.
+ """
+ #: The error code corresponds to this kind of Protocol Error.
+ error_code = h2.errors.ErrorCodes.PROTOCOL_ERROR
+
+
+class FrameTooLargeError(ProtocolError):
+ """
+ The frame that we tried to send or that we received was too large.
+ """
+ #: The error code corresponds to this kind of Protocol Error.
+ error_code = h2.errors.ErrorCodes.FRAME_SIZE_ERROR
+
+
+class FrameDataMissingError(ProtocolError):
+ """
+ The frame that we received is missing some data.
+
+ .. versionadded:: 2.0.0
+ """
+ #: The error code corresponds to this kind of Protocol Error.
+ error_code = h2.errors.ErrorCodes.FRAME_SIZE_ERROR
+
+
+class TooManyStreamsError(ProtocolError):
+ """
+ An attempt was made to open a stream that would lead to too many concurrent
+ streams.
+ """
+ pass
+
+
+class FlowControlError(ProtocolError):
+ """
+ An attempted action violates flow control constraints.
+ """
+ #: The error code corresponds to this kind of Protocol Error.
+ error_code = h2.errors.ErrorCodes.FLOW_CONTROL_ERROR
+
+
+class StreamIDTooLowError(ProtocolError):
+ """
+ An attempt was made to open a stream that had an ID that is lower than the
+ highest ID we have seen on this connection.
+ """
+ def __init__(self, stream_id, max_stream_id):
+ #: The ID of the stream that we attempted to open.
+ self.stream_id = stream_id
+
+ #: The current highest-seen stream ID.
+ self.max_stream_id = max_stream_id
+
+ def __str__(self):
+ return "StreamIDTooLowError: %d is lower than %d" % (
+ self.stream_id, self.max_stream_id
+ )
+
+
+class NoAvailableStreamIDError(ProtocolError):
+ """
+ There are no available stream IDs left to the connection. All stream IDs
+ have been exhausted.
+
+ .. versionadded:: 2.0.0
+ """
+ pass
+
+
+class NoSuchStreamError(ProtocolError):
+ """
+ A stream-specific action referenced a stream that does not exist.
+
+ .. versionchanged:: 2.0.0
+ Became a subclass of :class:`ProtocolError
+ <h2.exceptions.ProtocolError>`
+ """
+ def __init__(self, stream_id):
+ #: The stream ID corresponds to the non-existent stream.
+ self.stream_id = stream_id
+
+
+class StreamClosedError(NoSuchStreamError):
+ """
+ A more specific form of
+ :class:`NoSuchStreamError <h2.exceptions.NoSuchStreamError>`. Indicates
+ that the stream has since been closed, and that all state relating to that
+ stream has been removed.
+ """
+ def __init__(self, stream_id):
+ #: The stream ID corresponds to the nonexistent stream.
+ self.stream_id = stream_id
+
+ #: The relevant HTTP/2 error code.
+ self.error_code = h2.errors.ErrorCodes.STREAM_CLOSED
+
+ # Any events that internal code may need to fire. Not relevant to
+ # external users that may receive a StreamClosedError.
+ self._events = []
+
+
+class InvalidSettingsValueError(ProtocolError, ValueError):
+ """
+ An attempt was made to set an invalid Settings value.
+
+ .. versionadded:: 2.0.0
+ """
+ def __init__(self, msg, error_code):
+ super(InvalidSettingsValueError, self).__init__(msg)
+ self.error_code = error_code
+
+
+class InvalidBodyLengthError(ProtocolError):
+ """
+ The remote peer sent more or less data than the Content-Length header
+ indicated.
+
+ .. versionadded:: 2.0.0
+ """
+ def __init__(self, expected, actual):
+ self.expected_length = expected
+ self.actual_length = actual
+
+ def __str__(self):
+ return "InvalidBodyLengthError: Expected %d bytes, received %d" % (
+ self.expected_length, self.actual_length
+ )
+
+
+class UnsupportedFrameError(ProtocolError):
+ """
+ The remote peer sent a frame that is unsupported in this context.
+
+ .. versionadded:: 2.1.0
+
+ .. versionchanged:: 4.0.0
+ Removed deprecated KeyError parent class.
+ """
+ pass
+
+
+class RFC1122Error(H2Error):
+ """
+ Emitted when users attempt to do something that is literally allowed by the
+ relevant RFC, but is sufficiently ill-defined that it's unwise to allow
+ users to actually do it.
+
+ While there is some disagreement about whether or not we should be liberal
+ in what we accept, it is a truth universally acknowledged that we should be
+ conservative in what we emit.
+
+ .. versionadded:: 2.4.0
+ """
+ # shazow says I'm going to regret naming the exception this way. If that
+ # turns out to be true, TELL HIM NOTHING.
+ pass
+
+
+class DenialOfServiceError(ProtocolError):
+ """
+ Emitted when the remote peer exhibits a behaviour that is likely to be an
+ attempt to perform a Denial of Service attack on the implementation. This
+ is a form of ProtocolError that carries a different error code, and allows
+ easier detection of this kind of behaviour.
+
+ .. versionadded:: 2.5.0
+ """
+ #: The error code corresponds to this kind of
+ #: :class:`ProtocolError <h2.exceptions.ProtocolError>`
+ error_code = h2.errors.ErrorCodes.ENHANCE_YOUR_CALM
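+
+
+# Illustrative sketch, not part of upstream hyper-h2: every ProtocolError
+# subclass exposes an `error_code`, so calling code can map a caught
+# exception directly onto the HTTP/2 error code it should emit. `conn` is
+# assumed to be an h2.connection.H2Connection.
+#
+#     try:
+#         conn.send_data(stream_id, payload)
+#     except FlowControlError as exc:
+#         code = exc.error_code    # ErrorCodes.FLOW_CONTROL_ERROR
+#     except ProtocolError as exc:
+#         code = exc.error_code    # ErrorCodes.PROTOCOL_ERROR by default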
diff --git a/packages/h2/frame_buffer.py b/packages/h2/frame_buffer.py
new file mode 100644
index 000000000..785775eb5
--- /dev/null
+++ b/packages/h2/frame_buffer.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+"""
+h2/frame_buffer
+~~~~~~~~~~~~~~~
+
+A data structure that provides a way to iterate over a byte buffer in terms of
+frames.
+"""
+from hyperframe.exceptions import InvalidFrameError, InvalidDataError
+from hyperframe.frame import (
+ Frame, HeadersFrame, ContinuationFrame, PushPromiseFrame
+)
+
+from .exceptions import (
+ ProtocolError, FrameTooLargeError, FrameDataMissingError
+)
+
+# To avoid a DOS attack based on sending loads of continuation frames, we limit
+# the maximum number we're prepared to receive. In this case, we'll set the
+# limit to 64, which means the largest encoded header block we can receive by
+# default is 262144 bytes long, and the largest possible *at all* is 1073741760
+# bytes long.
+#
+# This value seems reasonable for now, but in future we may want to evaluate
+# making it configurable.
+CONTINUATION_BACKLOG = 64
+
+
+class FrameBuffer:
+ """
+ This is a data structure that expects to act as a buffer for HTTP/2 data
+ that allows iteration in terms of H2 frames.
+ """
+ def __init__(self, server=False):
+ self.data = b''
+ self.max_frame_size = 0
+ self._preamble = b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n' if server else b''
+ self._preamble_len = len(self._preamble)
+ self._headers_buffer = []
+
+ def add_data(self, data):
+ """
+ Add more data to the frame buffer.
+
+ :param data: A bytestring containing the byte buffer.
+ """
+ if self._preamble_len:
+ data_len = len(data)
+ of_which_preamble = min(self._preamble_len, data_len)
+
+ if self._preamble[:of_which_preamble] != data[:of_which_preamble]:
+ raise ProtocolError("Invalid HTTP/2 preamble.")
+
+ data = data[of_which_preamble:]
+ self._preamble_len -= of_which_preamble
+ self._preamble = self._preamble[of_which_preamble:]
+
+ self.data += data
+
+ def _validate_frame_length(self, length):
+ """
+ Confirm that the frame is an appropriate length.
+ """
+ if length > self.max_frame_size:
+ raise FrameTooLargeError(
+ "Received overlong frame: length %d, max %d" %
+ (length, self.max_frame_size)
+ )
+
+ def _update_header_buffer(self, f):
+ """
+ Updates the internal header buffer. Returns a frame that should replace
+ the current one. May throw exceptions if this frame is invalid.
+ """
+ # Check if we're in the middle of a headers block. If we are, this
+ # frame *must* be a CONTINUATION frame with the same stream ID as the
+ # leading HEADERS or PUSH_PROMISE frame. Anything else is a
+ # ProtocolError. If the frame *is* valid, append it to the header
+ # buffer.
+ if self._headers_buffer:
+ stream_id = self._headers_buffer[0].stream_id
+ valid_frame = (
+ f is not None and
+ isinstance(f, ContinuationFrame) and
+ f.stream_id == stream_id
+ )
+ if not valid_frame:
+ raise ProtocolError("Invalid frame during header block.")
+
+ # Append the frame to the buffer.
+ self._headers_buffer.append(f)
+ if len(self._headers_buffer) > CONTINUATION_BACKLOG:
+ raise ProtocolError("Too many continuation frames received.")
+
+ # If this is the end of the header block, then we want to build a
+ # mutant HEADERS frame that's massive. Use the original one we got,
+ # then set END_HEADERS and set its data appropriately. If it's not
+ # the end of the block, lose the current frame: we can't yield it.
+ if 'END_HEADERS' in f.flags:
+ f = self._headers_buffer[0]
+ f.flags.add('END_HEADERS')
+ f.data = b''.join(x.data for x in self._headers_buffer)
+ self._headers_buffer = []
+ else:
+ f = None
+ elif (isinstance(f, (HeadersFrame, PushPromiseFrame)) and
+ 'END_HEADERS' not in f.flags):
+ # This is the start of a headers block! Save the frame off and then
+ # act like we didn't receive one.
+ self._headers_buffer.append(f)
+ f = None
+
+ return f
+
+ # The methods below support the iterator protocol.
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ # First, check that we have enough data to successfully parse the
+ # next frame header. If not, bail. Otherwise, parse it.
+ if len(self.data) < 9:
+ raise StopIteration()
+
+ try:
+ f, length = Frame.parse_frame_header(self.data[:9])
+ except (InvalidDataError, InvalidFrameError) as e: # pragma: no cover
+ raise ProtocolError(
+ "Received frame with invalid header: %s" % str(e)
+ )
+
+ # Next, check that we have enough length to parse the frame body. If
+ # not, bail, leaving the frame header data in the buffer for next time.
+ if len(self.data) < length + 9:
+ raise StopIteration()
+
+ # Confirm the frame has an appropriate length.
+ self._validate_frame_length(length)
+
+ # Try to parse the frame body
+ try:
+ f.parse_body(memoryview(self.data[9:9+length]))
+ except InvalidDataError:
+ raise ProtocolError("Received frame with non-compliant data")
+ except InvalidFrameError:
+ raise FrameDataMissingError("Frame data missing or invalid")
+
+ # At this point, as we know we'll use or discard the entire frame, we
+ # can update the data.
+ self.data = self.data[9+length:]
+
+ # Pass the frame through the header buffer.
+ f = self._update_header_buffer(f)
+
+ # If we got a frame we didn't understand or shouldn't yield, rather
+ # than return None it'd be better if we just tried to get the next
+ # frame in the sequence instead. Recurse back into ourselves to do
+ # that. This is safe because the amount of work we have to do here is
+ # strictly bounded by the length of the buffer.
+ return f if f is not None else self.__next__()
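+
+
+# Illustrative usage sketch, not part of upstream hyper-h2: raw bytes go in
+# via add_data() and complete frames come out via iteration; incomplete
+# frames simply stay buffered until more data arrives. `raw_bytes` is assumed
+# to be data read from a socket on the client side (so no preamble expected).
+#
+#     buffer = FrameBuffer(server=False)
+#     buffer.max_frame_size = 16384          # peer's SETTINGS_MAX_FRAME_SIZE
+#     buffer.add_data(raw_bytes)
+#     for frame in buffer:
+#         process(frame)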
diff --git a/packages/h2/settings.py b/packages/h2/settings.py
new file mode 100644
index 000000000..969a162eb
--- /dev/null
+++ b/packages/h2/settings.py
@@ -0,0 +1,334 @@
+# -*- coding: utf-8 -*-
+"""
+h2/settings
+~~~~~~~~~~~
+
+This module contains an HTTP/2 settings object. This object provides a simple
+API for manipulating HTTP/2 settings, keeping track of both the current active
+state of the settings and the unacknowledged future values of the settings.
+"""
+import collections
+from collections.abc import MutableMapping
+import enum
+
+from hyperframe.frame import SettingsFrame
+
+from h2.errors import ErrorCodes
+from h2.exceptions import InvalidSettingsValueError
+
+
+class SettingCodes(enum.IntEnum):
+ """
+ All known HTTP/2 setting codes.
+
+ .. versionadded:: 2.6.0
+ """
+
+ #: Allows the sender to inform the remote endpoint of the maximum size of
+ #: the header compression table used to decode header blocks, in octets.
+ HEADER_TABLE_SIZE = SettingsFrame.HEADER_TABLE_SIZE
+
+ #: This setting can be used to disable server push. To disable server push
+ #: on a client, set this to 0.
+ ENABLE_PUSH = SettingsFrame.ENABLE_PUSH
+
+ #: Indicates the maximum number of concurrent streams that the sender will
+ #: allow.
+ MAX_CONCURRENT_STREAMS = SettingsFrame.MAX_CONCURRENT_STREAMS
+
+ #: Indicates the sender's initial window size (in octets) for stream-level
+ #: flow control.
+ INITIAL_WINDOW_SIZE = SettingsFrame.INITIAL_WINDOW_SIZE
+
+ #: Indicates the size of the largest frame payload that the sender is
+ #: willing to receive, in octets.
+ MAX_FRAME_SIZE = SettingsFrame.MAX_FRAME_SIZE
+
+ #: This advisory setting informs a peer of the maximum size of header list
+ #: that the sender is prepared to accept, in octets. The value is based on
+ #: the uncompressed size of header fields, including the length of the name
+ #: and value in octets plus an overhead of 32 octets for each header field.
+ MAX_HEADER_LIST_SIZE = SettingsFrame.MAX_HEADER_LIST_SIZE
+
+ #: This setting can be used to enable the connect protocol. To enable on a
+ #: client set this to 1.
+ ENABLE_CONNECT_PROTOCOL = SettingsFrame.ENABLE_CONNECT_PROTOCOL
+
+
+def _setting_code_from_int(code):
+ """
+ Given an integer setting code, returns either one of :class:`SettingCodes
+ <h2.settings.SettingCodes>` or, if not present in the known set of codes,
+ returns the integer directly.
+ """
+ try:
+ return SettingCodes(code)
+ except ValueError:
+ return code
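+
+
+# Illustrative sketch, not part of upstream hyper-h2: known codes are mapped
+# to SettingCodes members, while unknown extension codes pass through as
+# plain integers.
+#
+#     _setting_code_from_int(0x4)    # -> SettingCodes.INITIAL_WINDOW_SIZE
+#     _setting_code_from_int(0xff)   # -> 255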
+
+
+class ChangedSetting:
+
+ def __init__(self, setting, original_value, new_value):
+ #: The setting code given. Either one of :class:`SettingCodes
+ #: <h2.settings.SettingCodes>` or ``int``
+ #:
+ #: .. versionchanged:: 2.6.0
+ self.setting = setting
+
+ #: The original value before being changed.
+ self.original_value = original_value
+
+ #: The new value after being changed.
+ self.new_value = new_value
+
+ def __repr__(self):
+ return (
+ "ChangedSetting(setting=%s, original_value=%s, "
+ "new_value=%s)"
+ ) % (
+ self.setting,
+ self.original_value,
+ self.new_value
+ )
+
+
+class Settings(MutableMapping):
+ """
+ An object that encapsulates HTTP/2 settings state.
+
+ HTTP/2 Settings are a complex beast. Each party, remote and local, has its
+ own settings and a view of the other party's settings. When a settings
+ frame is emitted by a peer it cannot assume that the new settings values
+ are in place until the remote peer acknowledges the setting. In principle,
+ multiple settings changes can be "in flight" at the same time, all with
+ different values.
+
+ This object encapsulates this mess. It provides a dict-like interface to
+ settings, which return the *current* values of the settings in question.
+ Additionally, it keeps track of the stack of proposed values: each time an
+ acknowledgement is sent/received, it updates the current values with the
+ stack of proposed values. On top of all that, it validates the values to
+ make sure they're allowed, and raises :class:`InvalidSettingsValueError
+ <h2.exceptions.InvalidSettingsValueError>` if they are not.
+
+ Finally, this object understands what the default values of the HTTP/2
+ settings are, and sets those defaults appropriately.
+
+ .. versionchanged:: 2.2.0
+ Added the ``initial_values`` parameter.
+
+ .. versionchanged:: 2.5.0
+ Added the ``max_header_list_size`` property.
+
+ :param client: (optional) Whether these settings should be defaulted for a
+ client implementation or a server implementation. Defaults to ``True``.
+ :type client: ``bool``
+ :param initial_values: (optional) Any initial values the user would like
+ set, rather than RFC 7540's defaults.
+ :type initial_values: ``MutableMapping``
+ """
+ def __init__(self, client=True, initial_values=None):
+ # Backing object for the settings. This is a dictionary of
+ # (setting: [list of values]), where the first value in the list is the
+ # current value of the setting. Strictly this doesn't use lists but
+ # instead uses collections.deque to avoid repeated memory allocations.
+ #
+ # This contains the default values for HTTP/2.
+ self._settings = {
+ SettingCodes.HEADER_TABLE_SIZE: collections.deque([4096]),
+ SettingCodes.ENABLE_PUSH: collections.deque([int(client)]),
+ SettingCodes.INITIAL_WINDOW_SIZE: collections.deque([65535]),
+ SettingCodes.MAX_FRAME_SIZE: collections.deque([16384]),
+ SettingCodes.ENABLE_CONNECT_PROTOCOL: collections.deque([0]),
+ }
+ if initial_values is not None:
+ for key, value in initial_values.items():
+ invalid = _validate_setting(key, value)
+ if invalid:
+ raise InvalidSettingsValueError(
+ "Setting %d has invalid value %d" % (key, value),
+ error_code=invalid
+ )
+ self._settings[key] = collections.deque([value])
+
+ def acknowledge(self):
+ """
+ The settings have been acknowledged, either by the user (remote
+ settings) or by the remote peer (local settings).
+
+ :returns: A dict of {setting: ChangedSetting} that were applied.
+ """
+ changed_settings = {}
+
+ # If there is more than one setting in the list, we have a setting
+ # value outstanding. Update them.
+ for k, v in self._settings.items():
+ if len(v) > 1:
+ old_setting = v.popleft()
+ new_setting = v[0]
+ changed_settings[k] = ChangedSetting(
+ k, old_setting, new_setting
+ )
+
+ return changed_settings
+
+ # Provide easy-access to well known settings.
+ @property
+ def header_table_size(self):
+ """
+ The current value of the :data:`HEADER_TABLE_SIZE
+ <h2.settings.SettingCodes.HEADER_TABLE_SIZE>` setting.
+ """
+ return self[SettingCodes.HEADER_TABLE_SIZE]
+
+ @header_table_size.setter
+ def header_table_size(self, value):
+ self[SettingCodes.HEADER_TABLE_SIZE] = value
+
+ @property
+ def enable_push(self):
+ """
+ The current value of the :data:`ENABLE_PUSH
+ <h2.settings.SettingCodes.ENABLE_PUSH>` setting.
+ """
+ return self[SettingCodes.ENABLE_PUSH]
+
+ @enable_push.setter
+ def enable_push(self, value):
+ self[SettingCodes.ENABLE_PUSH] = value
+
+ @property
+ def initial_window_size(self):
+ """
+ The current value of the :data:`INITIAL_WINDOW_SIZE
+ <h2.settings.SettingCodes.INITIAL_WINDOW_SIZE>` setting.
+ """
+ return self[SettingCodes.INITIAL_WINDOW_SIZE]
+
+ @initial_window_size.setter
+ def initial_window_size(self, value):
+ self[SettingCodes.INITIAL_WINDOW_SIZE] = value
+
+ @property
+ def max_frame_size(self):
+ """
+ The current value of the :data:`MAX_FRAME_SIZE
+ <h2.settings.SettingCodes.MAX_FRAME_SIZE>` setting.
+ """
+ return self[SettingCodes.MAX_FRAME_SIZE]
+
+ @max_frame_size.setter
+ def max_frame_size(self, value):
+ self[SettingCodes.MAX_FRAME_SIZE] = value
+
+ @property
+ def max_concurrent_streams(self):
+ """
+ The current value of the :data:`MAX_CONCURRENT_STREAMS
+ <h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS>` setting.
+ """
+ return self.get(SettingCodes.MAX_CONCURRENT_STREAMS, 2**32+1)
+
+ @max_concurrent_streams.setter
+ def max_concurrent_streams(self, value):
+ self[SettingCodes.MAX_CONCURRENT_STREAMS] = value
+
+ @property
+ def max_header_list_size(self):
+ """
+ The current value of the :data:`MAX_HEADER_LIST_SIZE
+ <h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE>` setting. If not set,
+ returns ``None``, which means unlimited.
+
+ .. versionadded:: 2.5.0
+ """
+ return self.get(SettingCodes.MAX_HEADER_LIST_SIZE, None)
+
+ @max_header_list_size.setter
+ def max_header_list_size(self, value):
+ self[SettingCodes.MAX_HEADER_LIST_SIZE] = value
+
+ @property
+ def enable_connect_protocol(self):
+ """
+ The current value of the :data:`ENABLE_CONNECT_PROTOCOL
+ <h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL>` setting.
+ """
+ return self[SettingCodes.ENABLE_CONNECT_PROTOCOL]
+
+ @enable_connect_protocol.setter
+ def enable_connect_protocol(self, value):
+ self[SettingCodes.ENABLE_CONNECT_PROTOCOL] = value
+
+ # Implement the MutableMapping API.
+ def __getitem__(self, key):
+ val = self._settings[key][0]
+
+ # Things that were created when a setting was received should stay
+ # KeyError'd.
+ if val is None:
+ raise KeyError
+
+ return val
+
+ def __setitem__(self, key, value):
+ invalid = _validate_setting(key, value)
+ if invalid:
+ raise InvalidSettingsValueError(
+ "Setting %d has invalid value %d" % (key, value),
+ error_code=invalid
+ )
+
+ try:
+ items = self._settings[key]
+ except KeyError:
+ items = collections.deque([None])
+ self._settings[key] = items
+
+ items.append(value)
+
+ def __delitem__(self, key):
+ del self._settings[key]
+
+ def __iter__(self):
+ return self._settings.__iter__()
+
+ def __len__(self):
+ return len(self._settings)
+
+ def __eq__(self, other):
+ if isinstance(other, Settings):
+ return self._settings == other._settings
+ else:
+ return NotImplemented
+
+ def __ne__(self, other):
+ if isinstance(other, Settings):
+ return not self == other
+ else:
+ return NotImplemented
+
+
+def _validate_setting(setting, value): # noqa: C901
+ """
+ Confirms that a specific setting has a well-formed value. If the setting is
+ invalid, returns an error code. Otherwise, returns 0 (NO_ERROR).
+ """
+ if setting == SettingCodes.ENABLE_PUSH:
+ if value not in (0, 1):
+ return ErrorCodes.PROTOCOL_ERROR
+ elif setting == SettingCodes.INITIAL_WINDOW_SIZE:
+ if not 0 <= value <= 2147483647: # 2^31 - 1
+ return ErrorCodes.FLOW_CONTROL_ERROR
+ elif setting == SettingCodes.MAX_FRAME_SIZE:
+ if not 16384 <= value <= 16777215: # 2^14 and 2^24 - 1
+ return ErrorCodes.PROTOCOL_ERROR
+ elif setting == SettingCodes.MAX_HEADER_LIST_SIZE:
+ if value < 0:
+ return ErrorCodes.PROTOCOL_ERROR
+ elif setting == SettingCodes.ENABLE_CONNECT_PROTOCOL:
+ if value not in (0, 1):
+ return ErrorCodes.PROTOCOL_ERROR
+
+ return 0
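+
+
+# Illustrative usage sketch, not part of upstream hyper-h2: the Settings
+# object behaves like a dict of current values, while assignments are queued
+# as pending until acknowledge() promotes them.
+#
+#     s = Settings(client=True)
+#     s.max_frame_size             # 16384, the RFC 7540 default
+#     s.max_frame_size = 32768     # queued, not yet active
+#     s.max_frame_size             # still 16384
+#     changed = s.acknowledge()    # {SettingCodes.MAX_FRAME_SIZE: ChangedSetting(...)}
+#     s.max_frame_size             # now 32768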
diff --git a/packages/h2/stream.py b/packages/h2/stream.py
new file mode 100644
index 000000000..3c29b2431
--- /dev/null
+++ b/packages/h2/stream.py
@@ -0,0 +1,1371 @@
+# -*- coding: utf-8 -*-
+"""
+h2/stream
+~~~~~~~~~
+
+An implementation of an HTTP/2 stream.
+"""
+from enum import Enum, IntEnum
+from hpack import HeaderTuple
+from hyperframe.frame import (
+ HeadersFrame, ContinuationFrame, DataFrame, WindowUpdateFrame,
+ RstStreamFrame, PushPromiseFrame, AltSvcFrame
+)
+
+from .errors import ErrorCodes, _error_code_from_int
+from .events import (
+ RequestReceived, ResponseReceived, DataReceived, WindowUpdated,
+ StreamEnded, PushedStreamReceived, StreamReset, TrailersReceived,
+ InformationalResponseReceived, AlternativeServiceAvailable,
+ _ResponseSent, _RequestSent, _TrailersSent, _PushedRequestSent
+)
+from .exceptions import (
+ ProtocolError, StreamClosedError, InvalidBodyLengthError, FlowControlError
+)
+from .utilities import (
+ guard_increment_window, is_informational_response, authority_from_headers,
+ validate_headers, validate_outbound_headers, normalize_outbound_headers,
+ HeaderValidationFlags, extract_method_header, normalize_inbound_headers
+)
+from .windows import WindowManager
+
+
+class StreamState(IntEnum):
+ IDLE = 0
+ RESERVED_REMOTE = 1
+ RESERVED_LOCAL = 2
+ OPEN = 3
+ HALF_CLOSED_REMOTE = 4
+ HALF_CLOSED_LOCAL = 5
+ CLOSED = 6
+
+
+class StreamInputs(Enum):
+ SEND_HEADERS = 0
+ SEND_PUSH_PROMISE = 1
+ SEND_RST_STREAM = 2
+ SEND_DATA = 3
+ SEND_WINDOW_UPDATE = 4
+ SEND_END_STREAM = 5
+ RECV_HEADERS = 6
+ RECV_PUSH_PROMISE = 7
+ RECV_RST_STREAM = 8
+ RECV_DATA = 9
+ RECV_WINDOW_UPDATE = 10
+ RECV_END_STREAM = 11
+ RECV_CONTINUATION = 12 # Added in 2.0.0
+ SEND_INFORMATIONAL_HEADERS = 13 # Added in 2.2.0
+ RECV_INFORMATIONAL_HEADERS = 14 # Added in 2.2.0
+ SEND_ALTERNATIVE_SERVICE = 15 # Added in 2.3.0
+ RECV_ALTERNATIVE_SERVICE = 16 # Added in 2.3.0
+ UPGRADE_CLIENT = 17 # Added 2.3.0
+ UPGRADE_SERVER = 18 # Added 2.3.0
+
+
+class StreamClosedBy(Enum):
+ SEND_END_STREAM = 0
+ RECV_END_STREAM = 1
+ SEND_RST_STREAM = 2
+ RECV_RST_STREAM = 3
+
+
+# This array is initialized once, and is indexed by the stream states above.
+# It indicates whether a stream in the given state is open. The reason we do
+# this is that we potentially check whether a stream in a given state is open
+# quite frequently: given that we check so often, we should do so in the
+# fastest and most performant way possible.
+STREAM_OPEN = [False for _ in range(0, len(StreamState))]
+STREAM_OPEN[StreamState.OPEN] = True
+STREAM_OPEN[StreamState.HALF_CLOSED_LOCAL] = True
+STREAM_OPEN[StreamState.HALF_CLOSED_REMOTE] = True
+
+
+class H2StreamStateMachine:
+ """
+ A single HTTP/2 stream state machine.
+
+ This stream object implements basically the state machine described in
+ RFC 7540 section 5.1.
+
+ :param stream_id: The stream ID of this stream. This is stored primarily
+ for logging purposes.
+ """
+ def __init__(self, stream_id):
+ self.state = StreamState.IDLE
+ self.stream_id = stream_id
+
+ #: Whether this peer is the client side of this stream.
+ self.client = None
+
+ # Whether trailers have been sent/received on this stream or not.
+ self.headers_sent = None
+ self.trailers_sent = None
+ self.headers_received = None
+ self.trailers_received = None
+
+ # How the stream was closed. One of StreamClosedBy.
+ self.stream_closed_by = None
+
+ def process_input(self, input_):
+ """
+ Process a specific input in the state machine.
+ """
+ if not isinstance(input_, StreamInputs):
+ raise ValueError("Input must be an instance of StreamInputs")
+
+ try:
+ func, target_state = _transitions[(self.state, input_)]
+ except KeyError:
+ old_state = self.state
+ self.state = StreamState.CLOSED
+ raise ProtocolError(
+ "Invalid input %s in state %s" % (input_, old_state)
+ )
+ else:
+ previous_state = self.state
+ self.state = target_state
+ if func is not None:
+ try:
+ return func(self, previous_state)
+ except ProtocolError:
+ self.state = StreamState.CLOSED
+ raise
+ except AssertionError as e: # pragma: no cover
+ self.state = StreamState.CLOSED
+ raise ProtocolError(e)
+
+ return []
+
+ def request_sent(self, previous_state):
+ """
+ Fires when a request is sent.
+ """
+ self.client = True
+ self.headers_sent = True
+ event = _RequestSent()
+
+ return [event]
+
+ def response_sent(self, previous_state):
+ """
+ Fires when something that should be a response is sent. This 'response'
+ may actually be trailers.
+ """
+ if not self.headers_sent:
+ if self.client is True or self.client is None:
+ raise ProtocolError("Client cannot send responses.")
+ self.headers_sent = True
+ event = _ResponseSent()
+ else:
+ assert not self.trailers_sent
+ self.trailers_sent = True
+ event = _TrailersSent()
+
+ return [event]
+
+ def request_received(self, previous_state):
+ """
+ Fires when a request is received.
+ """
+ assert not self.headers_received
+ assert not self.trailers_received
+
+ self.client = False
+ self.headers_received = True
+ event = RequestReceived()
+
+ event.stream_id = self.stream_id
+ return [event]
+
+ def response_received(self, previous_state):
+ """
+ Fires when a response is received. Also disambiguates between responses
+ and trailers.
+ """
+ if not self.headers_received:
+ assert self.client is True
+ self.headers_received = True
+ event = ResponseReceived()
+ else:
+ assert not self.trailers_received
+ self.trailers_received = True
+ event = TrailersReceived()
+
+ event.stream_id = self.stream_id
+ return [event]
+
+ def data_received(self, previous_state):
+ """
+ Fires when data is received.
+ """
+ if not self.headers_received:
+ raise ProtocolError("cannot receive data before headers")
+ event = DataReceived()
+ event.stream_id = self.stream_id
+ return [event]
+
+ def window_updated(self, previous_state):
+ """
+ Fires when a window update frame is received.
+ """
+ event = WindowUpdated()
+ event.stream_id = self.stream_id
+ return [event]
+
+ def stream_half_closed(self, previous_state):
+ """
+ Fires when an END_STREAM flag is received in the OPEN state,
+ transitioning this stream to a HALF_CLOSED_REMOTE state.
+ """
+ event = StreamEnded()
+ event.stream_id = self.stream_id
+ return [event]
+
+ def stream_ended(self, previous_state):
+ """
+ Fires when a stream is cleanly ended.
+ """
+ self.stream_closed_by = StreamClosedBy.RECV_END_STREAM
+ event = StreamEnded()
+ event.stream_id = self.stream_id
+ return [event]
+
+ def stream_reset(self, previous_state):
+ """
+ Fired when a stream is forcefully reset.
+ """
+ self.stream_closed_by = StreamClosedBy.RECV_RST_STREAM
+ event = StreamReset()
+ event.stream_id = self.stream_id
+ return [event]
+
+ def send_new_pushed_stream(self, previous_state):
+ """
+ Fires on the newly pushed stream, when pushed by the local peer.
+
+ No event here, but definitionally this peer must be a server.
+ """
+ assert self.client is None
+ self.client = False
+ self.headers_received = True
+ return []
+
+ def recv_new_pushed_stream(self, previous_state):
+ """
+ Fires on the newly pushed stream, when pushed by the remote peer.
+
+ No event here, but definitionally this peer must be a client.
+ """
+ assert self.client is None
+ self.client = True
+ self.headers_sent = True
+ return []
+
+ def send_push_promise(self, previous_state):
+ """
+ Fires on the already-existing stream when a PUSH_PROMISE frame is sent.
+ We may only send PUSH_PROMISE frames if we're a server.
+ """
+ if self.client is True:
+ raise ProtocolError("Cannot push streams from client peers.")
+
+ event = _PushedRequestSent()
+ return [event]
+
+ def recv_push_promise(self, previous_state):
+ """
+ Fires on the already-existing stream when a PUSH_PROMISE frame is
+ received. We may only receive PUSH_PROMISE frames if we're a client.
+
+ Fires a PushedStreamReceived event.
+ """
+ if not self.client:
+ if self.client is None: # pragma: no cover
+ msg = "Idle streams cannot receive pushes"
+ else: # pragma: no cover
+ msg = "Cannot receive pushed streams as a server"
+ raise ProtocolError(msg)
+
+ event = PushedStreamReceived()
+ event.parent_stream_id = self.stream_id
+ return [event]
+
+ def send_end_stream(self, previous_state):
+ """
+ Called when an attempt is made to send END_STREAM in the
+ HALF_CLOSED_REMOTE state.
+ """
+ self.stream_closed_by = StreamClosedBy.SEND_END_STREAM
+
+ def send_reset_stream(self, previous_state):
+ """
+ Called when an attempt is made to send RST_STREAM in a non-closed
+ stream state.
+ """
+ self.stream_closed_by = StreamClosedBy.SEND_RST_STREAM
+
+ def reset_stream_on_error(self, previous_state):
+ """
+ Called when we need to forcefully emit another RST_STREAM frame on
+ behalf of the state machine.
+
+ If this is the first time we've done this, we should also hang an event
+ off the StreamClosedError so that the user can be informed. We know
+ it's the first time we've done this if the stream is currently in a
+ state other than CLOSED.
+ """
+ self.stream_closed_by = StreamClosedBy.SEND_RST_STREAM
+
+ error = StreamClosedError(self.stream_id)
+
+ event = StreamReset()
+ event.stream_id = self.stream_id
+ event.error_code = ErrorCodes.STREAM_CLOSED
+ event.remote_reset = False
+ error._events = [event]
+ raise error
+
+ def recv_on_closed_stream(self, previous_state):
+ """
+ Called when an unexpected frame is received on an already-closed
+ stream.
+
+ An endpoint that receives an unexpected frame should treat it as
+ a stream error or connection error with type STREAM_CLOSED, depending
+ on the specific frame. The error handling is done at a higher level:
+ this just raises the appropriate error.
+ """
+ raise StreamClosedError(self.stream_id)
+
+ def send_on_closed_stream(self, previous_state):
+ """
+ Called when an attempt is made to send data on an already-closed
+ stream.
+
+ This essentially overrides the standard logic by throwing a
+ more-specific error: StreamClosedError. This is a ProtocolError, so it
+ matches the standard API of the state machine, but provides more detail
+ to the user.
+ """
+ raise StreamClosedError(self.stream_id)
+
+ def recv_push_on_closed_stream(self, previous_state):
+ """
+ Called when a PUSH_PROMISE frame is received on a fully closed
+ stream.
+
+ If the stream was closed by us sending a RST_STREAM frame, then we
+ presume that the PUSH_PROMISE was in flight when we reset the parent
+ stream. Rather than accept the new stream, we just reset it.
+ Otherwise, we should call this a PROTOCOL_ERROR: pushing a stream on a
+ naturally closed stream is a real problem because it creates a brand
+ new stream that the remote peer now believes exists.
+ """
+ assert self.stream_closed_by is not None
+
+ if self.stream_closed_by == StreamClosedBy.SEND_RST_STREAM:
+ raise StreamClosedError(self.stream_id)
+ else:
+ raise ProtocolError("Attempted to push on closed stream.")
+
+ def send_push_on_closed_stream(self, previous_state):
+ """
+ Called when an attempt is made to push on an already-closed stream.
+
+ This essentially overrides the standard logic by providing a more
+ useful error message. It's necessary because simply indicating that the
+ stream is closed is not enough: there is now a new stream that is not
+ allowed to be there. The only recourse is to tear the whole connection
+ down.
+ """
+ raise ProtocolError("Attempted to push on closed stream.")
+
+ def send_informational_response(self, previous_state):
+ """
+ Called when an informational header block is sent (that is, a block
+ where the :status header has a 1XX value).
+
+ Only enforces that these are sent *before* final headers are sent.
+ """
+ if self.headers_sent:
+ raise ProtocolError("Information response after final response")
+
+ event = _ResponseSent()
+ return [event]
+
+ def recv_informational_response(self, previous_state):
+ """
+ Called when an informational header block is received (that is, a block
+ where the :status header has a 1XX value).
+ """
+ if self.headers_received:
+ raise ProtocolError("Informational response after final response")
+
+ event = InformationalResponseReceived()
+ event.stream_id = self.stream_id
+ return [event]
+
+ def recv_alt_svc(self, previous_state):
+ """
+ Called when receiving an ALTSVC frame.
+
+ RFC 7838 allows us to receive ALTSVC frames at any stream state, which
+ is really absurdly overzealous. For that reason, we want to limit the
+ states in which we can actually receive it. It's really only sensible
+ to receive it after we've sent our own headers and before the server
+ has sent its header block: the server can't guarantee that we have any
+ state around after it completes its header block, and the server
+ doesn't know what origin we're talking about before we've sent ours.
+
+ For that reason, this function applies a few extra checks on both state
+ and some of the little state variables we keep around. If those suggest
+ an unreasonable situation for the ALTSVC frame to have been sent in,
+ we quietly ignore it (as RFC 7838 suggests).
+
+ This function is also *not* always called by the state machine. In some
+ states (IDLE, RESERVED_LOCAL, CLOSED) we don't bother to call it,
+ because we know the frame cannot be valid in that state (IDLE because
+ the server cannot know what origin the stream applies to, CLOSED
+ because the server cannot assume we still have state around,
+ RESERVED_LOCAL because by definition if we're in the RESERVED_LOCAL
+ state then *we* are the server).
+ """
+ # Servers can't receive ALTSVC frames, but RFC 7838 tells us to ignore
+ # them.
+ if self.client is False:
+ return []
+
+ # If we've received the response headers from the server they can't
+ # guarantee we still have any state around. Other implementations
+ # (like nghttp2) ignore ALTSVC in this state, so we will too.
+ if self.headers_received:
+ return []
+
+ # Otherwise, this is a sensible enough frame to have received. Return
+ # the event and let it get populated.
+ return [AlternativeServiceAvailable()]
+
+ def send_alt_svc(self, previous_state):
+ """
+ Called when sending an ALTSVC frame on this stream.
+
+ For consistency with the restrictions we apply on receiving ALTSVC
+ frames in ``recv_alt_svc``, we want to restrict when users can send
+ ALTSVC frames to the situations when we ourselves would accept them.
+
+ That means: when we are a server, when we have received the request
+ headers, and when we have not yet sent our own response headers.
+ """
+ # We should not send ALTSVC after we've sent response headers, as the
+ # client may have disposed of its state.
+ if self.headers_sent:
+ raise ProtocolError(
+ "Cannot send ALTSVC after sending response headers."
+ )
+
+ return
+
+
+# STATE MACHINE
+#
+# The stream state machine is defined here to avoid the need to allocate it
+# repeatedly for each stream. It cannot be defined in the stream class because
+# it needs to be able to reference the callbacks defined on the class, but
+# because Python's scoping rules are weird the class object is not actually in
+# scope during the body of the class object.
+#
+# For the sake of clarity, we reproduce the RFC 7540 state machine here:
+#
+# +--------+
+# send PP | | recv PP
+# ,--------| idle |--------.
+# / | | \
+# v +--------+ v
+# +----------+ | +----------+
+# | | | send H / | |
+# ,------| reserved | | recv H | reserved |------.
+# | | (local) | | | (remote) | |
+# | +----------+ v +----------+ |
+# | | +--------+ | |
+# | | recv ES | | send ES | |
+# | send H | ,-------| open |-------. | recv H |
+# | | / | | \ | |
+# | v v +--------+ v v |
+# | +----------+ | +----------+ |
+# | | half | | | half | |
+# | | closed | | send R / | closed | |
+# | | (remote) | | recv R | (local) | |
+# | +----------+ | +----------+ |
+# | | | | |
+# | | send ES / | recv ES / | |
+# | | send R / v send R / | |
+# | | recv R +--------+ recv R | |
+# | send R / `----------->| |<-----------' send R / |
+# | recv R | closed | recv R |
+# `----------------------->| |<----------------------'
+# +--------+
+#
+# send: endpoint sends this frame
+# recv: endpoint receives this frame
+#
+# H: HEADERS frame (with implied CONTINUATIONs)
+# PP: PUSH_PROMISE frame (with implied CONTINUATIONs)
+# ES: END_STREAM flag
+# R: RST_STREAM frame
+#
+# For the purposes of this state machine we treat HEADERS and their
+# associated CONTINUATION frames as a single jumbo frame. The protocol
+# allows/requires this by preventing other frames from being interleaved in
+# between HEADERS/CONTINUATION frames. However, if a CONTINUATION frame is
+# received without a prior HEADERS frame, it *will* be passed to this state
+# machine. The state machine should always reject that frame, either as an
+# invalid transition or because the stream is closed.
+#
+# There is a confusing relationship around PUSH_PROMISE frames. The state
+# machine above considers them to be frames belonging to the new stream,
+# which is *somewhat* true. However, they are sent with the stream ID of
+# their related stream, and are only sendable in some cases.
+# For this reason, our state machine implementation below allows for
+# PUSH_PROMISE frames both in the IDLE state (as in the diagram), but also
+# in the OPEN, HALF_CLOSED_LOCAL, and HALF_CLOSED_REMOTE states.
+# Essentially, for hyper-h2, PUSH_PROMISE frames are effectively sent on
+# two streams.
+#
+# The _transitions dictionary contains a mapping of tuples of
+# (state, input) to tuples of (side_effect_function, end_state). This
+# map contains all allowed transitions: anything not in this map is
+# invalid and immediately causes a transition to ``closed``.
+_transitions = {
+ # State: idle
+ (StreamState.IDLE, StreamInputs.SEND_HEADERS):
+ (H2StreamStateMachine.request_sent, StreamState.OPEN),
+ (StreamState.IDLE, StreamInputs.RECV_HEADERS):
+ (H2StreamStateMachine.request_received, StreamState.OPEN),
+ (StreamState.IDLE, StreamInputs.RECV_DATA):
+ (H2StreamStateMachine.reset_stream_on_error, StreamState.CLOSED),
+ (StreamState.IDLE, StreamInputs.SEND_PUSH_PROMISE):
+ (H2StreamStateMachine.send_new_pushed_stream,
+ StreamState.RESERVED_LOCAL),
+ (StreamState.IDLE, StreamInputs.RECV_PUSH_PROMISE):
+ (H2StreamStateMachine.recv_new_pushed_stream,
+ StreamState.RESERVED_REMOTE),
+ (StreamState.IDLE, StreamInputs.RECV_ALTERNATIVE_SERVICE):
+ (None, StreamState.IDLE),
+ (StreamState.IDLE, StreamInputs.UPGRADE_CLIENT):
+ (H2StreamStateMachine.request_sent, StreamState.HALF_CLOSED_LOCAL),
+ (StreamState.IDLE, StreamInputs.UPGRADE_SERVER):
+ (H2StreamStateMachine.request_received,
+ StreamState.HALF_CLOSED_REMOTE),
+
+ # State: reserved local
+ (StreamState.RESERVED_LOCAL, StreamInputs.SEND_HEADERS):
+ (H2StreamStateMachine.response_sent, StreamState.HALF_CLOSED_REMOTE),
+ (StreamState.RESERVED_LOCAL, StreamInputs.RECV_DATA):
+ (H2StreamStateMachine.reset_stream_on_error, StreamState.CLOSED),
+ (StreamState.RESERVED_LOCAL, StreamInputs.SEND_WINDOW_UPDATE):
+ (None, StreamState.RESERVED_LOCAL),
+ (StreamState.RESERVED_LOCAL, StreamInputs.RECV_WINDOW_UPDATE):
+ (H2StreamStateMachine.window_updated, StreamState.RESERVED_LOCAL),
+ (StreamState.RESERVED_LOCAL, StreamInputs.SEND_RST_STREAM):
+ (H2StreamStateMachine.send_reset_stream, StreamState.CLOSED),
+ (StreamState.RESERVED_LOCAL, StreamInputs.RECV_RST_STREAM):
+ (H2StreamStateMachine.stream_reset, StreamState.CLOSED),
+ (StreamState.RESERVED_LOCAL, StreamInputs.SEND_ALTERNATIVE_SERVICE):
+ (H2StreamStateMachine.send_alt_svc, StreamState.RESERVED_LOCAL),
+ (StreamState.RESERVED_LOCAL, StreamInputs.RECV_ALTERNATIVE_SERVICE):
+ (None, StreamState.RESERVED_LOCAL),
+
+ # State: reserved remote
+ (StreamState.RESERVED_REMOTE, StreamInputs.RECV_HEADERS):
+ (H2StreamStateMachine.response_received,
+ StreamState.HALF_CLOSED_LOCAL),
+ (StreamState.RESERVED_REMOTE, StreamInputs.RECV_DATA):
+ (H2StreamStateMachine.reset_stream_on_error, StreamState.CLOSED),
+ (StreamState.RESERVED_REMOTE, StreamInputs.SEND_WINDOW_UPDATE):
+ (None, StreamState.RESERVED_REMOTE),
+ (StreamState.RESERVED_REMOTE, StreamInputs.RECV_WINDOW_UPDATE):
+ (H2StreamStateMachine.window_updated, StreamState.RESERVED_REMOTE),
+ (StreamState.RESERVED_REMOTE, StreamInputs.SEND_RST_STREAM):
+ (H2StreamStateMachine.send_reset_stream, StreamState.CLOSED),
+ (StreamState.RESERVED_REMOTE, StreamInputs.RECV_RST_STREAM):
+ (H2StreamStateMachine.stream_reset, StreamState.CLOSED),
+ (StreamState.RESERVED_REMOTE, StreamInputs.RECV_ALTERNATIVE_SERVICE):
+ (H2StreamStateMachine.recv_alt_svc, StreamState.RESERVED_REMOTE),
+
+ # State: open
+ (StreamState.OPEN, StreamInputs.SEND_HEADERS):
+ (H2StreamStateMachine.response_sent, StreamState.OPEN),
+ (StreamState.OPEN, StreamInputs.RECV_HEADERS):
+ (H2StreamStateMachine.response_received, StreamState.OPEN),
+ (StreamState.OPEN, StreamInputs.SEND_DATA):
+ (None, StreamState.OPEN),
+ (StreamState.OPEN, StreamInputs.RECV_DATA):
+ (H2StreamStateMachine.data_received, StreamState.OPEN),
+ (StreamState.OPEN, StreamInputs.SEND_END_STREAM):
+ (None, StreamState.HALF_CLOSED_LOCAL),
+ (StreamState.OPEN, StreamInputs.RECV_END_STREAM):
+ (H2StreamStateMachine.stream_half_closed,
+ StreamState.HALF_CLOSED_REMOTE),
+ (StreamState.OPEN, StreamInputs.SEND_WINDOW_UPDATE):
+ (None, StreamState.OPEN),
+ (StreamState.OPEN, StreamInputs.RECV_WINDOW_UPDATE):
+ (H2StreamStateMachine.window_updated, StreamState.OPEN),
+ (StreamState.OPEN, StreamInputs.SEND_RST_STREAM):
+ (H2StreamStateMachine.send_reset_stream, StreamState.CLOSED),
+ (StreamState.OPEN, StreamInputs.RECV_RST_STREAM):
+ (H2StreamStateMachine.stream_reset, StreamState.CLOSED),
+ (StreamState.OPEN, StreamInputs.SEND_PUSH_PROMISE):
+ (H2StreamStateMachine.send_push_promise, StreamState.OPEN),
+ (StreamState.OPEN, StreamInputs.RECV_PUSH_PROMISE):
+ (H2StreamStateMachine.recv_push_promise, StreamState.OPEN),
+ (StreamState.OPEN, StreamInputs.SEND_INFORMATIONAL_HEADERS):
+ (H2StreamStateMachine.send_informational_response, StreamState.OPEN),
+ (StreamState.OPEN, StreamInputs.RECV_INFORMATIONAL_HEADERS):
+ (H2StreamStateMachine.recv_informational_response, StreamState.OPEN),
+ (StreamState.OPEN, StreamInputs.SEND_ALTERNATIVE_SERVICE):
+ (H2StreamStateMachine.send_alt_svc, StreamState.OPEN),
+ (StreamState.OPEN, StreamInputs.RECV_ALTERNATIVE_SERVICE):
+ (H2StreamStateMachine.recv_alt_svc, StreamState.OPEN),
+
+ # State: half-closed remote
+ (StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_HEADERS):
+ (H2StreamStateMachine.response_sent, StreamState.HALF_CLOSED_REMOTE),
+ (StreamState.HALF_CLOSED_REMOTE, StreamInputs.RECV_HEADERS):
+ (H2StreamStateMachine.reset_stream_on_error, StreamState.CLOSED),
+ (StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_DATA):
+ (None, StreamState.HALF_CLOSED_REMOTE),
+ (StreamState.HALF_CLOSED_REMOTE, StreamInputs.RECV_DATA):
+ (H2StreamStateMachine.reset_stream_on_error, StreamState.CLOSED),
+ (StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_END_STREAM):
+ (H2StreamStateMachine.send_end_stream, StreamState.CLOSED),
+ (StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_WINDOW_UPDATE):
+ (None, StreamState.HALF_CLOSED_REMOTE),
+ (StreamState.HALF_CLOSED_REMOTE, StreamInputs.RECV_WINDOW_UPDATE):
+ (H2StreamStateMachine.window_updated, StreamState.HALF_CLOSED_REMOTE),
+ (StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_RST_STREAM):
+ (H2StreamStateMachine.send_reset_stream, StreamState.CLOSED),
+ (StreamState.HALF_CLOSED_REMOTE, StreamInputs.RECV_RST_STREAM):
+ (H2StreamStateMachine.stream_reset, StreamState.CLOSED),
+ (StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_PUSH_PROMISE):
+ (H2StreamStateMachine.send_push_promise,
+ StreamState.HALF_CLOSED_REMOTE),
+ (StreamState.HALF_CLOSED_REMOTE, StreamInputs.RECV_PUSH_PROMISE):
+ (H2StreamStateMachine.reset_stream_on_error, StreamState.CLOSED),
+ (StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_INFORMATIONAL_HEADERS):
+ (H2StreamStateMachine.send_informational_response,
+ StreamState.HALF_CLOSED_REMOTE),
+ (StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_ALTERNATIVE_SERVICE):
+ (H2StreamStateMachine.send_alt_svc, StreamState.HALF_CLOSED_REMOTE),
+ (StreamState.HALF_CLOSED_REMOTE, StreamInputs.RECV_ALTERNATIVE_SERVICE):
+ (H2StreamStateMachine.recv_alt_svc, StreamState.HALF_CLOSED_REMOTE),
+
+ # State: half-closed local
+ (StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_HEADERS):
+ (H2StreamStateMachine.response_received,
+ StreamState.HALF_CLOSED_LOCAL),
+ (StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_DATA):
+ (H2StreamStateMachine.data_received, StreamState.HALF_CLOSED_LOCAL),
+ (StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_END_STREAM):
+ (H2StreamStateMachine.stream_ended, StreamState.CLOSED),
+ (StreamState.HALF_CLOSED_LOCAL, StreamInputs.SEND_WINDOW_UPDATE):
+ (None, StreamState.HALF_CLOSED_LOCAL),
+ (StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_WINDOW_UPDATE):
+ (H2StreamStateMachine.window_updated, StreamState.HALF_CLOSED_LOCAL),
+ (StreamState.HALF_CLOSED_LOCAL, StreamInputs.SEND_RST_STREAM):
+ (H2StreamStateMachine.send_reset_stream, StreamState.CLOSED),
+ (StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_RST_STREAM):
+ (H2StreamStateMachine.stream_reset, StreamState.CLOSED),
+ (StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_PUSH_PROMISE):
+ (H2StreamStateMachine.recv_push_promise,
+ StreamState.HALF_CLOSED_LOCAL),
+ (StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_INFORMATIONAL_HEADERS):
+ (H2StreamStateMachine.recv_informational_response,
+ StreamState.HALF_CLOSED_LOCAL),
+ (StreamState.HALF_CLOSED_LOCAL, StreamInputs.SEND_ALTERNATIVE_SERVICE):
+ (H2StreamStateMachine.send_alt_svc, StreamState.HALF_CLOSED_LOCAL),
+ (StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_ALTERNATIVE_SERVICE):
+ (H2StreamStateMachine.recv_alt_svc, StreamState.HALF_CLOSED_LOCAL),
+
+ # State: closed
+ (StreamState.CLOSED, StreamInputs.RECV_END_STREAM):
+ (None, StreamState.CLOSED),
+ (StreamState.CLOSED, StreamInputs.RECV_ALTERNATIVE_SERVICE):
+ (None, StreamState.CLOSED),
+
+ # RFC 7540 Section 5.1 defines how the end point should react when
+ # receiving a frame on a closed stream with the following statements:
+ #
+ # > An endpoint that receives any frame other than PRIORITY after receiving
+ # > a RST_STREAM MUST treat that as a stream error of type STREAM_CLOSED.
+ # > An endpoint that receives any frames after receiving a frame with the
+ # > END_STREAM flag set MUST treat that as a connection error of type
+ # > STREAM_CLOSED.
+ (StreamState.CLOSED, StreamInputs.RECV_HEADERS):
+ (H2StreamStateMachine.recv_on_closed_stream, StreamState.CLOSED),
+ (StreamState.CLOSED, StreamInputs.RECV_DATA):
+ (H2StreamStateMachine.recv_on_closed_stream, StreamState.CLOSED),
+
+ # > WINDOW_UPDATE or RST_STREAM frames can be received in this state
+ # > for a short period after a DATA or HEADERS frame containing an
+ # > END_STREAM flag is sent, as instructed in RFC 7540 Section 5.1. But we
+ # > don't have access to a clock so we just always allow it.
+ (StreamState.CLOSED, StreamInputs.RECV_WINDOW_UPDATE):
+ (None, StreamState.CLOSED),
+ (StreamState.CLOSED, StreamInputs.RECV_RST_STREAM):
+ (None, StreamState.CLOSED),
+
+ # > A receiver MUST treat the receipt of a PUSH_PROMISE on a stream that is
+ # > neither "open" nor "half-closed (local)" as a connection error of type
+ # > PROTOCOL_ERROR.
+ (StreamState.CLOSED, StreamInputs.RECV_PUSH_PROMISE):
+ (H2StreamStateMachine.recv_push_on_closed_stream, StreamState.CLOSED),
+
+ # Also, users should be forbidden from sending on closed streams.
+ (StreamState.CLOSED, StreamInputs.SEND_HEADERS):
+ (H2StreamStateMachine.send_on_closed_stream, StreamState.CLOSED),
+ (StreamState.CLOSED, StreamInputs.SEND_PUSH_PROMISE):
+ (H2StreamStateMachine.send_push_on_closed_stream, StreamState.CLOSED),
+ (StreamState.CLOSED, StreamInputs.SEND_RST_STREAM):
+ (H2StreamStateMachine.send_on_closed_stream, StreamState.CLOSED),
+ (StreamState.CLOSED, StreamInputs.SEND_DATA):
+ (H2StreamStateMachine.send_on_closed_stream, StreamState.CLOSED),
+ (StreamState.CLOSED, StreamInputs.SEND_WINDOW_UPDATE):
+ (H2StreamStateMachine.send_on_closed_stream, StreamState.CLOSED),
+ (StreamState.CLOSED, StreamInputs.SEND_END_STREAM):
+ (H2StreamStateMachine.send_on_closed_stream, StreamState.CLOSED),
+}
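+
+
+# Illustrative sketch, not part of upstream hyper-h2: driving the state
+# machine by hand. Each call is a single dictionary lookup in _transitions,
+# returning any events produced by the side-effect function.
+#
+#     fsm = H2StreamStateMachine(stream_id=1)
+#     fsm.process_input(StreamInputs.SEND_HEADERS)     # IDLE -> OPEN, returns [_RequestSent()]
+#     fsm.process_input(StreamInputs.SEND_END_STREAM)  # OPEN -> HALF_CLOSED_LOCAL
+#     fsm.state                                        # StreamState.HALF_CLOSED_LOCAL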
+
+
+class H2Stream:
+ """
+ A low-level HTTP/2 stream object. This handles building and receiving
+ frames and maintains per-stream state.
+
+ This wraps an HTTP/2 Stream state machine implementation, ensuring that
+ frames can only be sent/received when the stream is in a valid state.
+ Attempts to create frames that cannot be sent will raise a
+ ``ProtocolError``.
+ """
+ def __init__(self,
+ stream_id,
+ config,
+ inbound_window_size,
+ outbound_window_size):
+ self.state_machine = H2StreamStateMachine(stream_id)
+ self.stream_id = stream_id
+ self.max_outbound_frame_size = None
+ self.request_method = None
+
+ # The current value of the outbound stream flow control window
+ self.outbound_flow_control_window = outbound_window_size
+
+ # The flow control manager.
+ self._inbound_window_manager = WindowManager(inbound_window_size)
+
+ # The expected content length, if any.
+ self._expected_content_length = None
+
+ # The actual received content length. Always tracked.
+ self._actual_content_length = 0
+
+ # The authority we believe this stream belongs to.
+ self._authority = None
+
+ # The configuration for this stream.
+ self.config = config
+
+ def __repr__(self):
+ return "<%s id:%d state:%r>" % (
+ type(self).__name__,
+ self.stream_id,
+ self.state_machine.state
+ )
+
+ @property
+ def inbound_flow_control_window(self):
+ """
+ The size of the inbound flow control window for the stream. This is
+ rarely publicly useful: instead, use :meth:`remote_flow_control_window
+ <h2.connection.H2Connection.remote_flow_control_window>`. This property
+ exists largely as a shortcut to this data.
+ """
+ return self._inbound_window_manager.current_window_size
+
+ @property
+ def open(self):
+ """
+ Whether the stream is 'open' in any sense: that is, whether it counts
+ against the number of concurrent streams.
+ """
+ # RFC 7540 Section 5.1.2 defines 'open' for this purpose to mean either
+ # the OPEN state or either of the HALF_CLOSED states. Perplexingly,
+ # this excludes the reserved states.
+ # For more detail on why we're doing this in this slightly weird way,
+ # see the comment on ``STREAM_OPEN`` at the top of the file.
+ return STREAM_OPEN[self.state_machine.state]
+
+ @property
+ def closed(self):
+ """
+ Whether the stream is closed.
+ """
+ return self.state_machine.state == StreamState.CLOSED
+
+ @property
+ def closed_by(self):
+ """
+ Returns how the stream was closed, as one of StreamClosedBy.
+ """
+ return self.state_machine.stream_closed_by
+
+ def upgrade(self, client_side):
+ """
+ Called by the connection to indicate that this stream is the initial
+ request/response of an upgraded connection. Places the stream into an
+ appropriate state.
+ """
+ self.config.logger.debug("Upgrading %r", self)
+
+ assert self.stream_id == 1
+ input_ = (
+ StreamInputs.UPGRADE_CLIENT if client_side
+ else StreamInputs.UPGRADE_SERVER
+ )
+
+ # This may return events, we deliberately don't want them.
+ self.state_machine.process_input(input_)
+ return
+
+ def send_headers(self, headers, encoder, end_stream=False):
+ """
+ Returns a list of HEADERS/CONTINUATION frames to emit as either headers
+ or trailers.
+ """
+ self.config.logger.debug("Send headers %s on %r", headers, self)
+
+ # Because encoding headers makes an irreversible change to the header
+ # compression context, we make the state transition before we encode
+ # them.
+
+ # First, check if we're a client. If we are, no problem: if we aren't,
+ # we need to scan the header block to see if this is an informational
+ # response.
+ input_ = StreamInputs.SEND_HEADERS
+ if ((not self.state_machine.client) and
+ is_informational_response(headers)):
+ if end_stream:
+ raise ProtocolError(
+ "Cannot set END_STREAM on informational responses."
+ )
+
+ input_ = StreamInputs.SEND_INFORMATIONAL_HEADERS
+
+ events = self.state_machine.process_input(input_)
+
+ hf = HeadersFrame(self.stream_id)
+ hdr_validation_flags = self._build_hdr_validation_flags(events)
+ frames = self._build_headers_frames(
+ headers, encoder, hf, hdr_validation_flags
+ )
+
+ if end_stream:
+ # Not a bug: the END_STREAM flag is valid on the initial HEADERS
+ # frame, not the CONTINUATION frames that follow.
+ self.state_machine.process_input(StreamInputs.SEND_END_STREAM)
+ frames[0].flags.add('END_STREAM')
+
+ if self.state_machine.trailers_sent and not end_stream:
+ raise ProtocolError("Trailers must have END_STREAM set.")
+
+ if self.state_machine.client and self._authority is None:
+ self._authority = authority_from_headers(headers)
+
+ # store request method for _initialize_content_length
+ self.request_method = extract_method_header(headers)
+
+ return frames
+
+ def push_stream_in_band(self, related_stream_id, headers, encoder):
+ """
+ Returns a list of PUSH_PROMISE/CONTINUATION frames to emit as a pushed
+ stream header. Called on the stream that has the PUSH_PROMISE frame
+ sent on it.
+ """
+ self.config.logger.debug("Push stream %r", self)
+
+ # Because encoding headers makes an irreversible change to the header
+ # compression context, we make the state transition *first*.
+
+ events = self.state_machine.process_input(
+ StreamInputs.SEND_PUSH_PROMISE
+ )
+
+ ppf = PushPromiseFrame(self.stream_id)
+ ppf.promised_stream_id = related_stream_id
+ hdr_validation_flags = self._build_hdr_validation_flags(events)
+ frames = self._build_headers_frames(
+ headers, encoder, ppf, hdr_validation_flags
+ )
+
+ return frames
+
+ def locally_pushed(self):
+ """
+ Mark this stream as one that was pushed by this peer. Must be called
+ immediately after initialization. Sends no frames, simply updates the
+ state machine.
+ """
+ # This does not trigger any events.
+ events = self.state_machine.process_input(
+ StreamInputs.SEND_PUSH_PROMISE
+ )
+ assert not events
+ return []
+
+ def send_data(self, data, end_stream=False, pad_length=None):
+ """
+ Prepare some data frames. Optionally end the stream.
+
+ .. warning:: Does not perform flow control checks.
+ """
+ self.config.logger.debug(
+ "Send data on %r with end stream set to %s", self, end_stream
+ )
+
+ self.state_machine.process_input(StreamInputs.SEND_DATA)
+
+ df = DataFrame(self.stream_id)
+ df.data = data
+ if end_stream:
+ self.state_machine.process_input(StreamInputs.SEND_END_STREAM)
+ df.flags.add('END_STREAM')
+ if pad_length is not None:
+ df.flags.add('PADDED')
+ df.pad_length = pad_length
+
+ # Subtract flow_controlled_length to account for possible padding
+ self.outbound_flow_control_window -= df.flow_controlled_length
+ assert self.outbound_flow_control_window >= 0
+
+ return [df]
+
+ def end_stream(self):
+ """
+ End a stream without sending data.
+ """
+ self.config.logger.debug("End stream %r", self)
+
+ self.state_machine.process_input(StreamInputs.SEND_END_STREAM)
+ df = DataFrame(self.stream_id)
+ df.flags.add('END_STREAM')
+ return [df]
+
+ def advertise_alternative_service(self, field_value):
+ """
+ Advertise an RFC 7838 alternative service. The semantics of this are
+ better documented in the ``H2Connection`` class.
+ """
+ self.config.logger.debug(
+ "Advertise alternative service of %r for %r", field_value, self
+ )
+ self.state_machine.process_input(StreamInputs.SEND_ALTERNATIVE_SERVICE)
+ asf = AltSvcFrame(self.stream_id)
+ asf.field = field_value
+ return [asf]
+
+ def increase_flow_control_window(self, increment):
+ """
+ Increase the size of the flow control window for the remote side.
+ """
+ self.config.logger.debug(
+ "Increase flow control window for %r by %d",
+ self, increment
+ )
+ self.state_machine.process_input(StreamInputs.SEND_WINDOW_UPDATE)
+ self._inbound_window_manager.window_opened(increment)
+
+ wuf = WindowUpdateFrame(self.stream_id)
+ wuf.window_increment = increment
+ return [wuf]
+
+ def receive_push_promise_in_band(self,
+ promised_stream_id,
+ headers,
+ header_encoding):
+ """
+ Receives a push promise frame sent on this stream, pushing a remote
+ stream. This is called on the stream that has the PUSH_PROMISE sent
+ on it.
+ """
+ self.config.logger.debug(
+ "Receive Push Promise on %r for remote stream %d",
+ self, promised_stream_id
+ )
+ events = self.state_machine.process_input(
+ StreamInputs.RECV_PUSH_PROMISE
+ )
+ events[0].pushed_stream_id = promised_stream_id
+
+ hdr_validation_flags = self._build_hdr_validation_flags(events)
+ events[0].headers = self._process_received_headers(
+ headers, hdr_validation_flags, header_encoding
+ )
+ return [], events
+
+ def remotely_pushed(self, pushed_headers):
+ """
+ Mark this stream as one that was pushed by the remote peer. Must be
+ called immediately after initialization. Sends no frames, simply
+ updates the state machine.
+ """
+ self.config.logger.debug("%r pushed by remote peer", self)
+ events = self.state_machine.process_input(
+ StreamInputs.RECV_PUSH_PROMISE
+ )
+ self._authority = authority_from_headers(pushed_headers)
+ return [], events
+
+ def receive_headers(self, headers, end_stream, header_encoding):
+ """
+ Receive a set of headers (or trailers).
+ """
+ if is_informational_response(headers):
+ if end_stream:
+ raise ProtocolError(
+ "Cannot set END_STREAM on informational responses"
+ )
+ input_ = StreamInputs.RECV_INFORMATIONAL_HEADERS
+ else:
+ input_ = StreamInputs.RECV_HEADERS
+
+ events = self.state_machine.process_input(input_)
+
+ if end_stream:
+ es_events = self.state_machine.process_input(
+ StreamInputs.RECV_END_STREAM
+ )
+ events[0].stream_ended = es_events[0]
+ events += es_events
+
+ self._initialize_content_length(headers)
+
+ if isinstance(events[0], TrailersReceived):
+ if not end_stream:
+ raise ProtocolError("Trailers must have END_STREAM set")
+
+ hdr_validation_flags = self._build_hdr_validation_flags(events)
+ events[0].headers = self._process_received_headers(
+ headers, hdr_validation_flags, header_encoding
+ )
+ return [], events
+
+ def receive_data(self, data, end_stream, flow_control_len):
+ """
+ Receive some data.
+ """
+ self.config.logger.debug(
+ "Receive data on %r with end stream %s and flow control length "
+ "set to %d", self, end_stream, flow_control_len
+ )
+ events = self.state_machine.process_input(StreamInputs.RECV_DATA)
+ self._inbound_window_manager.window_consumed(flow_control_len)
+ self._track_content_length(len(data), end_stream)
+
+ if end_stream:
+ es_events = self.state_machine.process_input(
+ StreamInputs.RECV_END_STREAM
+ )
+ events[0].stream_ended = es_events[0]
+ events.extend(es_events)
+
+ events[0].data = data
+ events[0].flow_controlled_length = flow_control_len
+ return [], events
+
+ def receive_window_update(self, increment):
+ """
+ Handle a WINDOW_UPDATE increment.
+ """
+ self.config.logger.debug(
+ "Receive Window Update on %r for increment of %d",
+ self, increment
+ )
+ events = self.state_machine.process_input(
+ StreamInputs.RECV_WINDOW_UPDATE
+ )
+ frames = []
+
+ # If we encounter a problem with incrementing the flow control window,
+ # this should be treated as a *stream* error, not a *connection* error.
+ # That means we need to catch the error and forcibly close the stream.
+ if events:
+ events[0].delta = increment
+ try:
+ self.outbound_flow_control_window = guard_increment_window(
+ self.outbound_flow_control_window,
+ increment
+ )
+ except FlowControlError:
+ # Ok, this is bad. We're going to need to perform a local
+ # reset.
+ event = StreamReset()
+ event.stream_id = self.stream_id
+ event.error_code = ErrorCodes.FLOW_CONTROL_ERROR
+ event.remote_reset = False
+
+ events = [event]
+ frames = self.reset_stream(event.error_code)
+
+ return frames, events
+
+ def receive_continuation(self):
+ """
+        A naked CONTINUATION frame has been received. This is always an error,
+        but the kind of error depends on the state of the stream, and the
+        stream state must still be transitioned, so we need to handle it here.
+ """
+ self.config.logger.debug("Receive Continuation frame on %r", self)
+ self.state_machine.process_input(
+ StreamInputs.RECV_CONTINUATION
+ )
+ assert False, "Should not be reachable"
+
+ def receive_alt_svc(self, frame):
+ """
+ An Alternative Service frame was received on the stream. This frame
+ inherits the origin associated with this stream.
+ """
+ self.config.logger.debug(
+ "Receive Alternative Service frame on stream %r", self
+ )
+
+ # If the origin is present, RFC 7838 says we have to ignore it.
+ if frame.origin:
+ return [], []
+
+ events = self.state_machine.process_input(
+ StreamInputs.RECV_ALTERNATIVE_SERVICE
+ )
+
+ # There are lots of situations where we want to ignore the ALTSVC
+ # frame. If we need to pay attention, we'll have an event and should
+ # fill it out.
+ if events:
+ assert isinstance(events[0], AlternativeServiceAvailable)
+ events[0].origin = self._authority
+ events[0].field_value = frame.field
+
+ return [], events
+
+ def reset_stream(self, error_code=0):
+ """
+ Close the stream locally. Reset the stream with an error code.
+ """
+ self.config.logger.debug(
+ "Local reset %r with error code: %d", self, error_code
+ )
+ self.state_machine.process_input(StreamInputs.SEND_RST_STREAM)
+
+ rsf = RstStreamFrame(self.stream_id)
+ rsf.error_code = error_code
+ return [rsf]
+
+ def stream_reset(self, frame):
+ """
+ Handle a stream being reset remotely.
+ """
+ self.config.logger.debug(
+ "Remote reset %r with error code: %d", self, frame.error_code
+ )
+ events = self.state_machine.process_input(StreamInputs.RECV_RST_STREAM)
+
+ if events:
+ # We don't fire an event if this stream is already closed.
+ events[0].error_code = _error_code_from_int(frame.error_code)
+
+ return [], events
+
+ def acknowledge_received_data(self, acknowledged_size):
+ """
+ The user has informed us that they've processed some amount of data
+ that was received on this stream. Pass that to the window manager and
+ potentially return some WindowUpdate frames.
+ """
+ self.config.logger.debug(
+ "Acknowledge received data with size %d on %r",
+ acknowledged_size, self
+ )
+ increment = self._inbound_window_manager.process_bytes(
+ acknowledged_size
+ )
+ if increment:
+ f = WindowUpdateFrame(self.stream_id)
+ f.window_increment = increment
+ return [f]
+
+ return []
+
+ def _build_hdr_validation_flags(self, events):
+ """
+ Constructs a set of header validation flags for use when normalizing
+ and validating header blocks.
+ """
+ is_trailer = isinstance(
+ events[0], (_TrailersSent, TrailersReceived)
+ )
+ is_response_header = isinstance(
+ events[0],
+ (
+ _ResponseSent,
+ ResponseReceived,
+ InformationalResponseReceived
+ )
+ )
+ is_push_promise = isinstance(
+ events[0], (PushedStreamReceived, _PushedRequestSent)
+ )
+
+ return HeaderValidationFlags(
+ is_client=self.state_machine.client,
+ is_trailer=is_trailer,
+ is_response_header=is_response_header,
+ is_push_promise=is_push_promise,
+ )
+
+ def _build_headers_frames(self,
+ headers,
+ encoder,
+ first_frame,
+ hdr_validation_flags):
+ """
+ Helper method to build headers or push promise frames.
+ """
+ # We need to lowercase the header names, and to ensure that secure
+ # header fields are kept out of compression contexts.
+ if self.config.normalize_outbound_headers:
+ headers = normalize_outbound_headers(
+ headers, hdr_validation_flags
+ )
+ if self.config.validate_outbound_headers:
+ headers = validate_outbound_headers(
+ headers, hdr_validation_flags
+ )
+
+ encoded_headers = encoder.encode(headers)
+
+ # Slice into blocks of max_outbound_frame_size. Be careful with this:
+ # it only works right because we never send padded frames or priority
+ # information on the frames. Revisit this if we do.
+ header_blocks = [
+ encoded_headers[i:i+self.max_outbound_frame_size]
+ for i in range(
+ 0, len(encoded_headers), self.max_outbound_frame_size
+ )
+ ]
+
+ frames = []
+ first_frame.data = header_blocks[0]
+ frames.append(first_frame)
+
+ for block in header_blocks[1:]:
+ cf = ContinuationFrame(self.stream_id)
+ cf.data = block
+ frames.append(cf)
+
+ frames[-1].flags.add('END_HEADERS')
+ return frames
+
+ def _process_received_headers(self,
+ headers,
+ header_validation_flags,
+ header_encoding):
+ """
+ When headers have been received from the remote peer, run a processing
+ pipeline on them to transform them into the appropriate form for
+ attaching to an event.
+ """
+ if self.config.normalize_inbound_headers:
+ headers = normalize_inbound_headers(
+ headers, header_validation_flags
+ )
+
+ if self.config.validate_inbound_headers:
+ headers = validate_headers(headers, header_validation_flags)
+
+ if header_encoding:
+ headers = _decode_headers(headers, header_encoding)
+
+ # The above steps are all generators, so we need to concretize the
+ # headers now.
+ return list(headers)
+
+ def _initialize_content_length(self, headers):
+ """
+ Checks the headers for a content-length header and initializes the
+ _expected_content_length field from it. It's not an error for no
+ Content-Length header to be present.
+ """
+ if self.request_method == b'HEAD':
+ self._expected_content_length = 0
+ return
+
+ for n, v in headers:
+ if n == b'content-length':
+ try:
+ self._expected_content_length = int(v, 10)
+ except ValueError:
+ raise ProtocolError(
+ "Invalid content-length header: %s" % v
+ )
+
+ return
+
+ def _track_content_length(self, length, end_stream):
+ """
+ Update the expected content length in response to data being received.
+ Validates that the appropriate amount of data is sent. Always updates
+ the received data, but only validates the length against the
+ content-length header if one was sent.
+
+ :param length: The length of the body chunk received.
+ :param end_stream: If this is the last body chunk received.
+ """
+ self._actual_content_length += length
+ actual = self._actual_content_length
+ expected = self._expected_content_length
+
+ if expected is not None:
+ if expected < actual:
+ raise InvalidBodyLengthError(expected, actual)
+
+ if end_stream and expected != actual:
+ raise InvalidBodyLengthError(expected, actual)
+
+ def _inbound_flow_control_change_from_settings(self, delta):
+ """
+ We changed SETTINGS_INITIAL_WINDOW_SIZE, which means we need to
+ update the target window size for flow control. For our flow control
+ strategy, this means we need to do two things: we need to adjust the
+ current window size, but we also need to set the target maximum window
+ size to the new value.
+ """
+ new_max_size = self._inbound_window_manager.max_window_size + delta
+ self._inbound_window_manager.window_opened(delta)
+ self._inbound_window_manager.max_window_size = new_max_size
+
+
+def _decode_headers(headers, encoding):
+ """
+ Given an iterable of header two-tuples and an encoding, decodes those
+ headers using that encoding while preserving the type of the header tuple.
+ This ensures that the use of ``HeaderTuple`` is preserved.
+ """
+ for header in headers:
+ # This function expects to work on decoded headers, which are always
+ # HeaderTuple objects.
+ assert isinstance(header, HeaderTuple)
+
+ name, value = header
+ name = name.decode(encoding)
+ value = value.decode(encoding)
+ yield header.__class__(name, value)
diff --git a/packages/h2/utilities.py b/packages/h2/utilities.py
new file mode 100644
index 000000000..eb07f575e
--- /dev/null
+++ b/packages/h2/utilities.py
@@ -0,0 +1,656 @@
+# -*- coding: utf-8 -*-
+"""
+h2/utilities
+~~~~~~~~~~~~
+
+Utility functions that do not belong in a separate module.
+"""
+import collections
+import re
+from string import whitespace
+
+from hpack import HeaderTuple, NeverIndexedHeaderTuple
+
+from .exceptions import ProtocolError, FlowControlError
+
+UPPER_RE = re.compile(b"[A-Z]")
+
+# A set of headers that are hop-by-hop or connection-specific and thus
+# forbidden in HTTP/2. This list comes from RFC 7540 § 8.1.2.2.
+CONNECTION_HEADERS = frozenset([
+ b'connection', u'connection',
+ b'proxy-connection', u'proxy-connection',
+ b'keep-alive', u'keep-alive',
+ b'transfer-encoding', u'transfer-encoding',
+ b'upgrade', u'upgrade',
+])
+
+
+_ALLOWED_PSEUDO_HEADER_FIELDS = frozenset([
+ b':method', u':method',
+ b':scheme', u':scheme',
+ b':authority', u':authority',
+ b':path', u':path',
+ b':status', u':status',
+ b':protocol', u':protocol',
+])
+
+
+_SECURE_HEADERS = frozenset([
+ # May have basic credentials which are vulnerable to dictionary attacks.
+ b'authorization', u'authorization',
+ b'proxy-authorization', u'proxy-authorization',
+])
+
+
+_REQUEST_ONLY_HEADERS = frozenset([
+ b':scheme', u':scheme',
+ b':path', u':path',
+ b':authority', u':authority',
+ b':method', u':method',
+ b':protocol', u':protocol',
+])
+
+
+_RESPONSE_ONLY_HEADERS = frozenset([b':status', u':status'])
+
+
+# A Set of pseudo headers that are only valid if the method is
+# CONNECT, see RFC 8441 § 5
+_CONNECT_REQUEST_ONLY_HEADERS = frozenset([b':protocol', u':protocol'])
+
+
+_WHITESPACE = frozenset(map(ord, whitespace))
+
+
+def _secure_headers(headers, hdr_validation_flags):
+ """
+ Certain headers are at risk of being attacked during the header compression
+ phase, and so need to be kept out of header compression contexts. This
+ function automatically transforms certain specific headers into HPACK
+ never-indexed fields to ensure they don't get added to header compression
+ contexts.
+
+ This function currently implements two rules:
+
+ - 'authorization' and 'proxy-authorization' fields are automatically made
+ never-indexed.
+ - Any 'cookie' header field shorter than 20 bytes long is made
+ never-indexed.
+
+ These fields are the most at-risk. These rules are inspired by Firefox
+ and nghttp2.
+ """
+ for header in headers:
+ if header[0] in _SECURE_HEADERS:
+ yield NeverIndexedHeaderTuple(*header)
+ elif header[0] in (b'cookie', u'cookie') and len(header[1]) < 20:
+ yield NeverIndexedHeaderTuple(*header)
+ else:
+ yield header
+
+
+def extract_method_header(headers):
+ """
+ Extracts the request method from the headers list.
+ """
+ for k, v in headers:
+ if k in (b':method', u':method'):
+ if not isinstance(v, bytes):
+ return v.encode('utf-8')
+ else:
+ return v
+
+
+def is_informational_response(headers):
+ """
+ Searches a header block for a :status header to confirm that a given
+ collection of headers are an informational response. Assumes the header
+ block is well formed: that is, that the HTTP/2 special headers are first
+ in the block, and so that it can stop looking when it finds the first
+ header field whose name does not begin with a colon.
+
+ :param headers: The HTTP/2 header block.
+ :returns: A boolean indicating if this is an informational response.
+ """
+ for n, v in headers:
+ if isinstance(n, bytes):
+ sigil = b':'
+ status = b':status'
+ informational_start = b'1'
+ else:
+ sigil = u':'
+ status = u':status'
+ informational_start = u'1'
+
+ # If we find a non-special header, we're done here: stop looping.
+ if not n.startswith(sigil):
+ return False
+
+ # This isn't the status header, bail.
+ if n != status:
+ continue
+
+ # If the first digit is a 1, we've got informational headers.
+ return v.startswith(informational_start)
+
+
+def guard_increment_window(current, increment):
+ """
+ Increments a flow control window, guarding against that window becoming too
+ large.
+
+ :param current: The current value of the flow control window.
+ :param increment: The increment to apply to that window.
+ :returns: The new value of the window.
+ :raises: ``FlowControlError``
+ """
+ # The largest value the flow control window may take.
+ LARGEST_FLOW_CONTROL_WINDOW = 2**31 - 1
+
+ new_size = current + increment
+
+ if new_size > LARGEST_FLOW_CONTROL_WINDOW:
+ raise FlowControlError(
+ "May not increment flow control window past %d" %
+ LARGEST_FLOW_CONTROL_WINDOW
+ )
+
+ return new_size
+
+
+def authority_from_headers(headers):
+ """
+ Given a header set, searches for the authority header and returns the
+ value.
+
+ Note that this doesn't terminate early, so should only be called if the
+    headers are for a client request. Otherwise, it will loop over the entire
+ header set, which is potentially unwise.
+
+ :param headers: The HTTP header set.
+ :returns: The value of the authority header, or ``None``.
+ :rtype: ``bytes`` or ``None``.
+ """
+ for n, v in headers:
+ # This gets run against headers that come both from HPACK and from the
+ # user, so we may have unicode floating around in here. We only want
+ # bytes.
+ if n in (b':authority', u':authority'):
+ return v.encode('utf-8') if not isinstance(v, bytes) else v
+
+ return None
+
+
+# Flags used by the validate_headers pipeline to determine which checks
+# should be applied to a given set of headers.
+HeaderValidationFlags = collections.namedtuple(
+ 'HeaderValidationFlags',
+ ['is_client', 'is_trailer', 'is_response_header', 'is_push_promise']
+)
+
+
+def validate_headers(headers, hdr_validation_flags):
+ """
+ Validates a header sequence against a set of constraints from RFC 7540.
+
+ :param headers: The HTTP header set.
+ :param hdr_validation_flags: An instance of HeaderValidationFlags.
+ """
+ # This validation logic is built on a sequence of generators that are
+ # iterated over to provide the final header list. This reduces some of the
+ # overhead of doing this checking. However, it's worth noting that this
+ # checking remains somewhat expensive, and attempts should be made wherever
+ # possible to reduce the time spent doing them.
+ #
+    # For example, we avoid tuple unpacking in loops because it represents a
+ # fixed cost that we don't want to spend, instead indexing into the header
+ # tuples.
+ headers = _reject_uppercase_header_fields(
+ headers, hdr_validation_flags
+ )
+ headers = _reject_surrounding_whitespace(
+ headers, hdr_validation_flags
+ )
+ headers = _reject_te(
+ headers, hdr_validation_flags
+ )
+ headers = _reject_connection_header(
+ headers, hdr_validation_flags
+ )
+ headers = _reject_pseudo_header_fields(
+ headers, hdr_validation_flags
+ )
+ headers = _check_host_authority_header(
+ headers, hdr_validation_flags
+ )
+ headers = _check_path_header(headers, hdr_validation_flags)
+
+ return headers
+
+
+def _reject_uppercase_header_fields(headers, hdr_validation_flags):
+ """
+ Raises a ProtocolError if any uppercase character is found in a header
+ block.
+ """
+ for header in headers:
+ if UPPER_RE.search(header[0]):
+ raise ProtocolError(
+ "Received uppercase header name %s." % header[0])
+ yield header
+
+
+def _reject_surrounding_whitespace(headers, hdr_validation_flags):
+ """
+ Raises a ProtocolError if any header name or value is surrounded by
+ whitespace characters.
+ """
+ # For compatibility with RFC 7230 header fields, we need to allow the field
+ # value to be an empty string. This is ludicrous, but technically allowed.
+ # The field name may not be empty, though, so we can safely assume that it
+ # must have at least one character in it and throw exceptions if it
+ # doesn't.
+ for header in headers:
+ if header[0][0] in _WHITESPACE or header[0][-1] in _WHITESPACE:
+ raise ProtocolError(
+ "Received header name surrounded by whitespace %r" % header[0])
+ if header[1] and ((header[1][0] in _WHITESPACE) or
+ (header[1][-1] in _WHITESPACE)):
+ raise ProtocolError(
+ "Received header value surrounded by whitespace %r" % header[1]
+ )
+ yield header
+
+
+def _reject_te(headers, hdr_validation_flags):
+ """
+ Raises a ProtocolError if the TE header is present in a header block and
+ its value is anything other than "trailers".
+ """
+ for header in headers:
+ if header[0] in (b'te', u'te'):
+ if header[1].lower() not in (b'trailers', u'trailers'):
+ raise ProtocolError(
+ "Invalid value for Transfer-Encoding header: %s" %
+ header[1]
+ )
+
+ yield header
+
+
+def _reject_connection_header(headers, hdr_validation_flags):
+ """
+ Raises a ProtocolError if the Connection header is present in a header
+ block.
+ """
+ for header in headers:
+ if header[0] in CONNECTION_HEADERS:
+ raise ProtocolError(
+ "Connection-specific header field present: %s." % header[0]
+ )
+
+ yield header
+
+
+def _custom_startswith(test_string, bytes_prefix, unicode_prefix):
+ """
+ Given a string that might be a bytestring or a Unicode string,
+ return True if it starts with the appropriate prefix.
+ """
+ if isinstance(test_string, bytes):
+ return test_string.startswith(bytes_prefix)
+ else:
+ return test_string.startswith(unicode_prefix)
+
+
+def _assert_header_in_set(string_header, bytes_header, header_set):
+ """
+ Given a set of header names, checks whether the string or byte version of
+ the header name is present. Raises a Protocol error with the appropriate
+ error if it's missing.
+ """
+ if not (string_header in header_set or bytes_header in header_set):
+ raise ProtocolError(
+ "Header block missing mandatory %s header" % string_header
+ )
+
+
+def _reject_pseudo_header_fields(headers, hdr_validation_flags):
+ """
+ Raises a ProtocolError if duplicate pseudo-header fields are found in a
+ header block or if a pseudo-header field appears in a block after an
+ ordinary header field.
+
+ Raises a ProtocolError if pseudo-header fields are found in trailers.
+ """
+ seen_pseudo_header_fields = set()
+ seen_regular_header = False
+ method = None
+
+ for header in headers:
+ if _custom_startswith(header[0], b':', u':'):
+ if header[0] in seen_pseudo_header_fields:
+ raise ProtocolError(
+ "Received duplicate pseudo-header field %s" % header[0]
+ )
+
+ seen_pseudo_header_fields.add(header[0])
+
+ if seen_regular_header:
+ raise ProtocolError(
+ "Received pseudo-header field out of sequence: %s" %
+ header[0]
+ )
+
+ if header[0] not in _ALLOWED_PSEUDO_HEADER_FIELDS:
+ raise ProtocolError(
+ "Received custom pseudo-header field %s" % header[0]
+ )
+
+ if header[0] in (b':method', u':method'):
+ if not isinstance(header[1], bytes):
+ method = header[1].encode('utf-8')
+ else:
+ method = header[1]
+
+ else:
+ seen_regular_header = True
+
+ yield header
+
+ # Check the pseudo-headers we got to confirm they're acceptable.
+ _check_pseudo_header_field_acceptability(
+ seen_pseudo_header_fields, method, hdr_validation_flags
+ )
+
+
+def _check_pseudo_header_field_acceptability(pseudo_headers,
+ method,
+ hdr_validation_flags):
+ """
+ Given the set of pseudo-headers present in a header block and the
+ validation flags, confirms that RFC 7540 allows them.
+ """
+ # Pseudo-header fields MUST NOT appear in trailers - RFC 7540 § 8.1.2.1
+ if hdr_validation_flags.is_trailer and pseudo_headers:
+ raise ProtocolError(
+ "Received pseudo-header in trailer %s" % pseudo_headers
+ )
+
+ # If ':status' pseudo-header is not there in a response header, reject it.
+ # Similarly, if ':path', ':method', or ':scheme' are not there in a request
+ # header, reject it. Additionally, if a response contains any request-only
+ # headers or vice-versa, reject it.
+ # Relevant RFC section: RFC 7540 § 8.1.2.4
+ # https://tools.ietf.org/html/rfc7540#section-8.1.2.4
+ if hdr_validation_flags.is_response_header:
+ _assert_header_in_set(u':status', b':status', pseudo_headers)
+ invalid_response_headers = pseudo_headers & _REQUEST_ONLY_HEADERS
+ if invalid_response_headers:
+ raise ProtocolError(
+ "Encountered request-only headers %s" %
+ invalid_response_headers
+ )
+ elif (not hdr_validation_flags.is_response_header and
+ not hdr_validation_flags.is_trailer):
+ # This is a request, so we need to have seen :path, :method, and
+ # :scheme.
+ _assert_header_in_set(u':path', b':path', pseudo_headers)
+ _assert_header_in_set(u':method', b':method', pseudo_headers)
+ _assert_header_in_set(u':scheme', b':scheme', pseudo_headers)
+ invalid_request_headers = pseudo_headers & _RESPONSE_ONLY_HEADERS
+ if invalid_request_headers:
+ raise ProtocolError(
+ "Encountered response-only headers %s" %
+ invalid_request_headers
+ )
+ if method != b'CONNECT':
+ invalid_headers = pseudo_headers & _CONNECT_REQUEST_ONLY_HEADERS
+ if invalid_headers:
+ raise ProtocolError(
+ "Encountered connect-request-only headers %s" %
+ invalid_headers
+ )
+
+
+def _validate_host_authority_header(headers):
+ """
+ Given the :authority and Host headers from a request block that isn't
+ a trailer, check that:
+ 1. At least one of these headers is set.
+ 2. If both headers are set, they match.
+
+ :param headers: The HTTP header set.
+ :raises: ``ProtocolError``
+ """
+ # We use None as a sentinel value. Iterate over the list of headers,
+ # and record the value of these headers (if present). We don't need
+ # to worry about receiving duplicate :authority headers, as this is
+ # enforced by the _reject_pseudo_header_fields() pipeline.
+ #
+ # TODO: We should also guard against receiving duplicate Host headers,
+ # and against sending duplicate headers.
+ authority_header_val = None
+ host_header_val = None
+
+ for header in headers:
+ if header[0] in (b':authority', u':authority'):
+ authority_header_val = header[1]
+ elif header[0] in (b'host', u'host'):
+ host_header_val = header[1]
+
+ yield header
+
+ # If we have not-None values for these variables, then we know we saw
+ # the corresponding header.
+ authority_present = (authority_header_val is not None)
+ host_present = (host_header_val is not None)
+
+ # It is an error for a request header block to contain neither
+ # an :authority header nor a Host header.
+ if not authority_present and not host_present:
+ raise ProtocolError(
+ "Request header block does not have an :authority or Host header."
+ )
+
+ # If we receive both headers, they should definitely match.
+ if authority_present and host_present:
+ if authority_header_val != host_header_val:
+ raise ProtocolError(
+ "Request header block has mismatched :authority and "
+ "Host headers: %r / %r"
+ % (authority_header_val, host_header_val)
+ )
+
+
+def _check_host_authority_header(headers, hdr_validation_flags):
+ """
+ Raises a ProtocolError if a header block arrives that does not contain an
+ :authority or a Host header, or if a header block contains both fields,
+ but their values do not match.
+ """
+ # We only expect to see :authority and Host headers on request header
+ # blocks that aren't trailers, so skip this validation if this is a
+ # response header or we're looking at trailer blocks.
+ skip_validation = (
+ hdr_validation_flags.is_response_header or
+ hdr_validation_flags.is_trailer
+ )
+ if skip_validation:
+ return headers
+
+ return _validate_host_authority_header(headers)
+
+
+def _check_path_header(headers, hdr_validation_flags):
+ """
+ Raise a ProtocolError if a header block arrives or is sent that contains an
+ empty :path header.
+ """
+ def inner():
+ for header in headers:
+ if header[0] in (b':path', u':path'):
+ if not header[1]:
+ raise ProtocolError("An empty :path header is forbidden")
+
+ yield header
+
+ # We only expect to see :authority and Host headers on request header
+ # blocks that aren't trailers, so skip this validation if this is a
+ # response header or we're looking at trailer blocks.
+ skip_validation = (
+ hdr_validation_flags.is_response_header or
+ hdr_validation_flags.is_trailer
+ )
+ if skip_validation:
+ return headers
+ else:
+ return inner()
+
+
+def _lowercase_header_names(headers, hdr_validation_flags):
+ """
+ Given an iterable of header two-tuples, rebuilds that iterable with the
+    header names lowercased. This generator produces tuples that preserve the
+    original type of each header tuple, whether a plain tuple or a
+    ``HeaderTuple``.
+ """
+ for header in headers:
+ if isinstance(header, HeaderTuple):
+ yield header.__class__(header[0].lower(), header[1])
+ else:
+ yield (header[0].lower(), header[1])
+
+
+def _strip_surrounding_whitespace(headers, hdr_validation_flags):
+ """
+ Given an iterable of header two-tuples, strip both leading and trailing
+ whitespace from both header names and header values. This generator
+    produces tuples that preserve the original type of each header tuple,
+    whether a plain tuple or a ``HeaderTuple``.
+ """
+ for header in headers:
+ if isinstance(header, HeaderTuple):
+ yield header.__class__(header[0].strip(), header[1].strip())
+ else:
+ yield (header[0].strip(), header[1].strip())
+
+
+def _strip_connection_headers(headers, hdr_validation_flags):
+ """
+ Strip any connection headers as per RFC7540 § 8.1.2.2.
+ """
+ for header in headers:
+ if header[0] not in CONNECTION_HEADERS:
+ yield header
+
+
+def _check_sent_host_authority_header(headers, hdr_validation_flags):
+ """
+ Raises an InvalidHeaderBlockError if we try to send a header block
+ that does not contain an :authority or a Host header, or if
+ the header block contains both fields, but their values do not match.
+ """
+ # We only expect to see :authority and Host headers on request header
+ # blocks that aren't trailers, so skip this validation if this is a
+ # response header or we're looking at trailer blocks.
+ skip_validation = (
+ hdr_validation_flags.is_response_header or
+ hdr_validation_flags.is_trailer
+ )
+ if skip_validation:
+ return headers
+
+ return _validate_host_authority_header(headers)
+
+
+def _combine_cookie_fields(headers, hdr_validation_flags):
+ """
+ RFC 7540 § 8.1.2.5 allows HTTP/2 clients to split the Cookie header field,
+ which must normally appear only once, into multiple fields for better
+ compression. However, they MUST be joined back up again when received.
+ This normalization step applies that transform. The side-effect is that
+ all cookie fields now appear *last* in the header block.
+ """
+ # There is a problem here about header indexing. Specifically, it's
+ # possible that all these cookies are sent with different header indexing
+ # values. At this point it shouldn't matter too much, so we apply our own
+ # logic and make them never-indexed.
+ cookies = []
+ for header in headers:
+ if header[0] == b'cookie':
+ cookies.append(header[1])
+ else:
+ yield header
+ if cookies:
+ cookie_val = b'; '.join(cookies)
+ yield NeverIndexedHeaderTuple(b'cookie', cookie_val)
+
+
+def normalize_outbound_headers(headers, hdr_validation_flags):
+ """
+ Normalizes a header sequence that we are about to send.
+
+ :param headers: The HTTP header set.
+ :param hdr_validation_flags: An instance of HeaderValidationFlags.
+ """
+ headers = _lowercase_header_names(headers, hdr_validation_flags)
+ headers = _strip_surrounding_whitespace(headers, hdr_validation_flags)
+ headers = _strip_connection_headers(headers, hdr_validation_flags)
+ headers = _secure_headers(headers, hdr_validation_flags)
+
+ return headers
+
+
+def normalize_inbound_headers(headers, hdr_validation_flags):
+ """
+ Normalizes a header sequence that we have received.
+
+ :param headers: The HTTP header set.
+ :param hdr_validation_flags: An instance of HeaderValidationFlags
+ """
+ headers = _combine_cookie_fields(headers, hdr_validation_flags)
+ return headers
+
+
+def validate_outbound_headers(headers, hdr_validation_flags):
+ """
+ Validates and normalizes a header sequence that we are about to send.
+
+ :param headers: The HTTP header set.
+ :param hdr_validation_flags: An instance of HeaderValidationFlags.
+ """
+ headers = _reject_te(
+ headers, hdr_validation_flags
+ )
+ headers = _reject_connection_header(
+ headers, hdr_validation_flags
+ )
+ headers = _reject_pseudo_header_fields(
+ headers, hdr_validation_flags
+ )
+ headers = _check_sent_host_authority_header(
+ headers, hdr_validation_flags
+ )
+ headers = _check_path_header(headers, hdr_validation_flags)
+
+ return headers
+
+
+class SizeLimitDict(collections.OrderedDict):
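+    # An OrderedDict that accepts a ``size_limit`` keyword argument and, when
+    # it is set, evicts its oldest entries (FIFO order) whenever the number of
+    # items exceeds that limit.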
+
+ def __init__(self, *args, **kwargs):
+ self._size_limit = kwargs.pop("size_limit", None)
+ super(SizeLimitDict, self).__init__(*args, **kwargs)
+
+ self._check_size_limit()
+
+ def __setitem__(self, key, value):
+ super(SizeLimitDict, self).__setitem__(key, value)
+
+ self._check_size_limit()
+
+ def _check_size_limit(self):
+ if self._size_limit is not None:
+ while len(self) > self._size_limit:
+ self.popitem(last=False)
diff --git a/packages/h2/windows.py b/packages/h2/windows.py
new file mode 100644
index 000000000..be4eb438b
--- /dev/null
+++ b/packages/h2/windows.py
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+"""
+h2/windows
+~~~~~~~~~~
+
+Defines tools for managing HTTP/2 flow control windows.
+
+The objects defined in this module are used to automatically manage HTTP/2
+flow control windows. Specifically, they keep track of what the size of the
+window is, how much data has been consumed from that window, and how much data
+the user has already used. It then implements a basic algorithm that attempts
+to manage the flow control window without user input, trying to ensure that it
+does not emit too many WINDOW_UPDATE frames.
+"""
+from __future__ import division
+
+from .exceptions import FlowControlError
+
+
+# The largest acceptable value for a HTTP/2 flow control window.
+LARGEST_FLOW_CONTROL_WINDOW = 2**31 - 1
+
+
+class WindowManager:
+ """
+ A basic HTTP/2 window manager.
+
+ :param max_window_size: The maximum size of the flow control window.
+ :type max_window_size: ``int``
+ """
+ def __init__(self, max_window_size):
+ assert max_window_size <= LARGEST_FLOW_CONTROL_WINDOW
+ self.max_window_size = max_window_size
+ self.current_window_size = max_window_size
+ self._bytes_processed = 0
+
+ def window_consumed(self, size):
+ """
+ We have received a certain number of bytes from the remote peer. This
+ necessarily shrinks the flow control window!
+
+ :param size: The number of flow controlled bytes we received from the
+ remote peer.
+ :type size: ``int``
+ :returns: Nothing.
+ :rtype: ``None``
+ """
+ self.current_window_size -= size
+ if self.current_window_size < 0:
+ raise FlowControlError("Flow control window shrunk below 0")
+
+ def window_opened(self, size):
+ """
+ The flow control window has been incremented, either because of manual
+ flow control management or because of the user changing the flow
+ control settings. This can have the effect of increasing what we
+ consider to be the "maximum" flow control window size.
+
+ This does not increase our view of how many bytes have been processed,
+ only of how much space is in the window.
+
+ :param size: The increment to the flow control window we received.
+ :type size: ``int``
+ :returns: Nothing
+ :rtype: ``None``
+ """
+ self.current_window_size += size
+
+ if self.current_window_size > LARGEST_FLOW_CONTROL_WINDOW:
+ raise FlowControlError(
+ "Flow control window mustn't exceed %d" %
+ LARGEST_FLOW_CONTROL_WINDOW
+ )
+
+ if self.current_window_size > self.max_window_size:
+ self.max_window_size = self.current_window_size
+
+ def process_bytes(self, size):
+ """
+ The application has informed us that it has processed a certain number
+ of bytes. This may cause us to want to emit a window update frame. If
+ we do want to emit a window update frame, this method will return the
+ number of bytes that we should increment the window by.
+
+ :param size: The number of flow controlled bytes that the application
+ has processed.
+ :type size: ``int``
+ :returns: The number of bytes to increment the flow control window by,
+ or ``None``.
+ :rtype: ``int`` or ``None``
+ """
+ self._bytes_processed += size
+ return self._maybe_update_window()
+
+ def _maybe_update_window(self):
+ """
+ Run the algorithm.
+
+ Our current algorithm can be described like this.
+
+ 1. If no bytes have been processed, we immediately return 0. There is
+ no meaningful way for us to hand space in the window back to the
+ remote peer, so let's not even try.
+ 2. If there is no space in the flow control window, and we have
+ processed at least 1024 bytes (or 1/4 of the window, if the window
+ is smaller), we will emit a window update frame. This is to avoid
+ the risk of blocking a stream altogether.
+ 3. If there is space in the flow control window, and we have processed
+ at least 1/2 of the window worth of bytes, we will emit a window
+ update frame. This is to minimise the number of window update frames
+ we have to emit.
+
+ In a healthy system with large flow control windows, this will
+ irregularly emit WINDOW_UPDATE frames. This prevents us starving the
+ connection by emitting eleventy bajillion WINDOW_UPDATE frames,
+ especially in situations where the remote peer is sending a lot of very
+ small DATA frames.
+ """
+ # TODO: Can the window be smaller than 1024 bytes? If not, we can
+ # streamline this algorithm.
+ if not self._bytes_processed:
+ return None
+
+ max_increment = (self.max_window_size - self.current_window_size)
+ increment = 0
+
+ # Note that, even though we may increment less than _bytes_processed,
+ # we still want to set it to zero whenever we emit an increment. This
+ # is because we'll always increment up to the maximum we can.
+ if (self.current_window_size == 0) and (
+ self._bytes_processed > min(1024, self.max_window_size // 4)):
+ increment = min(self._bytes_processed, max_increment)
+ self._bytes_processed = 0
+ elif self._bytes_processed >= (self.max_window_size // 2):
+ increment = min(self._bytes_processed, max_increment)
+ self._bytes_processed = 0
+
+ self.current_window_size += increment
+ return increment
diff --git a/packages/hpack/__init__.py b/packages/hpack/__init__.py
new file mode 100644
index 000000000..fc26ac5a5
--- /dev/null
+++ b/packages/hpack/__init__.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+"""
+hpack
+~~~~~
+
+HTTP/2 header encoding for Python.
+"""
+from .hpack import Encoder, Decoder
+from .struct import HeaderTuple, NeverIndexedHeaderTuple
+from .exceptions import (
+ HPACKError,
+ HPACKDecodingError,
+ InvalidTableIndex,
+ OversizedHeaderListError,
+ InvalidTableSizeError
+)
+
+__all__ = [
+ 'Encoder',
+ 'Decoder',
+ 'HeaderTuple',
+ 'NeverIndexedHeaderTuple',
+ 'HPACKError',
+ 'HPACKDecodingError',
+ 'InvalidTableIndex',
+ 'OversizedHeaderListError',
+ 'InvalidTableSizeError',
+]
+
+__version__ = '4.0.0'
diff --git a/packages/hpack/exceptions.py b/packages/hpack/exceptions.py
new file mode 100644
index 000000000..571ba98f2
--- /dev/null
+++ b/packages/hpack/exceptions.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+"""
+hyper/http20/exceptions
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This defines exceptions used in the HTTP/2 portion of hyper.
+"""
+
+
+class HPACKError(Exception):
+ """
+ The base class for all ``hpack`` exceptions.
+ """
+ pass
+
+
+class HPACKDecodingError(HPACKError):
+ """
+ An error has been encountered while performing HPACK decoding.
+ """
+ pass
+
+
+class InvalidTableIndex(HPACKDecodingError):
+ """
+ An invalid table index was received.
+ """
+ pass
+
+
+class OversizedHeaderListError(HPACKDecodingError):
+ """
+ A header list that was larger than we allow has been received. This may be
+ a DoS attack.
+
+ .. versionadded:: 2.3.0
+ """
+ pass
+
+
+class InvalidTableSizeError(HPACKDecodingError):
+ """
+ An attempt was made to change the decoder table size to a value larger than
+ allowed, or the list was shrunk and the remote peer didn't shrink their
+ table size.
+
+ .. versionadded:: 3.0.0
+ """
+ pass
diff --git a/packages/hpack/hpack.py b/packages/hpack/hpack.py
new file mode 100644
index 000000000..cc39bfd6f
--- /dev/null
+++ b/packages/hpack/hpack.py
@@ -0,0 +1,633 @@
+# -*- coding: utf-8 -*-
+"""
+hpack/hpack
+~~~~~~~~~~~
+
+Implements the HPACK header compression algorithm as detailed by the IETF.
+"""
+import logging
+
+from .table import HeaderTable, table_entry_size
+from .exceptions import (
+ HPACKDecodingError, OversizedHeaderListError, InvalidTableSizeError
+)
+from .huffman import HuffmanEncoder
+from .huffman_constants import (
+ REQUEST_CODES, REQUEST_CODES_LENGTH
+)
+from .huffman_table import decode_huffman
+from .struct import HeaderTuple, NeverIndexedHeaderTuple
+
+log = logging.getLogger(__name__)
+
+INDEX_NONE = b'\x00'
+INDEX_NEVER = b'\x10'
+INDEX_INCREMENTAL = b'\x40'
+
+# Precompute 2^i for 1-8 for use in prefix calcs.
+# Zero index is not used but there to save a subtraction
+# as prefix numbers are not zero indexed.
+_PREFIX_BIT_MAX_NUMBERS = [(2 ** i) - 1 for i in range(9)]
+
+try: # pragma: no cover
+ basestring = basestring
+except NameError: # pragma: no cover
+ basestring = (str, bytes)
+
+
+# We default the maximum header list we're willing to accept to 64kB. That's a
+# lot of headers, but if applications want to raise it they can do.
+DEFAULT_MAX_HEADER_LIST_SIZE = 2 ** 16
+
+
+def _unicode_if_needed(header, raw):
+ """
+ Provides a header as a unicode string if raw is False, otherwise returns
+ it as a bytestring.
+ """
+ name = bytes(header[0])
+ value = bytes(header[1])
+ if not raw:
+ name = name.decode('utf-8')
+ value = value.decode('utf-8')
+ return header.__class__(name, value)
+
+
+def encode_integer(integer, prefix_bits):
+ """
+ This encodes an integer according to the wacky integer encoding rules
+ defined in the HPACK spec.
+ """
+ log.debug("Encoding %d with %d bits", integer, prefix_bits)
+
+ if integer < 0:
+ raise ValueError(
+ "Can only encode positive integers, got %s" % integer
+ )
+
+ if prefix_bits < 1 or prefix_bits > 8:
+ raise ValueError(
+ "Prefix bits must be between 1 and 8, got %s" % prefix_bits
+ )
+
+ max_number = _PREFIX_BIT_MAX_NUMBERS[prefix_bits]
+
+ if integer < max_number:
+ return bytearray([integer]) # Seriously?
+ else:
+ elements = [max_number]
+ integer -= max_number
+
+ while integer >= 128:
+ elements.append((integer & 127) + 128)
+ integer >>= 7
+
+ elements.append(integer)
+
+ return bytearray(elements)
+
+
+def decode_integer(data, prefix_bits):
+ """
+ This decodes an integer according to the wacky integer encoding rules
+ defined in the HPACK spec. Returns a tuple of the decoded integer and the
+ number of bytes that were consumed from ``data`` in order to get that
+ integer.
+ """
+ if prefix_bits < 1 or prefix_bits > 8:
+ raise ValueError(
+ "Prefix bits must be between 1 and 8, got %s" % prefix_bits
+ )
+
+ max_number = _PREFIX_BIT_MAX_NUMBERS[prefix_bits]
+ index = 1
+ shift = 0
+ mask = (0xFF >> (8 - prefix_bits))
+
+ try:
+ number = data[0] & mask
+ if number == max_number:
+ while True:
+ next_byte = data[index]
+ index += 1
+
+ if next_byte >= 128:
+ number += (next_byte - 128) << shift
+ else:
+ number += next_byte << shift
+ break
+ shift += 7
+
+ except IndexError:
+ raise HPACKDecodingError(
+ "Unable to decode HPACK integer representation from %r" % data
+ )
+
+ log.debug("Decoded %d, consumed %d bytes", number, index)
+
+ return number, index
+
+
+def _dict_to_iterable(header_dict):
+ """
+ This converts a dictionary to an iterable of two-tuples. This is a
+ HPACK-specific function because it pulls "special-headers" out first and
+ then emits them.
+ """
+ assert isinstance(header_dict, dict)
+ keys = sorted(
+ header_dict.keys(),
+ key=lambda k: not _to_bytes(k).startswith(b':')
+ )
+ for key in keys:
+ yield key, header_dict[key]
+
+
+def _to_bytes(string):
+ """
+ Convert string to bytes.
+ """
+ if not isinstance(string, basestring): # pragma: no cover
+ string = str(string)
+
+ return string if isinstance(string, bytes) else string.encode('utf-8')
+
+
+class Encoder:
+ """
+ An HPACK encoder object. This object takes HTTP headers and emits encoded
+ HTTP/2 header blocks.
+ """
+
+ def __init__(self):
+ self.header_table = HeaderTable()
+ self.huffman_coder = HuffmanEncoder(
+ REQUEST_CODES, REQUEST_CODES_LENGTH
+ )
+ self.table_size_changes = []
+
+ @property
+ def header_table_size(self):
+ """
+ Controls the size of the HPACK header table.
+ """
+ return self.header_table.maxsize
+
+ @header_table_size.setter
+ def header_table_size(self, value):
+ self.header_table.maxsize = value
+ if self.header_table.resized:
+ self.table_size_changes.append(value)
+
+ def encode(self, headers, huffman=True):
+ """
+ Takes a set of headers and encodes them into a HPACK-encoded header
+ block.
+
+        :param headers: The headers to encode. Must be either an iterable of
+                        tuples, an iterable of :class:`HeaderTuple
+                        <hpack.HeaderTuple>`, or a ``dict``.
+
+ If an iterable of tuples, the tuples may be either
+ two-tuples or three-tuples. If they are two-tuples, the
+ tuples must be of the format ``(name, value)``. If they
+ are three-tuples, they must be of the format
+ ``(name, value, sensitive)``, where ``sensitive`` is a
+ boolean value indicating whether the header should be
+ added to header tables anywhere. If not present,
+ ``sensitive`` defaults to ``False``.
+
+                        If an iterable of :class:`HeaderTuple
+                        <hpack.HeaderTuple>`, the tuples must always be
+                        two-tuples. Instead of using ``sensitive`` as a third
+                        tuple entry, use :class:`NeverIndexedHeaderTuple
+                        <hpack.NeverIndexedHeaderTuple>` to request that
+ the field never be indexed.
+
+ .. warning:: HTTP/2 requires that all special headers
+ (headers whose names begin with ``:`` characters)
+ appear at the *start* of the header block. While
+ this method will ensure that happens for ``dict``
+ subclasses, callers using any other iterable of
+ tuples **must** ensure they place their special
+ headers at the start of the iterable.
+
+ For efficiency reasons users should prefer to use
+ iterables of two-tuples: fixing the ordering of
+ dictionary headers is an expensive operation that
+ should be avoided if possible.
+
+ :param huffman: (optional) Whether to Huffman-encode any header sent as
+ a literal value. Except for use when debugging, it is
+ recommended that this be left enabled.
+
+ :returns: A bytestring containing the HPACK-encoded header block.
+ """
+ # Transforming the headers into a header block is a procedure that can
+ # be modeled as a chain or pipe. First, the headers are encoded. This
+ # encoding can be done a number of ways. If the header name-value pair
+ # are already in the header table we can represent them using the
+ # indexed representation: the same is true if they are in the static
+ # table. Otherwise, a literal representation will be used.
+ header_block = []
+
+ # Turn the headers into a list of tuples if possible. This is the
+ # natural way to interact with them in HPACK. Because dictionaries are
+ # un-ordered, we need to make sure we grab the "special" headers first.
+ if isinstance(headers, dict):
+ headers = _dict_to_iterable(headers)
+
+ # Before we begin, if the header table size has been changed we need
+ # to signal all changes since last emission appropriately.
+ if self.header_table.resized:
+ header_block.append(self._encode_table_size_change())
+ self.header_table.resized = False
+
+ # Add each header to the header block
+ for header in headers:
+ sensitive = False
+ if isinstance(header, HeaderTuple):
+ sensitive = not header.indexable
+ elif len(header) > 2:
+ sensitive = header[2]
+
+ header = (_to_bytes(header[0]), _to_bytes(header[1]))
+ header_block.append(self.add(header, sensitive, huffman))
+
+ header_block = b''.join(header_block)
+
+ log.debug("Encoded header block to %s", header_block)
+
+ return header_block
+
+ def add(self, to_add, sensitive, huffman=False):
+ """
+ This function takes a header key-value tuple and serializes it.
+ """
+ log.debug(
+ "Adding %s to the header table, sensitive:%s, huffman:%s",
+ to_add,
+ sensitive,
+ huffman
+ )
+
+ name, value = to_add
+
+ # Set our indexing mode
+ indexbit = INDEX_INCREMENTAL if not sensitive else INDEX_NEVER
+
+ # Search for a matching header in the header table.
+ match = self.header_table.search(name, value)
+
+ if match is None:
+ # Not in the header table. Encode using the literal syntax,
+ # and add it to the header table.
+ encoded = self._encode_literal(name, value, indexbit, huffman)
+ if not sensitive:
+ self.header_table.add(name, value)
+ return encoded
+
+ # The header is in the table, break out the values. If we matched
+ # perfectly, we can use the indexed representation: otherwise we
+ # can use the indexed literal.
+ index, name, perfect = match
+
+ if perfect:
+ # Indexed representation.
+ encoded = self._encode_indexed(index)
+ else:
+ # Indexed literal. We are going to add header to the
+ # header table unconditionally. It is a future todo to
+ # filter out headers which are known to be ineffective for
+ # indexing since they just take space in the table and
+ # pushed out other valuable headers.
+ encoded = self._encode_indexed_literal(
+ index, value, indexbit, huffman
+ )
+ if not sensitive:
+ self.header_table.add(name, value)
+
+ return encoded
+
+ def _encode_indexed(self, index):
+ """
+ Encodes a header using the indexed representation.
+ """
+ field = encode_integer(index, 7)
+ field[0] |= 0x80 # we set the top bit
+ return bytes(field)
+
+ def _encode_literal(self, name, value, indexbit, huffman=False):
+ """
+        Encodes a header with a literal name and literal value. The
+        ``indexbit`` prefix determines whether the field is emitted with
+        incremental indexing, as never-indexed, or without indexing.
+ """
+ if huffman:
+ name = self.huffman_coder.encode(name)
+ value = self.huffman_coder.encode(value)
+
+ name_len = encode_integer(len(name), 7)
+ value_len = encode_integer(len(value), 7)
+
+ if huffman:
+ name_len[0] |= 0x80
+ value_len[0] |= 0x80
+
+ return b''.join(
+ [indexbit, bytes(name_len), name, bytes(value_len), value]
+ )
+
+ def _encode_indexed_literal(self, index, value, indexbit, huffman=False):
+ """
+ Encodes a header with an indexed name and a literal value and performs
+ incremental indexing.
+ """
+ if indexbit != INDEX_INCREMENTAL:
+ prefix = encode_integer(index, 4)
+ else:
+ prefix = encode_integer(index, 6)
+
+ prefix[0] |= ord(indexbit)
+
+ if huffman:
+ value = self.huffman_coder.encode(value)
+
+ value_len = encode_integer(len(value), 7)
+
+ if huffman:
+ value_len[0] |= 0x80
+
+ return b''.join([bytes(prefix), bytes(value_len), value])
+
+ def _encode_table_size_change(self):
+ """
+ Produces the encoded form of all header table size change context
+ updates.
+ """
+ block = b''
+ for size_bytes in self.table_size_changes:
+ size_bytes = encode_integer(size_bytes, 5)
+ size_bytes[0] |= 0x20
+ block += bytes(size_bytes)
+ self.table_size_changes = []
+ return block
+
+
+class Decoder:
+ """
+ An HPACK decoder object.
+
+ .. versionchanged:: 2.3.0
+ Added ``max_header_list_size`` argument.
+
+ :param max_header_list_size: The maximum decompressed size we will allow
+ for any single header block. This is a protection against DoS attacks
+ that attempt to force the application to expand a relatively small
+ amount of data into a really large header list, allowing enormous
+ amounts of memory to be allocated.
+
+        If this amount of data is exceeded, an `OversizedHeaderListError
+        <hpack.OversizedHeaderListError>` exception will be raised. At this
+ point the connection should be shut down, as the HPACK state will no
+ longer be usable.
+
+ Defaults to 64kB.
+ :type max_header_list_size: ``int``
+ """
+ def __init__(self, max_header_list_size=DEFAULT_MAX_HEADER_LIST_SIZE):
+ self.header_table = HeaderTable()
+
+ #: The maximum decompressed size we will allow for any single header
+ #: block. This is a protection against DoS attacks that attempt to
+ #: force the application to expand a relatively small amount of data
+ #: into a really large header list, allowing enormous amounts of memory
+ #: to be allocated.
+ #:
+        #: If this amount of data is exceeded, an `OversizedHeaderListError
+        #: <hpack.OversizedHeaderListError>` exception will be raised. At this
+ #: point the connection should be shut down, as the HPACK state will no
+ #: longer be usable.
+ #:
+ #: Defaults to 64kB.
+ #:
+ #: .. versionadded:: 2.3.0
+ self.max_header_list_size = max_header_list_size
+
+ #: Maximum allowed header table size.
+ #:
+ #: A HTTP/2 implementation should set this to the most recent value of
+ #: SETTINGS_HEADER_TABLE_SIZE that it sent *and has received an ACK
+ #: for*. Once this setting is set, the actual header table size will be
+ #: checked at the end of each decoding run and whenever it is changed,
+ #: to confirm that it fits in this size.
+ self.max_allowed_table_size = self.header_table.maxsize
+
+ @property
+ def header_table_size(self):
+ """
+ Controls the size of the HPACK header table.
+ """
+ return self.header_table.maxsize
+
+ @header_table_size.setter
+ def header_table_size(self, value):
+ self.header_table.maxsize = value
+
+ def decode(self, data, raw=False):
+ """
+ Takes an HPACK-encoded header block and decodes it into a header set.
+
+ :param data: A bytestring representing a complete HPACK-encoded header
+ block.
+ :param raw: (optional) Whether to return the headers as tuples of raw
+ byte strings or to decode them as UTF-8 before returning
+ them. The default value is False, which returns tuples of
+                    Unicode strings.
+ :returns: A list of two-tuples of ``(name, value)`` representing the
+ HPACK-encoded headers, in the order they were decoded.
+ :raises HPACKDecodingError: If an error is encountered while decoding
+ the header block.
+ """
+ log.debug("Decoding %s", data)
+
+ data_mem = memoryview(data)
+ headers = []
+ data_len = len(data)
+ inflated_size = 0
+ current_index = 0
+
+ while current_index < data_len:
+ # Work out what kind of header we're decoding.
+ # If the high bit is 1, it's an indexed field.
+ current = data[current_index]
+ indexed = True if current & 0x80 else False
+
+ # Otherwise, if the second-highest bit is 1 it's a field that does
+ # alter the header table.
+ literal_index = bool(current & 0x40)
+
+ # Otherwise, if the third-highest bit is 1 it's an encoding context
+ # update.
+ encoding_update = bool(current & 0x20)
+
+ if indexed:
+ header, consumed = self._decode_indexed(
+ data_mem[current_index:]
+ )
+ elif literal_index:
+ # It's a literal header that does affect the header table.
+ header, consumed = self._decode_literal_index(
+ data_mem[current_index:]
+ )
+ elif encoding_update:
+ # It's an update to the encoding context. These are forbidden
+ # in a header block after any actual header.
+ if headers:
+ raise HPACKDecodingError(
+ "Table size update not at the start of the block"
+ )
+ consumed = self._update_encoding_context(
+ data_mem[current_index:]
+ )
+ header = None
+ else:
+ # It's a literal header that does not affect the header table.
+ header, consumed = self._decode_literal_no_index(
+ data_mem[current_index:]
+ )
+
+ if header:
+ headers.append(header)
+ inflated_size += table_entry_size(*header)
+
+ if inflated_size > self.max_header_list_size:
+ raise OversizedHeaderListError(
+ "A header list larger than %d has been received" %
+ self.max_header_list_size
+ )
+
+ current_index += consumed
+
+ # Confirm that the table size is lower than the maximum. We do this
+ # here to ensure that we catch when the max has been *shrunk* and the
+ # remote peer hasn't actually done that.
+ self._assert_valid_table_size()
+
+ try:
+ return [_unicode_if_needed(h, raw) for h in headers]
+ except UnicodeDecodeError:
+ raise HPACKDecodingError("Unable to decode headers as UTF-8.")
+
+ def _assert_valid_table_size(self):
+ """
+ Check that the table size set by the encoder is lower than the maximum
+ we expect to have.
+ """
+ if self.header_table_size > self.max_allowed_table_size:
+ raise InvalidTableSizeError(
+ "Encoder did not shrink table size to within the max"
+ )
+
+ def _update_encoding_context(self, data):
+ """
+ Handles a byte that updates the encoding context.
+ """
+ # We've been asked to resize the header table.
+ new_size, consumed = decode_integer(data, 5)
+ if new_size > self.max_allowed_table_size:
+ raise InvalidTableSizeError(
+ "Encoder exceeded max allowable table size"
+ )
+ self.header_table_size = new_size
+ return consumed
+
+ def _decode_indexed(self, data):
+ """
+ Decodes a header represented using the indexed representation.
+ """
+ index, consumed = decode_integer(data, 7)
+ header = HeaderTuple(*self.header_table.get_by_index(index))
+ log.debug("Decoded %s, consumed %d", header, consumed)
+ return header, consumed
+
+ def _decode_literal_no_index(self, data):
+ return self._decode_literal(data, False)
+
+ def _decode_literal_index(self, data):
+ return self._decode_literal(data, True)
+
+ def _decode_literal(self, data, should_index):
+ """
+ Decodes a header represented with a literal.
+ """
+ total_consumed = 0
+
+ # When should_index is true, if the low six bits of the first byte are
+ # nonzero, the header name is indexed.
+ # When should_index is false, if the low four bits of the first byte
+ # are nonzero the header name is indexed.
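+ #
+ # As an illustrative aside (editorial note, not part of the upstream
+ # logic): a first byte of 0x41 (0100 0001) selects the "literal with
+ # incremental indexing" form, so should_index is True and the low six
+ # bits give indexed name 1, ":authority" in the HPACK static table. A
+ # first byte of 0x10 (0001 0000) selects the never-indexed form: the
+ # 0x10 bit marks the header as not indexable and the low four bits are
+ # zero, so the name follows as a literal string.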
+ if should_index:
+ indexed_name = data[0] & 0x3F
+ name_len = 6
+ not_indexable = False
+ else:
+ high_byte = data[0]
+ indexed_name = high_byte & 0x0F
+ name_len = 4
+ not_indexable = high_byte & 0x10
+
+ if indexed_name:
+ # Indexed header name.
+ index, consumed = decode_integer(data, name_len)
+ name = self.header_table.get_by_index(index)[0]
+
+ total_consumed = consumed
+ length = 0
+ else:
+ # Literal header name. The first byte was consumed, so we need to
+ # move forward.
+ data = data[1:]
+
+ length, consumed = decode_integer(data, 7)
+ name = data[consumed:consumed + length]
+ if len(name) != length:
+ raise HPACKDecodingError("Truncated header block")
+
+ if data[0] & 0x80:
+ name = decode_huffman(name)
+ total_consumed = consumed + length + 1 # Since we moved forward 1.
+
+ data = data[consumed + length:]
+
+ # The header value is definitely length-based.
+ length, consumed = decode_integer(data, 7)
+ value = data[consumed:consumed + length]
+ if len(value) != length:
+ raise HPACKDecodingError("Truncated header block")
+
+ if data[0] & 0x80:
+ value = decode_huffman(value)
+
+ # Update the total consumed length.
+ total_consumed += length + consumed
+
+ # If we have been told never to index the header field, encode that in
+ # the tuple we use.
+ if not_indexable:
+ header = NeverIndexedHeaderTuple(name, value)
+ else:
+ header = HeaderTuple(name, value)
+
+ # If we've been asked to index this, add it to the header table.
+ if should_index:
+ self.header_table.add(name, value)
+
+ log.debug(
+ "Decoded %s, total consumed %d bytes, indexed %s",
+ header,
+ total_consumed,
+ should_index
+ )
+
+ return header, total_consumed
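+
+
+# A minimal usage sketch (editorial illustration, not part of the upstream
+# module): the single byte 0x82 is the indexed representation of static
+# table entry 2, so decoding it yields the ':method: GET' pseudo-header.
+#
+#     decoder = Decoder()
+#     decoder.decode(b'\x82')             # [(':method', 'GET')]
+#     decoder.decode(b'\x82', raw=True)   # the same pair as bytestrings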
diff --git a/packages/hpack/huffman.py b/packages/hpack/huffman.py
new file mode 100644
index 000000000..595d69b6e
--- /dev/null
+++ b/packages/hpack/huffman.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+"""
+hpack/huffman
+~~~~~~~~~~~~~
+
+An implementation of the Huffman encoder used for HPACK, building on the
+fixed Huffman code table defined in the HPACK specification.
+"""
+
+
+class HuffmanEncoder:
+ """
+ Encodes a string according to the Huffman encoding table defined in the
+ HPACK specification.
+ """
+ def __init__(self, huffman_code_list, huffman_code_list_lengths):
+ self.huffman_code_list = huffman_code_list
+ self.huffman_code_list_lengths = huffman_code_list_lengths
+
+ def encode(self, bytes_to_encode):
+ """
+ Given a string of bytes, encodes them according to the HPACK Huffman
+ specification.
+ """
+ # If handed the empty string, just immediately return.
+ if not bytes_to_encode:
+ return b''
+
+ final_num = 0
+ final_int_len = 0
+
+ # Turn each byte into its huffman code. These codes aren't necessarily
+ # octet aligned, so keep track of how far through an octet we are. To
+ # handle this cleanly, just use a single giant integer.
+ for byte in bytes_to_encode:
+ bin_int_len = self.huffman_code_list_lengths[byte]
+ bin_int = self.huffman_code_list[byte] & (
+ 2 ** (bin_int_len + 1) - 1
+ )
+ final_num <<= bin_int_len
+ final_num |= bin_int
+ final_int_len += bin_int_len
+
+ # Pad out to an octet with ones.
+ bits_to_be_padded = (8 - (final_int_len % 8)) % 8
+ final_num <<= bits_to_be_padded
+ final_num |= (1 << bits_to_be_padded) - 1
+
+ # Convert the number to hex and strip off the leading '0x' and the
+ # trailing 'L', if present.
+ final_num = hex(final_num)[2:].rstrip('L')
+
+ # If this is odd, prepend a zero.
+ final_num = '0' + final_num if len(final_num) % 2 != 0 else final_num
+
+ # This number should have twice as many digits as bytes. If not, we're
+ # missing some leading zeroes. Work out how many bytes we want and how
+ # many digits we have, then add the missing zero digits to the front.
+ total_bytes = (final_int_len + bits_to_be_padded) // 8
+ expected_digits = total_bytes * 2
+
+ if len(final_num) != expected_digits:
+ missing_digits = expected_digits - len(final_num)
+ final_num = ('0' * missing_digits) + final_num
+
+ return bytes.fromhex(final_num)
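+
+
+# A minimal usage sketch (editorial illustration, not part of the upstream
+# module), assuming this vendored package is importable as ``hpack``:
+#
+#     from hpack.huffman_constants import REQUEST_CODES, REQUEST_CODES_LENGTH
+#     from hpack.huffman_table import decode_huffman
+#
+#     encoder = HuffmanEncoder(REQUEST_CODES, REQUEST_CODES_LENGTH)
+#     encoder.encode(b'a')        # b'\x1f': the 5-bit code 00011 plus three
+#                                 # padding 1-bits
+#     decode_huffman(b'\x1f')     # b'a'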
diff --git a/packages/hpack/huffman_constants.py b/packages/hpack/huffman_constants.py
new file mode 100644
index 000000000..4caf012d4
--- /dev/null
+++ b/packages/hpack/huffman_constants.py
@@ -0,0 +1,289 @@
+# -*- coding: utf-8 -*-
+"""
+hpack/huffman_constants
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Defines the constant Huffman table. This takes up an upsetting amount of space,
+but c'est la vie.
+"""
+# flake8: noqa
+
+REQUEST_CODES = [
+ 0x1ff8,
+ 0x7fffd8,
+ 0xfffffe2,
+ 0xfffffe3,
+ 0xfffffe4,
+ 0xfffffe5,
+ 0xfffffe6,
+ 0xfffffe7,
+ 0xfffffe8,
+ 0xffffea,
+ 0x3ffffffc,
+ 0xfffffe9,
+ 0xfffffea,
+ 0x3ffffffd,
+ 0xfffffeb,
+ 0xfffffec,
+ 0xfffffed,
+ 0xfffffee,
+ 0xfffffef,
+ 0xffffff0,
+ 0xffffff1,
+ 0xffffff2,
+ 0x3ffffffe,
+ 0xffffff3,
+ 0xffffff4,
+ 0xffffff5,
+ 0xffffff6,
+ 0xffffff7,
+ 0xffffff8,
+ 0xffffff9,
+ 0xffffffa,
+ 0xffffffb,
+ 0x14,
+ 0x3f8,
+ 0x3f9,
+ 0xffa,
+ 0x1ff9,
+ 0x15,
+ 0xf8,
+ 0x7fa,
+ 0x3fa,
+ 0x3fb,
+ 0xf9,
+ 0x7fb,
+ 0xfa,
+ 0x16,
+ 0x17,
+ 0x18,
+ 0x0,
+ 0x1,
+ 0x2,
+ 0x19,
+ 0x1a,
+ 0x1b,
+ 0x1c,
+ 0x1d,
+ 0x1e,
+ 0x1f,
+ 0x5c,
+ 0xfb,
+ 0x7ffc,
+ 0x20,
+ 0xffb,
+ 0x3fc,
+ 0x1ffa,
+ 0x21,
+ 0x5d,
+ 0x5e,
+ 0x5f,
+ 0x60,
+ 0x61,
+ 0x62,
+ 0x63,
+ 0x64,
+ 0x65,
+ 0x66,
+ 0x67,
+ 0x68,
+ 0x69,
+ 0x6a,
+ 0x6b,
+ 0x6c,
+ 0x6d,
+ 0x6e,
+ 0x6f,
+ 0x70,
+ 0x71,
+ 0x72,
+ 0xfc,
+ 0x73,
+ 0xfd,
+ 0x1ffb,
+ 0x7fff0,
+ 0x1ffc,
+ 0x3ffc,
+ 0x22,
+ 0x7ffd,
+ 0x3,
+ 0x23,
+ 0x4,
+ 0x24,
+ 0x5,
+ 0x25,
+ 0x26,
+ 0x27,
+ 0x6,
+ 0x74,
+ 0x75,
+ 0x28,
+ 0x29,
+ 0x2a,
+ 0x7,
+ 0x2b,
+ 0x76,
+ 0x2c,
+ 0x8,
+ 0x9,
+ 0x2d,
+ 0x77,
+ 0x78,
+ 0x79,
+ 0x7a,
+ 0x7b,
+ 0x7ffe,
+ 0x7fc,
+ 0x3ffd,
+ 0x1ffd,
+ 0xffffffc,
+ 0xfffe6,
+ 0x3fffd2,
+ 0xfffe7,
+ 0xfffe8,
+ 0x3fffd3,
+ 0x3fffd4,
+ 0x3fffd5,
+ 0x7fffd9,
+ 0x3fffd6,
+ 0x7fffda,
+ 0x7fffdb,
+ 0x7fffdc,
+ 0x7fffdd,
+ 0x7fffde,
+ 0xffffeb,
+ 0x7fffdf,
+ 0xffffec,
+ 0xffffed,
+ 0x3fffd7,
+ 0x7fffe0,
+ 0xffffee,
+ 0x7fffe1,
+ 0x7fffe2,
+ 0x7fffe3,
+ 0x7fffe4,
+ 0x1fffdc,
+ 0x3fffd8,
+ 0x7fffe5,
+ 0x3fffd9,
+ 0x7fffe6,
+ 0x7fffe7,
+ 0xffffef,
+ 0x3fffda,
+ 0x1fffdd,
+ 0xfffe9,
+ 0x3fffdb,
+ 0x3fffdc,
+ 0x7fffe8,
+ 0x7fffe9,
+ 0x1fffde,
+ 0x7fffea,
+ 0x3fffdd,
+ 0x3fffde,
+ 0xfffff0,
+ 0x1fffdf,
+ 0x3fffdf,
+ 0x7fffeb,
+ 0x7fffec,
+ 0x1fffe0,
+ 0x1fffe1,
+ 0x3fffe0,
+ 0x1fffe2,
+ 0x7fffed,
+ 0x3fffe1,
+ 0x7fffee,
+ 0x7fffef,
+ 0xfffea,
+ 0x3fffe2,
+ 0x3fffe3,
+ 0x3fffe4,
+ 0x7ffff0,
+ 0x3fffe5,
+ 0x3fffe6,
+ 0x7ffff1,
+ 0x3ffffe0,
+ 0x3ffffe1,
+ 0xfffeb,
+ 0x7fff1,
+ 0x3fffe7,
+ 0x7ffff2,
+ 0x3fffe8,
+ 0x1ffffec,
+ 0x3ffffe2,
+ 0x3ffffe3,
+ 0x3ffffe4,
+ 0x7ffffde,
+ 0x7ffffdf,
+ 0x3ffffe5,
+ 0xfffff1,
+ 0x1ffffed,
+ 0x7fff2,
+ 0x1fffe3,
+ 0x3ffffe6,
+ 0x7ffffe0,
+ 0x7ffffe1,
+ 0x3ffffe7,
+ 0x7ffffe2,
+ 0xfffff2,
+ 0x1fffe4,
+ 0x1fffe5,
+ 0x3ffffe8,
+ 0x3ffffe9,
+ 0xffffffd,
+ 0x7ffffe3,
+ 0x7ffffe4,
+ 0x7ffffe5,
+ 0xfffec,
+ 0xfffff3,
+ 0xfffed,
+ 0x1fffe6,
+ 0x3fffe9,
+ 0x1fffe7,
+ 0x1fffe8,
+ 0x7ffff3,
+ 0x3fffea,
+ 0x3fffeb,
+ 0x1ffffee,
+ 0x1ffffef,
+ 0xfffff4,
+ 0xfffff5,
+ 0x3ffffea,
+ 0x7ffff4,
+ 0x3ffffeb,
+ 0x7ffffe6,
+ 0x3ffffec,
+ 0x3ffffed,
+ 0x7ffffe7,
+ 0x7ffffe8,
+ 0x7ffffe9,
+ 0x7ffffea,
+ 0x7ffffeb,
+ 0xffffffe,
+ 0x7ffffec,
+ 0x7ffffed,
+ 0x7ffffee,
+ 0x7ffffef,
+ 0x7fffff0,
+ 0x3ffffee,
+ 0x3fffffff,
+]
+
+REQUEST_CODES_LENGTH = [
+ 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
+ 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
+ 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
+ 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
+ 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
+ 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
+ 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
+ 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
+ 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
+ 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
+ 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
+ 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
+ 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
+ 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
+ 30,
+]
diff --git a/packages/hpack/huffman_table.py b/packages/hpack/huffman_table.py
new file mode 100644
index 000000000..c199ef5a3
--- /dev/null
+++ b/packages/hpack/huffman_table.py
@@ -0,0 +1,4739 @@
+# -*- coding: utf-8 -*-
+"""
+hpack/huffman_table
+~~~~~~~~~~~~~~~~~~~
+
+This implementation of a Huffman decoding table for HTTP/2 is essentially a
+Python port of the work originally done for nghttp2's Huffman decoding. For
+this reason, while this file is made available under the MIT license as is the
+rest of this module, this file is undoubtedly a derivative work of the nghttp2
+file ``nghttp2_hd_huffman_data.c``, obtained from
+https://github.com/tatsuhiro-t/nghttp2/ at commit
+d2b55ad1a245e1d1964579fa3fac36ebf3939e72. That work is made available under
+the Apache 2.0 license under the following terms:
+
+ Copyright (c) 2013 Tatsuhiro Tsujikawa
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+The essence of this approach is that it builds a finite state machine out of
+4-bit nibbles of Huffman coded data. The input function passes 4 bits worth of
+data to the state machine each time, which uses those 4 bits of data along with
+the current accumulated state data to process the data given.
+
+For the sake of efficiency, the in-memory representation of the states,
+transitions, and result values of the state machine are represented as a long
+list containing three-tuples. This list is enormously long, and viewing it as
+an in-memory representation is not very clear, but it is laid out here in a way
+that is intended to be *somewhat* more clear.
+
+Essentially, the list is structured as 256 collections of 16 entries (one for
+each nibble) of three-tuples. Each collection is called a "node", and the
+zeroth collection is called the "root node". The state machine tracks one
+value: the "state" byte.
+
+For each nibble passed to the state machine, it first multiplies the "state"
+byte by 16 and adds the numerical value of the nibble. This number is the index
+into the large flat list.
+
+The three-tuple that is found by looking up that index consists of three
+values:
+
+- a new state value, used for subsequent decoding
+- a collection of flags, used to determine whether data is emitted or whether
+ the state machine is complete.
+- the byte value to emit, assuming that emitting a byte is required.
+
+The flags are consulted, if necessary a byte is emitted, and then the next
+nibble is used. This continues until the state machine believes it has
+completely Huffman-decoded the data.
+
+This approach has relatively little indirection, and therefore performs
+relatively well, particularly on implementations like PyPy where the cost of
+loops at the Python-level is not too expensive. The total number of loop
+iterations is 4x the number of bytes passed to the decoder.
+"""
+from .exceptions import HPACKDecodingError
+
+
+# This defines the state machine "class" at the top of the file. The reason we
+# do this is to keep the terrifying monster state table at the *bottom* of the
+# file so you don't have to actually *look* at the damn thing.
+def decode_huffman(huffman_string):
+ """
+ Given a bytestring of Huffman-encoded data for HPACK, returns a bytestring
+ of the decompressed data.
+ """
+ if not huffman_string:
+ return b''
+
+ state = 0
+ flags = 0
+ decoded_bytes = bytearray()
+
+ # Perversely, bytearrays are a lot more convenient across Python 2 and
+ # Python 3 because they behave *the same way* on both platforms. Given that
+ # we really do want numerical bytes when we iterate here, let's use a
+ # bytearray.
+ huffman_string = bytearray(huffman_string)
+
+ # This loop is unrolled somewhat. Because we use a nibble, not a byte, we
+ # need to handle each byte twice, once per nibble. We unroll that: it
+ # makes the loop body a bit longer, but that's ok.
+ for input_byte in huffman_string:
+ index = (state * 16) + (input_byte >> 4)
+ state, flags, output_byte = HUFFMAN_TABLE[index]
+
+ if flags & HUFFMAN_FAIL:
+ raise HPACKDecodingError("Invalid Huffman String")
+
+ if flags & HUFFMAN_EMIT_SYMBOL:
+ decoded_bytes.append(output_byte)
+
+ index = (state * 16) + (input_byte & 0x0F)
+ state, flags, output_byte = HUFFMAN_TABLE[index]
+
+ if flags & HUFFMAN_FAIL:
+ raise HPACKDecodingError("Invalid Huffman String")
+
+ if flags & HUFFMAN_EMIT_SYMBOL:
+ decoded_bytes.append(output_byte)
+
+ if not (flags & HUFFMAN_COMPLETE):
+ raise HPACKDecodingError("Incomplete Huffman string")
+
+ return bytes(decoded_bytes)
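+
+# A worked sketch of the nibble walk described in the module docstring
+# (editorial illustration): for the input byte 0x1f, the Huffman encoding of
+# b'a', the first nibble is 0x1, so index = 0 * 16 + 1 and HUFFMAN_TABLE[1]
+# moves to state 5 without emitting; the second nibble is 0xf, so
+# index = 5 * 16 + 15 and HUFFMAN_TABLE[95] emits byte 97 (b'a') with
+# HUFFMAN_COMPLETE set, and decode_huffman(b'\x1f') therefore returns b'a'.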
+
+
+# Some decoder flags to control state transitions.
+HUFFMAN_COMPLETE = 1
+HUFFMAN_EMIT_SYMBOL = (1 << 1)
+HUFFMAN_FAIL = (1 << 2)
+
+# This is the monster table. Avert your eyes, children.
+HUFFMAN_TABLE = [
+ # Node 0 (Root Node, never emits symbols.)
+ (4, 0, 0),
+ (5, 0, 0),
+ (7, 0, 0),
+ (8, 0, 0),
+ (11, 0, 0),
+ (12, 0, 0),
+ (16, 0, 0),
+ (19, 0, 0),
+ (25, 0, 0),
+ (28, 0, 0),
+ (32, 0, 0),
+ (35, 0, 0),
+ (42, 0, 0),
+ (49, 0, 0),
+ (57, 0, 0),
+ (64, HUFFMAN_COMPLETE, 0),
+
+ # Node 1
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 48),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 49),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 50),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 97),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 99),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 101),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 105),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 111),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 115),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 116),
+ (13, 0, 0),
+ (14, 0, 0),
+ (17, 0, 0),
+ (18, 0, 0),
+ (20, 0, 0),
+ (21, 0, 0),
+
+ # Node 2
+ (1, HUFFMAN_EMIT_SYMBOL, 48),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 48),
+ (1, HUFFMAN_EMIT_SYMBOL, 49),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 49),
+ (1, HUFFMAN_EMIT_SYMBOL, 50),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 50),
+ (1, HUFFMAN_EMIT_SYMBOL, 97),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 97),
+ (1, HUFFMAN_EMIT_SYMBOL, 99),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 99),
+ (1, HUFFMAN_EMIT_SYMBOL, 101),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 101),
+ (1, HUFFMAN_EMIT_SYMBOL, 105),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 105),
+ (1, HUFFMAN_EMIT_SYMBOL, 111),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 111),
+
+ # Node 3
+ (2, HUFFMAN_EMIT_SYMBOL, 48),
+ (9, HUFFMAN_EMIT_SYMBOL, 48),
+ (23, HUFFMAN_EMIT_SYMBOL, 48),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 48),
+ (2, HUFFMAN_EMIT_SYMBOL, 49),
+ (9, HUFFMAN_EMIT_SYMBOL, 49),
+ (23, HUFFMAN_EMIT_SYMBOL, 49),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 49),
+ (2, HUFFMAN_EMIT_SYMBOL, 50),
+ (9, HUFFMAN_EMIT_SYMBOL, 50),
+ (23, HUFFMAN_EMIT_SYMBOL, 50),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 50),
+ (2, HUFFMAN_EMIT_SYMBOL, 97),
+ (9, HUFFMAN_EMIT_SYMBOL, 97),
+ (23, HUFFMAN_EMIT_SYMBOL, 97),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 97),
+
+ # Node 4
+ (3, HUFFMAN_EMIT_SYMBOL, 48),
+ (6, HUFFMAN_EMIT_SYMBOL, 48),
+ (10, HUFFMAN_EMIT_SYMBOL, 48),
+ (15, HUFFMAN_EMIT_SYMBOL, 48),
+ (24, HUFFMAN_EMIT_SYMBOL, 48),
+ (31, HUFFMAN_EMIT_SYMBOL, 48),
+ (41, HUFFMAN_EMIT_SYMBOL, 48),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 48),
+ (3, HUFFMAN_EMIT_SYMBOL, 49),
+ (6, HUFFMAN_EMIT_SYMBOL, 49),
+ (10, HUFFMAN_EMIT_SYMBOL, 49),
+ (15, HUFFMAN_EMIT_SYMBOL, 49),
+ (24, HUFFMAN_EMIT_SYMBOL, 49),
+ (31, HUFFMAN_EMIT_SYMBOL, 49),
+ (41, HUFFMAN_EMIT_SYMBOL, 49),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 49),
+
+ # Node 5
+ (3, HUFFMAN_EMIT_SYMBOL, 50),
+ (6, HUFFMAN_EMIT_SYMBOL, 50),
+ (10, HUFFMAN_EMIT_SYMBOL, 50),
+ (15, HUFFMAN_EMIT_SYMBOL, 50),
+ (24, HUFFMAN_EMIT_SYMBOL, 50),
+ (31, HUFFMAN_EMIT_SYMBOL, 50),
+ (41, HUFFMAN_EMIT_SYMBOL, 50),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 50),
+ (3, HUFFMAN_EMIT_SYMBOL, 97),
+ (6, HUFFMAN_EMIT_SYMBOL, 97),
+ (10, HUFFMAN_EMIT_SYMBOL, 97),
+ (15, HUFFMAN_EMIT_SYMBOL, 97),
+ (24, HUFFMAN_EMIT_SYMBOL, 97),
+ (31, HUFFMAN_EMIT_SYMBOL, 97),
+ (41, HUFFMAN_EMIT_SYMBOL, 97),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 97),
+
+ # Node 6
+ (2, HUFFMAN_EMIT_SYMBOL, 99),
+ (9, HUFFMAN_EMIT_SYMBOL, 99),
+ (23, HUFFMAN_EMIT_SYMBOL, 99),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 99),
+ (2, HUFFMAN_EMIT_SYMBOL, 101),
+ (9, HUFFMAN_EMIT_SYMBOL, 101),
+ (23, HUFFMAN_EMIT_SYMBOL, 101),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 101),
+ (2, HUFFMAN_EMIT_SYMBOL, 105),
+ (9, HUFFMAN_EMIT_SYMBOL, 105),
+ (23, HUFFMAN_EMIT_SYMBOL, 105),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 105),
+ (2, HUFFMAN_EMIT_SYMBOL, 111),
+ (9, HUFFMAN_EMIT_SYMBOL, 111),
+ (23, HUFFMAN_EMIT_SYMBOL, 111),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 111),
+
+ # Node 7
+ (3, HUFFMAN_EMIT_SYMBOL, 99),
+ (6, HUFFMAN_EMIT_SYMBOL, 99),
+ (10, HUFFMAN_EMIT_SYMBOL, 99),
+ (15, HUFFMAN_EMIT_SYMBOL, 99),
+ (24, HUFFMAN_EMIT_SYMBOL, 99),
+ (31, HUFFMAN_EMIT_SYMBOL, 99),
+ (41, HUFFMAN_EMIT_SYMBOL, 99),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 99),
+ (3, HUFFMAN_EMIT_SYMBOL, 101),
+ (6, HUFFMAN_EMIT_SYMBOL, 101),
+ (10, HUFFMAN_EMIT_SYMBOL, 101),
+ (15, HUFFMAN_EMIT_SYMBOL, 101),
+ (24, HUFFMAN_EMIT_SYMBOL, 101),
+ (31, HUFFMAN_EMIT_SYMBOL, 101),
+ (41, HUFFMAN_EMIT_SYMBOL, 101),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 101),
+
+ # Node 8
+ (3, HUFFMAN_EMIT_SYMBOL, 105),
+ (6, HUFFMAN_EMIT_SYMBOL, 105),
+ (10, HUFFMAN_EMIT_SYMBOL, 105),
+ (15, HUFFMAN_EMIT_SYMBOL, 105),
+ (24, HUFFMAN_EMIT_SYMBOL, 105),
+ (31, HUFFMAN_EMIT_SYMBOL, 105),
+ (41, HUFFMAN_EMIT_SYMBOL, 105),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 105),
+ (3, HUFFMAN_EMIT_SYMBOL, 111),
+ (6, HUFFMAN_EMIT_SYMBOL, 111),
+ (10, HUFFMAN_EMIT_SYMBOL, 111),
+ (15, HUFFMAN_EMIT_SYMBOL, 111),
+ (24, HUFFMAN_EMIT_SYMBOL, 111),
+ (31, HUFFMAN_EMIT_SYMBOL, 111),
+ (41, HUFFMAN_EMIT_SYMBOL, 111),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 111),
+
+ # Node 9
+ (1, HUFFMAN_EMIT_SYMBOL, 115),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 115),
+ (1, HUFFMAN_EMIT_SYMBOL, 116),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 116),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 32),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 37),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 45),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 46),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 47),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 51),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 52),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 53),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 54),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 55),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 56),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 57),
+
+ # Node 10
+ (2, HUFFMAN_EMIT_SYMBOL, 115),
+ (9, HUFFMAN_EMIT_SYMBOL, 115),
+ (23, HUFFMAN_EMIT_SYMBOL, 115),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 115),
+ (2, HUFFMAN_EMIT_SYMBOL, 116),
+ (9, HUFFMAN_EMIT_SYMBOL, 116),
+ (23, HUFFMAN_EMIT_SYMBOL, 116),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 116),
+ (1, HUFFMAN_EMIT_SYMBOL, 32),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 32),
+ (1, HUFFMAN_EMIT_SYMBOL, 37),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 37),
+ (1, HUFFMAN_EMIT_SYMBOL, 45),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 45),
+ (1, HUFFMAN_EMIT_SYMBOL, 46),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 46),
+
+ # Node 11
+ (3, HUFFMAN_EMIT_SYMBOL, 115),
+ (6, HUFFMAN_EMIT_SYMBOL, 115),
+ (10, HUFFMAN_EMIT_SYMBOL, 115),
+ (15, HUFFMAN_EMIT_SYMBOL, 115),
+ (24, HUFFMAN_EMIT_SYMBOL, 115),
+ (31, HUFFMAN_EMIT_SYMBOL, 115),
+ (41, HUFFMAN_EMIT_SYMBOL, 115),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 115),
+ (3, HUFFMAN_EMIT_SYMBOL, 116),
+ (6, HUFFMAN_EMIT_SYMBOL, 116),
+ (10, HUFFMAN_EMIT_SYMBOL, 116),
+ (15, HUFFMAN_EMIT_SYMBOL, 116),
+ (24, HUFFMAN_EMIT_SYMBOL, 116),
+ (31, HUFFMAN_EMIT_SYMBOL, 116),
+ (41, HUFFMAN_EMIT_SYMBOL, 116),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 116),
+
+ # Node 12
+ (2, HUFFMAN_EMIT_SYMBOL, 32),
+ (9, HUFFMAN_EMIT_SYMBOL, 32),
+ (23, HUFFMAN_EMIT_SYMBOL, 32),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 32),
+ (2, HUFFMAN_EMIT_SYMBOL, 37),
+ (9, HUFFMAN_EMIT_SYMBOL, 37),
+ (23, HUFFMAN_EMIT_SYMBOL, 37),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 37),
+ (2, HUFFMAN_EMIT_SYMBOL, 45),
+ (9, HUFFMAN_EMIT_SYMBOL, 45),
+ (23, HUFFMAN_EMIT_SYMBOL, 45),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 45),
+ (2, HUFFMAN_EMIT_SYMBOL, 46),
+ (9, HUFFMAN_EMIT_SYMBOL, 46),
+ (23, HUFFMAN_EMIT_SYMBOL, 46),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 46),
+
+ # Node 13
+ (3, HUFFMAN_EMIT_SYMBOL, 32),
+ (6, HUFFMAN_EMIT_SYMBOL, 32),
+ (10, HUFFMAN_EMIT_SYMBOL, 32),
+ (15, HUFFMAN_EMIT_SYMBOL, 32),
+ (24, HUFFMAN_EMIT_SYMBOL, 32),
+ (31, HUFFMAN_EMIT_SYMBOL, 32),
+ (41, HUFFMAN_EMIT_SYMBOL, 32),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 32),
+ (3, HUFFMAN_EMIT_SYMBOL, 37),
+ (6, HUFFMAN_EMIT_SYMBOL, 37),
+ (10, HUFFMAN_EMIT_SYMBOL, 37),
+ (15, HUFFMAN_EMIT_SYMBOL, 37),
+ (24, HUFFMAN_EMIT_SYMBOL, 37),
+ (31, HUFFMAN_EMIT_SYMBOL, 37),
+ (41, HUFFMAN_EMIT_SYMBOL, 37),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 37),
+
+ # Node 14
+ (3, HUFFMAN_EMIT_SYMBOL, 45),
+ (6, HUFFMAN_EMIT_SYMBOL, 45),
+ (10, HUFFMAN_EMIT_SYMBOL, 45),
+ (15, HUFFMAN_EMIT_SYMBOL, 45),
+ (24, HUFFMAN_EMIT_SYMBOL, 45),
+ (31, HUFFMAN_EMIT_SYMBOL, 45),
+ (41, HUFFMAN_EMIT_SYMBOL, 45),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 45),
+ (3, HUFFMAN_EMIT_SYMBOL, 46),
+ (6, HUFFMAN_EMIT_SYMBOL, 46),
+ (10, HUFFMAN_EMIT_SYMBOL, 46),
+ (15, HUFFMAN_EMIT_SYMBOL, 46),
+ (24, HUFFMAN_EMIT_SYMBOL, 46),
+ (31, HUFFMAN_EMIT_SYMBOL, 46),
+ (41, HUFFMAN_EMIT_SYMBOL, 46),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 46),
+
+ # Node 15
+ (1, HUFFMAN_EMIT_SYMBOL, 47),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 47),
+ (1, HUFFMAN_EMIT_SYMBOL, 51),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 51),
+ (1, HUFFMAN_EMIT_SYMBOL, 52),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 52),
+ (1, HUFFMAN_EMIT_SYMBOL, 53),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 53),
+ (1, HUFFMAN_EMIT_SYMBOL, 54),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 54),
+ (1, HUFFMAN_EMIT_SYMBOL, 55),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 55),
+ (1, HUFFMAN_EMIT_SYMBOL, 56),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 56),
+ (1, HUFFMAN_EMIT_SYMBOL, 57),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 57),
+
+ # Node 16
+ (2, HUFFMAN_EMIT_SYMBOL, 47),
+ (9, HUFFMAN_EMIT_SYMBOL, 47),
+ (23, HUFFMAN_EMIT_SYMBOL, 47),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 47),
+ (2, HUFFMAN_EMIT_SYMBOL, 51),
+ (9, HUFFMAN_EMIT_SYMBOL, 51),
+ (23, HUFFMAN_EMIT_SYMBOL, 51),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 51),
+ (2, HUFFMAN_EMIT_SYMBOL, 52),
+ (9, HUFFMAN_EMIT_SYMBOL, 52),
+ (23, HUFFMAN_EMIT_SYMBOL, 52),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 52),
+ (2, HUFFMAN_EMIT_SYMBOL, 53),
+ (9, HUFFMAN_EMIT_SYMBOL, 53),
+ (23, HUFFMAN_EMIT_SYMBOL, 53),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 53),
+
+ # Node 17
+ (3, HUFFMAN_EMIT_SYMBOL, 47),
+ (6, HUFFMAN_EMIT_SYMBOL, 47),
+ (10, HUFFMAN_EMIT_SYMBOL, 47),
+ (15, HUFFMAN_EMIT_SYMBOL, 47),
+ (24, HUFFMAN_EMIT_SYMBOL, 47),
+ (31, HUFFMAN_EMIT_SYMBOL, 47),
+ (41, HUFFMAN_EMIT_SYMBOL, 47),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 47),
+ (3, HUFFMAN_EMIT_SYMBOL, 51),
+ (6, HUFFMAN_EMIT_SYMBOL, 51),
+ (10, HUFFMAN_EMIT_SYMBOL, 51),
+ (15, HUFFMAN_EMIT_SYMBOL, 51),
+ (24, HUFFMAN_EMIT_SYMBOL, 51),
+ (31, HUFFMAN_EMIT_SYMBOL, 51),
+ (41, HUFFMAN_EMIT_SYMBOL, 51),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 51),
+
+ # Node 18
+ (3, HUFFMAN_EMIT_SYMBOL, 52),
+ (6, HUFFMAN_EMIT_SYMBOL, 52),
+ (10, HUFFMAN_EMIT_SYMBOL, 52),
+ (15, HUFFMAN_EMIT_SYMBOL, 52),
+ (24, HUFFMAN_EMIT_SYMBOL, 52),
+ (31, HUFFMAN_EMIT_SYMBOL, 52),
+ (41, HUFFMAN_EMIT_SYMBOL, 52),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 52),
+ (3, HUFFMAN_EMIT_SYMBOL, 53),
+ (6, HUFFMAN_EMIT_SYMBOL, 53),
+ (10, HUFFMAN_EMIT_SYMBOL, 53),
+ (15, HUFFMAN_EMIT_SYMBOL, 53),
+ (24, HUFFMAN_EMIT_SYMBOL, 53),
+ (31, HUFFMAN_EMIT_SYMBOL, 53),
+ (41, HUFFMAN_EMIT_SYMBOL, 53),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 53),
+
+ # Node 19
+ (2, HUFFMAN_EMIT_SYMBOL, 54),
+ (9, HUFFMAN_EMIT_SYMBOL, 54),
+ (23, HUFFMAN_EMIT_SYMBOL, 54),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 54),
+ (2, HUFFMAN_EMIT_SYMBOL, 55),
+ (9, HUFFMAN_EMIT_SYMBOL, 55),
+ (23, HUFFMAN_EMIT_SYMBOL, 55),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 55),
+ (2, HUFFMAN_EMIT_SYMBOL, 56),
+ (9, HUFFMAN_EMIT_SYMBOL, 56),
+ (23, HUFFMAN_EMIT_SYMBOL, 56),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 56),
+ (2, HUFFMAN_EMIT_SYMBOL, 57),
+ (9, HUFFMAN_EMIT_SYMBOL, 57),
+ (23, HUFFMAN_EMIT_SYMBOL, 57),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 57),
+
+ # Node 20
+ (3, HUFFMAN_EMIT_SYMBOL, 54),
+ (6, HUFFMAN_EMIT_SYMBOL, 54),
+ (10, HUFFMAN_EMIT_SYMBOL, 54),
+ (15, HUFFMAN_EMIT_SYMBOL, 54),
+ (24, HUFFMAN_EMIT_SYMBOL, 54),
+ (31, HUFFMAN_EMIT_SYMBOL, 54),
+ (41, HUFFMAN_EMIT_SYMBOL, 54),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 54),
+ (3, HUFFMAN_EMIT_SYMBOL, 55),
+ (6, HUFFMAN_EMIT_SYMBOL, 55),
+ (10, HUFFMAN_EMIT_SYMBOL, 55),
+ (15, HUFFMAN_EMIT_SYMBOL, 55),
+ (24, HUFFMAN_EMIT_SYMBOL, 55),
+ (31, HUFFMAN_EMIT_SYMBOL, 55),
+ (41, HUFFMAN_EMIT_SYMBOL, 55),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 55),
+
+ # Node 21
+ (3, HUFFMAN_EMIT_SYMBOL, 56),
+ (6, HUFFMAN_EMIT_SYMBOL, 56),
+ (10, HUFFMAN_EMIT_SYMBOL, 56),
+ (15, HUFFMAN_EMIT_SYMBOL, 56),
+ (24, HUFFMAN_EMIT_SYMBOL, 56),
+ (31, HUFFMAN_EMIT_SYMBOL, 56),
+ (41, HUFFMAN_EMIT_SYMBOL, 56),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 56),
+ (3, HUFFMAN_EMIT_SYMBOL, 57),
+ (6, HUFFMAN_EMIT_SYMBOL, 57),
+ (10, HUFFMAN_EMIT_SYMBOL, 57),
+ (15, HUFFMAN_EMIT_SYMBOL, 57),
+ (24, HUFFMAN_EMIT_SYMBOL, 57),
+ (31, HUFFMAN_EMIT_SYMBOL, 57),
+ (41, HUFFMAN_EMIT_SYMBOL, 57),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 57),
+
+ # Node 22
+ (26, 0, 0),
+ (27, 0, 0),
+ (29, 0, 0),
+ (30, 0, 0),
+ (33, 0, 0),
+ (34, 0, 0),
+ (36, 0, 0),
+ (37, 0, 0),
+ (43, 0, 0),
+ (46, 0, 0),
+ (50, 0, 0),
+ (53, 0, 0),
+ (58, 0, 0),
+ (61, 0, 0),
+ (65, 0, 0),
+ (68, HUFFMAN_COMPLETE, 0),
+
+ # Node 23
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 61),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 65),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 95),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 98),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 100),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 102),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 103),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 104),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 108),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 109),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 110),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 112),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 114),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 117),
+ (38, 0, 0),
+ (39, 0, 0),
+
+ # Node 24
+ (1, HUFFMAN_EMIT_SYMBOL, 61),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 61),
+ (1, HUFFMAN_EMIT_SYMBOL, 65),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 65),
+ (1, HUFFMAN_EMIT_SYMBOL, 95),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 95),
+ (1, HUFFMAN_EMIT_SYMBOL, 98),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 98),
+ (1, HUFFMAN_EMIT_SYMBOL, 100),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 100),
+ (1, HUFFMAN_EMIT_SYMBOL, 102),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 102),
+ (1, HUFFMAN_EMIT_SYMBOL, 103),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 103),
+ (1, HUFFMAN_EMIT_SYMBOL, 104),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 104),
+
+ # Node 25
+ (2, HUFFMAN_EMIT_SYMBOL, 61),
+ (9, HUFFMAN_EMIT_SYMBOL, 61),
+ (23, HUFFMAN_EMIT_SYMBOL, 61),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 61),
+ (2, HUFFMAN_EMIT_SYMBOL, 65),
+ (9, HUFFMAN_EMIT_SYMBOL, 65),
+ (23, HUFFMAN_EMIT_SYMBOL, 65),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 65),
+ (2, HUFFMAN_EMIT_SYMBOL, 95),
+ (9, HUFFMAN_EMIT_SYMBOL, 95),
+ (23, HUFFMAN_EMIT_SYMBOL, 95),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 95),
+ (2, HUFFMAN_EMIT_SYMBOL, 98),
+ (9, HUFFMAN_EMIT_SYMBOL, 98),
+ (23, HUFFMAN_EMIT_SYMBOL, 98),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 98),
+
+ # Node 26
+ (3, HUFFMAN_EMIT_SYMBOL, 61),
+ (6, HUFFMAN_EMIT_SYMBOL, 61),
+ (10, HUFFMAN_EMIT_SYMBOL, 61),
+ (15, HUFFMAN_EMIT_SYMBOL, 61),
+ (24, HUFFMAN_EMIT_SYMBOL, 61),
+ (31, HUFFMAN_EMIT_SYMBOL, 61),
+ (41, HUFFMAN_EMIT_SYMBOL, 61),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 61),
+ (3, HUFFMAN_EMIT_SYMBOL, 65),
+ (6, HUFFMAN_EMIT_SYMBOL, 65),
+ (10, HUFFMAN_EMIT_SYMBOL, 65),
+ (15, HUFFMAN_EMIT_SYMBOL, 65),
+ (24, HUFFMAN_EMIT_SYMBOL, 65),
+ (31, HUFFMAN_EMIT_SYMBOL, 65),
+ (41, HUFFMAN_EMIT_SYMBOL, 65),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 65),
+
+ # Node 27
+ (3, HUFFMAN_EMIT_SYMBOL, 95),
+ (6, HUFFMAN_EMIT_SYMBOL, 95),
+ (10, HUFFMAN_EMIT_SYMBOL, 95),
+ (15, HUFFMAN_EMIT_SYMBOL, 95),
+ (24, HUFFMAN_EMIT_SYMBOL, 95),
+ (31, HUFFMAN_EMIT_SYMBOL, 95),
+ (41, HUFFMAN_EMIT_SYMBOL, 95),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 95),
+ (3, HUFFMAN_EMIT_SYMBOL, 98),
+ (6, HUFFMAN_EMIT_SYMBOL, 98),
+ (10, HUFFMAN_EMIT_SYMBOL, 98),
+ (15, HUFFMAN_EMIT_SYMBOL, 98),
+ (24, HUFFMAN_EMIT_SYMBOL, 98),
+ (31, HUFFMAN_EMIT_SYMBOL, 98),
+ (41, HUFFMAN_EMIT_SYMBOL, 98),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 98),
+
+ # Node 28
+ (2, HUFFMAN_EMIT_SYMBOL, 100),
+ (9, HUFFMAN_EMIT_SYMBOL, 100),
+ (23, HUFFMAN_EMIT_SYMBOL, 100),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 100),
+ (2, HUFFMAN_EMIT_SYMBOL, 102),
+ (9, HUFFMAN_EMIT_SYMBOL, 102),
+ (23, HUFFMAN_EMIT_SYMBOL, 102),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 102),
+ (2, HUFFMAN_EMIT_SYMBOL, 103),
+ (9, HUFFMAN_EMIT_SYMBOL, 103),
+ (23, HUFFMAN_EMIT_SYMBOL, 103),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 103),
+ (2, HUFFMAN_EMIT_SYMBOL, 104),
+ (9, HUFFMAN_EMIT_SYMBOL, 104),
+ (23, HUFFMAN_EMIT_SYMBOL, 104),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 104),
+
+ # Node 29
+ (3, HUFFMAN_EMIT_SYMBOL, 100),
+ (6, HUFFMAN_EMIT_SYMBOL, 100),
+ (10, HUFFMAN_EMIT_SYMBOL, 100),
+ (15, HUFFMAN_EMIT_SYMBOL, 100),
+ (24, HUFFMAN_EMIT_SYMBOL, 100),
+ (31, HUFFMAN_EMIT_SYMBOL, 100),
+ (41, HUFFMAN_EMIT_SYMBOL, 100),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 100),
+ (3, HUFFMAN_EMIT_SYMBOL, 102),
+ (6, HUFFMAN_EMIT_SYMBOL, 102),
+ (10, HUFFMAN_EMIT_SYMBOL, 102),
+ (15, HUFFMAN_EMIT_SYMBOL, 102),
+ (24, HUFFMAN_EMIT_SYMBOL, 102),
+ (31, HUFFMAN_EMIT_SYMBOL, 102),
+ (41, HUFFMAN_EMIT_SYMBOL, 102),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 102),
+
+ # Node 30
+ (3, HUFFMAN_EMIT_SYMBOL, 103),
+ (6, HUFFMAN_EMIT_SYMBOL, 103),
+ (10, HUFFMAN_EMIT_SYMBOL, 103),
+ (15, HUFFMAN_EMIT_SYMBOL, 103),
+ (24, HUFFMAN_EMIT_SYMBOL, 103),
+ (31, HUFFMAN_EMIT_SYMBOL, 103),
+ (41, HUFFMAN_EMIT_SYMBOL, 103),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 103),
+ (3, HUFFMAN_EMIT_SYMBOL, 104),
+ (6, HUFFMAN_EMIT_SYMBOL, 104),
+ (10, HUFFMAN_EMIT_SYMBOL, 104),
+ (15, HUFFMAN_EMIT_SYMBOL, 104),
+ (24, HUFFMAN_EMIT_SYMBOL, 104),
+ (31, HUFFMAN_EMIT_SYMBOL, 104),
+ (41, HUFFMAN_EMIT_SYMBOL, 104),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 104),
+
+ # Node 31
+ (1, HUFFMAN_EMIT_SYMBOL, 108),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 108),
+ (1, HUFFMAN_EMIT_SYMBOL, 109),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 109),
+ (1, HUFFMAN_EMIT_SYMBOL, 110),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 110),
+ (1, HUFFMAN_EMIT_SYMBOL, 112),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 112),
+ (1, HUFFMAN_EMIT_SYMBOL, 114),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 114),
+ (1, HUFFMAN_EMIT_SYMBOL, 117),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 117),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 58),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 66),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 67),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 68),
+
+ # Node 32
+ (2, HUFFMAN_EMIT_SYMBOL, 108),
+ (9, HUFFMAN_EMIT_SYMBOL, 108),
+ (23, HUFFMAN_EMIT_SYMBOL, 108),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 108),
+ (2, HUFFMAN_EMIT_SYMBOL, 109),
+ (9, HUFFMAN_EMIT_SYMBOL, 109),
+ (23, HUFFMAN_EMIT_SYMBOL, 109),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 109),
+ (2, HUFFMAN_EMIT_SYMBOL, 110),
+ (9, HUFFMAN_EMIT_SYMBOL, 110),
+ (23, HUFFMAN_EMIT_SYMBOL, 110),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 110),
+ (2, HUFFMAN_EMIT_SYMBOL, 112),
+ (9, HUFFMAN_EMIT_SYMBOL, 112),
+ (23, HUFFMAN_EMIT_SYMBOL, 112),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 112),
+
+ # Node 33
+ (3, HUFFMAN_EMIT_SYMBOL, 108),
+ (6, HUFFMAN_EMIT_SYMBOL, 108),
+ (10, HUFFMAN_EMIT_SYMBOL, 108),
+ (15, HUFFMAN_EMIT_SYMBOL, 108),
+ (24, HUFFMAN_EMIT_SYMBOL, 108),
+ (31, HUFFMAN_EMIT_SYMBOL, 108),
+ (41, HUFFMAN_EMIT_SYMBOL, 108),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 108),
+ (3, HUFFMAN_EMIT_SYMBOL, 109),
+ (6, HUFFMAN_EMIT_SYMBOL, 109),
+ (10, HUFFMAN_EMIT_SYMBOL, 109),
+ (15, HUFFMAN_EMIT_SYMBOL, 109),
+ (24, HUFFMAN_EMIT_SYMBOL, 109),
+ (31, HUFFMAN_EMIT_SYMBOL, 109),
+ (41, HUFFMAN_EMIT_SYMBOL, 109),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 109),
+
+ # Node 34
+ (3, HUFFMAN_EMIT_SYMBOL, 110),
+ (6, HUFFMAN_EMIT_SYMBOL, 110),
+ (10, HUFFMAN_EMIT_SYMBOL, 110),
+ (15, HUFFMAN_EMIT_SYMBOL, 110),
+ (24, HUFFMAN_EMIT_SYMBOL, 110),
+ (31, HUFFMAN_EMIT_SYMBOL, 110),
+ (41, HUFFMAN_EMIT_SYMBOL, 110),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 110),
+ (3, HUFFMAN_EMIT_SYMBOL, 112),
+ (6, HUFFMAN_EMIT_SYMBOL, 112),
+ (10, HUFFMAN_EMIT_SYMBOL, 112),
+ (15, HUFFMAN_EMIT_SYMBOL, 112),
+ (24, HUFFMAN_EMIT_SYMBOL, 112),
+ (31, HUFFMAN_EMIT_SYMBOL, 112),
+ (41, HUFFMAN_EMIT_SYMBOL, 112),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 112),
+
+ # Node 35
+ (2, HUFFMAN_EMIT_SYMBOL, 114),
+ (9, HUFFMAN_EMIT_SYMBOL, 114),
+ (23, HUFFMAN_EMIT_SYMBOL, 114),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 114),
+ (2, HUFFMAN_EMIT_SYMBOL, 117),
+ (9, HUFFMAN_EMIT_SYMBOL, 117),
+ (23, HUFFMAN_EMIT_SYMBOL, 117),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 117),
+ (1, HUFFMAN_EMIT_SYMBOL, 58),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 58),
+ (1, HUFFMAN_EMIT_SYMBOL, 66),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 66),
+ (1, HUFFMAN_EMIT_SYMBOL, 67),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 67),
+ (1, HUFFMAN_EMIT_SYMBOL, 68),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 68),
+
+ # Node 36
+ (3, HUFFMAN_EMIT_SYMBOL, 114),
+ (6, HUFFMAN_EMIT_SYMBOL, 114),
+ (10, HUFFMAN_EMIT_SYMBOL, 114),
+ (15, HUFFMAN_EMIT_SYMBOL, 114),
+ (24, HUFFMAN_EMIT_SYMBOL, 114),
+ (31, HUFFMAN_EMIT_SYMBOL, 114),
+ (41, HUFFMAN_EMIT_SYMBOL, 114),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 114),
+ (3, HUFFMAN_EMIT_SYMBOL, 117),
+ (6, HUFFMAN_EMIT_SYMBOL, 117),
+ (10, HUFFMAN_EMIT_SYMBOL, 117),
+ (15, HUFFMAN_EMIT_SYMBOL, 117),
+ (24, HUFFMAN_EMIT_SYMBOL, 117),
+ (31, HUFFMAN_EMIT_SYMBOL, 117),
+ (41, HUFFMAN_EMIT_SYMBOL, 117),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 117),
+
+ # Node 37
+ (2, HUFFMAN_EMIT_SYMBOL, 58),
+ (9, HUFFMAN_EMIT_SYMBOL, 58),
+ (23, HUFFMAN_EMIT_SYMBOL, 58),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 58),
+ (2, HUFFMAN_EMIT_SYMBOL, 66),
+ (9, HUFFMAN_EMIT_SYMBOL, 66),
+ (23, HUFFMAN_EMIT_SYMBOL, 66),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 66),
+ (2, HUFFMAN_EMIT_SYMBOL, 67),
+ (9, HUFFMAN_EMIT_SYMBOL, 67),
+ (23, HUFFMAN_EMIT_SYMBOL, 67),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 67),
+ (2, HUFFMAN_EMIT_SYMBOL, 68),
+ (9, HUFFMAN_EMIT_SYMBOL, 68),
+ (23, HUFFMAN_EMIT_SYMBOL, 68),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 68),
+
+ # Node 38
+ (3, HUFFMAN_EMIT_SYMBOL, 58),
+ (6, HUFFMAN_EMIT_SYMBOL, 58),
+ (10, HUFFMAN_EMIT_SYMBOL, 58),
+ (15, HUFFMAN_EMIT_SYMBOL, 58),
+ (24, HUFFMAN_EMIT_SYMBOL, 58),
+ (31, HUFFMAN_EMIT_SYMBOL, 58),
+ (41, HUFFMAN_EMIT_SYMBOL, 58),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 58),
+ (3, HUFFMAN_EMIT_SYMBOL, 66),
+ (6, HUFFMAN_EMIT_SYMBOL, 66),
+ (10, HUFFMAN_EMIT_SYMBOL, 66),
+ (15, HUFFMAN_EMIT_SYMBOL, 66),
+ (24, HUFFMAN_EMIT_SYMBOL, 66),
+ (31, HUFFMAN_EMIT_SYMBOL, 66),
+ (41, HUFFMAN_EMIT_SYMBOL, 66),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 66),
+
+ # Node 39
+ (3, HUFFMAN_EMIT_SYMBOL, 67),
+ (6, HUFFMAN_EMIT_SYMBOL, 67),
+ (10, HUFFMAN_EMIT_SYMBOL, 67),
+ (15, HUFFMAN_EMIT_SYMBOL, 67),
+ (24, HUFFMAN_EMIT_SYMBOL, 67),
+ (31, HUFFMAN_EMIT_SYMBOL, 67),
+ (41, HUFFMAN_EMIT_SYMBOL, 67),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 67),
+ (3, HUFFMAN_EMIT_SYMBOL, 68),
+ (6, HUFFMAN_EMIT_SYMBOL, 68),
+ (10, HUFFMAN_EMIT_SYMBOL, 68),
+ (15, HUFFMAN_EMIT_SYMBOL, 68),
+ (24, HUFFMAN_EMIT_SYMBOL, 68),
+ (31, HUFFMAN_EMIT_SYMBOL, 68),
+ (41, HUFFMAN_EMIT_SYMBOL, 68),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 68),
+
+ # Node 40
+ (44, 0, 0),
+ (45, 0, 0),
+ (47, 0, 0),
+ (48, 0, 0),
+ (51, 0, 0),
+ (52, 0, 0),
+ (54, 0, 0),
+ (55, 0, 0),
+ (59, 0, 0),
+ (60, 0, 0),
+ (62, 0, 0),
+ (63, 0, 0),
+ (66, 0, 0),
+ (67, 0, 0),
+ (69, 0, 0),
+ (72, HUFFMAN_COMPLETE, 0),
+
+ # Node 41
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 69),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 70),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 71),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 72),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 73),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 74),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 75),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 76),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 77),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 78),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 79),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 80),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 81),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 82),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 83),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 84),
+
+ # Node 42
+ (1, HUFFMAN_EMIT_SYMBOL, 69),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 69),
+ (1, HUFFMAN_EMIT_SYMBOL, 70),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 70),
+ (1, HUFFMAN_EMIT_SYMBOL, 71),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 71),
+ (1, HUFFMAN_EMIT_SYMBOL, 72),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 72),
+ (1, HUFFMAN_EMIT_SYMBOL, 73),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 73),
+ (1, HUFFMAN_EMIT_SYMBOL, 74),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 74),
+ (1, HUFFMAN_EMIT_SYMBOL, 75),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 75),
+ (1, HUFFMAN_EMIT_SYMBOL, 76),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 76),
+
+ # Node 43
+ (2, HUFFMAN_EMIT_SYMBOL, 69),
+ (9, HUFFMAN_EMIT_SYMBOL, 69),
+ (23, HUFFMAN_EMIT_SYMBOL, 69),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 69),
+ (2, HUFFMAN_EMIT_SYMBOL, 70),
+ (9, HUFFMAN_EMIT_SYMBOL, 70),
+ (23, HUFFMAN_EMIT_SYMBOL, 70),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 70),
+ (2, HUFFMAN_EMIT_SYMBOL, 71),
+ (9, HUFFMAN_EMIT_SYMBOL, 71),
+ (23, HUFFMAN_EMIT_SYMBOL, 71),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 71),
+ (2, HUFFMAN_EMIT_SYMBOL, 72),
+ (9, HUFFMAN_EMIT_SYMBOL, 72),
+ (23, HUFFMAN_EMIT_SYMBOL, 72),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 72),
+
+ # Node 44
+ (3, HUFFMAN_EMIT_SYMBOL, 69),
+ (6, HUFFMAN_EMIT_SYMBOL, 69),
+ (10, HUFFMAN_EMIT_SYMBOL, 69),
+ (15, HUFFMAN_EMIT_SYMBOL, 69),
+ (24, HUFFMAN_EMIT_SYMBOL, 69),
+ (31, HUFFMAN_EMIT_SYMBOL, 69),
+ (41, HUFFMAN_EMIT_SYMBOL, 69),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 69),
+ (3, HUFFMAN_EMIT_SYMBOL, 70),
+ (6, HUFFMAN_EMIT_SYMBOL, 70),
+ (10, HUFFMAN_EMIT_SYMBOL, 70),
+ (15, HUFFMAN_EMIT_SYMBOL, 70),
+ (24, HUFFMAN_EMIT_SYMBOL, 70),
+ (31, HUFFMAN_EMIT_SYMBOL, 70),
+ (41, HUFFMAN_EMIT_SYMBOL, 70),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 70),
+
+ # Node 45
+ (3, HUFFMAN_EMIT_SYMBOL, 71),
+ (6, HUFFMAN_EMIT_SYMBOL, 71),
+ (10, HUFFMAN_EMIT_SYMBOL, 71),
+ (15, HUFFMAN_EMIT_SYMBOL, 71),
+ (24, HUFFMAN_EMIT_SYMBOL, 71),
+ (31, HUFFMAN_EMIT_SYMBOL, 71),
+ (41, HUFFMAN_EMIT_SYMBOL, 71),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 71),
+ (3, HUFFMAN_EMIT_SYMBOL, 72),
+ (6, HUFFMAN_EMIT_SYMBOL, 72),
+ (10, HUFFMAN_EMIT_SYMBOL, 72),
+ (15, HUFFMAN_EMIT_SYMBOL, 72),
+ (24, HUFFMAN_EMIT_SYMBOL, 72),
+ (31, HUFFMAN_EMIT_SYMBOL, 72),
+ (41, HUFFMAN_EMIT_SYMBOL, 72),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 72),
+
+ # Node 46
+ (2, HUFFMAN_EMIT_SYMBOL, 73),
+ (9, HUFFMAN_EMIT_SYMBOL, 73),
+ (23, HUFFMAN_EMIT_SYMBOL, 73),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 73),
+ (2, HUFFMAN_EMIT_SYMBOL, 74),
+ (9, HUFFMAN_EMIT_SYMBOL, 74),
+ (23, HUFFMAN_EMIT_SYMBOL, 74),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 74),
+ (2, HUFFMAN_EMIT_SYMBOL, 75),
+ (9, HUFFMAN_EMIT_SYMBOL, 75),
+ (23, HUFFMAN_EMIT_SYMBOL, 75),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 75),
+ (2, HUFFMAN_EMIT_SYMBOL, 76),
+ (9, HUFFMAN_EMIT_SYMBOL, 76),
+ (23, HUFFMAN_EMIT_SYMBOL, 76),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 76),
+
+ # Node 47
+ (3, HUFFMAN_EMIT_SYMBOL, 73),
+ (6, HUFFMAN_EMIT_SYMBOL, 73),
+ (10, HUFFMAN_EMIT_SYMBOL, 73),
+ (15, HUFFMAN_EMIT_SYMBOL, 73),
+ (24, HUFFMAN_EMIT_SYMBOL, 73),
+ (31, HUFFMAN_EMIT_SYMBOL, 73),
+ (41, HUFFMAN_EMIT_SYMBOL, 73),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 73),
+ (3, HUFFMAN_EMIT_SYMBOL, 74),
+ (6, HUFFMAN_EMIT_SYMBOL, 74),
+ (10, HUFFMAN_EMIT_SYMBOL, 74),
+ (15, HUFFMAN_EMIT_SYMBOL, 74),
+ (24, HUFFMAN_EMIT_SYMBOL, 74),
+ (31, HUFFMAN_EMIT_SYMBOL, 74),
+ (41, HUFFMAN_EMIT_SYMBOL, 74),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 74),
+
+ # Node 48
+ (3, HUFFMAN_EMIT_SYMBOL, 75),
+ (6, HUFFMAN_EMIT_SYMBOL, 75),
+ (10, HUFFMAN_EMIT_SYMBOL, 75),
+ (15, HUFFMAN_EMIT_SYMBOL, 75),
+ (24, HUFFMAN_EMIT_SYMBOL, 75),
+ (31, HUFFMAN_EMIT_SYMBOL, 75),
+ (41, HUFFMAN_EMIT_SYMBOL, 75),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 75),
+ (3, HUFFMAN_EMIT_SYMBOL, 76),
+ (6, HUFFMAN_EMIT_SYMBOL, 76),
+ (10, HUFFMAN_EMIT_SYMBOL, 76),
+ (15, HUFFMAN_EMIT_SYMBOL, 76),
+ (24, HUFFMAN_EMIT_SYMBOL, 76),
+ (31, HUFFMAN_EMIT_SYMBOL, 76),
+ (41, HUFFMAN_EMIT_SYMBOL, 76),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 76),
+
+ # Node 49
+ (1, HUFFMAN_EMIT_SYMBOL, 77),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 77),
+ (1, HUFFMAN_EMIT_SYMBOL, 78),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 78),
+ (1, HUFFMAN_EMIT_SYMBOL, 79),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 79),
+ (1, HUFFMAN_EMIT_SYMBOL, 80),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 80),
+ (1, HUFFMAN_EMIT_SYMBOL, 81),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 81),
+ (1, HUFFMAN_EMIT_SYMBOL, 82),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 82),
+ (1, HUFFMAN_EMIT_SYMBOL, 83),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 83),
+ (1, HUFFMAN_EMIT_SYMBOL, 84),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 84),
+
+ # Node 50
+ (2, HUFFMAN_EMIT_SYMBOL, 77),
+ (9, HUFFMAN_EMIT_SYMBOL, 77),
+ (23, HUFFMAN_EMIT_SYMBOL, 77),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 77),
+ (2, HUFFMAN_EMIT_SYMBOL, 78),
+ (9, HUFFMAN_EMIT_SYMBOL, 78),
+ (23, HUFFMAN_EMIT_SYMBOL, 78),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 78),
+ (2, HUFFMAN_EMIT_SYMBOL, 79),
+ (9, HUFFMAN_EMIT_SYMBOL, 79),
+ (23, HUFFMAN_EMIT_SYMBOL, 79),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 79),
+ (2, HUFFMAN_EMIT_SYMBOL, 80),
+ (9, HUFFMAN_EMIT_SYMBOL, 80),
+ (23, HUFFMAN_EMIT_SYMBOL, 80),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 80),
+
+ # Node 51
+ (3, HUFFMAN_EMIT_SYMBOL, 77),
+ (6, HUFFMAN_EMIT_SYMBOL, 77),
+ (10, HUFFMAN_EMIT_SYMBOL, 77),
+ (15, HUFFMAN_EMIT_SYMBOL, 77),
+ (24, HUFFMAN_EMIT_SYMBOL, 77),
+ (31, HUFFMAN_EMIT_SYMBOL, 77),
+ (41, HUFFMAN_EMIT_SYMBOL, 77),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 77),
+ (3, HUFFMAN_EMIT_SYMBOL, 78),
+ (6, HUFFMAN_EMIT_SYMBOL, 78),
+ (10, HUFFMAN_EMIT_SYMBOL, 78),
+ (15, HUFFMAN_EMIT_SYMBOL, 78),
+ (24, HUFFMAN_EMIT_SYMBOL, 78),
+ (31, HUFFMAN_EMIT_SYMBOL, 78),
+ (41, HUFFMAN_EMIT_SYMBOL, 78),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 78),
+
+ # Node 52
+ (3, HUFFMAN_EMIT_SYMBOL, 79),
+ (6, HUFFMAN_EMIT_SYMBOL, 79),
+ (10, HUFFMAN_EMIT_SYMBOL, 79),
+ (15, HUFFMAN_EMIT_SYMBOL, 79),
+ (24, HUFFMAN_EMIT_SYMBOL, 79),
+ (31, HUFFMAN_EMIT_SYMBOL, 79),
+ (41, HUFFMAN_EMIT_SYMBOL, 79),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 79),
+ (3, HUFFMAN_EMIT_SYMBOL, 80),
+ (6, HUFFMAN_EMIT_SYMBOL, 80),
+ (10, HUFFMAN_EMIT_SYMBOL, 80),
+ (15, HUFFMAN_EMIT_SYMBOL, 80),
+ (24, HUFFMAN_EMIT_SYMBOL, 80),
+ (31, HUFFMAN_EMIT_SYMBOL, 80),
+ (41, HUFFMAN_EMIT_SYMBOL, 80),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 80),
+
+ # Node 53
+ (2, HUFFMAN_EMIT_SYMBOL, 81),
+ (9, HUFFMAN_EMIT_SYMBOL, 81),
+ (23, HUFFMAN_EMIT_SYMBOL, 81),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 81),
+ (2, HUFFMAN_EMIT_SYMBOL, 82),
+ (9, HUFFMAN_EMIT_SYMBOL, 82),
+ (23, HUFFMAN_EMIT_SYMBOL, 82),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 82),
+ (2, HUFFMAN_EMIT_SYMBOL, 83),
+ (9, HUFFMAN_EMIT_SYMBOL, 83),
+ (23, HUFFMAN_EMIT_SYMBOL, 83),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 83),
+ (2, HUFFMAN_EMIT_SYMBOL, 84),
+ (9, HUFFMAN_EMIT_SYMBOL, 84),
+ (23, HUFFMAN_EMIT_SYMBOL, 84),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 84),
+
+ # Node 54
+ (3, HUFFMAN_EMIT_SYMBOL, 81),
+ (6, HUFFMAN_EMIT_SYMBOL, 81),
+ (10, HUFFMAN_EMIT_SYMBOL, 81),
+ (15, HUFFMAN_EMIT_SYMBOL, 81),
+ (24, HUFFMAN_EMIT_SYMBOL, 81),
+ (31, HUFFMAN_EMIT_SYMBOL, 81),
+ (41, HUFFMAN_EMIT_SYMBOL, 81),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 81),
+ (3, HUFFMAN_EMIT_SYMBOL, 82),
+ (6, HUFFMAN_EMIT_SYMBOL, 82),
+ (10, HUFFMAN_EMIT_SYMBOL, 82),
+ (15, HUFFMAN_EMIT_SYMBOL, 82),
+ (24, HUFFMAN_EMIT_SYMBOL, 82),
+ (31, HUFFMAN_EMIT_SYMBOL, 82),
+ (41, HUFFMAN_EMIT_SYMBOL, 82),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 82),
+
+ # Node 55
+ (3, HUFFMAN_EMIT_SYMBOL, 83),
+ (6, HUFFMAN_EMIT_SYMBOL, 83),
+ (10, HUFFMAN_EMIT_SYMBOL, 83),
+ (15, HUFFMAN_EMIT_SYMBOL, 83),
+ (24, HUFFMAN_EMIT_SYMBOL, 83),
+ (31, HUFFMAN_EMIT_SYMBOL, 83),
+ (41, HUFFMAN_EMIT_SYMBOL, 83),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 83),
+ (3, HUFFMAN_EMIT_SYMBOL, 84),
+ (6, HUFFMAN_EMIT_SYMBOL, 84),
+ (10, HUFFMAN_EMIT_SYMBOL, 84),
+ (15, HUFFMAN_EMIT_SYMBOL, 84),
+ (24, HUFFMAN_EMIT_SYMBOL, 84),
+ (31, HUFFMAN_EMIT_SYMBOL, 84),
+ (41, HUFFMAN_EMIT_SYMBOL, 84),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 84),
+
+ # Node 56
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 85),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 86),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 87),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 89),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 106),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 107),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 113),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 118),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 119),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 120),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 121),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 122),
+ (70, 0, 0),
+ (71, 0, 0),
+ (73, 0, 0),
+ (74, HUFFMAN_COMPLETE, 0),
+
+ # Node 57
+ (1, HUFFMAN_EMIT_SYMBOL, 85),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 85),
+ (1, HUFFMAN_EMIT_SYMBOL, 86),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 86),
+ (1, HUFFMAN_EMIT_SYMBOL, 87),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 87),
+ (1, HUFFMAN_EMIT_SYMBOL, 89),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 89),
+ (1, HUFFMAN_EMIT_SYMBOL, 106),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 106),
+ (1, HUFFMAN_EMIT_SYMBOL, 107),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 107),
+ (1, HUFFMAN_EMIT_SYMBOL, 113),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 113),
+ (1, HUFFMAN_EMIT_SYMBOL, 118),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 118),
+
+ # Node 58
+ (2, HUFFMAN_EMIT_SYMBOL, 85),
+ (9, HUFFMAN_EMIT_SYMBOL, 85),
+ (23, HUFFMAN_EMIT_SYMBOL, 85),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 85),
+ (2, HUFFMAN_EMIT_SYMBOL, 86),
+ (9, HUFFMAN_EMIT_SYMBOL, 86),
+ (23, HUFFMAN_EMIT_SYMBOL, 86),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 86),
+ (2, HUFFMAN_EMIT_SYMBOL, 87),
+ (9, HUFFMAN_EMIT_SYMBOL, 87),
+ (23, HUFFMAN_EMIT_SYMBOL, 87),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 87),
+ (2, HUFFMAN_EMIT_SYMBOL, 89),
+ (9, HUFFMAN_EMIT_SYMBOL, 89),
+ (23, HUFFMAN_EMIT_SYMBOL, 89),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 89),
+
+ # Node 59
+ (3, HUFFMAN_EMIT_SYMBOL, 85),
+ (6, HUFFMAN_EMIT_SYMBOL, 85),
+ (10, HUFFMAN_EMIT_SYMBOL, 85),
+ (15, HUFFMAN_EMIT_SYMBOL, 85),
+ (24, HUFFMAN_EMIT_SYMBOL, 85),
+ (31, HUFFMAN_EMIT_SYMBOL, 85),
+ (41, HUFFMAN_EMIT_SYMBOL, 85),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 85),
+ (3, HUFFMAN_EMIT_SYMBOL, 86),
+ (6, HUFFMAN_EMIT_SYMBOL, 86),
+ (10, HUFFMAN_EMIT_SYMBOL, 86),
+ (15, HUFFMAN_EMIT_SYMBOL, 86),
+ (24, HUFFMAN_EMIT_SYMBOL, 86),
+ (31, HUFFMAN_EMIT_SYMBOL, 86),
+ (41, HUFFMAN_EMIT_SYMBOL, 86),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 86),
+
+ # Node 60
+ (3, HUFFMAN_EMIT_SYMBOL, 87),
+ (6, HUFFMAN_EMIT_SYMBOL, 87),
+ (10, HUFFMAN_EMIT_SYMBOL, 87),
+ (15, HUFFMAN_EMIT_SYMBOL, 87),
+ (24, HUFFMAN_EMIT_SYMBOL, 87),
+ (31, HUFFMAN_EMIT_SYMBOL, 87),
+ (41, HUFFMAN_EMIT_SYMBOL, 87),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 87),
+ (3, HUFFMAN_EMIT_SYMBOL, 89),
+ (6, HUFFMAN_EMIT_SYMBOL, 89),
+ (10, HUFFMAN_EMIT_SYMBOL, 89),
+ (15, HUFFMAN_EMIT_SYMBOL, 89),
+ (24, HUFFMAN_EMIT_SYMBOL, 89),
+ (31, HUFFMAN_EMIT_SYMBOL, 89),
+ (41, HUFFMAN_EMIT_SYMBOL, 89),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 89),
+
+ # Node 61
+ (2, HUFFMAN_EMIT_SYMBOL, 106),
+ (9, HUFFMAN_EMIT_SYMBOL, 106),
+ (23, HUFFMAN_EMIT_SYMBOL, 106),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 106),
+ (2, HUFFMAN_EMIT_SYMBOL, 107),
+ (9, HUFFMAN_EMIT_SYMBOL, 107),
+ (23, HUFFMAN_EMIT_SYMBOL, 107),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 107),
+ (2, HUFFMAN_EMIT_SYMBOL, 113),
+ (9, HUFFMAN_EMIT_SYMBOL, 113),
+ (23, HUFFMAN_EMIT_SYMBOL, 113),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 113),
+ (2, HUFFMAN_EMIT_SYMBOL, 118),
+ (9, HUFFMAN_EMIT_SYMBOL, 118),
+ (23, HUFFMAN_EMIT_SYMBOL, 118),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 118),
+
+ # Node 62
+ (3, HUFFMAN_EMIT_SYMBOL, 106),
+ (6, HUFFMAN_EMIT_SYMBOL, 106),
+ (10, HUFFMAN_EMIT_SYMBOL, 106),
+ (15, HUFFMAN_EMIT_SYMBOL, 106),
+ (24, HUFFMAN_EMIT_SYMBOL, 106),
+ (31, HUFFMAN_EMIT_SYMBOL, 106),
+ (41, HUFFMAN_EMIT_SYMBOL, 106),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 106),
+ (3, HUFFMAN_EMIT_SYMBOL, 107),
+ (6, HUFFMAN_EMIT_SYMBOL, 107),
+ (10, HUFFMAN_EMIT_SYMBOL, 107),
+ (15, HUFFMAN_EMIT_SYMBOL, 107),
+ (24, HUFFMAN_EMIT_SYMBOL, 107),
+ (31, HUFFMAN_EMIT_SYMBOL, 107),
+ (41, HUFFMAN_EMIT_SYMBOL, 107),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 107),
+
+ # Node 63
+ (3, HUFFMAN_EMIT_SYMBOL, 113),
+ (6, HUFFMAN_EMIT_SYMBOL, 113),
+ (10, HUFFMAN_EMIT_SYMBOL, 113),
+ (15, HUFFMAN_EMIT_SYMBOL, 113),
+ (24, HUFFMAN_EMIT_SYMBOL, 113),
+ (31, HUFFMAN_EMIT_SYMBOL, 113),
+ (41, HUFFMAN_EMIT_SYMBOL, 113),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 113),
+ (3, HUFFMAN_EMIT_SYMBOL, 118),
+ (6, HUFFMAN_EMIT_SYMBOL, 118),
+ (10, HUFFMAN_EMIT_SYMBOL, 118),
+ (15, HUFFMAN_EMIT_SYMBOL, 118),
+ (24, HUFFMAN_EMIT_SYMBOL, 118),
+ (31, HUFFMAN_EMIT_SYMBOL, 118),
+ (41, HUFFMAN_EMIT_SYMBOL, 118),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 118),
+
+ # Node 64
+ (1, HUFFMAN_EMIT_SYMBOL, 119),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 119),
+ (1, HUFFMAN_EMIT_SYMBOL, 120),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 120),
+ (1, HUFFMAN_EMIT_SYMBOL, 121),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 121),
+ (1, HUFFMAN_EMIT_SYMBOL, 122),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 122),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 38),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 42),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 44),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 59),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 88),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 90),
+ (75, 0, 0),
+ (78, 0, 0),
+
+ # Node 65
+ (2, HUFFMAN_EMIT_SYMBOL, 119),
+ (9, HUFFMAN_EMIT_SYMBOL, 119),
+ (23, HUFFMAN_EMIT_SYMBOL, 119),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 119),
+ (2, HUFFMAN_EMIT_SYMBOL, 120),
+ (9, HUFFMAN_EMIT_SYMBOL, 120),
+ (23, HUFFMAN_EMIT_SYMBOL, 120),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 120),
+ (2, HUFFMAN_EMIT_SYMBOL, 121),
+ (9, HUFFMAN_EMIT_SYMBOL, 121),
+ (23, HUFFMAN_EMIT_SYMBOL, 121),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 121),
+ (2, HUFFMAN_EMIT_SYMBOL, 122),
+ (9, HUFFMAN_EMIT_SYMBOL, 122),
+ (23, HUFFMAN_EMIT_SYMBOL, 122),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 122),
+
+ # Node 66
+ (3, HUFFMAN_EMIT_SYMBOL, 119),
+ (6, HUFFMAN_EMIT_SYMBOL, 119),
+ (10, HUFFMAN_EMIT_SYMBOL, 119),
+ (15, HUFFMAN_EMIT_SYMBOL, 119),
+ (24, HUFFMAN_EMIT_SYMBOL, 119),
+ (31, HUFFMAN_EMIT_SYMBOL, 119),
+ (41, HUFFMAN_EMIT_SYMBOL, 119),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 119),
+ (3, HUFFMAN_EMIT_SYMBOL, 120),
+ (6, HUFFMAN_EMIT_SYMBOL, 120),
+ (10, HUFFMAN_EMIT_SYMBOL, 120),
+ (15, HUFFMAN_EMIT_SYMBOL, 120),
+ (24, HUFFMAN_EMIT_SYMBOL, 120),
+ (31, HUFFMAN_EMIT_SYMBOL, 120),
+ (41, HUFFMAN_EMIT_SYMBOL, 120),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 120),
+
+ # Node 67
+ (3, HUFFMAN_EMIT_SYMBOL, 121),
+ (6, HUFFMAN_EMIT_SYMBOL, 121),
+ (10, HUFFMAN_EMIT_SYMBOL, 121),
+ (15, HUFFMAN_EMIT_SYMBOL, 121),
+ (24, HUFFMAN_EMIT_SYMBOL, 121),
+ (31, HUFFMAN_EMIT_SYMBOL, 121),
+ (41, HUFFMAN_EMIT_SYMBOL, 121),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 121),
+ (3, HUFFMAN_EMIT_SYMBOL, 122),
+ (6, HUFFMAN_EMIT_SYMBOL, 122),
+ (10, HUFFMAN_EMIT_SYMBOL, 122),
+ (15, HUFFMAN_EMIT_SYMBOL, 122),
+ (24, HUFFMAN_EMIT_SYMBOL, 122),
+ (31, HUFFMAN_EMIT_SYMBOL, 122),
+ (41, HUFFMAN_EMIT_SYMBOL, 122),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 122),
+
+ # Node 68
+ (1, HUFFMAN_EMIT_SYMBOL, 38),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 38),
+ (1, HUFFMAN_EMIT_SYMBOL, 42),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 42),
+ (1, HUFFMAN_EMIT_SYMBOL, 44),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 44),
+ (1, HUFFMAN_EMIT_SYMBOL, 59),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 59),
+ (1, HUFFMAN_EMIT_SYMBOL, 88),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 88),
+ (1, HUFFMAN_EMIT_SYMBOL, 90),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 90),
+ (76, 0, 0),
+ (77, 0, 0),
+ (79, 0, 0),
+ (81, 0, 0),
+
+ # Node 69
+ (2, HUFFMAN_EMIT_SYMBOL, 38),
+ (9, HUFFMAN_EMIT_SYMBOL, 38),
+ (23, HUFFMAN_EMIT_SYMBOL, 38),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 38),
+ (2, HUFFMAN_EMIT_SYMBOL, 42),
+ (9, HUFFMAN_EMIT_SYMBOL, 42),
+ (23, HUFFMAN_EMIT_SYMBOL, 42),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 42),
+ (2, HUFFMAN_EMIT_SYMBOL, 44),
+ (9, HUFFMAN_EMIT_SYMBOL, 44),
+ (23, HUFFMAN_EMIT_SYMBOL, 44),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 44),
+ (2, HUFFMAN_EMIT_SYMBOL, 59),
+ (9, HUFFMAN_EMIT_SYMBOL, 59),
+ (23, HUFFMAN_EMIT_SYMBOL, 59),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 59),
+
+ # Node 70
+ (3, HUFFMAN_EMIT_SYMBOL, 38),
+ (6, HUFFMAN_EMIT_SYMBOL, 38),
+ (10, HUFFMAN_EMIT_SYMBOL, 38),
+ (15, HUFFMAN_EMIT_SYMBOL, 38),
+ (24, HUFFMAN_EMIT_SYMBOL, 38),
+ (31, HUFFMAN_EMIT_SYMBOL, 38),
+ (41, HUFFMAN_EMIT_SYMBOL, 38),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 38),
+ (3, HUFFMAN_EMIT_SYMBOL, 42),
+ (6, HUFFMAN_EMIT_SYMBOL, 42),
+ (10, HUFFMAN_EMIT_SYMBOL, 42),
+ (15, HUFFMAN_EMIT_SYMBOL, 42),
+ (24, HUFFMAN_EMIT_SYMBOL, 42),
+ (31, HUFFMAN_EMIT_SYMBOL, 42),
+ (41, HUFFMAN_EMIT_SYMBOL, 42),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 42),
+
+ # Node 71
+ (3, HUFFMAN_EMIT_SYMBOL, 44),
+ (6, HUFFMAN_EMIT_SYMBOL, 44),
+ (10, HUFFMAN_EMIT_SYMBOL, 44),
+ (15, HUFFMAN_EMIT_SYMBOL, 44),
+ (24, HUFFMAN_EMIT_SYMBOL, 44),
+ (31, HUFFMAN_EMIT_SYMBOL, 44),
+ (41, HUFFMAN_EMIT_SYMBOL, 44),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 44),
+ (3, HUFFMAN_EMIT_SYMBOL, 59),
+ (6, HUFFMAN_EMIT_SYMBOL, 59),
+ (10, HUFFMAN_EMIT_SYMBOL, 59),
+ (15, HUFFMAN_EMIT_SYMBOL, 59),
+ (24, HUFFMAN_EMIT_SYMBOL, 59),
+ (31, HUFFMAN_EMIT_SYMBOL, 59),
+ (41, HUFFMAN_EMIT_SYMBOL, 59),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 59),
+
+ # Node 72
+ (2, HUFFMAN_EMIT_SYMBOL, 88),
+ (9, HUFFMAN_EMIT_SYMBOL, 88),
+ (23, HUFFMAN_EMIT_SYMBOL, 88),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 88),
+ (2, HUFFMAN_EMIT_SYMBOL, 90),
+ (9, HUFFMAN_EMIT_SYMBOL, 90),
+ (23, HUFFMAN_EMIT_SYMBOL, 90),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 90),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 33),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 34),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 40),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 41),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 63),
+ (80, 0, 0),
+ (82, 0, 0),
+ (84, 0, 0),
+
+ # Node 73
+ (3, HUFFMAN_EMIT_SYMBOL, 88),
+ (6, HUFFMAN_EMIT_SYMBOL, 88),
+ (10, HUFFMAN_EMIT_SYMBOL, 88),
+ (15, HUFFMAN_EMIT_SYMBOL, 88),
+ (24, HUFFMAN_EMIT_SYMBOL, 88),
+ (31, HUFFMAN_EMIT_SYMBOL, 88),
+ (41, HUFFMAN_EMIT_SYMBOL, 88),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 88),
+ (3, HUFFMAN_EMIT_SYMBOL, 90),
+ (6, HUFFMAN_EMIT_SYMBOL, 90),
+ (10, HUFFMAN_EMIT_SYMBOL, 90),
+ (15, HUFFMAN_EMIT_SYMBOL, 90),
+ (24, HUFFMAN_EMIT_SYMBOL, 90),
+ (31, HUFFMAN_EMIT_SYMBOL, 90),
+ (41, HUFFMAN_EMIT_SYMBOL, 90),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 90),
+
+ # Node 74
+ (1, HUFFMAN_EMIT_SYMBOL, 33),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 33),
+ (1, HUFFMAN_EMIT_SYMBOL, 34),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 34),
+ (1, HUFFMAN_EMIT_SYMBOL, 40),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 40),
+ (1, HUFFMAN_EMIT_SYMBOL, 41),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 41),
+ (1, HUFFMAN_EMIT_SYMBOL, 63),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 63),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 39),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 43),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 124),
+ (83, 0, 0),
+ (85, 0, 0),
+ (88, 0, 0),
+
+ # Node 75
+ (2, HUFFMAN_EMIT_SYMBOL, 33),
+ (9, HUFFMAN_EMIT_SYMBOL, 33),
+ (23, HUFFMAN_EMIT_SYMBOL, 33),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 33),
+ (2, HUFFMAN_EMIT_SYMBOL, 34),
+ (9, HUFFMAN_EMIT_SYMBOL, 34),
+ (23, HUFFMAN_EMIT_SYMBOL, 34),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 34),
+ (2, HUFFMAN_EMIT_SYMBOL, 40),
+ (9, HUFFMAN_EMIT_SYMBOL, 40),
+ (23, HUFFMAN_EMIT_SYMBOL, 40),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 40),
+ (2, HUFFMAN_EMIT_SYMBOL, 41),
+ (9, HUFFMAN_EMIT_SYMBOL, 41),
+ (23, HUFFMAN_EMIT_SYMBOL, 41),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 41),
+
+ # Node 76
+ (3, HUFFMAN_EMIT_SYMBOL, 33),
+ (6, HUFFMAN_EMIT_SYMBOL, 33),
+ (10, HUFFMAN_EMIT_SYMBOL, 33),
+ (15, HUFFMAN_EMIT_SYMBOL, 33),
+ (24, HUFFMAN_EMIT_SYMBOL, 33),
+ (31, HUFFMAN_EMIT_SYMBOL, 33),
+ (41, HUFFMAN_EMIT_SYMBOL, 33),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 33),
+ (3, HUFFMAN_EMIT_SYMBOL, 34),
+ (6, HUFFMAN_EMIT_SYMBOL, 34),
+ (10, HUFFMAN_EMIT_SYMBOL, 34),
+ (15, HUFFMAN_EMIT_SYMBOL, 34),
+ (24, HUFFMAN_EMIT_SYMBOL, 34),
+ (31, HUFFMAN_EMIT_SYMBOL, 34),
+ (41, HUFFMAN_EMIT_SYMBOL, 34),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 34),
+
+ # Node 77
+ (3, HUFFMAN_EMIT_SYMBOL, 40),
+ (6, HUFFMAN_EMIT_SYMBOL, 40),
+ (10, HUFFMAN_EMIT_SYMBOL, 40),
+ (15, HUFFMAN_EMIT_SYMBOL, 40),
+ (24, HUFFMAN_EMIT_SYMBOL, 40),
+ (31, HUFFMAN_EMIT_SYMBOL, 40),
+ (41, HUFFMAN_EMIT_SYMBOL, 40),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 40),
+ (3, HUFFMAN_EMIT_SYMBOL, 41),
+ (6, HUFFMAN_EMIT_SYMBOL, 41),
+ (10, HUFFMAN_EMIT_SYMBOL, 41),
+ (15, HUFFMAN_EMIT_SYMBOL, 41),
+ (24, HUFFMAN_EMIT_SYMBOL, 41),
+ (31, HUFFMAN_EMIT_SYMBOL, 41),
+ (41, HUFFMAN_EMIT_SYMBOL, 41),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 41),
+
+ # Node 78
+ (2, HUFFMAN_EMIT_SYMBOL, 63),
+ (9, HUFFMAN_EMIT_SYMBOL, 63),
+ (23, HUFFMAN_EMIT_SYMBOL, 63),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 63),
+ (1, HUFFMAN_EMIT_SYMBOL, 39),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 39),
+ (1, HUFFMAN_EMIT_SYMBOL, 43),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 43),
+ (1, HUFFMAN_EMIT_SYMBOL, 124),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 124),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 35),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 62),
+ (86, 0, 0),
+ (87, 0, 0),
+ (89, 0, 0),
+ (90, 0, 0),
+
+ # Node 79
+ (3, HUFFMAN_EMIT_SYMBOL, 63),
+ (6, HUFFMAN_EMIT_SYMBOL, 63),
+ (10, HUFFMAN_EMIT_SYMBOL, 63),
+ (15, HUFFMAN_EMIT_SYMBOL, 63),
+ (24, HUFFMAN_EMIT_SYMBOL, 63),
+ (31, HUFFMAN_EMIT_SYMBOL, 63),
+ (41, HUFFMAN_EMIT_SYMBOL, 63),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 63),
+ (2, HUFFMAN_EMIT_SYMBOL, 39),
+ (9, HUFFMAN_EMIT_SYMBOL, 39),
+ (23, HUFFMAN_EMIT_SYMBOL, 39),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 39),
+ (2, HUFFMAN_EMIT_SYMBOL, 43),
+ (9, HUFFMAN_EMIT_SYMBOL, 43),
+ (23, HUFFMAN_EMIT_SYMBOL, 43),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 43),
+
+ # Node 80
+ (3, HUFFMAN_EMIT_SYMBOL, 39),
+ (6, HUFFMAN_EMIT_SYMBOL, 39),
+ (10, HUFFMAN_EMIT_SYMBOL, 39),
+ (15, HUFFMAN_EMIT_SYMBOL, 39),
+ (24, HUFFMAN_EMIT_SYMBOL, 39),
+ (31, HUFFMAN_EMIT_SYMBOL, 39),
+ (41, HUFFMAN_EMIT_SYMBOL, 39),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 39),
+ (3, HUFFMAN_EMIT_SYMBOL, 43),
+ (6, HUFFMAN_EMIT_SYMBOL, 43),
+ (10, HUFFMAN_EMIT_SYMBOL, 43),
+ (15, HUFFMAN_EMIT_SYMBOL, 43),
+ (24, HUFFMAN_EMIT_SYMBOL, 43),
+ (31, HUFFMAN_EMIT_SYMBOL, 43),
+ (41, HUFFMAN_EMIT_SYMBOL, 43),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 43),
+
+ # Node 81
+ (2, HUFFMAN_EMIT_SYMBOL, 124),
+ (9, HUFFMAN_EMIT_SYMBOL, 124),
+ (23, HUFFMAN_EMIT_SYMBOL, 124),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 124),
+ (1, HUFFMAN_EMIT_SYMBOL, 35),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 35),
+ (1, HUFFMAN_EMIT_SYMBOL, 62),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 62),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 0),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 36),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 64),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 91),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 93),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 126),
+ (91, 0, 0),
+ (92, 0, 0),
+
+ # Node 82
+ (3, HUFFMAN_EMIT_SYMBOL, 124),
+ (6, HUFFMAN_EMIT_SYMBOL, 124),
+ (10, HUFFMAN_EMIT_SYMBOL, 124),
+ (15, HUFFMAN_EMIT_SYMBOL, 124),
+ (24, HUFFMAN_EMIT_SYMBOL, 124),
+ (31, HUFFMAN_EMIT_SYMBOL, 124),
+ (41, HUFFMAN_EMIT_SYMBOL, 124),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 124),
+ (2, HUFFMAN_EMIT_SYMBOL, 35),
+ (9, HUFFMAN_EMIT_SYMBOL, 35),
+ (23, HUFFMAN_EMIT_SYMBOL, 35),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 35),
+ (2, HUFFMAN_EMIT_SYMBOL, 62),
+ (9, HUFFMAN_EMIT_SYMBOL, 62),
+ (23, HUFFMAN_EMIT_SYMBOL, 62),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 62),
+
+ # Node 83
+ (3, HUFFMAN_EMIT_SYMBOL, 35),
+ (6, HUFFMAN_EMIT_SYMBOL, 35),
+ (10, HUFFMAN_EMIT_SYMBOL, 35),
+ (15, HUFFMAN_EMIT_SYMBOL, 35),
+ (24, HUFFMAN_EMIT_SYMBOL, 35),
+ (31, HUFFMAN_EMIT_SYMBOL, 35),
+ (41, HUFFMAN_EMIT_SYMBOL, 35),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 35),
+ (3, HUFFMAN_EMIT_SYMBOL, 62),
+ (6, HUFFMAN_EMIT_SYMBOL, 62),
+ (10, HUFFMAN_EMIT_SYMBOL, 62),
+ (15, HUFFMAN_EMIT_SYMBOL, 62),
+ (24, HUFFMAN_EMIT_SYMBOL, 62),
+ (31, HUFFMAN_EMIT_SYMBOL, 62),
+ (41, HUFFMAN_EMIT_SYMBOL, 62),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 62),
+
+ # Node 84
+ (1, HUFFMAN_EMIT_SYMBOL, 0),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 0),
+ (1, HUFFMAN_EMIT_SYMBOL, 36),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 36),
+ (1, HUFFMAN_EMIT_SYMBOL, 64),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 64),
+ (1, HUFFMAN_EMIT_SYMBOL, 91),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 91),
+ (1, HUFFMAN_EMIT_SYMBOL, 93),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 93),
+ (1, HUFFMAN_EMIT_SYMBOL, 126),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 126),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 94),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 125),
+ (93, 0, 0),
+ (94, 0, 0),
+
+ # Node 85
+ (2, HUFFMAN_EMIT_SYMBOL, 0),
+ (9, HUFFMAN_EMIT_SYMBOL, 0),
+ (23, HUFFMAN_EMIT_SYMBOL, 0),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 0),
+ (2, HUFFMAN_EMIT_SYMBOL, 36),
+ (9, HUFFMAN_EMIT_SYMBOL, 36),
+ (23, HUFFMAN_EMIT_SYMBOL, 36),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 36),
+ (2, HUFFMAN_EMIT_SYMBOL, 64),
+ (9, HUFFMAN_EMIT_SYMBOL, 64),
+ (23, HUFFMAN_EMIT_SYMBOL, 64),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 64),
+ (2, HUFFMAN_EMIT_SYMBOL, 91),
+ (9, HUFFMAN_EMIT_SYMBOL, 91),
+ (23, HUFFMAN_EMIT_SYMBOL, 91),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 91),
+
+ # Node 86
+ (3, HUFFMAN_EMIT_SYMBOL, 0),
+ (6, HUFFMAN_EMIT_SYMBOL, 0),
+ (10, HUFFMAN_EMIT_SYMBOL, 0),
+ (15, HUFFMAN_EMIT_SYMBOL, 0),
+ (24, HUFFMAN_EMIT_SYMBOL, 0),
+ (31, HUFFMAN_EMIT_SYMBOL, 0),
+ (41, HUFFMAN_EMIT_SYMBOL, 0),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 0),
+ (3, HUFFMAN_EMIT_SYMBOL, 36),
+ (6, HUFFMAN_EMIT_SYMBOL, 36),
+ (10, HUFFMAN_EMIT_SYMBOL, 36),
+ (15, HUFFMAN_EMIT_SYMBOL, 36),
+ (24, HUFFMAN_EMIT_SYMBOL, 36),
+ (31, HUFFMAN_EMIT_SYMBOL, 36),
+ (41, HUFFMAN_EMIT_SYMBOL, 36),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 36),
+
+ # Node 87
+ (3, HUFFMAN_EMIT_SYMBOL, 64),
+ (6, HUFFMAN_EMIT_SYMBOL, 64),
+ (10, HUFFMAN_EMIT_SYMBOL, 64),
+ (15, HUFFMAN_EMIT_SYMBOL, 64),
+ (24, HUFFMAN_EMIT_SYMBOL, 64),
+ (31, HUFFMAN_EMIT_SYMBOL, 64),
+ (41, HUFFMAN_EMIT_SYMBOL, 64),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 64),
+ (3, HUFFMAN_EMIT_SYMBOL, 91),
+ (6, HUFFMAN_EMIT_SYMBOL, 91),
+ (10, HUFFMAN_EMIT_SYMBOL, 91),
+ (15, HUFFMAN_EMIT_SYMBOL, 91),
+ (24, HUFFMAN_EMIT_SYMBOL, 91),
+ (31, HUFFMAN_EMIT_SYMBOL, 91),
+ (41, HUFFMAN_EMIT_SYMBOL, 91),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 91),
+
+ # Node 88
+ (2, HUFFMAN_EMIT_SYMBOL, 93),
+ (9, HUFFMAN_EMIT_SYMBOL, 93),
+ (23, HUFFMAN_EMIT_SYMBOL, 93),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 93),
+ (2, HUFFMAN_EMIT_SYMBOL, 126),
+ (9, HUFFMAN_EMIT_SYMBOL, 126),
+ (23, HUFFMAN_EMIT_SYMBOL, 126),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 126),
+ (1, HUFFMAN_EMIT_SYMBOL, 94),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 94),
+ (1, HUFFMAN_EMIT_SYMBOL, 125),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 125),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 60),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 96),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 123),
+ (95, 0, 0),
+
+ # Node 89
+ (3, HUFFMAN_EMIT_SYMBOL, 93),
+ (6, HUFFMAN_EMIT_SYMBOL, 93),
+ (10, HUFFMAN_EMIT_SYMBOL, 93),
+ (15, HUFFMAN_EMIT_SYMBOL, 93),
+ (24, HUFFMAN_EMIT_SYMBOL, 93),
+ (31, HUFFMAN_EMIT_SYMBOL, 93),
+ (41, HUFFMAN_EMIT_SYMBOL, 93),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 93),
+ (3, HUFFMAN_EMIT_SYMBOL, 126),
+ (6, HUFFMAN_EMIT_SYMBOL, 126),
+ (10, HUFFMAN_EMIT_SYMBOL, 126),
+ (15, HUFFMAN_EMIT_SYMBOL, 126),
+ (24, HUFFMAN_EMIT_SYMBOL, 126),
+ (31, HUFFMAN_EMIT_SYMBOL, 126),
+ (41, HUFFMAN_EMIT_SYMBOL, 126),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 126),
+
+ # Node 90
+ (2, HUFFMAN_EMIT_SYMBOL, 94),
+ (9, HUFFMAN_EMIT_SYMBOL, 94),
+ (23, HUFFMAN_EMIT_SYMBOL, 94),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 94),
+ (2, HUFFMAN_EMIT_SYMBOL, 125),
+ (9, HUFFMAN_EMIT_SYMBOL, 125),
+ (23, HUFFMAN_EMIT_SYMBOL, 125),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 125),
+ (1, HUFFMAN_EMIT_SYMBOL, 60),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 60),
+ (1, HUFFMAN_EMIT_SYMBOL, 96),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 96),
+ (1, HUFFMAN_EMIT_SYMBOL, 123),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 123),
+ (96, 0, 0),
+ (110, 0, 0),
+
+ # Node 91
+ (3, HUFFMAN_EMIT_SYMBOL, 94),
+ (6, HUFFMAN_EMIT_SYMBOL, 94),
+ (10, HUFFMAN_EMIT_SYMBOL, 94),
+ (15, HUFFMAN_EMIT_SYMBOL, 94),
+ (24, HUFFMAN_EMIT_SYMBOL, 94),
+ (31, HUFFMAN_EMIT_SYMBOL, 94),
+ (41, HUFFMAN_EMIT_SYMBOL, 94),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 94),
+ (3, HUFFMAN_EMIT_SYMBOL, 125),
+ (6, HUFFMAN_EMIT_SYMBOL, 125),
+ (10, HUFFMAN_EMIT_SYMBOL, 125),
+ (15, HUFFMAN_EMIT_SYMBOL, 125),
+ (24, HUFFMAN_EMIT_SYMBOL, 125),
+ (31, HUFFMAN_EMIT_SYMBOL, 125),
+ (41, HUFFMAN_EMIT_SYMBOL, 125),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 125),
+
+ # Node 92
+ (2, HUFFMAN_EMIT_SYMBOL, 60),
+ (9, HUFFMAN_EMIT_SYMBOL, 60),
+ (23, HUFFMAN_EMIT_SYMBOL, 60),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 60),
+ (2, HUFFMAN_EMIT_SYMBOL, 96),
+ (9, HUFFMAN_EMIT_SYMBOL, 96),
+ (23, HUFFMAN_EMIT_SYMBOL, 96),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 96),
+ (2, HUFFMAN_EMIT_SYMBOL, 123),
+ (9, HUFFMAN_EMIT_SYMBOL, 123),
+ (23, HUFFMAN_EMIT_SYMBOL, 123),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 123),
+ (97, 0, 0),
+ (101, 0, 0),
+ (111, 0, 0),
+ (133, 0, 0),
+
+ # Node 93
+ (3, HUFFMAN_EMIT_SYMBOL, 60),
+ (6, HUFFMAN_EMIT_SYMBOL, 60),
+ (10, HUFFMAN_EMIT_SYMBOL, 60),
+ (15, HUFFMAN_EMIT_SYMBOL, 60),
+ (24, HUFFMAN_EMIT_SYMBOL, 60),
+ (31, HUFFMAN_EMIT_SYMBOL, 60),
+ (41, HUFFMAN_EMIT_SYMBOL, 60),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 60),
+ (3, HUFFMAN_EMIT_SYMBOL, 96),
+ (6, HUFFMAN_EMIT_SYMBOL, 96),
+ (10, HUFFMAN_EMIT_SYMBOL, 96),
+ (15, HUFFMAN_EMIT_SYMBOL, 96),
+ (24, HUFFMAN_EMIT_SYMBOL, 96),
+ (31, HUFFMAN_EMIT_SYMBOL, 96),
+ (41, HUFFMAN_EMIT_SYMBOL, 96),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 96),
+
+ # Node 94
+ (3, HUFFMAN_EMIT_SYMBOL, 123),
+ (6, HUFFMAN_EMIT_SYMBOL, 123),
+ (10, HUFFMAN_EMIT_SYMBOL, 123),
+ (15, HUFFMAN_EMIT_SYMBOL, 123),
+ (24, HUFFMAN_EMIT_SYMBOL, 123),
+ (31, HUFFMAN_EMIT_SYMBOL, 123),
+ (41, HUFFMAN_EMIT_SYMBOL, 123),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 123),
+ (98, 0, 0),
+ (99, 0, 0),
+ (102, 0, 0),
+ (105, 0, 0),
+ (112, 0, 0),
+ (119, 0, 0),
+ (134, 0, 0),
+ (153, 0, 0),
+
+ # Node 95
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 92),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 195),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 208),
+ (100, 0, 0),
+ (103, 0, 0),
+ (104, 0, 0),
+ (106, 0, 0),
+ (107, 0, 0),
+ (113, 0, 0),
+ (116, 0, 0),
+ (120, 0, 0),
+ (126, 0, 0),
+ (135, 0, 0),
+ (142, 0, 0),
+ (154, 0, 0),
+ (169, 0, 0),
+
+ # Node 96
+ (1, HUFFMAN_EMIT_SYMBOL, 92),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 92),
+ (1, HUFFMAN_EMIT_SYMBOL, 195),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 195),
+ (1, HUFFMAN_EMIT_SYMBOL, 208),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 208),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 128),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 130),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 131),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 162),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 184),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 194),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 224),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 226),
+ (108, 0, 0),
+ (109, 0, 0),
+
+ # Node 97
+ (2, HUFFMAN_EMIT_SYMBOL, 92),
+ (9, HUFFMAN_EMIT_SYMBOL, 92),
+ (23, HUFFMAN_EMIT_SYMBOL, 92),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 92),
+ (2, HUFFMAN_EMIT_SYMBOL, 195),
+ (9, HUFFMAN_EMIT_SYMBOL, 195),
+ (23, HUFFMAN_EMIT_SYMBOL, 195),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 195),
+ (2, HUFFMAN_EMIT_SYMBOL, 208),
+ (9, HUFFMAN_EMIT_SYMBOL, 208),
+ (23, HUFFMAN_EMIT_SYMBOL, 208),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 208),
+ (1, HUFFMAN_EMIT_SYMBOL, 128),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 128),
+ (1, HUFFMAN_EMIT_SYMBOL, 130),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 130),
+
+ # Node 98
+ (3, HUFFMAN_EMIT_SYMBOL, 92),
+ (6, HUFFMAN_EMIT_SYMBOL, 92),
+ (10, HUFFMAN_EMIT_SYMBOL, 92),
+ (15, HUFFMAN_EMIT_SYMBOL, 92),
+ (24, HUFFMAN_EMIT_SYMBOL, 92),
+ (31, HUFFMAN_EMIT_SYMBOL, 92),
+ (41, HUFFMAN_EMIT_SYMBOL, 92),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 92),
+ (3, HUFFMAN_EMIT_SYMBOL, 195),
+ (6, HUFFMAN_EMIT_SYMBOL, 195),
+ (10, HUFFMAN_EMIT_SYMBOL, 195),
+ (15, HUFFMAN_EMIT_SYMBOL, 195),
+ (24, HUFFMAN_EMIT_SYMBOL, 195),
+ (31, HUFFMAN_EMIT_SYMBOL, 195),
+ (41, HUFFMAN_EMIT_SYMBOL, 195),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 195),
+
+ # Node 99
+ (3, HUFFMAN_EMIT_SYMBOL, 208),
+ (6, HUFFMAN_EMIT_SYMBOL, 208),
+ (10, HUFFMAN_EMIT_SYMBOL, 208),
+ (15, HUFFMAN_EMIT_SYMBOL, 208),
+ (24, HUFFMAN_EMIT_SYMBOL, 208),
+ (31, HUFFMAN_EMIT_SYMBOL, 208),
+ (41, HUFFMAN_EMIT_SYMBOL, 208),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 208),
+ (2, HUFFMAN_EMIT_SYMBOL, 128),
+ (9, HUFFMAN_EMIT_SYMBOL, 128),
+ (23, HUFFMAN_EMIT_SYMBOL, 128),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 128),
+ (2, HUFFMAN_EMIT_SYMBOL, 130),
+ (9, HUFFMAN_EMIT_SYMBOL, 130),
+ (23, HUFFMAN_EMIT_SYMBOL, 130),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 130),
+
+ # Node 100
+ (3, HUFFMAN_EMIT_SYMBOL, 128),
+ (6, HUFFMAN_EMIT_SYMBOL, 128),
+ (10, HUFFMAN_EMIT_SYMBOL, 128),
+ (15, HUFFMAN_EMIT_SYMBOL, 128),
+ (24, HUFFMAN_EMIT_SYMBOL, 128),
+ (31, HUFFMAN_EMIT_SYMBOL, 128),
+ (41, HUFFMAN_EMIT_SYMBOL, 128),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 128),
+ (3, HUFFMAN_EMIT_SYMBOL, 130),
+ (6, HUFFMAN_EMIT_SYMBOL, 130),
+ (10, HUFFMAN_EMIT_SYMBOL, 130),
+ (15, HUFFMAN_EMIT_SYMBOL, 130),
+ (24, HUFFMAN_EMIT_SYMBOL, 130),
+ (31, HUFFMAN_EMIT_SYMBOL, 130),
+ (41, HUFFMAN_EMIT_SYMBOL, 130),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 130),
+
+ # Node 101
+ (1, HUFFMAN_EMIT_SYMBOL, 131),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 131),
+ (1, HUFFMAN_EMIT_SYMBOL, 162),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 162),
+ (1, HUFFMAN_EMIT_SYMBOL, 184),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 184),
+ (1, HUFFMAN_EMIT_SYMBOL, 194),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 194),
+ (1, HUFFMAN_EMIT_SYMBOL, 224),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 224),
+ (1, HUFFMAN_EMIT_SYMBOL, 226),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 226),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 153),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 161),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 167),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 172),
+
+ # Node 102
+ (2, HUFFMAN_EMIT_SYMBOL, 131),
+ (9, HUFFMAN_EMIT_SYMBOL, 131),
+ (23, HUFFMAN_EMIT_SYMBOL, 131),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 131),
+ (2, HUFFMAN_EMIT_SYMBOL, 162),
+ (9, HUFFMAN_EMIT_SYMBOL, 162),
+ (23, HUFFMAN_EMIT_SYMBOL, 162),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 162),
+ (2, HUFFMAN_EMIT_SYMBOL, 184),
+ (9, HUFFMAN_EMIT_SYMBOL, 184),
+ (23, HUFFMAN_EMIT_SYMBOL, 184),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 184),
+ (2, HUFFMAN_EMIT_SYMBOL, 194),
+ (9, HUFFMAN_EMIT_SYMBOL, 194),
+ (23, HUFFMAN_EMIT_SYMBOL, 194),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 194),
+
+ # Node 103
+ (3, HUFFMAN_EMIT_SYMBOL, 131),
+ (6, HUFFMAN_EMIT_SYMBOL, 131),
+ (10, HUFFMAN_EMIT_SYMBOL, 131),
+ (15, HUFFMAN_EMIT_SYMBOL, 131),
+ (24, HUFFMAN_EMIT_SYMBOL, 131),
+ (31, HUFFMAN_EMIT_SYMBOL, 131),
+ (41, HUFFMAN_EMIT_SYMBOL, 131),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 131),
+ (3, HUFFMAN_EMIT_SYMBOL, 162),
+ (6, HUFFMAN_EMIT_SYMBOL, 162),
+ (10, HUFFMAN_EMIT_SYMBOL, 162),
+ (15, HUFFMAN_EMIT_SYMBOL, 162),
+ (24, HUFFMAN_EMIT_SYMBOL, 162),
+ (31, HUFFMAN_EMIT_SYMBOL, 162),
+ (41, HUFFMAN_EMIT_SYMBOL, 162),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 162),
+
+ # Node 104
+ (3, HUFFMAN_EMIT_SYMBOL, 184),
+ (6, HUFFMAN_EMIT_SYMBOL, 184),
+ (10, HUFFMAN_EMIT_SYMBOL, 184),
+ (15, HUFFMAN_EMIT_SYMBOL, 184),
+ (24, HUFFMAN_EMIT_SYMBOL, 184),
+ (31, HUFFMAN_EMIT_SYMBOL, 184),
+ (41, HUFFMAN_EMIT_SYMBOL, 184),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 184),
+ (3, HUFFMAN_EMIT_SYMBOL, 194),
+ (6, HUFFMAN_EMIT_SYMBOL, 194),
+ (10, HUFFMAN_EMIT_SYMBOL, 194),
+ (15, HUFFMAN_EMIT_SYMBOL, 194),
+ (24, HUFFMAN_EMIT_SYMBOL, 194),
+ (31, HUFFMAN_EMIT_SYMBOL, 194),
+ (41, HUFFMAN_EMIT_SYMBOL, 194),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 194),
+
+ # Node 105
+ (2, HUFFMAN_EMIT_SYMBOL, 224),
+ (9, HUFFMAN_EMIT_SYMBOL, 224),
+ (23, HUFFMAN_EMIT_SYMBOL, 224),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 224),
+ (2, HUFFMAN_EMIT_SYMBOL, 226),
+ (9, HUFFMAN_EMIT_SYMBOL, 226),
+ (23, HUFFMAN_EMIT_SYMBOL, 226),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 226),
+ (1, HUFFMAN_EMIT_SYMBOL, 153),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 153),
+ (1, HUFFMAN_EMIT_SYMBOL, 161),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 161),
+ (1, HUFFMAN_EMIT_SYMBOL, 167),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 167),
+ (1, HUFFMAN_EMIT_SYMBOL, 172),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 172),
+
+ # Node 106
+ (3, HUFFMAN_EMIT_SYMBOL, 224),
+ (6, HUFFMAN_EMIT_SYMBOL, 224),
+ (10, HUFFMAN_EMIT_SYMBOL, 224),
+ (15, HUFFMAN_EMIT_SYMBOL, 224),
+ (24, HUFFMAN_EMIT_SYMBOL, 224),
+ (31, HUFFMAN_EMIT_SYMBOL, 224),
+ (41, HUFFMAN_EMIT_SYMBOL, 224),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 224),
+ (3, HUFFMAN_EMIT_SYMBOL, 226),
+ (6, HUFFMAN_EMIT_SYMBOL, 226),
+ (10, HUFFMAN_EMIT_SYMBOL, 226),
+ (15, HUFFMAN_EMIT_SYMBOL, 226),
+ (24, HUFFMAN_EMIT_SYMBOL, 226),
+ (31, HUFFMAN_EMIT_SYMBOL, 226),
+ (41, HUFFMAN_EMIT_SYMBOL, 226),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 226),
+
+ # Node 107
+ (2, HUFFMAN_EMIT_SYMBOL, 153),
+ (9, HUFFMAN_EMIT_SYMBOL, 153),
+ (23, HUFFMAN_EMIT_SYMBOL, 153),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 153),
+ (2, HUFFMAN_EMIT_SYMBOL, 161),
+ (9, HUFFMAN_EMIT_SYMBOL, 161),
+ (23, HUFFMAN_EMIT_SYMBOL, 161),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 161),
+ (2, HUFFMAN_EMIT_SYMBOL, 167),
+ (9, HUFFMAN_EMIT_SYMBOL, 167),
+ (23, HUFFMAN_EMIT_SYMBOL, 167),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 167),
+ (2, HUFFMAN_EMIT_SYMBOL, 172),
+ (9, HUFFMAN_EMIT_SYMBOL, 172),
+ (23, HUFFMAN_EMIT_SYMBOL, 172),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 172),
+
+ # Node 108
+ (3, HUFFMAN_EMIT_SYMBOL, 153),
+ (6, HUFFMAN_EMIT_SYMBOL, 153),
+ (10, HUFFMAN_EMIT_SYMBOL, 153),
+ (15, HUFFMAN_EMIT_SYMBOL, 153),
+ (24, HUFFMAN_EMIT_SYMBOL, 153),
+ (31, HUFFMAN_EMIT_SYMBOL, 153),
+ (41, HUFFMAN_EMIT_SYMBOL, 153),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 153),
+ (3, HUFFMAN_EMIT_SYMBOL, 161),
+ (6, HUFFMAN_EMIT_SYMBOL, 161),
+ (10, HUFFMAN_EMIT_SYMBOL, 161),
+ (15, HUFFMAN_EMIT_SYMBOL, 161),
+ (24, HUFFMAN_EMIT_SYMBOL, 161),
+ (31, HUFFMAN_EMIT_SYMBOL, 161),
+ (41, HUFFMAN_EMIT_SYMBOL, 161),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 161),
+
+ # Node 109
+ (3, HUFFMAN_EMIT_SYMBOL, 167),
+ (6, HUFFMAN_EMIT_SYMBOL, 167),
+ (10, HUFFMAN_EMIT_SYMBOL, 167),
+ (15, HUFFMAN_EMIT_SYMBOL, 167),
+ (24, HUFFMAN_EMIT_SYMBOL, 167),
+ (31, HUFFMAN_EMIT_SYMBOL, 167),
+ (41, HUFFMAN_EMIT_SYMBOL, 167),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 167),
+ (3, HUFFMAN_EMIT_SYMBOL, 172),
+ (6, HUFFMAN_EMIT_SYMBOL, 172),
+ (10, HUFFMAN_EMIT_SYMBOL, 172),
+ (15, HUFFMAN_EMIT_SYMBOL, 172),
+ (24, HUFFMAN_EMIT_SYMBOL, 172),
+ (31, HUFFMAN_EMIT_SYMBOL, 172),
+ (41, HUFFMAN_EMIT_SYMBOL, 172),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 172),
+
+ # Node 110
+ (114, 0, 0),
+ (115, 0, 0),
+ (117, 0, 0),
+ (118, 0, 0),
+ (121, 0, 0),
+ (123, 0, 0),
+ (127, 0, 0),
+ (130, 0, 0),
+ (136, 0, 0),
+ (139, 0, 0),
+ (143, 0, 0),
+ (146, 0, 0),
+ (155, 0, 0),
+ (162, 0, 0),
+ (170, 0, 0),
+ (180, 0, 0),
+
+ # Node 111
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 176),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 177),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 179),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 209),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 216),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 217),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 227),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 229),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 230),
+ (122, 0, 0),
+ (124, 0, 0),
+ (125, 0, 0),
+ (128, 0, 0),
+ (129, 0, 0),
+ (131, 0, 0),
+ (132, 0, 0),
+
+ # Node 112
+ (1, HUFFMAN_EMIT_SYMBOL, 176),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 176),
+ (1, HUFFMAN_EMIT_SYMBOL, 177),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 177),
+ (1, HUFFMAN_EMIT_SYMBOL, 179),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 179),
+ (1, HUFFMAN_EMIT_SYMBOL, 209),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 209),
+ (1, HUFFMAN_EMIT_SYMBOL, 216),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 216),
+ (1, HUFFMAN_EMIT_SYMBOL, 217),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 217),
+ (1, HUFFMAN_EMIT_SYMBOL, 227),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 227),
+ (1, HUFFMAN_EMIT_SYMBOL, 229),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 229),
+
+ # Node 113
+ (2, HUFFMAN_EMIT_SYMBOL, 176),
+ (9, HUFFMAN_EMIT_SYMBOL, 176),
+ (23, HUFFMAN_EMIT_SYMBOL, 176),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 176),
+ (2, HUFFMAN_EMIT_SYMBOL, 177),
+ (9, HUFFMAN_EMIT_SYMBOL, 177),
+ (23, HUFFMAN_EMIT_SYMBOL, 177),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 177),
+ (2, HUFFMAN_EMIT_SYMBOL, 179),
+ (9, HUFFMAN_EMIT_SYMBOL, 179),
+ (23, HUFFMAN_EMIT_SYMBOL, 179),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 179),
+ (2, HUFFMAN_EMIT_SYMBOL, 209),
+ (9, HUFFMAN_EMIT_SYMBOL, 209),
+ (23, HUFFMAN_EMIT_SYMBOL, 209),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 209),
+
+ # Node 114
+ (3, HUFFMAN_EMIT_SYMBOL, 176),
+ (6, HUFFMAN_EMIT_SYMBOL, 176),
+ (10, HUFFMAN_EMIT_SYMBOL, 176),
+ (15, HUFFMAN_EMIT_SYMBOL, 176),
+ (24, HUFFMAN_EMIT_SYMBOL, 176),
+ (31, HUFFMAN_EMIT_SYMBOL, 176),
+ (41, HUFFMAN_EMIT_SYMBOL, 176),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 176),
+ (3, HUFFMAN_EMIT_SYMBOL, 177),
+ (6, HUFFMAN_EMIT_SYMBOL, 177),
+ (10, HUFFMAN_EMIT_SYMBOL, 177),
+ (15, HUFFMAN_EMIT_SYMBOL, 177),
+ (24, HUFFMAN_EMIT_SYMBOL, 177),
+ (31, HUFFMAN_EMIT_SYMBOL, 177),
+ (41, HUFFMAN_EMIT_SYMBOL, 177),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 177),
+
+ # Node 115
+ (3, HUFFMAN_EMIT_SYMBOL, 179),
+ (6, HUFFMAN_EMIT_SYMBOL, 179),
+ (10, HUFFMAN_EMIT_SYMBOL, 179),
+ (15, HUFFMAN_EMIT_SYMBOL, 179),
+ (24, HUFFMAN_EMIT_SYMBOL, 179),
+ (31, HUFFMAN_EMIT_SYMBOL, 179),
+ (41, HUFFMAN_EMIT_SYMBOL, 179),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 179),
+ (3, HUFFMAN_EMIT_SYMBOL, 209),
+ (6, HUFFMAN_EMIT_SYMBOL, 209),
+ (10, HUFFMAN_EMIT_SYMBOL, 209),
+ (15, HUFFMAN_EMIT_SYMBOL, 209),
+ (24, HUFFMAN_EMIT_SYMBOL, 209),
+ (31, HUFFMAN_EMIT_SYMBOL, 209),
+ (41, HUFFMAN_EMIT_SYMBOL, 209),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 209),
+
+ # Node 116
+ (2, HUFFMAN_EMIT_SYMBOL, 216),
+ (9, HUFFMAN_EMIT_SYMBOL, 216),
+ (23, HUFFMAN_EMIT_SYMBOL, 216),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 216),
+ (2, HUFFMAN_EMIT_SYMBOL, 217),
+ (9, HUFFMAN_EMIT_SYMBOL, 217),
+ (23, HUFFMAN_EMIT_SYMBOL, 217),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 217),
+ (2, HUFFMAN_EMIT_SYMBOL, 227),
+ (9, HUFFMAN_EMIT_SYMBOL, 227),
+ (23, HUFFMAN_EMIT_SYMBOL, 227),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 227),
+ (2, HUFFMAN_EMIT_SYMBOL, 229),
+ (9, HUFFMAN_EMIT_SYMBOL, 229),
+ (23, HUFFMAN_EMIT_SYMBOL, 229),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 229),
+
+ # Node 117
+ (3, HUFFMAN_EMIT_SYMBOL, 216),
+ (6, HUFFMAN_EMIT_SYMBOL, 216),
+ (10, HUFFMAN_EMIT_SYMBOL, 216),
+ (15, HUFFMAN_EMIT_SYMBOL, 216),
+ (24, HUFFMAN_EMIT_SYMBOL, 216),
+ (31, HUFFMAN_EMIT_SYMBOL, 216),
+ (41, HUFFMAN_EMIT_SYMBOL, 216),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 216),
+ (3, HUFFMAN_EMIT_SYMBOL, 217),
+ (6, HUFFMAN_EMIT_SYMBOL, 217),
+ (10, HUFFMAN_EMIT_SYMBOL, 217),
+ (15, HUFFMAN_EMIT_SYMBOL, 217),
+ (24, HUFFMAN_EMIT_SYMBOL, 217),
+ (31, HUFFMAN_EMIT_SYMBOL, 217),
+ (41, HUFFMAN_EMIT_SYMBOL, 217),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 217),
+
+ # Node 118
+ (3, HUFFMAN_EMIT_SYMBOL, 227),
+ (6, HUFFMAN_EMIT_SYMBOL, 227),
+ (10, HUFFMAN_EMIT_SYMBOL, 227),
+ (15, HUFFMAN_EMIT_SYMBOL, 227),
+ (24, HUFFMAN_EMIT_SYMBOL, 227),
+ (31, HUFFMAN_EMIT_SYMBOL, 227),
+ (41, HUFFMAN_EMIT_SYMBOL, 227),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 227),
+ (3, HUFFMAN_EMIT_SYMBOL, 229),
+ (6, HUFFMAN_EMIT_SYMBOL, 229),
+ (10, HUFFMAN_EMIT_SYMBOL, 229),
+ (15, HUFFMAN_EMIT_SYMBOL, 229),
+ (24, HUFFMAN_EMIT_SYMBOL, 229),
+ (31, HUFFMAN_EMIT_SYMBOL, 229),
+ (41, HUFFMAN_EMIT_SYMBOL, 229),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 229),
+
+ # Node 119
+ (1, HUFFMAN_EMIT_SYMBOL, 230),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 230),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 129),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 132),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 133),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 134),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 136),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 146),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 154),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 156),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 160),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 163),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 164),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 169),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 170),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 173),
+
+ # Node 120
+ (2, HUFFMAN_EMIT_SYMBOL, 230),
+ (9, HUFFMAN_EMIT_SYMBOL, 230),
+ (23, HUFFMAN_EMIT_SYMBOL, 230),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 230),
+ (1, HUFFMAN_EMIT_SYMBOL, 129),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 129),
+ (1, HUFFMAN_EMIT_SYMBOL, 132),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 132),
+ (1, HUFFMAN_EMIT_SYMBOL, 133),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 133),
+ (1, HUFFMAN_EMIT_SYMBOL, 134),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 134),
+ (1, HUFFMAN_EMIT_SYMBOL, 136),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 136),
+ (1, HUFFMAN_EMIT_SYMBOL, 146),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 146),
+
+ # Node 121
+ (3, HUFFMAN_EMIT_SYMBOL, 230),
+ (6, HUFFMAN_EMIT_SYMBOL, 230),
+ (10, HUFFMAN_EMIT_SYMBOL, 230),
+ (15, HUFFMAN_EMIT_SYMBOL, 230),
+ (24, HUFFMAN_EMIT_SYMBOL, 230),
+ (31, HUFFMAN_EMIT_SYMBOL, 230),
+ (41, HUFFMAN_EMIT_SYMBOL, 230),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 230),
+ (2, HUFFMAN_EMIT_SYMBOL, 129),
+ (9, HUFFMAN_EMIT_SYMBOL, 129),
+ (23, HUFFMAN_EMIT_SYMBOL, 129),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 129),
+ (2, HUFFMAN_EMIT_SYMBOL, 132),
+ (9, HUFFMAN_EMIT_SYMBOL, 132),
+ (23, HUFFMAN_EMIT_SYMBOL, 132),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 132),
+
+ # Node 122
+ (3, HUFFMAN_EMIT_SYMBOL, 129),
+ (6, HUFFMAN_EMIT_SYMBOL, 129),
+ (10, HUFFMAN_EMIT_SYMBOL, 129),
+ (15, HUFFMAN_EMIT_SYMBOL, 129),
+ (24, HUFFMAN_EMIT_SYMBOL, 129),
+ (31, HUFFMAN_EMIT_SYMBOL, 129),
+ (41, HUFFMAN_EMIT_SYMBOL, 129),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 129),
+ (3, HUFFMAN_EMIT_SYMBOL, 132),
+ (6, HUFFMAN_EMIT_SYMBOL, 132),
+ (10, HUFFMAN_EMIT_SYMBOL, 132),
+ (15, HUFFMAN_EMIT_SYMBOL, 132),
+ (24, HUFFMAN_EMIT_SYMBOL, 132),
+ (31, HUFFMAN_EMIT_SYMBOL, 132),
+ (41, HUFFMAN_EMIT_SYMBOL, 132),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 132),
+
+ # Node 123
+ (2, HUFFMAN_EMIT_SYMBOL, 133),
+ (9, HUFFMAN_EMIT_SYMBOL, 133),
+ (23, HUFFMAN_EMIT_SYMBOL, 133),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 133),
+ (2, HUFFMAN_EMIT_SYMBOL, 134),
+ (9, HUFFMAN_EMIT_SYMBOL, 134),
+ (23, HUFFMAN_EMIT_SYMBOL, 134),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 134),
+ (2, HUFFMAN_EMIT_SYMBOL, 136),
+ (9, HUFFMAN_EMIT_SYMBOL, 136),
+ (23, HUFFMAN_EMIT_SYMBOL, 136),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 136),
+ (2, HUFFMAN_EMIT_SYMBOL, 146),
+ (9, HUFFMAN_EMIT_SYMBOL, 146),
+ (23, HUFFMAN_EMIT_SYMBOL, 146),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 146),
+
+ # Node 124
+ (3, HUFFMAN_EMIT_SYMBOL, 133),
+ (6, HUFFMAN_EMIT_SYMBOL, 133),
+ (10, HUFFMAN_EMIT_SYMBOL, 133),
+ (15, HUFFMAN_EMIT_SYMBOL, 133),
+ (24, HUFFMAN_EMIT_SYMBOL, 133),
+ (31, HUFFMAN_EMIT_SYMBOL, 133),
+ (41, HUFFMAN_EMIT_SYMBOL, 133),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 133),
+ (3, HUFFMAN_EMIT_SYMBOL, 134),
+ (6, HUFFMAN_EMIT_SYMBOL, 134),
+ (10, HUFFMAN_EMIT_SYMBOL, 134),
+ (15, HUFFMAN_EMIT_SYMBOL, 134),
+ (24, HUFFMAN_EMIT_SYMBOL, 134),
+ (31, HUFFMAN_EMIT_SYMBOL, 134),
+ (41, HUFFMAN_EMIT_SYMBOL, 134),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 134),
+
+ # Node 125
+ (3, HUFFMAN_EMIT_SYMBOL, 136),
+ (6, HUFFMAN_EMIT_SYMBOL, 136),
+ (10, HUFFMAN_EMIT_SYMBOL, 136),
+ (15, HUFFMAN_EMIT_SYMBOL, 136),
+ (24, HUFFMAN_EMIT_SYMBOL, 136),
+ (31, HUFFMAN_EMIT_SYMBOL, 136),
+ (41, HUFFMAN_EMIT_SYMBOL, 136),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 136),
+ (3, HUFFMAN_EMIT_SYMBOL, 146),
+ (6, HUFFMAN_EMIT_SYMBOL, 146),
+ (10, HUFFMAN_EMIT_SYMBOL, 146),
+ (15, HUFFMAN_EMIT_SYMBOL, 146),
+ (24, HUFFMAN_EMIT_SYMBOL, 146),
+ (31, HUFFMAN_EMIT_SYMBOL, 146),
+ (41, HUFFMAN_EMIT_SYMBOL, 146),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 146),
+
+ # Node 126
+ (1, HUFFMAN_EMIT_SYMBOL, 154),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 154),
+ (1, HUFFMAN_EMIT_SYMBOL, 156),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 156),
+ (1, HUFFMAN_EMIT_SYMBOL, 160),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 160),
+ (1, HUFFMAN_EMIT_SYMBOL, 163),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 163),
+ (1, HUFFMAN_EMIT_SYMBOL, 164),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 164),
+ (1, HUFFMAN_EMIT_SYMBOL, 169),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 169),
+ (1, HUFFMAN_EMIT_SYMBOL, 170),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 170),
+ (1, HUFFMAN_EMIT_SYMBOL, 173),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 173),
+
+ # Node 127
+ (2, HUFFMAN_EMIT_SYMBOL, 154),
+ (9, HUFFMAN_EMIT_SYMBOL, 154),
+ (23, HUFFMAN_EMIT_SYMBOL, 154),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 154),
+ (2, HUFFMAN_EMIT_SYMBOL, 156),
+ (9, HUFFMAN_EMIT_SYMBOL, 156),
+ (23, HUFFMAN_EMIT_SYMBOL, 156),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 156),
+ (2, HUFFMAN_EMIT_SYMBOL, 160),
+ (9, HUFFMAN_EMIT_SYMBOL, 160),
+ (23, HUFFMAN_EMIT_SYMBOL, 160),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 160),
+ (2, HUFFMAN_EMIT_SYMBOL, 163),
+ (9, HUFFMAN_EMIT_SYMBOL, 163),
+ (23, HUFFMAN_EMIT_SYMBOL, 163),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 163),
+
+ # Node 128
+ (3, HUFFMAN_EMIT_SYMBOL, 154),
+ (6, HUFFMAN_EMIT_SYMBOL, 154),
+ (10, HUFFMAN_EMIT_SYMBOL, 154),
+ (15, HUFFMAN_EMIT_SYMBOL, 154),
+ (24, HUFFMAN_EMIT_SYMBOL, 154),
+ (31, HUFFMAN_EMIT_SYMBOL, 154),
+ (41, HUFFMAN_EMIT_SYMBOL, 154),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 154),
+ (3, HUFFMAN_EMIT_SYMBOL, 156),
+ (6, HUFFMAN_EMIT_SYMBOL, 156),
+ (10, HUFFMAN_EMIT_SYMBOL, 156),
+ (15, HUFFMAN_EMIT_SYMBOL, 156),
+ (24, HUFFMAN_EMIT_SYMBOL, 156),
+ (31, HUFFMAN_EMIT_SYMBOL, 156),
+ (41, HUFFMAN_EMIT_SYMBOL, 156),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 156),
+
+ # Node 129
+ (3, HUFFMAN_EMIT_SYMBOL, 160),
+ (6, HUFFMAN_EMIT_SYMBOL, 160),
+ (10, HUFFMAN_EMIT_SYMBOL, 160),
+ (15, HUFFMAN_EMIT_SYMBOL, 160),
+ (24, HUFFMAN_EMIT_SYMBOL, 160),
+ (31, HUFFMAN_EMIT_SYMBOL, 160),
+ (41, HUFFMAN_EMIT_SYMBOL, 160),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 160),
+ (3, HUFFMAN_EMIT_SYMBOL, 163),
+ (6, HUFFMAN_EMIT_SYMBOL, 163),
+ (10, HUFFMAN_EMIT_SYMBOL, 163),
+ (15, HUFFMAN_EMIT_SYMBOL, 163),
+ (24, HUFFMAN_EMIT_SYMBOL, 163),
+ (31, HUFFMAN_EMIT_SYMBOL, 163),
+ (41, HUFFMAN_EMIT_SYMBOL, 163),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 163),
+
+ # Node 130
+ (2, HUFFMAN_EMIT_SYMBOL, 164),
+ (9, HUFFMAN_EMIT_SYMBOL, 164),
+ (23, HUFFMAN_EMIT_SYMBOL, 164),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 164),
+ (2, HUFFMAN_EMIT_SYMBOL, 169),
+ (9, HUFFMAN_EMIT_SYMBOL, 169),
+ (23, HUFFMAN_EMIT_SYMBOL, 169),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 169),
+ (2, HUFFMAN_EMIT_SYMBOL, 170),
+ (9, HUFFMAN_EMIT_SYMBOL, 170),
+ (23, HUFFMAN_EMIT_SYMBOL, 170),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 170),
+ (2, HUFFMAN_EMIT_SYMBOL, 173),
+ (9, HUFFMAN_EMIT_SYMBOL, 173),
+ (23, HUFFMAN_EMIT_SYMBOL, 173),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 173),
+
+ # Node 131
+ (3, HUFFMAN_EMIT_SYMBOL, 164),
+ (6, HUFFMAN_EMIT_SYMBOL, 164),
+ (10, HUFFMAN_EMIT_SYMBOL, 164),
+ (15, HUFFMAN_EMIT_SYMBOL, 164),
+ (24, HUFFMAN_EMIT_SYMBOL, 164),
+ (31, HUFFMAN_EMIT_SYMBOL, 164),
+ (41, HUFFMAN_EMIT_SYMBOL, 164),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 164),
+ (3, HUFFMAN_EMIT_SYMBOL, 169),
+ (6, HUFFMAN_EMIT_SYMBOL, 169),
+ (10, HUFFMAN_EMIT_SYMBOL, 169),
+ (15, HUFFMAN_EMIT_SYMBOL, 169),
+ (24, HUFFMAN_EMIT_SYMBOL, 169),
+ (31, HUFFMAN_EMIT_SYMBOL, 169),
+ (41, HUFFMAN_EMIT_SYMBOL, 169),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 169),
+
+ # Node 132
+ (3, HUFFMAN_EMIT_SYMBOL, 170),
+ (6, HUFFMAN_EMIT_SYMBOL, 170),
+ (10, HUFFMAN_EMIT_SYMBOL, 170),
+ (15, HUFFMAN_EMIT_SYMBOL, 170),
+ (24, HUFFMAN_EMIT_SYMBOL, 170),
+ (31, HUFFMAN_EMIT_SYMBOL, 170),
+ (41, HUFFMAN_EMIT_SYMBOL, 170),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 170),
+ (3, HUFFMAN_EMIT_SYMBOL, 173),
+ (6, HUFFMAN_EMIT_SYMBOL, 173),
+ (10, HUFFMAN_EMIT_SYMBOL, 173),
+ (15, HUFFMAN_EMIT_SYMBOL, 173),
+ (24, HUFFMAN_EMIT_SYMBOL, 173),
+ (31, HUFFMAN_EMIT_SYMBOL, 173),
+ (41, HUFFMAN_EMIT_SYMBOL, 173),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 173),
+
+ # Node 133
+ (137, 0, 0),
+ (138, 0, 0),
+ (140, 0, 0),
+ (141, 0, 0),
+ (144, 0, 0),
+ (145, 0, 0),
+ (147, 0, 0),
+ (150, 0, 0),
+ (156, 0, 0),
+ (159, 0, 0),
+ (163, 0, 0),
+ (166, 0, 0),
+ (171, 0, 0),
+ (174, 0, 0),
+ (181, 0, 0),
+ (190, 0, 0),
+
+ # Node 134
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 178),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 181),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 185),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 186),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 187),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 189),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 190),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 196),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 198),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 228),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 232),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 233),
+ (148, 0, 0),
+ (149, 0, 0),
+ (151, 0, 0),
+ (152, 0, 0),
+
+ # Node 135
+ (1, HUFFMAN_EMIT_SYMBOL, 178),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 178),
+ (1, HUFFMAN_EMIT_SYMBOL, 181),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 181),
+ (1, HUFFMAN_EMIT_SYMBOL, 185),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 185),
+ (1, HUFFMAN_EMIT_SYMBOL, 186),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 186),
+ (1, HUFFMAN_EMIT_SYMBOL, 187),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 187),
+ (1, HUFFMAN_EMIT_SYMBOL, 189),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 189),
+ (1, HUFFMAN_EMIT_SYMBOL, 190),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 190),
+ (1, HUFFMAN_EMIT_SYMBOL, 196),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 196),
+
+ # Node 136
+ (2, HUFFMAN_EMIT_SYMBOL, 178),
+ (9, HUFFMAN_EMIT_SYMBOL, 178),
+ (23, HUFFMAN_EMIT_SYMBOL, 178),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 178),
+ (2, HUFFMAN_EMIT_SYMBOL, 181),
+ (9, HUFFMAN_EMIT_SYMBOL, 181),
+ (23, HUFFMAN_EMIT_SYMBOL, 181),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 181),
+ (2, HUFFMAN_EMIT_SYMBOL, 185),
+ (9, HUFFMAN_EMIT_SYMBOL, 185),
+ (23, HUFFMAN_EMIT_SYMBOL, 185),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 185),
+ (2, HUFFMAN_EMIT_SYMBOL, 186),
+ (9, HUFFMAN_EMIT_SYMBOL, 186),
+ (23, HUFFMAN_EMIT_SYMBOL, 186),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 186),
+
+ # Node 137
+ (3, HUFFMAN_EMIT_SYMBOL, 178),
+ (6, HUFFMAN_EMIT_SYMBOL, 178),
+ (10, HUFFMAN_EMIT_SYMBOL, 178),
+ (15, HUFFMAN_EMIT_SYMBOL, 178),
+ (24, HUFFMAN_EMIT_SYMBOL, 178),
+ (31, HUFFMAN_EMIT_SYMBOL, 178),
+ (41, HUFFMAN_EMIT_SYMBOL, 178),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 178),
+ (3, HUFFMAN_EMIT_SYMBOL, 181),
+ (6, HUFFMAN_EMIT_SYMBOL, 181),
+ (10, HUFFMAN_EMIT_SYMBOL, 181),
+ (15, HUFFMAN_EMIT_SYMBOL, 181),
+ (24, HUFFMAN_EMIT_SYMBOL, 181),
+ (31, HUFFMAN_EMIT_SYMBOL, 181),
+ (41, HUFFMAN_EMIT_SYMBOL, 181),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 181),
+
+ # Node 138
+ (3, HUFFMAN_EMIT_SYMBOL, 185),
+ (6, HUFFMAN_EMIT_SYMBOL, 185),
+ (10, HUFFMAN_EMIT_SYMBOL, 185),
+ (15, HUFFMAN_EMIT_SYMBOL, 185),
+ (24, HUFFMAN_EMIT_SYMBOL, 185),
+ (31, HUFFMAN_EMIT_SYMBOL, 185),
+ (41, HUFFMAN_EMIT_SYMBOL, 185),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 185),
+ (3, HUFFMAN_EMIT_SYMBOL, 186),
+ (6, HUFFMAN_EMIT_SYMBOL, 186),
+ (10, HUFFMAN_EMIT_SYMBOL, 186),
+ (15, HUFFMAN_EMIT_SYMBOL, 186),
+ (24, HUFFMAN_EMIT_SYMBOL, 186),
+ (31, HUFFMAN_EMIT_SYMBOL, 186),
+ (41, HUFFMAN_EMIT_SYMBOL, 186),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 186),
+
+ # Node 139
+ (2, HUFFMAN_EMIT_SYMBOL, 187),
+ (9, HUFFMAN_EMIT_SYMBOL, 187),
+ (23, HUFFMAN_EMIT_SYMBOL, 187),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 187),
+ (2, HUFFMAN_EMIT_SYMBOL, 189),
+ (9, HUFFMAN_EMIT_SYMBOL, 189),
+ (23, HUFFMAN_EMIT_SYMBOL, 189),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 189),
+ (2, HUFFMAN_EMIT_SYMBOL, 190),
+ (9, HUFFMAN_EMIT_SYMBOL, 190),
+ (23, HUFFMAN_EMIT_SYMBOL, 190),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 190),
+ (2, HUFFMAN_EMIT_SYMBOL, 196),
+ (9, HUFFMAN_EMIT_SYMBOL, 196),
+ (23, HUFFMAN_EMIT_SYMBOL, 196),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 196),
+
+ # Node 140
+ (3, HUFFMAN_EMIT_SYMBOL, 187),
+ (6, HUFFMAN_EMIT_SYMBOL, 187),
+ (10, HUFFMAN_EMIT_SYMBOL, 187),
+ (15, HUFFMAN_EMIT_SYMBOL, 187),
+ (24, HUFFMAN_EMIT_SYMBOL, 187),
+ (31, HUFFMAN_EMIT_SYMBOL, 187),
+ (41, HUFFMAN_EMIT_SYMBOL, 187),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 187),
+ (3, HUFFMAN_EMIT_SYMBOL, 189),
+ (6, HUFFMAN_EMIT_SYMBOL, 189),
+ (10, HUFFMAN_EMIT_SYMBOL, 189),
+ (15, HUFFMAN_EMIT_SYMBOL, 189),
+ (24, HUFFMAN_EMIT_SYMBOL, 189),
+ (31, HUFFMAN_EMIT_SYMBOL, 189),
+ (41, HUFFMAN_EMIT_SYMBOL, 189),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 189),
+
+ # Node 141
+ (3, HUFFMAN_EMIT_SYMBOL, 190),
+ (6, HUFFMAN_EMIT_SYMBOL, 190),
+ (10, HUFFMAN_EMIT_SYMBOL, 190),
+ (15, HUFFMAN_EMIT_SYMBOL, 190),
+ (24, HUFFMAN_EMIT_SYMBOL, 190),
+ (31, HUFFMAN_EMIT_SYMBOL, 190),
+ (41, HUFFMAN_EMIT_SYMBOL, 190),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 190),
+ (3, HUFFMAN_EMIT_SYMBOL, 196),
+ (6, HUFFMAN_EMIT_SYMBOL, 196),
+ (10, HUFFMAN_EMIT_SYMBOL, 196),
+ (15, HUFFMAN_EMIT_SYMBOL, 196),
+ (24, HUFFMAN_EMIT_SYMBOL, 196),
+ (31, HUFFMAN_EMIT_SYMBOL, 196),
+ (41, HUFFMAN_EMIT_SYMBOL, 196),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 196),
+
+ # Node 142
+ (1, HUFFMAN_EMIT_SYMBOL, 198),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 198),
+ (1, HUFFMAN_EMIT_SYMBOL, 228),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 228),
+ (1, HUFFMAN_EMIT_SYMBOL, 232),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 232),
+ (1, HUFFMAN_EMIT_SYMBOL, 233),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 233),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 1),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 135),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 137),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 138),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 139),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 140),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 141),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 143),
+
+ # Node 143
+ (2, HUFFMAN_EMIT_SYMBOL, 198),
+ (9, HUFFMAN_EMIT_SYMBOL, 198),
+ (23, HUFFMAN_EMIT_SYMBOL, 198),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 198),
+ (2, HUFFMAN_EMIT_SYMBOL, 228),
+ (9, HUFFMAN_EMIT_SYMBOL, 228),
+ (23, HUFFMAN_EMIT_SYMBOL, 228),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 228),
+ (2, HUFFMAN_EMIT_SYMBOL, 232),
+ (9, HUFFMAN_EMIT_SYMBOL, 232),
+ (23, HUFFMAN_EMIT_SYMBOL, 232),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 232),
+ (2, HUFFMAN_EMIT_SYMBOL, 233),
+ (9, HUFFMAN_EMIT_SYMBOL, 233),
+ (23, HUFFMAN_EMIT_SYMBOL, 233),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 233),
+
+ # Node 144
+ (3, HUFFMAN_EMIT_SYMBOL, 198),
+ (6, HUFFMAN_EMIT_SYMBOL, 198),
+ (10, HUFFMAN_EMIT_SYMBOL, 198),
+ (15, HUFFMAN_EMIT_SYMBOL, 198),
+ (24, HUFFMAN_EMIT_SYMBOL, 198),
+ (31, HUFFMAN_EMIT_SYMBOL, 198),
+ (41, HUFFMAN_EMIT_SYMBOL, 198),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 198),
+ (3, HUFFMAN_EMIT_SYMBOL, 228),
+ (6, HUFFMAN_EMIT_SYMBOL, 228),
+ (10, HUFFMAN_EMIT_SYMBOL, 228),
+ (15, HUFFMAN_EMIT_SYMBOL, 228),
+ (24, HUFFMAN_EMIT_SYMBOL, 228),
+ (31, HUFFMAN_EMIT_SYMBOL, 228),
+ (41, HUFFMAN_EMIT_SYMBOL, 228),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 228),
+
+ # Node 145
+ (3, HUFFMAN_EMIT_SYMBOL, 232),
+ (6, HUFFMAN_EMIT_SYMBOL, 232),
+ (10, HUFFMAN_EMIT_SYMBOL, 232),
+ (15, HUFFMAN_EMIT_SYMBOL, 232),
+ (24, HUFFMAN_EMIT_SYMBOL, 232),
+ (31, HUFFMAN_EMIT_SYMBOL, 232),
+ (41, HUFFMAN_EMIT_SYMBOL, 232),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 232),
+ (3, HUFFMAN_EMIT_SYMBOL, 233),
+ (6, HUFFMAN_EMIT_SYMBOL, 233),
+ (10, HUFFMAN_EMIT_SYMBOL, 233),
+ (15, HUFFMAN_EMIT_SYMBOL, 233),
+ (24, HUFFMAN_EMIT_SYMBOL, 233),
+ (31, HUFFMAN_EMIT_SYMBOL, 233),
+ (41, HUFFMAN_EMIT_SYMBOL, 233),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 233),
+
+ # Node 146
+ (1, HUFFMAN_EMIT_SYMBOL, 1),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 1),
+ (1, HUFFMAN_EMIT_SYMBOL, 135),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 135),
+ (1, HUFFMAN_EMIT_SYMBOL, 137),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 137),
+ (1, HUFFMAN_EMIT_SYMBOL, 138),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 138),
+ (1, HUFFMAN_EMIT_SYMBOL, 139),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 139),
+ (1, HUFFMAN_EMIT_SYMBOL, 140),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 140),
+ (1, HUFFMAN_EMIT_SYMBOL, 141),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 141),
+ (1, HUFFMAN_EMIT_SYMBOL, 143),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 143),
+
+ # Node 147
+ (2, HUFFMAN_EMIT_SYMBOL, 1),
+ (9, HUFFMAN_EMIT_SYMBOL, 1),
+ (23, HUFFMAN_EMIT_SYMBOL, 1),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 1),
+ (2, HUFFMAN_EMIT_SYMBOL, 135),
+ (9, HUFFMAN_EMIT_SYMBOL, 135),
+ (23, HUFFMAN_EMIT_SYMBOL, 135),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 135),
+ (2, HUFFMAN_EMIT_SYMBOL, 137),
+ (9, HUFFMAN_EMIT_SYMBOL, 137),
+ (23, HUFFMAN_EMIT_SYMBOL, 137),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 137),
+ (2, HUFFMAN_EMIT_SYMBOL, 138),
+ (9, HUFFMAN_EMIT_SYMBOL, 138),
+ (23, HUFFMAN_EMIT_SYMBOL, 138),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 138),
+
+ # Node 148
+ (3, HUFFMAN_EMIT_SYMBOL, 1),
+ (6, HUFFMAN_EMIT_SYMBOL, 1),
+ (10, HUFFMAN_EMIT_SYMBOL, 1),
+ (15, HUFFMAN_EMIT_SYMBOL, 1),
+ (24, HUFFMAN_EMIT_SYMBOL, 1),
+ (31, HUFFMAN_EMIT_SYMBOL, 1),
+ (41, HUFFMAN_EMIT_SYMBOL, 1),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 1),
+ (3, HUFFMAN_EMIT_SYMBOL, 135),
+ (6, HUFFMAN_EMIT_SYMBOL, 135),
+ (10, HUFFMAN_EMIT_SYMBOL, 135),
+ (15, HUFFMAN_EMIT_SYMBOL, 135),
+ (24, HUFFMAN_EMIT_SYMBOL, 135),
+ (31, HUFFMAN_EMIT_SYMBOL, 135),
+ (41, HUFFMAN_EMIT_SYMBOL, 135),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 135),
+
+ # Node 149
+ (3, HUFFMAN_EMIT_SYMBOL, 137),
+ (6, HUFFMAN_EMIT_SYMBOL, 137),
+ (10, HUFFMAN_EMIT_SYMBOL, 137),
+ (15, HUFFMAN_EMIT_SYMBOL, 137),
+ (24, HUFFMAN_EMIT_SYMBOL, 137),
+ (31, HUFFMAN_EMIT_SYMBOL, 137),
+ (41, HUFFMAN_EMIT_SYMBOL, 137),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 137),
+ (3, HUFFMAN_EMIT_SYMBOL, 138),
+ (6, HUFFMAN_EMIT_SYMBOL, 138),
+ (10, HUFFMAN_EMIT_SYMBOL, 138),
+ (15, HUFFMAN_EMIT_SYMBOL, 138),
+ (24, HUFFMAN_EMIT_SYMBOL, 138),
+ (31, HUFFMAN_EMIT_SYMBOL, 138),
+ (41, HUFFMAN_EMIT_SYMBOL, 138),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 138),
+
+ # Node 150
+ (2, HUFFMAN_EMIT_SYMBOL, 139),
+ (9, HUFFMAN_EMIT_SYMBOL, 139),
+ (23, HUFFMAN_EMIT_SYMBOL, 139),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 139),
+ (2, HUFFMAN_EMIT_SYMBOL, 140),
+ (9, HUFFMAN_EMIT_SYMBOL, 140),
+ (23, HUFFMAN_EMIT_SYMBOL, 140),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 140),
+ (2, HUFFMAN_EMIT_SYMBOL, 141),
+ (9, HUFFMAN_EMIT_SYMBOL, 141),
+ (23, HUFFMAN_EMIT_SYMBOL, 141),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 141),
+ (2, HUFFMAN_EMIT_SYMBOL, 143),
+ (9, HUFFMAN_EMIT_SYMBOL, 143),
+ (23, HUFFMAN_EMIT_SYMBOL, 143),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 143),
+
+ # Node 151
+ (3, HUFFMAN_EMIT_SYMBOL, 139),
+ (6, HUFFMAN_EMIT_SYMBOL, 139),
+ (10, HUFFMAN_EMIT_SYMBOL, 139),
+ (15, HUFFMAN_EMIT_SYMBOL, 139),
+ (24, HUFFMAN_EMIT_SYMBOL, 139),
+ (31, HUFFMAN_EMIT_SYMBOL, 139),
+ (41, HUFFMAN_EMIT_SYMBOL, 139),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 139),
+ (3, HUFFMAN_EMIT_SYMBOL, 140),
+ (6, HUFFMAN_EMIT_SYMBOL, 140),
+ (10, HUFFMAN_EMIT_SYMBOL, 140),
+ (15, HUFFMAN_EMIT_SYMBOL, 140),
+ (24, HUFFMAN_EMIT_SYMBOL, 140),
+ (31, HUFFMAN_EMIT_SYMBOL, 140),
+ (41, HUFFMAN_EMIT_SYMBOL, 140),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 140),
+
+ # Node 152
+ (3, HUFFMAN_EMIT_SYMBOL, 141),
+ (6, HUFFMAN_EMIT_SYMBOL, 141),
+ (10, HUFFMAN_EMIT_SYMBOL, 141),
+ (15, HUFFMAN_EMIT_SYMBOL, 141),
+ (24, HUFFMAN_EMIT_SYMBOL, 141),
+ (31, HUFFMAN_EMIT_SYMBOL, 141),
+ (41, HUFFMAN_EMIT_SYMBOL, 141),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 141),
+ (3, HUFFMAN_EMIT_SYMBOL, 143),
+ (6, HUFFMAN_EMIT_SYMBOL, 143),
+ (10, HUFFMAN_EMIT_SYMBOL, 143),
+ (15, HUFFMAN_EMIT_SYMBOL, 143),
+ (24, HUFFMAN_EMIT_SYMBOL, 143),
+ (31, HUFFMAN_EMIT_SYMBOL, 143),
+ (41, HUFFMAN_EMIT_SYMBOL, 143),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 143),
+
+ # Node 153
+ (157, 0, 0),
+ (158, 0, 0),
+ (160, 0, 0),
+ (161, 0, 0),
+ (164, 0, 0),
+ (165, 0, 0),
+ (167, 0, 0),
+ (168, 0, 0),
+ (172, 0, 0),
+ (173, 0, 0),
+ (175, 0, 0),
+ (177, 0, 0),
+ (182, 0, 0),
+ (185, 0, 0),
+ (191, 0, 0),
+ (207, 0, 0),
+
+ # Node 154
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 147),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 149),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 150),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 151),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 152),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 155),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 157),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 158),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 165),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 166),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 168),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 174),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 175),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 180),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 182),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 183),
+
+ # Node 155
+ (1, HUFFMAN_EMIT_SYMBOL, 147),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 147),
+ (1, HUFFMAN_EMIT_SYMBOL, 149),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 149),
+ (1, HUFFMAN_EMIT_SYMBOL, 150),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 150),
+ (1, HUFFMAN_EMIT_SYMBOL, 151),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 151),
+ (1, HUFFMAN_EMIT_SYMBOL, 152),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 152),
+ (1, HUFFMAN_EMIT_SYMBOL, 155),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 155),
+ (1, HUFFMAN_EMIT_SYMBOL, 157),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 157),
+ (1, HUFFMAN_EMIT_SYMBOL, 158),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 158),
+
+ # Node 156
+ (2, HUFFMAN_EMIT_SYMBOL, 147),
+ (9, HUFFMAN_EMIT_SYMBOL, 147),
+ (23, HUFFMAN_EMIT_SYMBOL, 147),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 147),
+ (2, HUFFMAN_EMIT_SYMBOL, 149),
+ (9, HUFFMAN_EMIT_SYMBOL, 149),
+ (23, HUFFMAN_EMIT_SYMBOL, 149),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 149),
+ (2, HUFFMAN_EMIT_SYMBOL, 150),
+ (9, HUFFMAN_EMIT_SYMBOL, 150),
+ (23, HUFFMAN_EMIT_SYMBOL, 150),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 150),
+ (2, HUFFMAN_EMIT_SYMBOL, 151),
+ (9, HUFFMAN_EMIT_SYMBOL, 151),
+ (23, HUFFMAN_EMIT_SYMBOL, 151),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 151),
+
+ # Node 157
+ (3, HUFFMAN_EMIT_SYMBOL, 147),
+ (6, HUFFMAN_EMIT_SYMBOL, 147),
+ (10, HUFFMAN_EMIT_SYMBOL, 147),
+ (15, HUFFMAN_EMIT_SYMBOL, 147),
+ (24, HUFFMAN_EMIT_SYMBOL, 147),
+ (31, HUFFMAN_EMIT_SYMBOL, 147),
+ (41, HUFFMAN_EMIT_SYMBOL, 147),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 147),
+ (3, HUFFMAN_EMIT_SYMBOL, 149),
+ (6, HUFFMAN_EMIT_SYMBOL, 149),
+ (10, HUFFMAN_EMIT_SYMBOL, 149),
+ (15, HUFFMAN_EMIT_SYMBOL, 149),
+ (24, HUFFMAN_EMIT_SYMBOL, 149),
+ (31, HUFFMAN_EMIT_SYMBOL, 149),
+ (41, HUFFMAN_EMIT_SYMBOL, 149),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 149),
+
+ # Node 158
+ (3, HUFFMAN_EMIT_SYMBOL, 150),
+ (6, HUFFMAN_EMIT_SYMBOL, 150),
+ (10, HUFFMAN_EMIT_SYMBOL, 150),
+ (15, HUFFMAN_EMIT_SYMBOL, 150),
+ (24, HUFFMAN_EMIT_SYMBOL, 150),
+ (31, HUFFMAN_EMIT_SYMBOL, 150),
+ (41, HUFFMAN_EMIT_SYMBOL, 150),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 150),
+ (3, HUFFMAN_EMIT_SYMBOL, 151),
+ (6, HUFFMAN_EMIT_SYMBOL, 151),
+ (10, HUFFMAN_EMIT_SYMBOL, 151),
+ (15, HUFFMAN_EMIT_SYMBOL, 151),
+ (24, HUFFMAN_EMIT_SYMBOL, 151),
+ (31, HUFFMAN_EMIT_SYMBOL, 151),
+ (41, HUFFMAN_EMIT_SYMBOL, 151),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 151),
+
+ # Node 159
+ (2, HUFFMAN_EMIT_SYMBOL, 152),
+ (9, HUFFMAN_EMIT_SYMBOL, 152),
+ (23, HUFFMAN_EMIT_SYMBOL, 152),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 152),
+ (2, HUFFMAN_EMIT_SYMBOL, 155),
+ (9, HUFFMAN_EMIT_SYMBOL, 155),
+ (23, HUFFMAN_EMIT_SYMBOL, 155),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 155),
+ (2, HUFFMAN_EMIT_SYMBOL, 157),
+ (9, HUFFMAN_EMIT_SYMBOL, 157),
+ (23, HUFFMAN_EMIT_SYMBOL, 157),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 157),
+ (2, HUFFMAN_EMIT_SYMBOL, 158),
+ (9, HUFFMAN_EMIT_SYMBOL, 158),
+ (23, HUFFMAN_EMIT_SYMBOL, 158),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 158),
+
+ # Node 160
+ (3, HUFFMAN_EMIT_SYMBOL, 152),
+ (6, HUFFMAN_EMIT_SYMBOL, 152),
+ (10, HUFFMAN_EMIT_SYMBOL, 152),
+ (15, HUFFMAN_EMIT_SYMBOL, 152),
+ (24, HUFFMAN_EMIT_SYMBOL, 152),
+ (31, HUFFMAN_EMIT_SYMBOL, 152),
+ (41, HUFFMAN_EMIT_SYMBOL, 152),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 152),
+ (3, HUFFMAN_EMIT_SYMBOL, 155),
+ (6, HUFFMAN_EMIT_SYMBOL, 155),
+ (10, HUFFMAN_EMIT_SYMBOL, 155),
+ (15, HUFFMAN_EMIT_SYMBOL, 155),
+ (24, HUFFMAN_EMIT_SYMBOL, 155),
+ (31, HUFFMAN_EMIT_SYMBOL, 155),
+ (41, HUFFMAN_EMIT_SYMBOL, 155),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 155),
+
+ # Node 161
+ (3, HUFFMAN_EMIT_SYMBOL, 157),
+ (6, HUFFMAN_EMIT_SYMBOL, 157),
+ (10, HUFFMAN_EMIT_SYMBOL, 157),
+ (15, HUFFMAN_EMIT_SYMBOL, 157),
+ (24, HUFFMAN_EMIT_SYMBOL, 157),
+ (31, HUFFMAN_EMIT_SYMBOL, 157),
+ (41, HUFFMAN_EMIT_SYMBOL, 157),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 157),
+ (3, HUFFMAN_EMIT_SYMBOL, 158),
+ (6, HUFFMAN_EMIT_SYMBOL, 158),
+ (10, HUFFMAN_EMIT_SYMBOL, 158),
+ (15, HUFFMAN_EMIT_SYMBOL, 158),
+ (24, HUFFMAN_EMIT_SYMBOL, 158),
+ (31, HUFFMAN_EMIT_SYMBOL, 158),
+ (41, HUFFMAN_EMIT_SYMBOL, 158),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 158),
+
+ # Node 162
+ (1, HUFFMAN_EMIT_SYMBOL, 165),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 165),
+ (1, HUFFMAN_EMIT_SYMBOL, 166),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 166),
+ (1, HUFFMAN_EMIT_SYMBOL, 168),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 168),
+ (1, HUFFMAN_EMIT_SYMBOL, 174),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 174),
+ (1, HUFFMAN_EMIT_SYMBOL, 175),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 175),
+ (1, HUFFMAN_EMIT_SYMBOL, 180),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 180),
+ (1, HUFFMAN_EMIT_SYMBOL, 182),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 182),
+ (1, HUFFMAN_EMIT_SYMBOL, 183),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 183),
+
+ # Node 163
+ (2, HUFFMAN_EMIT_SYMBOL, 165),
+ (9, HUFFMAN_EMIT_SYMBOL, 165),
+ (23, HUFFMAN_EMIT_SYMBOL, 165),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 165),
+ (2, HUFFMAN_EMIT_SYMBOL, 166),
+ (9, HUFFMAN_EMIT_SYMBOL, 166),
+ (23, HUFFMAN_EMIT_SYMBOL, 166),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 166),
+ (2, HUFFMAN_EMIT_SYMBOL, 168),
+ (9, HUFFMAN_EMIT_SYMBOL, 168),
+ (23, HUFFMAN_EMIT_SYMBOL, 168),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 168),
+ (2, HUFFMAN_EMIT_SYMBOL, 174),
+ (9, HUFFMAN_EMIT_SYMBOL, 174),
+ (23, HUFFMAN_EMIT_SYMBOL, 174),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 174),
+
+ # Node 164
+ (3, HUFFMAN_EMIT_SYMBOL, 165),
+ (6, HUFFMAN_EMIT_SYMBOL, 165),
+ (10, HUFFMAN_EMIT_SYMBOL, 165),
+ (15, HUFFMAN_EMIT_SYMBOL, 165),
+ (24, HUFFMAN_EMIT_SYMBOL, 165),
+ (31, HUFFMAN_EMIT_SYMBOL, 165),
+ (41, HUFFMAN_EMIT_SYMBOL, 165),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 165),
+ (3, HUFFMAN_EMIT_SYMBOL, 166),
+ (6, HUFFMAN_EMIT_SYMBOL, 166),
+ (10, HUFFMAN_EMIT_SYMBOL, 166),
+ (15, HUFFMAN_EMIT_SYMBOL, 166),
+ (24, HUFFMAN_EMIT_SYMBOL, 166),
+ (31, HUFFMAN_EMIT_SYMBOL, 166),
+ (41, HUFFMAN_EMIT_SYMBOL, 166),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 166),
+
+ # Node 165
+ (3, HUFFMAN_EMIT_SYMBOL, 168),
+ (6, HUFFMAN_EMIT_SYMBOL, 168),
+ (10, HUFFMAN_EMIT_SYMBOL, 168),
+ (15, HUFFMAN_EMIT_SYMBOL, 168),
+ (24, HUFFMAN_EMIT_SYMBOL, 168),
+ (31, HUFFMAN_EMIT_SYMBOL, 168),
+ (41, HUFFMAN_EMIT_SYMBOL, 168),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 168),
+ (3, HUFFMAN_EMIT_SYMBOL, 174),
+ (6, HUFFMAN_EMIT_SYMBOL, 174),
+ (10, HUFFMAN_EMIT_SYMBOL, 174),
+ (15, HUFFMAN_EMIT_SYMBOL, 174),
+ (24, HUFFMAN_EMIT_SYMBOL, 174),
+ (31, HUFFMAN_EMIT_SYMBOL, 174),
+ (41, HUFFMAN_EMIT_SYMBOL, 174),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 174),
+
+ # Node 166
+ (2, HUFFMAN_EMIT_SYMBOL, 175),
+ (9, HUFFMAN_EMIT_SYMBOL, 175),
+ (23, HUFFMAN_EMIT_SYMBOL, 175),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 175),
+ (2, HUFFMAN_EMIT_SYMBOL, 180),
+ (9, HUFFMAN_EMIT_SYMBOL, 180),
+ (23, HUFFMAN_EMIT_SYMBOL, 180),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 180),
+ (2, HUFFMAN_EMIT_SYMBOL, 182),
+ (9, HUFFMAN_EMIT_SYMBOL, 182),
+ (23, HUFFMAN_EMIT_SYMBOL, 182),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 182),
+ (2, HUFFMAN_EMIT_SYMBOL, 183),
+ (9, HUFFMAN_EMIT_SYMBOL, 183),
+ (23, HUFFMAN_EMIT_SYMBOL, 183),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 183),
+
+ # Node 167
+ (3, HUFFMAN_EMIT_SYMBOL, 175),
+ (6, HUFFMAN_EMIT_SYMBOL, 175),
+ (10, HUFFMAN_EMIT_SYMBOL, 175),
+ (15, HUFFMAN_EMIT_SYMBOL, 175),
+ (24, HUFFMAN_EMIT_SYMBOL, 175),
+ (31, HUFFMAN_EMIT_SYMBOL, 175),
+ (41, HUFFMAN_EMIT_SYMBOL, 175),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 175),
+ (3, HUFFMAN_EMIT_SYMBOL, 180),
+ (6, HUFFMAN_EMIT_SYMBOL, 180),
+ (10, HUFFMAN_EMIT_SYMBOL, 180),
+ (15, HUFFMAN_EMIT_SYMBOL, 180),
+ (24, HUFFMAN_EMIT_SYMBOL, 180),
+ (31, HUFFMAN_EMIT_SYMBOL, 180),
+ (41, HUFFMAN_EMIT_SYMBOL, 180),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 180),
+
+ # Node 168
+ (3, HUFFMAN_EMIT_SYMBOL, 182),
+ (6, HUFFMAN_EMIT_SYMBOL, 182),
+ (10, HUFFMAN_EMIT_SYMBOL, 182),
+ (15, HUFFMAN_EMIT_SYMBOL, 182),
+ (24, HUFFMAN_EMIT_SYMBOL, 182),
+ (31, HUFFMAN_EMIT_SYMBOL, 182),
+ (41, HUFFMAN_EMIT_SYMBOL, 182),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 182),
+ (3, HUFFMAN_EMIT_SYMBOL, 183),
+ (6, HUFFMAN_EMIT_SYMBOL, 183),
+ (10, HUFFMAN_EMIT_SYMBOL, 183),
+ (15, HUFFMAN_EMIT_SYMBOL, 183),
+ (24, HUFFMAN_EMIT_SYMBOL, 183),
+ (31, HUFFMAN_EMIT_SYMBOL, 183),
+ (41, HUFFMAN_EMIT_SYMBOL, 183),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 183),
+
+ # Node 169
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 188),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 191),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 197),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 231),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 239),
+ (176, 0, 0),
+ (178, 0, 0),
+ (179, 0, 0),
+ (183, 0, 0),
+ (184, 0, 0),
+ (186, 0, 0),
+ (187, 0, 0),
+ (192, 0, 0),
+ (199, 0, 0),
+ (208, 0, 0),
+ (223, 0, 0),
+
+ # Node 170
+ (1, HUFFMAN_EMIT_SYMBOL, 188),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 188),
+ (1, HUFFMAN_EMIT_SYMBOL, 191),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 191),
+ (1, HUFFMAN_EMIT_SYMBOL, 197),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 197),
+ (1, HUFFMAN_EMIT_SYMBOL, 231),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 231),
+ (1, HUFFMAN_EMIT_SYMBOL, 239),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 239),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 9),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 142),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 144),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 145),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 148),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 159),
+
+ # Node 171
+ (2, HUFFMAN_EMIT_SYMBOL, 188),
+ (9, HUFFMAN_EMIT_SYMBOL, 188),
+ (23, HUFFMAN_EMIT_SYMBOL, 188),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 188),
+ (2, HUFFMAN_EMIT_SYMBOL, 191),
+ (9, HUFFMAN_EMIT_SYMBOL, 191),
+ (23, HUFFMAN_EMIT_SYMBOL, 191),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 191),
+ (2, HUFFMAN_EMIT_SYMBOL, 197),
+ (9, HUFFMAN_EMIT_SYMBOL, 197),
+ (23, HUFFMAN_EMIT_SYMBOL, 197),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 197),
+ (2, HUFFMAN_EMIT_SYMBOL, 231),
+ (9, HUFFMAN_EMIT_SYMBOL, 231),
+ (23, HUFFMAN_EMIT_SYMBOL, 231),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 231),
+
+ # Node 172
+ (3, HUFFMAN_EMIT_SYMBOL, 188),
+ (6, HUFFMAN_EMIT_SYMBOL, 188),
+ (10, HUFFMAN_EMIT_SYMBOL, 188),
+ (15, HUFFMAN_EMIT_SYMBOL, 188),
+ (24, HUFFMAN_EMIT_SYMBOL, 188),
+ (31, HUFFMAN_EMIT_SYMBOL, 188),
+ (41, HUFFMAN_EMIT_SYMBOL, 188),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 188),
+ (3, HUFFMAN_EMIT_SYMBOL, 191),
+ (6, HUFFMAN_EMIT_SYMBOL, 191),
+ (10, HUFFMAN_EMIT_SYMBOL, 191),
+ (15, HUFFMAN_EMIT_SYMBOL, 191),
+ (24, HUFFMAN_EMIT_SYMBOL, 191),
+ (31, HUFFMAN_EMIT_SYMBOL, 191),
+ (41, HUFFMAN_EMIT_SYMBOL, 191),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 191),
+
+ # Node 173
+ (3, HUFFMAN_EMIT_SYMBOL, 197),
+ (6, HUFFMAN_EMIT_SYMBOL, 197),
+ (10, HUFFMAN_EMIT_SYMBOL, 197),
+ (15, HUFFMAN_EMIT_SYMBOL, 197),
+ (24, HUFFMAN_EMIT_SYMBOL, 197),
+ (31, HUFFMAN_EMIT_SYMBOL, 197),
+ (41, HUFFMAN_EMIT_SYMBOL, 197),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 197),
+ (3, HUFFMAN_EMIT_SYMBOL, 231),
+ (6, HUFFMAN_EMIT_SYMBOL, 231),
+ (10, HUFFMAN_EMIT_SYMBOL, 231),
+ (15, HUFFMAN_EMIT_SYMBOL, 231),
+ (24, HUFFMAN_EMIT_SYMBOL, 231),
+ (31, HUFFMAN_EMIT_SYMBOL, 231),
+ (41, HUFFMAN_EMIT_SYMBOL, 231),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 231),
+
+ # Node 174
+ (2, HUFFMAN_EMIT_SYMBOL, 239),
+ (9, HUFFMAN_EMIT_SYMBOL, 239),
+ (23, HUFFMAN_EMIT_SYMBOL, 239),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 239),
+ (1, HUFFMAN_EMIT_SYMBOL, 9),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 9),
+ (1, HUFFMAN_EMIT_SYMBOL, 142),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 142),
+ (1, HUFFMAN_EMIT_SYMBOL, 144),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 144),
+ (1, HUFFMAN_EMIT_SYMBOL, 145),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 145),
+ (1, HUFFMAN_EMIT_SYMBOL, 148),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 148),
+ (1, HUFFMAN_EMIT_SYMBOL, 159),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 159),
+
+ # Node 175
+ (3, HUFFMAN_EMIT_SYMBOL, 239),
+ (6, HUFFMAN_EMIT_SYMBOL, 239),
+ (10, HUFFMAN_EMIT_SYMBOL, 239),
+ (15, HUFFMAN_EMIT_SYMBOL, 239),
+ (24, HUFFMAN_EMIT_SYMBOL, 239),
+ (31, HUFFMAN_EMIT_SYMBOL, 239),
+ (41, HUFFMAN_EMIT_SYMBOL, 239),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 239),
+ (2, HUFFMAN_EMIT_SYMBOL, 9),
+ (9, HUFFMAN_EMIT_SYMBOL, 9),
+ (23, HUFFMAN_EMIT_SYMBOL, 9),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 9),
+ (2, HUFFMAN_EMIT_SYMBOL, 142),
+ (9, HUFFMAN_EMIT_SYMBOL, 142),
+ (23, HUFFMAN_EMIT_SYMBOL, 142),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 142),
+
+ # Node 176
+ (3, HUFFMAN_EMIT_SYMBOL, 9),
+ (6, HUFFMAN_EMIT_SYMBOL, 9),
+ (10, HUFFMAN_EMIT_SYMBOL, 9),
+ (15, HUFFMAN_EMIT_SYMBOL, 9),
+ (24, HUFFMAN_EMIT_SYMBOL, 9),
+ (31, HUFFMAN_EMIT_SYMBOL, 9),
+ (41, HUFFMAN_EMIT_SYMBOL, 9),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 9),
+ (3, HUFFMAN_EMIT_SYMBOL, 142),
+ (6, HUFFMAN_EMIT_SYMBOL, 142),
+ (10, HUFFMAN_EMIT_SYMBOL, 142),
+ (15, HUFFMAN_EMIT_SYMBOL, 142),
+ (24, HUFFMAN_EMIT_SYMBOL, 142),
+ (31, HUFFMAN_EMIT_SYMBOL, 142),
+ (41, HUFFMAN_EMIT_SYMBOL, 142),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 142),
+
+ # Node 177
+ (2, HUFFMAN_EMIT_SYMBOL, 144),
+ (9, HUFFMAN_EMIT_SYMBOL, 144),
+ (23, HUFFMAN_EMIT_SYMBOL, 144),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 144),
+ (2, HUFFMAN_EMIT_SYMBOL, 145),
+ (9, HUFFMAN_EMIT_SYMBOL, 145),
+ (23, HUFFMAN_EMIT_SYMBOL, 145),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 145),
+ (2, HUFFMAN_EMIT_SYMBOL, 148),
+ (9, HUFFMAN_EMIT_SYMBOL, 148),
+ (23, HUFFMAN_EMIT_SYMBOL, 148),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 148),
+ (2, HUFFMAN_EMIT_SYMBOL, 159),
+ (9, HUFFMAN_EMIT_SYMBOL, 159),
+ (23, HUFFMAN_EMIT_SYMBOL, 159),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 159),
+
+ # Node 178
+ (3, HUFFMAN_EMIT_SYMBOL, 144),
+ (6, HUFFMAN_EMIT_SYMBOL, 144),
+ (10, HUFFMAN_EMIT_SYMBOL, 144),
+ (15, HUFFMAN_EMIT_SYMBOL, 144),
+ (24, HUFFMAN_EMIT_SYMBOL, 144),
+ (31, HUFFMAN_EMIT_SYMBOL, 144),
+ (41, HUFFMAN_EMIT_SYMBOL, 144),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 144),
+ (3, HUFFMAN_EMIT_SYMBOL, 145),
+ (6, HUFFMAN_EMIT_SYMBOL, 145),
+ (10, HUFFMAN_EMIT_SYMBOL, 145),
+ (15, HUFFMAN_EMIT_SYMBOL, 145),
+ (24, HUFFMAN_EMIT_SYMBOL, 145),
+ (31, HUFFMAN_EMIT_SYMBOL, 145),
+ (41, HUFFMAN_EMIT_SYMBOL, 145),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 145),
+
+ # Node 179
+ (3, HUFFMAN_EMIT_SYMBOL, 148),
+ (6, HUFFMAN_EMIT_SYMBOL, 148),
+ (10, HUFFMAN_EMIT_SYMBOL, 148),
+ (15, HUFFMAN_EMIT_SYMBOL, 148),
+ (24, HUFFMAN_EMIT_SYMBOL, 148),
+ (31, HUFFMAN_EMIT_SYMBOL, 148),
+ (41, HUFFMAN_EMIT_SYMBOL, 148),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 148),
+ (3, HUFFMAN_EMIT_SYMBOL, 159),
+ (6, HUFFMAN_EMIT_SYMBOL, 159),
+ (10, HUFFMAN_EMIT_SYMBOL, 159),
+ (15, HUFFMAN_EMIT_SYMBOL, 159),
+ (24, HUFFMAN_EMIT_SYMBOL, 159),
+ (31, HUFFMAN_EMIT_SYMBOL, 159),
+ (41, HUFFMAN_EMIT_SYMBOL, 159),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 159),
+
+ # Node 180
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 171),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 206),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 215),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 225),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 236),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 237),
+ (188, 0, 0),
+ (189, 0, 0),
+ (193, 0, 0),
+ (196, 0, 0),
+ (200, 0, 0),
+ (203, 0, 0),
+ (209, 0, 0),
+ (216, 0, 0),
+ (224, 0, 0),
+ (238, 0, 0),
+
+ # Node 181
+ (1, HUFFMAN_EMIT_SYMBOL, 171),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 171),
+ (1, HUFFMAN_EMIT_SYMBOL, 206),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 206),
+ (1, HUFFMAN_EMIT_SYMBOL, 215),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 215),
+ (1, HUFFMAN_EMIT_SYMBOL, 225),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 225),
+ (1, HUFFMAN_EMIT_SYMBOL, 236),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 236),
+ (1, HUFFMAN_EMIT_SYMBOL, 237),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 237),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 199),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 207),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 234),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 235),
+
+ # Node 182
+ (2, HUFFMAN_EMIT_SYMBOL, 171),
+ (9, HUFFMAN_EMIT_SYMBOL, 171),
+ (23, HUFFMAN_EMIT_SYMBOL, 171),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 171),
+ (2, HUFFMAN_EMIT_SYMBOL, 206),
+ (9, HUFFMAN_EMIT_SYMBOL, 206),
+ (23, HUFFMAN_EMIT_SYMBOL, 206),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 206),
+ (2, HUFFMAN_EMIT_SYMBOL, 215),
+ (9, HUFFMAN_EMIT_SYMBOL, 215),
+ (23, HUFFMAN_EMIT_SYMBOL, 215),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 215),
+ (2, HUFFMAN_EMIT_SYMBOL, 225),
+ (9, HUFFMAN_EMIT_SYMBOL, 225),
+ (23, HUFFMAN_EMIT_SYMBOL, 225),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 225),
+
+ # Node 183
+ (3, HUFFMAN_EMIT_SYMBOL, 171),
+ (6, HUFFMAN_EMIT_SYMBOL, 171),
+ (10, HUFFMAN_EMIT_SYMBOL, 171),
+ (15, HUFFMAN_EMIT_SYMBOL, 171),
+ (24, HUFFMAN_EMIT_SYMBOL, 171),
+ (31, HUFFMAN_EMIT_SYMBOL, 171),
+ (41, HUFFMAN_EMIT_SYMBOL, 171),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 171),
+ (3, HUFFMAN_EMIT_SYMBOL, 206),
+ (6, HUFFMAN_EMIT_SYMBOL, 206),
+ (10, HUFFMAN_EMIT_SYMBOL, 206),
+ (15, HUFFMAN_EMIT_SYMBOL, 206),
+ (24, HUFFMAN_EMIT_SYMBOL, 206),
+ (31, HUFFMAN_EMIT_SYMBOL, 206),
+ (41, HUFFMAN_EMIT_SYMBOL, 206),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 206),
+
+ # Node 184
+ (3, HUFFMAN_EMIT_SYMBOL, 215),
+ (6, HUFFMAN_EMIT_SYMBOL, 215),
+ (10, HUFFMAN_EMIT_SYMBOL, 215),
+ (15, HUFFMAN_EMIT_SYMBOL, 215),
+ (24, HUFFMAN_EMIT_SYMBOL, 215),
+ (31, HUFFMAN_EMIT_SYMBOL, 215),
+ (41, HUFFMAN_EMIT_SYMBOL, 215),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 215),
+ (3, HUFFMAN_EMIT_SYMBOL, 225),
+ (6, HUFFMAN_EMIT_SYMBOL, 225),
+ (10, HUFFMAN_EMIT_SYMBOL, 225),
+ (15, HUFFMAN_EMIT_SYMBOL, 225),
+ (24, HUFFMAN_EMIT_SYMBOL, 225),
+ (31, HUFFMAN_EMIT_SYMBOL, 225),
+ (41, HUFFMAN_EMIT_SYMBOL, 225),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 225),
+
+ # Node 185
+ (2, HUFFMAN_EMIT_SYMBOL, 236),
+ (9, HUFFMAN_EMIT_SYMBOL, 236),
+ (23, HUFFMAN_EMIT_SYMBOL, 236),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 236),
+ (2, HUFFMAN_EMIT_SYMBOL, 237),
+ (9, HUFFMAN_EMIT_SYMBOL, 237),
+ (23, HUFFMAN_EMIT_SYMBOL, 237),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 237),
+ (1, HUFFMAN_EMIT_SYMBOL, 199),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 199),
+ (1, HUFFMAN_EMIT_SYMBOL, 207),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 207),
+ (1, HUFFMAN_EMIT_SYMBOL, 234),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 234),
+ (1, HUFFMAN_EMIT_SYMBOL, 235),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 235),
+
+ # Node 186
+ (3, HUFFMAN_EMIT_SYMBOL, 236),
+ (6, HUFFMAN_EMIT_SYMBOL, 236),
+ (10, HUFFMAN_EMIT_SYMBOL, 236),
+ (15, HUFFMAN_EMIT_SYMBOL, 236),
+ (24, HUFFMAN_EMIT_SYMBOL, 236),
+ (31, HUFFMAN_EMIT_SYMBOL, 236),
+ (41, HUFFMAN_EMIT_SYMBOL, 236),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 236),
+ (3, HUFFMAN_EMIT_SYMBOL, 237),
+ (6, HUFFMAN_EMIT_SYMBOL, 237),
+ (10, HUFFMAN_EMIT_SYMBOL, 237),
+ (15, HUFFMAN_EMIT_SYMBOL, 237),
+ (24, HUFFMAN_EMIT_SYMBOL, 237),
+ (31, HUFFMAN_EMIT_SYMBOL, 237),
+ (41, HUFFMAN_EMIT_SYMBOL, 237),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 237),
+
+ # Node 187
+ (2, HUFFMAN_EMIT_SYMBOL, 199),
+ (9, HUFFMAN_EMIT_SYMBOL, 199),
+ (23, HUFFMAN_EMIT_SYMBOL, 199),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 199),
+ (2, HUFFMAN_EMIT_SYMBOL, 207),
+ (9, HUFFMAN_EMIT_SYMBOL, 207),
+ (23, HUFFMAN_EMIT_SYMBOL, 207),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 207),
+ (2, HUFFMAN_EMIT_SYMBOL, 234),
+ (9, HUFFMAN_EMIT_SYMBOL, 234),
+ (23, HUFFMAN_EMIT_SYMBOL, 234),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 234),
+ (2, HUFFMAN_EMIT_SYMBOL, 235),
+ (9, HUFFMAN_EMIT_SYMBOL, 235),
+ (23, HUFFMAN_EMIT_SYMBOL, 235),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 235),
+
+ # Node 188
+ (3, HUFFMAN_EMIT_SYMBOL, 199),
+ (6, HUFFMAN_EMIT_SYMBOL, 199),
+ (10, HUFFMAN_EMIT_SYMBOL, 199),
+ (15, HUFFMAN_EMIT_SYMBOL, 199),
+ (24, HUFFMAN_EMIT_SYMBOL, 199),
+ (31, HUFFMAN_EMIT_SYMBOL, 199),
+ (41, HUFFMAN_EMIT_SYMBOL, 199),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 199),
+ (3, HUFFMAN_EMIT_SYMBOL, 207),
+ (6, HUFFMAN_EMIT_SYMBOL, 207),
+ (10, HUFFMAN_EMIT_SYMBOL, 207),
+ (15, HUFFMAN_EMIT_SYMBOL, 207),
+ (24, HUFFMAN_EMIT_SYMBOL, 207),
+ (31, HUFFMAN_EMIT_SYMBOL, 207),
+ (41, HUFFMAN_EMIT_SYMBOL, 207),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 207),
+
+ # Node 189
+ (3, HUFFMAN_EMIT_SYMBOL, 234),
+ (6, HUFFMAN_EMIT_SYMBOL, 234),
+ (10, HUFFMAN_EMIT_SYMBOL, 234),
+ (15, HUFFMAN_EMIT_SYMBOL, 234),
+ (24, HUFFMAN_EMIT_SYMBOL, 234),
+ (31, HUFFMAN_EMIT_SYMBOL, 234),
+ (41, HUFFMAN_EMIT_SYMBOL, 234),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 234),
+ (3, HUFFMAN_EMIT_SYMBOL, 235),
+ (6, HUFFMAN_EMIT_SYMBOL, 235),
+ (10, HUFFMAN_EMIT_SYMBOL, 235),
+ (15, HUFFMAN_EMIT_SYMBOL, 235),
+ (24, HUFFMAN_EMIT_SYMBOL, 235),
+ (31, HUFFMAN_EMIT_SYMBOL, 235),
+ (41, HUFFMAN_EMIT_SYMBOL, 235),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 235),
+
+ # Node 190
+ (194, 0, 0),
+ (195, 0, 0),
+ (197, 0, 0),
+ (198, 0, 0),
+ (201, 0, 0),
+ (202, 0, 0),
+ (204, 0, 0),
+ (205, 0, 0),
+ (210, 0, 0),
+ (213, 0, 0),
+ (217, 0, 0),
+ (220, 0, 0),
+ (225, 0, 0),
+ (231, 0, 0),
+ (239, 0, 0),
+ (246, 0, 0),
+
+ # Node 191
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 192),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 193),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 200),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 201),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 202),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 205),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 210),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 213),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 218),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 219),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 238),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 240),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 242),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 243),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 255),
+ (206, 0, 0),
+
+ # Node 192
+ (1, HUFFMAN_EMIT_SYMBOL, 192),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 192),
+ (1, HUFFMAN_EMIT_SYMBOL, 193),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 193),
+ (1, HUFFMAN_EMIT_SYMBOL, 200),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 200),
+ (1, HUFFMAN_EMIT_SYMBOL, 201),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 201),
+ (1, HUFFMAN_EMIT_SYMBOL, 202),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 202),
+ (1, HUFFMAN_EMIT_SYMBOL, 205),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 205),
+ (1, HUFFMAN_EMIT_SYMBOL, 210),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 210),
+ (1, HUFFMAN_EMIT_SYMBOL, 213),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 213),
+
+ # Node 193
+ (2, HUFFMAN_EMIT_SYMBOL, 192),
+ (9, HUFFMAN_EMIT_SYMBOL, 192),
+ (23, HUFFMAN_EMIT_SYMBOL, 192),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 192),
+ (2, HUFFMAN_EMIT_SYMBOL, 193),
+ (9, HUFFMAN_EMIT_SYMBOL, 193),
+ (23, HUFFMAN_EMIT_SYMBOL, 193),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 193),
+ (2, HUFFMAN_EMIT_SYMBOL, 200),
+ (9, HUFFMAN_EMIT_SYMBOL, 200),
+ (23, HUFFMAN_EMIT_SYMBOL, 200),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 200),
+ (2, HUFFMAN_EMIT_SYMBOL, 201),
+ (9, HUFFMAN_EMIT_SYMBOL, 201),
+ (23, HUFFMAN_EMIT_SYMBOL, 201),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 201),
+
+ # Node 194
+ (3, HUFFMAN_EMIT_SYMBOL, 192),
+ (6, HUFFMAN_EMIT_SYMBOL, 192),
+ (10, HUFFMAN_EMIT_SYMBOL, 192),
+ (15, HUFFMAN_EMIT_SYMBOL, 192),
+ (24, HUFFMAN_EMIT_SYMBOL, 192),
+ (31, HUFFMAN_EMIT_SYMBOL, 192),
+ (41, HUFFMAN_EMIT_SYMBOL, 192),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 192),
+ (3, HUFFMAN_EMIT_SYMBOL, 193),
+ (6, HUFFMAN_EMIT_SYMBOL, 193),
+ (10, HUFFMAN_EMIT_SYMBOL, 193),
+ (15, HUFFMAN_EMIT_SYMBOL, 193),
+ (24, HUFFMAN_EMIT_SYMBOL, 193),
+ (31, HUFFMAN_EMIT_SYMBOL, 193),
+ (41, HUFFMAN_EMIT_SYMBOL, 193),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 193),
+
+ # Node 195
+ (3, HUFFMAN_EMIT_SYMBOL, 200),
+ (6, HUFFMAN_EMIT_SYMBOL, 200),
+ (10, HUFFMAN_EMIT_SYMBOL, 200),
+ (15, HUFFMAN_EMIT_SYMBOL, 200),
+ (24, HUFFMAN_EMIT_SYMBOL, 200),
+ (31, HUFFMAN_EMIT_SYMBOL, 200),
+ (41, HUFFMAN_EMIT_SYMBOL, 200),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 200),
+ (3, HUFFMAN_EMIT_SYMBOL, 201),
+ (6, HUFFMAN_EMIT_SYMBOL, 201),
+ (10, HUFFMAN_EMIT_SYMBOL, 201),
+ (15, HUFFMAN_EMIT_SYMBOL, 201),
+ (24, HUFFMAN_EMIT_SYMBOL, 201),
+ (31, HUFFMAN_EMIT_SYMBOL, 201),
+ (41, HUFFMAN_EMIT_SYMBOL, 201),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 201),
+
+ # Node 196
+ (2, HUFFMAN_EMIT_SYMBOL, 202),
+ (9, HUFFMAN_EMIT_SYMBOL, 202),
+ (23, HUFFMAN_EMIT_SYMBOL, 202),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 202),
+ (2, HUFFMAN_EMIT_SYMBOL, 205),
+ (9, HUFFMAN_EMIT_SYMBOL, 205),
+ (23, HUFFMAN_EMIT_SYMBOL, 205),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 205),
+ (2, HUFFMAN_EMIT_SYMBOL, 210),
+ (9, HUFFMAN_EMIT_SYMBOL, 210),
+ (23, HUFFMAN_EMIT_SYMBOL, 210),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 210),
+ (2, HUFFMAN_EMIT_SYMBOL, 213),
+ (9, HUFFMAN_EMIT_SYMBOL, 213),
+ (23, HUFFMAN_EMIT_SYMBOL, 213),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 213),
+
+ # Node 197
+ (3, HUFFMAN_EMIT_SYMBOL, 202),
+ (6, HUFFMAN_EMIT_SYMBOL, 202),
+ (10, HUFFMAN_EMIT_SYMBOL, 202),
+ (15, HUFFMAN_EMIT_SYMBOL, 202),
+ (24, HUFFMAN_EMIT_SYMBOL, 202),
+ (31, HUFFMAN_EMIT_SYMBOL, 202),
+ (41, HUFFMAN_EMIT_SYMBOL, 202),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 202),
+ (3, HUFFMAN_EMIT_SYMBOL, 205),
+ (6, HUFFMAN_EMIT_SYMBOL, 205),
+ (10, HUFFMAN_EMIT_SYMBOL, 205),
+ (15, HUFFMAN_EMIT_SYMBOL, 205),
+ (24, HUFFMAN_EMIT_SYMBOL, 205),
+ (31, HUFFMAN_EMIT_SYMBOL, 205),
+ (41, HUFFMAN_EMIT_SYMBOL, 205),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 205),
+
+ # Node 198
+ (3, HUFFMAN_EMIT_SYMBOL, 210),
+ (6, HUFFMAN_EMIT_SYMBOL, 210),
+ (10, HUFFMAN_EMIT_SYMBOL, 210),
+ (15, HUFFMAN_EMIT_SYMBOL, 210),
+ (24, HUFFMAN_EMIT_SYMBOL, 210),
+ (31, HUFFMAN_EMIT_SYMBOL, 210),
+ (41, HUFFMAN_EMIT_SYMBOL, 210),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 210),
+ (3, HUFFMAN_EMIT_SYMBOL, 213),
+ (6, HUFFMAN_EMIT_SYMBOL, 213),
+ (10, HUFFMAN_EMIT_SYMBOL, 213),
+ (15, HUFFMAN_EMIT_SYMBOL, 213),
+ (24, HUFFMAN_EMIT_SYMBOL, 213),
+ (31, HUFFMAN_EMIT_SYMBOL, 213),
+ (41, HUFFMAN_EMIT_SYMBOL, 213),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 213),
+
+ # Node 199
+ (1, HUFFMAN_EMIT_SYMBOL, 218),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 218),
+ (1, HUFFMAN_EMIT_SYMBOL, 219),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 219),
+ (1, HUFFMAN_EMIT_SYMBOL, 238),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 238),
+ (1, HUFFMAN_EMIT_SYMBOL, 240),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 240),
+ (1, HUFFMAN_EMIT_SYMBOL, 242),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 242),
+ (1, HUFFMAN_EMIT_SYMBOL, 243),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 243),
+ (1, HUFFMAN_EMIT_SYMBOL, 255),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 255),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 203),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 204),
+
+ # Node 200
+ (2, HUFFMAN_EMIT_SYMBOL, 218),
+ (9, HUFFMAN_EMIT_SYMBOL, 218),
+ (23, HUFFMAN_EMIT_SYMBOL, 218),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 218),
+ (2, HUFFMAN_EMIT_SYMBOL, 219),
+ (9, HUFFMAN_EMIT_SYMBOL, 219),
+ (23, HUFFMAN_EMIT_SYMBOL, 219),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 219),
+ (2, HUFFMAN_EMIT_SYMBOL, 238),
+ (9, HUFFMAN_EMIT_SYMBOL, 238),
+ (23, HUFFMAN_EMIT_SYMBOL, 238),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 238),
+ (2, HUFFMAN_EMIT_SYMBOL, 240),
+ (9, HUFFMAN_EMIT_SYMBOL, 240),
+ (23, HUFFMAN_EMIT_SYMBOL, 240),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 240),
+
+ # Node 201
+ (3, HUFFMAN_EMIT_SYMBOL, 218),
+ (6, HUFFMAN_EMIT_SYMBOL, 218),
+ (10, HUFFMAN_EMIT_SYMBOL, 218),
+ (15, HUFFMAN_EMIT_SYMBOL, 218),
+ (24, HUFFMAN_EMIT_SYMBOL, 218),
+ (31, HUFFMAN_EMIT_SYMBOL, 218),
+ (41, HUFFMAN_EMIT_SYMBOL, 218),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 218),
+ (3, HUFFMAN_EMIT_SYMBOL, 219),
+ (6, HUFFMAN_EMIT_SYMBOL, 219),
+ (10, HUFFMAN_EMIT_SYMBOL, 219),
+ (15, HUFFMAN_EMIT_SYMBOL, 219),
+ (24, HUFFMAN_EMIT_SYMBOL, 219),
+ (31, HUFFMAN_EMIT_SYMBOL, 219),
+ (41, HUFFMAN_EMIT_SYMBOL, 219),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 219),
+
+ # Node 202
+ (3, HUFFMAN_EMIT_SYMBOL, 238),
+ (6, HUFFMAN_EMIT_SYMBOL, 238),
+ (10, HUFFMAN_EMIT_SYMBOL, 238),
+ (15, HUFFMAN_EMIT_SYMBOL, 238),
+ (24, HUFFMAN_EMIT_SYMBOL, 238),
+ (31, HUFFMAN_EMIT_SYMBOL, 238),
+ (41, HUFFMAN_EMIT_SYMBOL, 238),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 238),
+ (3, HUFFMAN_EMIT_SYMBOL, 240),
+ (6, HUFFMAN_EMIT_SYMBOL, 240),
+ (10, HUFFMAN_EMIT_SYMBOL, 240),
+ (15, HUFFMAN_EMIT_SYMBOL, 240),
+ (24, HUFFMAN_EMIT_SYMBOL, 240),
+ (31, HUFFMAN_EMIT_SYMBOL, 240),
+ (41, HUFFMAN_EMIT_SYMBOL, 240),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 240),
+
+ # Node 203
+ (2, HUFFMAN_EMIT_SYMBOL, 242),
+ (9, HUFFMAN_EMIT_SYMBOL, 242),
+ (23, HUFFMAN_EMIT_SYMBOL, 242),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 242),
+ (2, HUFFMAN_EMIT_SYMBOL, 243),
+ (9, HUFFMAN_EMIT_SYMBOL, 243),
+ (23, HUFFMAN_EMIT_SYMBOL, 243),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 243),
+ (2, HUFFMAN_EMIT_SYMBOL, 255),
+ (9, HUFFMAN_EMIT_SYMBOL, 255),
+ (23, HUFFMAN_EMIT_SYMBOL, 255),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 255),
+ (1, HUFFMAN_EMIT_SYMBOL, 203),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 203),
+ (1, HUFFMAN_EMIT_SYMBOL, 204),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 204),
+
+ # Node 204
+ (3, HUFFMAN_EMIT_SYMBOL, 242),
+ (6, HUFFMAN_EMIT_SYMBOL, 242),
+ (10, HUFFMAN_EMIT_SYMBOL, 242),
+ (15, HUFFMAN_EMIT_SYMBOL, 242),
+ (24, HUFFMAN_EMIT_SYMBOL, 242),
+ (31, HUFFMAN_EMIT_SYMBOL, 242),
+ (41, HUFFMAN_EMIT_SYMBOL, 242),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 242),
+ (3, HUFFMAN_EMIT_SYMBOL, 243),
+ (6, HUFFMAN_EMIT_SYMBOL, 243),
+ (10, HUFFMAN_EMIT_SYMBOL, 243),
+ (15, HUFFMAN_EMIT_SYMBOL, 243),
+ (24, HUFFMAN_EMIT_SYMBOL, 243),
+ (31, HUFFMAN_EMIT_SYMBOL, 243),
+ (41, HUFFMAN_EMIT_SYMBOL, 243),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 243),
+
+ # Node 205
+ (3, HUFFMAN_EMIT_SYMBOL, 255),
+ (6, HUFFMAN_EMIT_SYMBOL, 255),
+ (10, HUFFMAN_EMIT_SYMBOL, 255),
+ (15, HUFFMAN_EMIT_SYMBOL, 255),
+ (24, HUFFMAN_EMIT_SYMBOL, 255),
+ (31, HUFFMAN_EMIT_SYMBOL, 255),
+ (41, HUFFMAN_EMIT_SYMBOL, 255),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 255),
+ (2, HUFFMAN_EMIT_SYMBOL, 203),
+ (9, HUFFMAN_EMIT_SYMBOL, 203),
+ (23, HUFFMAN_EMIT_SYMBOL, 203),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 203),
+ (2, HUFFMAN_EMIT_SYMBOL, 204),
+ (9, HUFFMAN_EMIT_SYMBOL, 204),
+ (23, HUFFMAN_EMIT_SYMBOL, 204),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 204),
+
+ # Node 206
+ (3, HUFFMAN_EMIT_SYMBOL, 203),
+ (6, HUFFMAN_EMIT_SYMBOL, 203),
+ (10, HUFFMAN_EMIT_SYMBOL, 203),
+ (15, HUFFMAN_EMIT_SYMBOL, 203),
+ (24, HUFFMAN_EMIT_SYMBOL, 203),
+ (31, HUFFMAN_EMIT_SYMBOL, 203),
+ (41, HUFFMAN_EMIT_SYMBOL, 203),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 203),
+ (3, HUFFMAN_EMIT_SYMBOL, 204),
+ (6, HUFFMAN_EMIT_SYMBOL, 204),
+ (10, HUFFMAN_EMIT_SYMBOL, 204),
+ (15, HUFFMAN_EMIT_SYMBOL, 204),
+ (24, HUFFMAN_EMIT_SYMBOL, 204),
+ (31, HUFFMAN_EMIT_SYMBOL, 204),
+ (41, HUFFMAN_EMIT_SYMBOL, 204),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 204),
+
+ # Node 207
+ (211, 0, 0),
+ (212, 0, 0),
+ (214, 0, 0),
+ (215, 0, 0),
+ (218, 0, 0),
+ (219, 0, 0),
+ (221, 0, 0),
+ (222, 0, 0),
+ (226, 0, 0),
+ (228, 0, 0),
+ (232, 0, 0),
+ (235, 0, 0),
+ (240, 0, 0),
+ (243, 0, 0),
+ (247, 0, 0),
+ (250, 0, 0),
+
+ # Node 208
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 211),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 212),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 214),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 221),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 222),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 223),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 241),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 244),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 245),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 246),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 247),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 248),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 250),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 251),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 252),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 253),
+
+ # Node 209
+ (1, HUFFMAN_EMIT_SYMBOL, 211),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 211),
+ (1, HUFFMAN_EMIT_SYMBOL, 212),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 212),
+ (1, HUFFMAN_EMIT_SYMBOL, 214),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 214),
+ (1, HUFFMAN_EMIT_SYMBOL, 221),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 221),
+ (1, HUFFMAN_EMIT_SYMBOL, 222),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 222),
+ (1, HUFFMAN_EMIT_SYMBOL, 223),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 223),
+ (1, HUFFMAN_EMIT_SYMBOL, 241),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 241),
+ (1, HUFFMAN_EMIT_SYMBOL, 244),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 244),
+
+ # Node 210
+ (2, HUFFMAN_EMIT_SYMBOL, 211),
+ (9, HUFFMAN_EMIT_SYMBOL, 211),
+ (23, HUFFMAN_EMIT_SYMBOL, 211),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 211),
+ (2, HUFFMAN_EMIT_SYMBOL, 212),
+ (9, HUFFMAN_EMIT_SYMBOL, 212),
+ (23, HUFFMAN_EMIT_SYMBOL, 212),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 212),
+ (2, HUFFMAN_EMIT_SYMBOL, 214),
+ (9, HUFFMAN_EMIT_SYMBOL, 214),
+ (23, HUFFMAN_EMIT_SYMBOL, 214),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 214),
+ (2, HUFFMAN_EMIT_SYMBOL, 221),
+ (9, HUFFMAN_EMIT_SYMBOL, 221),
+ (23, HUFFMAN_EMIT_SYMBOL, 221),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 221),
+
+ # Node 211
+ (3, HUFFMAN_EMIT_SYMBOL, 211),
+ (6, HUFFMAN_EMIT_SYMBOL, 211),
+ (10, HUFFMAN_EMIT_SYMBOL, 211),
+ (15, HUFFMAN_EMIT_SYMBOL, 211),
+ (24, HUFFMAN_EMIT_SYMBOL, 211),
+ (31, HUFFMAN_EMIT_SYMBOL, 211),
+ (41, HUFFMAN_EMIT_SYMBOL, 211),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 211),
+ (3, HUFFMAN_EMIT_SYMBOL, 212),
+ (6, HUFFMAN_EMIT_SYMBOL, 212),
+ (10, HUFFMAN_EMIT_SYMBOL, 212),
+ (15, HUFFMAN_EMIT_SYMBOL, 212),
+ (24, HUFFMAN_EMIT_SYMBOL, 212),
+ (31, HUFFMAN_EMIT_SYMBOL, 212),
+ (41, HUFFMAN_EMIT_SYMBOL, 212),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 212),
+
+ # Node 212
+ (3, HUFFMAN_EMIT_SYMBOL, 214),
+ (6, HUFFMAN_EMIT_SYMBOL, 214),
+ (10, HUFFMAN_EMIT_SYMBOL, 214),
+ (15, HUFFMAN_EMIT_SYMBOL, 214),
+ (24, HUFFMAN_EMIT_SYMBOL, 214),
+ (31, HUFFMAN_EMIT_SYMBOL, 214),
+ (41, HUFFMAN_EMIT_SYMBOL, 214),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 214),
+ (3, HUFFMAN_EMIT_SYMBOL, 221),
+ (6, HUFFMAN_EMIT_SYMBOL, 221),
+ (10, HUFFMAN_EMIT_SYMBOL, 221),
+ (15, HUFFMAN_EMIT_SYMBOL, 221),
+ (24, HUFFMAN_EMIT_SYMBOL, 221),
+ (31, HUFFMAN_EMIT_SYMBOL, 221),
+ (41, HUFFMAN_EMIT_SYMBOL, 221),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 221),
+
+ # Node 213
+ (2, HUFFMAN_EMIT_SYMBOL, 222),
+ (9, HUFFMAN_EMIT_SYMBOL, 222),
+ (23, HUFFMAN_EMIT_SYMBOL, 222),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 222),
+ (2, HUFFMAN_EMIT_SYMBOL, 223),
+ (9, HUFFMAN_EMIT_SYMBOL, 223),
+ (23, HUFFMAN_EMIT_SYMBOL, 223),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 223),
+ (2, HUFFMAN_EMIT_SYMBOL, 241),
+ (9, HUFFMAN_EMIT_SYMBOL, 241),
+ (23, HUFFMAN_EMIT_SYMBOL, 241),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 241),
+ (2, HUFFMAN_EMIT_SYMBOL, 244),
+ (9, HUFFMAN_EMIT_SYMBOL, 244),
+ (23, HUFFMAN_EMIT_SYMBOL, 244),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 244),
+
+ # Node 214
+ (3, HUFFMAN_EMIT_SYMBOL, 222),
+ (6, HUFFMAN_EMIT_SYMBOL, 222),
+ (10, HUFFMAN_EMIT_SYMBOL, 222),
+ (15, HUFFMAN_EMIT_SYMBOL, 222),
+ (24, HUFFMAN_EMIT_SYMBOL, 222),
+ (31, HUFFMAN_EMIT_SYMBOL, 222),
+ (41, HUFFMAN_EMIT_SYMBOL, 222),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 222),
+ (3, HUFFMAN_EMIT_SYMBOL, 223),
+ (6, HUFFMAN_EMIT_SYMBOL, 223),
+ (10, HUFFMAN_EMIT_SYMBOL, 223),
+ (15, HUFFMAN_EMIT_SYMBOL, 223),
+ (24, HUFFMAN_EMIT_SYMBOL, 223),
+ (31, HUFFMAN_EMIT_SYMBOL, 223),
+ (41, HUFFMAN_EMIT_SYMBOL, 223),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 223),
+
+ # Node 215
+ (3, HUFFMAN_EMIT_SYMBOL, 241),
+ (6, HUFFMAN_EMIT_SYMBOL, 241),
+ (10, HUFFMAN_EMIT_SYMBOL, 241),
+ (15, HUFFMAN_EMIT_SYMBOL, 241),
+ (24, HUFFMAN_EMIT_SYMBOL, 241),
+ (31, HUFFMAN_EMIT_SYMBOL, 241),
+ (41, HUFFMAN_EMIT_SYMBOL, 241),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 241),
+ (3, HUFFMAN_EMIT_SYMBOL, 244),
+ (6, HUFFMAN_EMIT_SYMBOL, 244),
+ (10, HUFFMAN_EMIT_SYMBOL, 244),
+ (15, HUFFMAN_EMIT_SYMBOL, 244),
+ (24, HUFFMAN_EMIT_SYMBOL, 244),
+ (31, HUFFMAN_EMIT_SYMBOL, 244),
+ (41, HUFFMAN_EMIT_SYMBOL, 244),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 244),
+
+ # Node 216
+ (1, HUFFMAN_EMIT_SYMBOL, 245),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 245),
+ (1, HUFFMAN_EMIT_SYMBOL, 246),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 246),
+ (1, HUFFMAN_EMIT_SYMBOL, 247),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 247),
+ (1, HUFFMAN_EMIT_SYMBOL, 248),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 248),
+ (1, HUFFMAN_EMIT_SYMBOL, 250),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 250),
+ (1, HUFFMAN_EMIT_SYMBOL, 251),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 251),
+ (1, HUFFMAN_EMIT_SYMBOL, 252),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 252),
+ (1, HUFFMAN_EMIT_SYMBOL, 253),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 253),
+
+ # Node 217
+ (2, HUFFMAN_EMIT_SYMBOL, 245),
+ (9, HUFFMAN_EMIT_SYMBOL, 245),
+ (23, HUFFMAN_EMIT_SYMBOL, 245),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 245),
+ (2, HUFFMAN_EMIT_SYMBOL, 246),
+ (9, HUFFMAN_EMIT_SYMBOL, 246),
+ (23, HUFFMAN_EMIT_SYMBOL, 246),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 246),
+ (2, HUFFMAN_EMIT_SYMBOL, 247),
+ (9, HUFFMAN_EMIT_SYMBOL, 247),
+ (23, HUFFMAN_EMIT_SYMBOL, 247),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 247),
+ (2, HUFFMAN_EMIT_SYMBOL, 248),
+ (9, HUFFMAN_EMIT_SYMBOL, 248),
+ (23, HUFFMAN_EMIT_SYMBOL, 248),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 248),
+
+ # Node 218
+ (3, HUFFMAN_EMIT_SYMBOL, 245),
+ (6, HUFFMAN_EMIT_SYMBOL, 245),
+ (10, HUFFMAN_EMIT_SYMBOL, 245),
+ (15, HUFFMAN_EMIT_SYMBOL, 245),
+ (24, HUFFMAN_EMIT_SYMBOL, 245),
+ (31, HUFFMAN_EMIT_SYMBOL, 245),
+ (41, HUFFMAN_EMIT_SYMBOL, 245),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 245),
+ (3, HUFFMAN_EMIT_SYMBOL, 246),
+ (6, HUFFMAN_EMIT_SYMBOL, 246),
+ (10, HUFFMAN_EMIT_SYMBOL, 246),
+ (15, HUFFMAN_EMIT_SYMBOL, 246),
+ (24, HUFFMAN_EMIT_SYMBOL, 246),
+ (31, HUFFMAN_EMIT_SYMBOL, 246),
+ (41, HUFFMAN_EMIT_SYMBOL, 246),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 246),
+
+ # Node 219
+ (3, HUFFMAN_EMIT_SYMBOL, 247),
+ (6, HUFFMAN_EMIT_SYMBOL, 247),
+ (10, HUFFMAN_EMIT_SYMBOL, 247),
+ (15, HUFFMAN_EMIT_SYMBOL, 247),
+ (24, HUFFMAN_EMIT_SYMBOL, 247),
+ (31, HUFFMAN_EMIT_SYMBOL, 247),
+ (41, HUFFMAN_EMIT_SYMBOL, 247),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 247),
+ (3, HUFFMAN_EMIT_SYMBOL, 248),
+ (6, HUFFMAN_EMIT_SYMBOL, 248),
+ (10, HUFFMAN_EMIT_SYMBOL, 248),
+ (15, HUFFMAN_EMIT_SYMBOL, 248),
+ (24, HUFFMAN_EMIT_SYMBOL, 248),
+ (31, HUFFMAN_EMIT_SYMBOL, 248),
+ (41, HUFFMAN_EMIT_SYMBOL, 248),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 248),
+
+ # Node 220
+ (2, HUFFMAN_EMIT_SYMBOL, 250),
+ (9, HUFFMAN_EMIT_SYMBOL, 250),
+ (23, HUFFMAN_EMIT_SYMBOL, 250),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 250),
+ (2, HUFFMAN_EMIT_SYMBOL, 251),
+ (9, HUFFMAN_EMIT_SYMBOL, 251),
+ (23, HUFFMAN_EMIT_SYMBOL, 251),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 251),
+ (2, HUFFMAN_EMIT_SYMBOL, 252),
+ (9, HUFFMAN_EMIT_SYMBOL, 252),
+ (23, HUFFMAN_EMIT_SYMBOL, 252),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 252),
+ (2, HUFFMAN_EMIT_SYMBOL, 253),
+ (9, HUFFMAN_EMIT_SYMBOL, 253),
+ (23, HUFFMAN_EMIT_SYMBOL, 253),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 253),
+
+ # Node 221
+ (3, HUFFMAN_EMIT_SYMBOL, 250),
+ (6, HUFFMAN_EMIT_SYMBOL, 250),
+ (10, HUFFMAN_EMIT_SYMBOL, 250),
+ (15, HUFFMAN_EMIT_SYMBOL, 250),
+ (24, HUFFMAN_EMIT_SYMBOL, 250),
+ (31, HUFFMAN_EMIT_SYMBOL, 250),
+ (41, HUFFMAN_EMIT_SYMBOL, 250),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 250),
+ (3, HUFFMAN_EMIT_SYMBOL, 251),
+ (6, HUFFMAN_EMIT_SYMBOL, 251),
+ (10, HUFFMAN_EMIT_SYMBOL, 251),
+ (15, HUFFMAN_EMIT_SYMBOL, 251),
+ (24, HUFFMAN_EMIT_SYMBOL, 251),
+ (31, HUFFMAN_EMIT_SYMBOL, 251),
+ (41, HUFFMAN_EMIT_SYMBOL, 251),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 251),
+
+ # Node 222
+ (3, HUFFMAN_EMIT_SYMBOL, 252),
+ (6, HUFFMAN_EMIT_SYMBOL, 252),
+ (10, HUFFMAN_EMIT_SYMBOL, 252),
+ (15, HUFFMAN_EMIT_SYMBOL, 252),
+ (24, HUFFMAN_EMIT_SYMBOL, 252),
+ (31, HUFFMAN_EMIT_SYMBOL, 252),
+ (41, HUFFMAN_EMIT_SYMBOL, 252),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 252),
+ (3, HUFFMAN_EMIT_SYMBOL, 253),
+ (6, HUFFMAN_EMIT_SYMBOL, 253),
+ (10, HUFFMAN_EMIT_SYMBOL, 253),
+ (15, HUFFMAN_EMIT_SYMBOL, 253),
+ (24, HUFFMAN_EMIT_SYMBOL, 253),
+ (31, HUFFMAN_EMIT_SYMBOL, 253),
+ (41, HUFFMAN_EMIT_SYMBOL, 253),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 253),
+
+ # Node 223
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 254),
+ (227, 0, 0),
+ (229, 0, 0),
+ (230, 0, 0),
+ (233, 0, 0),
+ (234, 0, 0),
+ (236, 0, 0),
+ (237, 0, 0),
+ (241, 0, 0),
+ (242, 0, 0),
+ (244, 0, 0),
+ (245, 0, 0),
+ (248, 0, 0),
+ (249, 0, 0),
+ (251, 0, 0),
+ (252, 0, 0),
+
+ # Node 224
+ (1, HUFFMAN_EMIT_SYMBOL, 254),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 254),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 2),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 3),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 4),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 5),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 6),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 7),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 8),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 11),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 12),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 14),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 15),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 16),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 17),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 18),
+
+ # Node 225
+ (2, HUFFMAN_EMIT_SYMBOL, 254),
+ (9, HUFFMAN_EMIT_SYMBOL, 254),
+ (23, HUFFMAN_EMIT_SYMBOL, 254),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 254),
+ (1, HUFFMAN_EMIT_SYMBOL, 2),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 2),
+ (1, HUFFMAN_EMIT_SYMBOL, 3),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 3),
+ (1, HUFFMAN_EMIT_SYMBOL, 4),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 4),
+ (1, HUFFMAN_EMIT_SYMBOL, 5),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 5),
+ (1, HUFFMAN_EMIT_SYMBOL, 6),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 6),
+ (1, HUFFMAN_EMIT_SYMBOL, 7),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 7),
+
+ # Node 226
+ (3, HUFFMAN_EMIT_SYMBOL, 254),
+ (6, HUFFMAN_EMIT_SYMBOL, 254),
+ (10, HUFFMAN_EMIT_SYMBOL, 254),
+ (15, HUFFMAN_EMIT_SYMBOL, 254),
+ (24, HUFFMAN_EMIT_SYMBOL, 254),
+ (31, HUFFMAN_EMIT_SYMBOL, 254),
+ (41, HUFFMAN_EMIT_SYMBOL, 254),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 254),
+ (2, HUFFMAN_EMIT_SYMBOL, 2),
+ (9, HUFFMAN_EMIT_SYMBOL, 2),
+ (23, HUFFMAN_EMIT_SYMBOL, 2),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 2),
+ (2, HUFFMAN_EMIT_SYMBOL, 3),
+ (9, HUFFMAN_EMIT_SYMBOL, 3),
+ (23, HUFFMAN_EMIT_SYMBOL, 3),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 3),
+
+ # Node 227
+ (3, HUFFMAN_EMIT_SYMBOL, 2),
+ (6, HUFFMAN_EMIT_SYMBOL, 2),
+ (10, HUFFMAN_EMIT_SYMBOL, 2),
+ (15, HUFFMAN_EMIT_SYMBOL, 2),
+ (24, HUFFMAN_EMIT_SYMBOL, 2),
+ (31, HUFFMAN_EMIT_SYMBOL, 2),
+ (41, HUFFMAN_EMIT_SYMBOL, 2),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 2),
+ (3, HUFFMAN_EMIT_SYMBOL, 3),
+ (6, HUFFMAN_EMIT_SYMBOL, 3),
+ (10, HUFFMAN_EMIT_SYMBOL, 3),
+ (15, HUFFMAN_EMIT_SYMBOL, 3),
+ (24, HUFFMAN_EMIT_SYMBOL, 3),
+ (31, HUFFMAN_EMIT_SYMBOL, 3),
+ (41, HUFFMAN_EMIT_SYMBOL, 3),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 3),
+
+ # Node 228
+ (2, HUFFMAN_EMIT_SYMBOL, 4),
+ (9, HUFFMAN_EMIT_SYMBOL, 4),
+ (23, HUFFMAN_EMIT_SYMBOL, 4),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 4),
+ (2, HUFFMAN_EMIT_SYMBOL, 5),
+ (9, HUFFMAN_EMIT_SYMBOL, 5),
+ (23, HUFFMAN_EMIT_SYMBOL, 5),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 5),
+ (2, HUFFMAN_EMIT_SYMBOL, 6),
+ (9, HUFFMAN_EMIT_SYMBOL, 6),
+ (23, HUFFMAN_EMIT_SYMBOL, 6),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 6),
+ (2, HUFFMAN_EMIT_SYMBOL, 7),
+ (9, HUFFMAN_EMIT_SYMBOL, 7),
+ (23, HUFFMAN_EMIT_SYMBOL, 7),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 7),
+
+ # Node 229
+ (3, HUFFMAN_EMIT_SYMBOL, 4),
+ (6, HUFFMAN_EMIT_SYMBOL, 4),
+ (10, HUFFMAN_EMIT_SYMBOL, 4),
+ (15, HUFFMAN_EMIT_SYMBOL, 4),
+ (24, HUFFMAN_EMIT_SYMBOL, 4),
+ (31, HUFFMAN_EMIT_SYMBOL, 4),
+ (41, HUFFMAN_EMIT_SYMBOL, 4),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 4),
+ (3, HUFFMAN_EMIT_SYMBOL, 5),
+ (6, HUFFMAN_EMIT_SYMBOL, 5),
+ (10, HUFFMAN_EMIT_SYMBOL, 5),
+ (15, HUFFMAN_EMIT_SYMBOL, 5),
+ (24, HUFFMAN_EMIT_SYMBOL, 5),
+ (31, HUFFMAN_EMIT_SYMBOL, 5),
+ (41, HUFFMAN_EMIT_SYMBOL, 5),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 5),
+
+ # Node 230
+ (3, HUFFMAN_EMIT_SYMBOL, 6),
+ (6, HUFFMAN_EMIT_SYMBOL, 6),
+ (10, HUFFMAN_EMIT_SYMBOL, 6),
+ (15, HUFFMAN_EMIT_SYMBOL, 6),
+ (24, HUFFMAN_EMIT_SYMBOL, 6),
+ (31, HUFFMAN_EMIT_SYMBOL, 6),
+ (41, HUFFMAN_EMIT_SYMBOL, 6),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 6),
+ (3, HUFFMAN_EMIT_SYMBOL, 7),
+ (6, HUFFMAN_EMIT_SYMBOL, 7),
+ (10, HUFFMAN_EMIT_SYMBOL, 7),
+ (15, HUFFMAN_EMIT_SYMBOL, 7),
+ (24, HUFFMAN_EMIT_SYMBOL, 7),
+ (31, HUFFMAN_EMIT_SYMBOL, 7),
+ (41, HUFFMAN_EMIT_SYMBOL, 7),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 7),
+
+ # Node 231
+ (1, HUFFMAN_EMIT_SYMBOL, 8),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 8),
+ (1, HUFFMAN_EMIT_SYMBOL, 11),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 11),
+ (1, HUFFMAN_EMIT_SYMBOL, 12),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 12),
+ (1, HUFFMAN_EMIT_SYMBOL, 14),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 14),
+ (1, HUFFMAN_EMIT_SYMBOL, 15),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 15),
+ (1, HUFFMAN_EMIT_SYMBOL, 16),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 16),
+ (1, HUFFMAN_EMIT_SYMBOL, 17),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 17),
+ (1, HUFFMAN_EMIT_SYMBOL, 18),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 18),
+
+ # Node 232
+ (2, HUFFMAN_EMIT_SYMBOL, 8),
+ (9, HUFFMAN_EMIT_SYMBOL, 8),
+ (23, HUFFMAN_EMIT_SYMBOL, 8),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 8),
+ (2, HUFFMAN_EMIT_SYMBOL, 11),
+ (9, HUFFMAN_EMIT_SYMBOL, 11),
+ (23, HUFFMAN_EMIT_SYMBOL, 11),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 11),
+ (2, HUFFMAN_EMIT_SYMBOL, 12),
+ (9, HUFFMAN_EMIT_SYMBOL, 12),
+ (23, HUFFMAN_EMIT_SYMBOL, 12),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 12),
+ (2, HUFFMAN_EMIT_SYMBOL, 14),
+ (9, HUFFMAN_EMIT_SYMBOL, 14),
+ (23, HUFFMAN_EMIT_SYMBOL, 14),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 14),
+
+ # Node 233
+ (3, HUFFMAN_EMIT_SYMBOL, 8),
+ (6, HUFFMAN_EMIT_SYMBOL, 8),
+ (10, HUFFMAN_EMIT_SYMBOL, 8),
+ (15, HUFFMAN_EMIT_SYMBOL, 8),
+ (24, HUFFMAN_EMIT_SYMBOL, 8),
+ (31, HUFFMAN_EMIT_SYMBOL, 8),
+ (41, HUFFMAN_EMIT_SYMBOL, 8),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 8),
+ (3, HUFFMAN_EMIT_SYMBOL, 11),
+ (6, HUFFMAN_EMIT_SYMBOL, 11),
+ (10, HUFFMAN_EMIT_SYMBOL, 11),
+ (15, HUFFMAN_EMIT_SYMBOL, 11),
+ (24, HUFFMAN_EMIT_SYMBOL, 11),
+ (31, HUFFMAN_EMIT_SYMBOL, 11),
+ (41, HUFFMAN_EMIT_SYMBOL, 11),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 11),
+
+ # Node 234
+ (3, HUFFMAN_EMIT_SYMBOL, 12),
+ (6, HUFFMAN_EMIT_SYMBOL, 12),
+ (10, HUFFMAN_EMIT_SYMBOL, 12),
+ (15, HUFFMAN_EMIT_SYMBOL, 12),
+ (24, HUFFMAN_EMIT_SYMBOL, 12),
+ (31, HUFFMAN_EMIT_SYMBOL, 12),
+ (41, HUFFMAN_EMIT_SYMBOL, 12),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 12),
+ (3, HUFFMAN_EMIT_SYMBOL, 14),
+ (6, HUFFMAN_EMIT_SYMBOL, 14),
+ (10, HUFFMAN_EMIT_SYMBOL, 14),
+ (15, HUFFMAN_EMIT_SYMBOL, 14),
+ (24, HUFFMAN_EMIT_SYMBOL, 14),
+ (31, HUFFMAN_EMIT_SYMBOL, 14),
+ (41, HUFFMAN_EMIT_SYMBOL, 14),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 14),
+
+ # Node 235
+ (2, HUFFMAN_EMIT_SYMBOL, 15),
+ (9, HUFFMAN_EMIT_SYMBOL, 15),
+ (23, HUFFMAN_EMIT_SYMBOL, 15),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 15),
+ (2, HUFFMAN_EMIT_SYMBOL, 16),
+ (9, HUFFMAN_EMIT_SYMBOL, 16),
+ (23, HUFFMAN_EMIT_SYMBOL, 16),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 16),
+ (2, HUFFMAN_EMIT_SYMBOL, 17),
+ (9, HUFFMAN_EMIT_SYMBOL, 17),
+ (23, HUFFMAN_EMIT_SYMBOL, 17),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 17),
+ (2, HUFFMAN_EMIT_SYMBOL, 18),
+ (9, HUFFMAN_EMIT_SYMBOL, 18),
+ (23, HUFFMAN_EMIT_SYMBOL, 18),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 18),
+
+ # Node 236
+ (3, HUFFMAN_EMIT_SYMBOL, 15),
+ (6, HUFFMAN_EMIT_SYMBOL, 15),
+ (10, HUFFMAN_EMIT_SYMBOL, 15),
+ (15, HUFFMAN_EMIT_SYMBOL, 15),
+ (24, HUFFMAN_EMIT_SYMBOL, 15),
+ (31, HUFFMAN_EMIT_SYMBOL, 15),
+ (41, HUFFMAN_EMIT_SYMBOL, 15),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 15),
+ (3, HUFFMAN_EMIT_SYMBOL, 16),
+ (6, HUFFMAN_EMIT_SYMBOL, 16),
+ (10, HUFFMAN_EMIT_SYMBOL, 16),
+ (15, HUFFMAN_EMIT_SYMBOL, 16),
+ (24, HUFFMAN_EMIT_SYMBOL, 16),
+ (31, HUFFMAN_EMIT_SYMBOL, 16),
+ (41, HUFFMAN_EMIT_SYMBOL, 16),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 16),
+
+ # Node 237
+ (3, HUFFMAN_EMIT_SYMBOL, 17),
+ (6, HUFFMAN_EMIT_SYMBOL, 17),
+ (10, HUFFMAN_EMIT_SYMBOL, 17),
+ (15, HUFFMAN_EMIT_SYMBOL, 17),
+ (24, HUFFMAN_EMIT_SYMBOL, 17),
+ (31, HUFFMAN_EMIT_SYMBOL, 17),
+ (41, HUFFMAN_EMIT_SYMBOL, 17),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 17),
+ (3, HUFFMAN_EMIT_SYMBOL, 18),
+ (6, HUFFMAN_EMIT_SYMBOL, 18),
+ (10, HUFFMAN_EMIT_SYMBOL, 18),
+ (15, HUFFMAN_EMIT_SYMBOL, 18),
+ (24, HUFFMAN_EMIT_SYMBOL, 18),
+ (31, HUFFMAN_EMIT_SYMBOL, 18),
+ (41, HUFFMAN_EMIT_SYMBOL, 18),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 18),
+
+ # Node 238
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 19),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 20),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 21),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 23),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 24),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 25),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 26),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 27),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 28),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 29),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 30),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 31),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 127),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 220),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 249),
+ (253, 0, 0),
+
+ # Node 239
+ (1, HUFFMAN_EMIT_SYMBOL, 19),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 19),
+ (1, HUFFMAN_EMIT_SYMBOL, 20),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 20),
+ (1, HUFFMAN_EMIT_SYMBOL, 21),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 21),
+ (1, HUFFMAN_EMIT_SYMBOL, 23),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 23),
+ (1, HUFFMAN_EMIT_SYMBOL, 24),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 24),
+ (1, HUFFMAN_EMIT_SYMBOL, 25),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 25),
+ (1, HUFFMAN_EMIT_SYMBOL, 26),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 26),
+ (1, HUFFMAN_EMIT_SYMBOL, 27),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 27),
+
+ # Node 240
+ (2, HUFFMAN_EMIT_SYMBOL, 19),
+ (9, HUFFMAN_EMIT_SYMBOL, 19),
+ (23, HUFFMAN_EMIT_SYMBOL, 19),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 19),
+ (2, HUFFMAN_EMIT_SYMBOL, 20),
+ (9, HUFFMAN_EMIT_SYMBOL, 20),
+ (23, HUFFMAN_EMIT_SYMBOL, 20),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 20),
+ (2, HUFFMAN_EMIT_SYMBOL, 21),
+ (9, HUFFMAN_EMIT_SYMBOL, 21),
+ (23, HUFFMAN_EMIT_SYMBOL, 21),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 21),
+ (2, HUFFMAN_EMIT_SYMBOL, 23),
+ (9, HUFFMAN_EMIT_SYMBOL, 23),
+ (23, HUFFMAN_EMIT_SYMBOL, 23),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 23),
+
+ # Node 241
+ (3, HUFFMAN_EMIT_SYMBOL, 19),
+ (6, HUFFMAN_EMIT_SYMBOL, 19),
+ (10, HUFFMAN_EMIT_SYMBOL, 19),
+ (15, HUFFMAN_EMIT_SYMBOL, 19),
+ (24, HUFFMAN_EMIT_SYMBOL, 19),
+ (31, HUFFMAN_EMIT_SYMBOL, 19),
+ (41, HUFFMAN_EMIT_SYMBOL, 19),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 19),
+ (3, HUFFMAN_EMIT_SYMBOL, 20),
+ (6, HUFFMAN_EMIT_SYMBOL, 20),
+ (10, HUFFMAN_EMIT_SYMBOL, 20),
+ (15, HUFFMAN_EMIT_SYMBOL, 20),
+ (24, HUFFMAN_EMIT_SYMBOL, 20),
+ (31, HUFFMAN_EMIT_SYMBOL, 20),
+ (41, HUFFMAN_EMIT_SYMBOL, 20),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 20),
+
+ # Node 242
+ (3, HUFFMAN_EMIT_SYMBOL, 21),
+ (6, HUFFMAN_EMIT_SYMBOL, 21),
+ (10, HUFFMAN_EMIT_SYMBOL, 21),
+ (15, HUFFMAN_EMIT_SYMBOL, 21),
+ (24, HUFFMAN_EMIT_SYMBOL, 21),
+ (31, HUFFMAN_EMIT_SYMBOL, 21),
+ (41, HUFFMAN_EMIT_SYMBOL, 21),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 21),
+ (3, HUFFMAN_EMIT_SYMBOL, 23),
+ (6, HUFFMAN_EMIT_SYMBOL, 23),
+ (10, HUFFMAN_EMIT_SYMBOL, 23),
+ (15, HUFFMAN_EMIT_SYMBOL, 23),
+ (24, HUFFMAN_EMIT_SYMBOL, 23),
+ (31, HUFFMAN_EMIT_SYMBOL, 23),
+ (41, HUFFMAN_EMIT_SYMBOL, 23),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 23),
+
+ # Node 243
+ (2, HUFFMAN_EMIT_SYMBOL, 24),
+ (9, HUFFMAN_EMIT_SYMBOL, 24),
+ (23, HUFFMAN_EMIT_SYMBOL, 24),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 24),
+ (2, HUFFMAN_EMIT_SYMBOL, 25),
+ (9, HUFFMAN_EMIT_SYMBOL, 25),
+ (23, HUFFMAN_EMIT_SYMBOL, 25),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 25),
+ (2, HUFFMAN_EMIT_SYMBOL, 26),
+ (9, HUFFMAN_EMIT_SYMBOL, 26),
+ (23, HUFFMAN_EMIT_SYMBOL, 26),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 26),
+ (2, HUFFMAN_EMIT_SYMBOL, 27),
+ (9, HUFFMAN_EMIT_SYMBOL, 27),
+ (23, HUFFMAN_EMIT_SYMBOL, 27),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 27),
+
+ # Node 244
+ (3, HUFFMAN_EMIT_SYMBOL, 24),
+ (6, HUFFMAN_EMIT_SYMBOL, 24),
+ (10, HUFFMAN_EMIT_SYMBOL, 24),
+ (15, HUFFMAN_EMIT_SYMBOL, 24),
+ (24, HUFFMAN_EMIT_SYMBOL, 24),
+ (31, HUFFMAN_EMIT_SYMBOL, 24),
+ (41, HUFFMAN_EMIT_SYMBOL, 24),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 24),
+ (3, HUFFMAN_EMIT_SYMBOL, 25),
+ (6, HUFFMAN_EMIT_SYMBOL, 25),
+ (10, HUFFMAN_EMIT_SYMBOL, 25),
+ (15, HUFFMAN_EMIT_SYMBOL, 25),
+ (24, HUFFMAN_EMIT_SYMBOL, 25),
+ (31, HUFFMAN_EMIT_SYMBOL, 25),
+ (41, HUFFMAN_EMIT_SYMBOL, 25),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 25),
+
+ # Node 245
+ (3, HUFFMAN_EMIT_SYMBOL, 26),
+ (6, HUFFMAN_EMIT_SYMBOL, 26),
+ (10, HUFFMAN_EMIT_SYMBOL, 26),
+ (15, HUFFMAN_EMIT_SYMBOL, 26),
+ (24, HUFFMAN_EMIT_SYMBOL, 26),
+ (31, HUFFMAN_EMIT_SYMBOL, 26),
+ (41, HUFFMAN_EMIT_SYMBOL, 26),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 26),
+ (3, HUFFMAN_EMIT_SYMBOL, 27),
+ (6, HUFFMAN_EMIT_SYMBOL, 27),
+ (10, HUFFMAN_EMIT_SYMBOL, 27),
+ (15, HUFFMAN_EMIT_SYMBOL, 27),
+ (24, HUFFMAN_EMIT_SYMBOL, 27),
+ (31, HUFFMAN_EMIT_SYMBOL, 27),
+ (41, HUFFMAN_EMIT_SYMBOL, 27),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 27),
+
+ # Node 246
+ (1, HUFFMAN_EMIT_SYMBOL, 28),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 28),
+ (1, HUFFMAN_EMIT_SYMBOL, 29),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 29),
+ (1, HUFFMAN_EMIT_SYMBOL, 30),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 30),
+ (1, HUFFMAN_EMIT_SYMBOL, 31),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 31),
+ (1, HUFFMAN_EMIT_SYMBOL, 127),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 127),
+ (1, HUFFMAN_EMIT_SYMBOL, 220),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 220),
+ (1, HUFFMAN_EMIT_SYMBOL, 249),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 249),
+ (254, 0, 0),
+ (255, 0, 0),
+
+ # Node 247
+ (2, HUFFMAN_EMIT_SYMBOL, 28),
+ (9, HUFFMAN_EMIT_SYMBOL, 28),
+ (23, HUFFMAN_EMIT_SYMBOL, 28),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 28),
+ (2, HUFFMAN_EMIT_SYMBOL, 29),
+ (9, HUFFMAN_EMIT_SYMBOL, 29),
+ (23, HUFFMAN_EMIT_SYMBOL, 29),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 29),
+ (2, HUFFMAN_EMIT_SYMBOL, 30),
+ (9, HUFFMAN_EMIT_SYMBOL, 30),
+ (23, HUFFMAN_EMIT_SYMBOL, 30),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 30),
+ (2, HUFFMAN_EMIT_SYMBOL, 31),
+ (9, HUFFMAN_EMIT_SYMBOL, 31),
+ (23, HUFFMAN_EMIT_SYMBOL, 31),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 31),
+
+ # Node 248
+ (3, HUFFMAN_EMIT_SYMBOL, 28),
+ (6, HUFFMAN_EMIT_SYMBOL, 28),
+ (10, HUFFMAN_EMIT_SYMBOL, 28),
+ (15, HUFFMAN_EMIT_SYMBOL, 28),
+ (24, HUFFMAN_EMIT_SYMBOL, 28),
+ (31, HUFFMAN_EMIT_SYMBOL, 28),
+ (41, HUFFMAN_EMIT_SYMBOL, 28),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 28),
+ (3, HUFFMAN_EMIT_SYMBOL, 29),
+ (6, HUFFMAN_EMIT_SYMBOL, 29),
+ (10, HUFFMAN_EMIT_SYMBOL, 29),
+ (15, HUFFMAN_EMIT_SYMBOL, 29),
+ (24, HUFFMAN_EMIT_SYMBOL, 29),
+ (31, HUFFMAN_EMIT_SYMBOL, 29),
+ (41, HUFFMAN_EMIT_SYMBOL, 29),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 29),
+
+ # Node 249
+ (3, HUFFMAN_EMIT_SYMBOL, 30),
+ (6, HUFFMAN_EMIT_SYMBOL, 30),
+ (10, HUFFMAN_EMIT_SYMBOL, 30),
+ (15, HUFFMAN_EMIT_SYMBOL, 30),
+ (24, HUFFMAN_EMIT_SYMBOL, 30),
+ (31, HUFFMAN_EMIT_SYMBOL, 30),
+ (41, HUFFMAN_EMIT_SYMBOL, 30),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 30),
+ (3, HUFFMAN_EMIT_SYMBOL, 31),
+ (6, HUFFMAN_EMIT_SYMBOL, 31),
+ (10, HUFFMAN_EMIT_SYMBOL, 31),
+ (15, HUFFMAN_EMIT_SYMBOL, 31),
+ (24, HUFFMAN_EMIT_SYMBOL, 31),
+ (31, HUFFMAN_EMIT_SYMBOL, 31),
+ (41, HUFFMAN_EMIT_SYMBOL, 31),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 31),
+
+ # Node 250
+ (2, HUFFMAN_EMIT_SYMBOL, 127),
+ (9, HUFFMAN_EMIT_SYMBOL, 127),
+ (23, HUFFMAN_EMIT_SYMBOL, 127),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 127),
+ (2, HUFFMAN_EMIT_SYMBOL, 220),
+ (9, HUFFMAN_EMIT_SYMBOL, 220),
+ (23, HUFFMAN_EMIT_SYMBOL, 220),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 220),
+ (2, HUFFMAN_EMIT_SYMBOL, 249),
+ (9, HUFFMAN_EMIT_SYMBOL, 249),
+ (23, HUFFMAN_EMIT_SYMBOL, 249),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 249),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 10),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 13),
+ (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 22),
+ (0, HUFFMAN_FAIL, 0),
+
+ # Node 251
+ (3, HUFFMAN_EMIT_SYMBOL, 127),
+ (6, HUFFMAN_EMIT_SYMBOL, 127),
+ (10, HUFFMAN_EMIT_SYMBOL, 127),
+ (15, HUFFMAN_EMIT_SYMBOL, 127),
+ (24, HUFFMAN_EMIT_SYMBOL, 127),
+ (31, HUFFMAN_EMIT_SYMBOL, 127),
+ (41, HUFFMAN_EMIT_SYMBOL, 127),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 127),
+ (3, HUFFMAN_EMIT_SYMBOL, 220),
+ (6, HUFFMAN_EMIT_SYMBOL, 220),
+ (10, HUFFMAN_EMIT_SYMBOL, 220),
+ (15, HUFFMAN_EMIT_SYMBOL, 220),
+ (24, HUFFMAN_EMIT_SYMBOL, 220),
+ (31, HUFFMAN_EMIT_SYMBOL, 220),
+ (41, HUFFMAN_EMIT_SYMBOL, 220),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 220),
+
+ # Node 252
+ (3, HUFFMAN_EMIT_SYMBOL, 249),
+ (6, HUFFMAN_EMIT_SYMBOL, 249),
+ (10, HUFFMAN_EMIT_SYMBOL, 249),
+ (15, HUFFMAN_EMIT_SYMBOL, 249),
+ (24, HUFFMAN_EMIT_SYMBOL, 249),
+ (31, HUFFMAN_EMIT_SYMBOL, 249),
+ (41, HUFFMAN_EMIT_SYMBOL, 249),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 249),
+ (1, HUFFMAN_EMIT_SYMBOL, 10),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 10),
+ (1, HUFFMAN_EMIT_SYMBOL, 13),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 13),
+ (1, HUFFMAN_EMIT_SYMBOL, 22),
+ (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 22),
+ (0, HUFFMAN_FAIL, 0),
+ (0, HUFFMAN_FAIL, 0),
+
+ # Node 253
+ (2, HUFFMAN_EMIT_SYMBOL, 10),
+ (9, HUFFMAN_EMIT_SYMBOL, 10),
+ (23, HUFFMAN_EMIT_SYMBOL, 10),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 10),
+ (2, HUFFMAN_EMIT_SYMBOL, 13),
+ (9, HUFFMAN_EMIT_SYMBOL, 13),
+ (23, HUFFMAN_EMIT_SYMBOL, 13),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 13),
+ (2, HUFFMAN_EMIT_SYMBOL, 22),
+ (9, HUFFMAN_EMIT_SYMBOL, 22),
+ (23, HUFFMAN_EMIT_SYMBOL, 22),
+ (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 22),
+ (0, HUFFMAN_FAIL, 0),
+ (0, HUFFMAN_FAIL, 0),
+ (0, HUFFMAN_FAIL, 0),
+ (0, HUFFMAN_FAIL, 0),
+
+ # Node 254
+ (3, HUFFMAN_EMIT_SYMBOL, 10),
+ (6, HUFFMAN_EMIT_SYMBOL, 10),
+ (10, HUFFMAN_EMIT_SYMBOL, 10),
+ (15, HUFFMAN_EMIT_SYMBOL, 10),
+ (24, HUFFMAN_EMIT_SYMBOL, 10),
+ (31, HUFFMAN_EMIT_SYMBOL, 10),
+ (41, HUFFMAN_EMIT_SYMBOL, 10),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 10),
+ (3, HUFFMAN_EMIT_SYMBOL, 13),
+ (6, HUFFMAN_EMIT_SYMBOL, 13),
+ (10, HUFFMAN_EMIT_SYMBOL, 13),
+ (15, HUFFMAN_EMIT_SYMBOL, 13),
+ (24, HUFFMAN_EMIT_SYMBOL, 13),
+ (31, HUFFMAN_EMIT_SYMBOL, 13),
+ (41, HUFFMAN_EMIT_SYMBOL, 13),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 13),
+
+ # Node 255
+ (3, HUFFMAN_EMIT_SYMBOL, 22),
+ (6, HUFFMAN_EMIT_SYMBOL, 22),
+ (10, HUFFMAN_EMIT_SYMBOL, 22),
+ (15, HUFFMAN_EMIT_SYMBOL, 22),
+ (24, HUFFMAN_EMIT_SYMBOL, 22),
+ (31, HUFFMAN_EMIT_SYMBOL, 22),
+ (41, HUFFMAN_EMIT_SYMBOL, 22),
+ (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 22),
+ (0, HUFFMAN_FAIL, 0),
+ (0, HUFFMAN_FAIL, 0),
+ (0, HUFFMAN_FAIL, 0),
+ (0, HUFFMAN_FAIL, 0),
+ (0, HUFFMAN_FAIL, 0),
+ (0, HUFFMAN_FAIL, 0),
+ (0, HUFFMAN_FAIL, 0),
+ (0, HUFFMAN_FAIL, 0),
+]
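
Each group of sixteen (next_state, flags, symbol) rows above is indexed by one nibble of input, so a decoder walks the table four bits at a time. A minimal, self-contained sketch of that style of table-driven decoding (illustrative only, with placeholder flag values and a caller-supplied table, not hpack's own decoder):

    # Toy nibble-driven Huffman decoder over rows of (next_state, flags, symbol).
    # The flag values below are illustrative placeholders.
    HUFFMAN_COMPLETE, HUFFMAN_EMIT_SYMBOL, HUFFMAN_FAIL = 0x01, 0x02, 0x04

    def decode(data: bytes, table: list) -> bytes:
        state, flags, out = 0, HUFFMAN_COMPLETE, bytearray()
        for byte in data:
            for nibble in (byte >> 4, byte & 0x0F):
                # Each state owns 16 consecutive rows, one per possible nibble.
                state, flags, symbol = table[state * 16 + nibble]
                if flags & HUFFMAN_FAIL:
                    raise ValueError("invalid Huffman-coded string")
                if flags & HUFFMAN_EMIT_SYMBOL:
                    out.append(symbol)
        if not flags & HUFFMAN_COMPLETE:
            raise ValueError("truncated Huffman-coded string")
        return bytes(out)
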
diff --git a/packages/hpack/struct.py b/packages/hpack/struct.py
new file mode 100644
index 000000000..fcab92956
--- /dev/null
+++ b/packages/hpack/struct.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+"""
+hpack/struct
+~~~~~~~~~~~~
+
+Contains structures for representing header fields with associated metadata.
+"""
+
+
+class HeaderTuple(tuple):
+ """
+ A data structure that stores a single header field.
+
+ HTTP headers can be thought of as tuples of ``(field name, field value)``.
+ A single header block is a sequence of such tuples.
+
+ In HTTP/2, however, certain bits of additional information are required for
+ compressing these headers: in particular, whether the header field can be
+ safely added to the HPACK compression context.
+
+ This class stores a header that can be added to the compression context. In
+ all other ways it behaves exactly like a tuple.
+ """
+ __slots__ = ()
+
+ indexable = True
+
+ def __new__(cls, *args):
+ return tuple.__new__(cls, args)
+
+
+class NeverIndexedHeaderTuple(HeaderTuple):
+ """
+ A data structure that stores a single header field that cannot be added to
+ an HTTP/2 header compression context.
+ """
+ __slots__ = ()
+
+ indexable = False
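
A quick usage sketch of the two tuple types, assuming the vendored packages/ directory is on sys.path so the module imports as hpack.struct:

    from hpack.struct import HeaderTuple, NeverIndexedHeaderTuple

    plain = HeaderTuple(b"content-type", b"text/html")
    secret = NeverIndexedHeaderTuple(b"authorization", b"token")

    assert plain == (b"content-type", b"text/html")  # behaves exactly like a tuple
    assert plain.indexable                           # safe to add to the HPACK context
    assert not secret.indexable                      # must never be indexed
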
diff --git a/packages/hpack/table.py b/packages/hpack/table.py
new file mode 100644
index 000000000..2b656f32a
--- /dev/null
+++ b/packages/hpack/table.py
@@ -0,0 +1,235 @@
+# -*- coding: utf-8 -*-
+# flake8: noqa
+from collections import deque
+import logging
+
+from .exceptions import InvalidTableIndex
+
+log = logging.getLogger(__name__)
+
+
+def table_entry_size(name, value):
+ """
+ Calculates the size of a single entry
+
+ This size is mostly irrelevant to us and defined
+ specifically to accommodate memory management for
+ lower level implementations. The 32 extra bytes are
+ considered the "maximum" overhead that would be
+ required to represent each entry in the table.
+
+ See RFC7541 Section 4.1
+ """
+ return 32 + len(name) + len(value)
+
+
+class HeaderTable:
+ """
+ Implements the combined static and dynamic header table
+
+ The name and value arguments for all the functions
+ should ONLY be byte strings (b''); however, this is not
+ strictly enforced in the interface.
+
+ See RFC7541 Section 2.3
+ """
+ #: Default maximum size of the dynamic table. See
+ #: RFC7540 Section 6.5.2.
+ DEFAULT_SIZE = 4096
+
+ #: Constant list of static headers. See RFC7541 Section
+ #: 2.3.1 and Appendix A
+ STATIC_TABLE = (
+ (b':authority' , b'' ), # noqa
+ (b':method' , b'GET' ), # noqa
+ (b':method' , b'POST' ), # noqa
+ (b':path' , b'/' ), # noqa
+ (b':path' , b'/index.html' ), # noqa
+ (b':scheme' , b'http' ), # noqa
+ (b':scheme' , b'https' ), # noqa
+ (b':status' , b'200' ), # noqa
+ (b':status' , b'204' ), # noqa
+ (b':status' , b'206' ), # noqa
+ (b':status' , b'304' ), # noqa
+ (b':status' , b'400' ), # noqa
+ (b':status' , b'404' ), # noqa
+ (b':status' , b'500' ), # noqa
+ (b'accept-charset' , b'' ), # noqa
+ (b'accept-encoding' , b'gzip, deflate'), # noqa
+ (b'accept-language' , b'' ), # noqa
+ (b'accept-ranges' , b'' ), # noqa
+ (b'accept' , b'' ), # noqa
+ (b'access-control-allow-origin' , b'' ), # noqa
+ (b'age' , b'' ), # noqa
+ (b'allow' , b'' ), # noqa
+ (b'authorization' , b'' ), # noqa
+ (b'cache-control' , b'' ), # noqa
+ (b'content-disposition' , b'' ), # noqa
+ (b'content-encoding' , b'' ), # noqa
+ (b'content-language' , b'' ), # noqa
+ (b'content-length' , b'' ), # noqa
+ (b'content-location' , b'' ), # noqa
+ (b'content-range' , b'' ), # noqa
+ (b'content-type' , b'' ), # noqa
+ (b'cookie' , b'' ), # noqa
+ (b'date' , b'' ), # noqa
+ (b'etag' , b'' ), # noqa
+ (b'expect' , b'' ), # noqa
+ (b'expires' , b'' ), # noqa
+ (b'from' , b'' ), # noqa
+ (b'host' , b'' ), # noqa
+ (b'if-match' , b'' ), # noqa
+ (b'if-modified-since' , b'' ), # noqa
+ (b'if-none-match' , b'' ), # noqa
+ (b'if-range' , b'' ), # noqa
+ (b'if-unmodified-since' , b'' ), # noqa
+ (b'last-modified' , b'' ), # noqa
+ (b'link' , b'' ), # noqa
+ (b'location' , b'' ), # noqa
+ (b'max-forwards' , b'' ), # noqa
+ (b'proxy-authenticate' , b'' ), # noqa
+ (b'proxy-authorization' , b'' ), # noqa
+ (b'range' , b'' ), # noqa
+ (b'referer' , b'' ), # noqa
+ (b'refresh' , b'' ), # noqa
+ (b'retry-after' , b'' ), # noqa
+ (b'server' , b'' ), # noqa
+ (b'set-cookie' , b'' ), # noqa
+ (b'strict-transport-security' , b'' ), # noqa
+ (b'transfer-encoding' , b'' ), # noqa
+ (b'user-agent' , b'' ), # noqa
+ (b'vary' , b'' ), # noqa
+ (b'via' , b'' ), # noqa
+ (b'www-authenticate' , b'' ), # noqa
+ ) # noqa
+
+ STATIC_TABLE_LENGTH = len(STATIC_TABLE)
+
+ def __init__(self):
+ self._maxsize = HeaderTable.DEFAULT_SIZE
+ self._current_size = 0
+ self.resized = False
+ self.dynamic_entries = deque()
+
+ def get_by_index(self, index):
+ """
+ Returns the entry specified by index
+
+ Note that the table is 1-based, i.e. an index of 0 is
+ invalid. This is due to the fact that a zero value
+ index signals that a completely unindexed header
+ follows.
+
+ The entry will either be from the static table or
+ the dynamic table depending on the value of index.
+ """
+ original_index = index
+ index -= 1
+ if 0 <= index:
+ if index < HeaderTable.STATIC_TABLE_LENGTH:
+ return HeaderTable.STATIC_TABLE[index]
+
+ index -= HeaderTable.STATIC_TABLE_LENGTH
+ if index < len(self.dynamic_entries):
+ return self.dynamic_entries[index]
+
+ raise InvalidTableIndex("Invalid table index %d" % original_index)
+
+ def __repr__(self):
+ return "HeaderTable(%d, %s, %r)" % (
+ self._maxsize,
+ self.resized,
+ self.dynamic_entries
+ )
+
+ def add(self, name, value):
+ """
+ Adds a new entry to the table
+
+ We reduce the table size if the entry will make the
+ table size greater than maxsize.
+ """
+ # We just clear the table if the entry is too big
+ size = table_entry_size(name, value)
+ if size > self._maxsize:
+ self.dynamic_entries.clear()
+ self._current_size = 0
+ else:
+ # Add new entry
+ self.dynamic_entries.appendleft((name, value))
+ self._current_size += size
+ self._shrink()
+
+ def search(self, name, value):
+ """
+ Searches the table for the entry specified by name
+ and value
+
+ Returns one of the following:
+ - ``None``, no match at all
+ - ``(index, name, None)`` for partial matches on name only.
+ - ``(index, name, value)`` for perfect matches.
+ """
+ partial = None
+
+ header_name_search_result = HeaderTable.STATIC_TABLE_MAPPING.get(name)
+ if header_name_search_result:
+ index = header_name_search_result[1].get(value)
+ if index is not None:
+ return index, name, value
+ else:
+ partial = (header_name_search_result[0], name, None)
+
+ offset = HeaderTable.STATIC_TABLE_LENGTH + 1
+ for (i, (n, v)) in enumerate(self.dynamic_entries):
+ if n == name:
+ if v == value:
+ return i + offset, n, v
+ elif partial is None:
+ partial = (i + offset, n, None)
+ return partial
+
+ @property
+ def maxsize(self):
+ return self._maxsize
+
+ @maxsize.setter
+ def maxsize(self, newmax):
+ newmax = int(newmax)
+ log.debug("Resizing header table to %d from %d", newmax, self._maxsize)
+ oldmax = self._maxsize
+ self._maxsize = newmax
+ self.resized = (newmax != oldmax)
+ if newmax <= 0:
+ self.dynamic_entries.clear()
+ self._current_size = 0
+ elif oldmax > newmax:
+ self._shrink()
+
+ def _shrink(self):
+ """
+ Shrinks the dynamic table to be at or below maxsize
+ """
+ cursize = self._current_size
+ while cursize > self._maxsize:
+ name, value = self.dynamic_entries.pop()
+ cursize -= table_entry_size(name, value)
+ log.debug("Evicting %s: %s from the header table", name, value)
+ self._current_size = cursize
+
+
+def _build_static_table_mapping():
+ """
+ Build the static table mapping from header name to a tuple with the structure:
+ (<minimal index of header>, <mapping from header value to its index>).
+
+ static_table_mapping is used for hash searching.
+ """
+ static_table_mapping = {}
+ for index, (name, value) in enumerate(HeaderTable.STATIC_TABLE, 1):
+ header_name_search_result = static_table_mapping.setdefault(name, (index, {}))
+ header_name_search_result[1][value] = index
+ return static_table_mapping
+
+
+HeaderTable.STATIC_TABLE_MAPPING = _build_static_table_mapping()
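
A hedged usage sketch of the combined static/dynamic table (again assuming packages/ is on sys.path; the indices shown follow RFC 7541 Appendix A):

    from hpack.table import HeaderTable

    table = HeaderTable()
    assert table.get_by_index(2) == (b':method', b'GET')                 # static entry
    assert table.search(b':method', b'GET') == (2, b':method', b'GET')   # perfect match
    index, name, value = table.search(b':method', b'PUT')
    assert value is None                              # name-only (partial) match
    table.add(b'x-custom', b'1')                      # dynamic entries follow the static table
    assert table.get_by_index(62) == (b'x-custom', b'1')
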
diff --git a/packages/httpcore/__init__.py b/packages/httpcore/__init__.py
new file mode 100644
index 000000000..2dde34e39
--- /dev/null
+++ b/packages/httpcore/__init__.py
@@ -0,0 +1,63 @@
+from ._async.base import AsyncByteStream, AsyncHTTPTransport
+from ._async.connection_pool import AsyncConnectionPool
+from ._async.http_proxy import AsyncHTTPProxy
+from ._bytestreams import AsyncIteratorByteStream, ByteStream, IteratorByteStream
+from ._exceptions import (
+ CloseError,
+ ConnectError,
+ ConnectTimeout,
+ LocalProtocolError,
+ NetworkError,
+ PoolTimeout,
+ ProtocolError,
+ ProxyError,
+ ReadError,
+ ReadTimeout,
+ RemoteProtocolError,
+ TimeoutException,
+ UnsupportedProtocol,
+ WriteError,
+ WriteTimeout,
+)
+from ._sync.base import SyncByteStream, SyncHTTPTransport
+from ._sync.connection_pool import SyncConnectionPool
+from ._sync.http_proxy import SyncHTTPProxy
+
+__all__ = [
+ "AsyncByteStream",
+ "AsyncConnectionPool",
+ "AsyncHTTPProxy",
+ "AsyncHTTPTransport",
+ "AsyncIteratorByteStream",
+ "ByteStream",
+ "CloseError",
+ "ConnectError",
+ "ConnectTimeout",
+ "IteratorByteStream",
+ "LocalProtocolError",
+ "NetworkError",
+ "PoolTimeout",
+ "ProtocolError",
+ "ProxyError",
+ "ReadError",
+ "ReadTimeout",
+ "RemoteProtocolError",
+ "SyncByteStream",
+ "SyncConnectionPool",
+ "SyncHTTPProxy",
+ "SyncHTTPTransport",
+ "TimeoutException",
+ "UnsupportedProtocol",
+ "WriteError",
+ "WriteTimeout",
+]
+__version__ = "0.13.6"
+
+__locals = locals()
+
+for _name in __all__:
+ if not _name.startswith("__"):
+ # Save original source module, used by Sphinx.
+ __locals[_name].__source_module__ = __locals[_name].__module__
+ # Override module for prettier repr().
+ setattr(__locals[_name], "__module__", "httpcore") # noqa
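
The transport API exported here is byte-oriented throughout; a minimal sketch of issuing a GET through the async pool (assuming packages/ is importable as httpcore, network access is available, and example.org stands in for a real host):

    import asyncio
    import httpcore

    async def main() -> None:
        async with httpcore.AsyncConnectionPool() as pool:
            status, headers, stream, extensions = await pool.handle_async_request(
                method=b"GET",
                url=(b"https", b"example.org", 443, b"/"),
                headers=[(b"host", b"example.org")],
                stream=httpcore.ByteStream(b""),   # empty request body
                extensions={},
            )
            body = await stream.aread()            # drains and closes the response stream
            print(status, len(body))

    asyncio.run(main())
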
diff --git a/packages/httpcore/_async/__init__.py b/packages/httpcore/_async/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/packages/httpcore/_async/base.py b/packages/httpcore/_async/base.py
new file mode 100644
index 000000000..2b3961c29
--- /dev/null
+++ b/packages/httpcore/_async/base.py
@@ -0,0 +1,122 @@
+import enum
+from types import TracebackType
+from typing import AsyncIterator, Tuple, Type
+
+from .._types import URL, Headers, T
+
+
+class NewConnectionRequired(Exception):
+ pass
+
+
+class ConnectionState(enum.IntEnum):
+ """
+ PENDING  READY
+     |    |   ^
+     v    V   |
+     ACTIVE   |
+      |  |    |
+      |  V    |
+      V  IDLE-+
+    FULL   |
+      |    |
+      V    V
+    CLOSED
+ """
+
+ PENDING = 0 # Connection not yet acquired.
+ READY = 1 # Re-acquired from pool, about to send a request.
+ ACTIVE = 2 # Active requests.
+ FULL = 3 # Active requests, no more stream IDs available.
+ IDLE = 4 # No active requests.
+ CLOSED = 5 # Connection closed.
+
+
+class AsyncByteStream:
+ """
+ The base interface for request and response bodies.
+
+ Concrete implementations should subclass this class, and implement
+ the :meth:`__aiter__` method, and optionally the :meth:`aclose` method.
+ """
+
+ async def __aiter__(self) -> AsyncIterator[bytes]:
+ """
+ Yield bytes representing the request or response body.
+ """
+ yield b"" # pragma: nocover
+
+ async def aclose(self) -> None:
+ """
+ Must be called by the client to indicate that the stream has been closed.
+ """
+ pass # pragma: nocover
+
+ async def aread(self) -> bytes:
+ try:
+ return b"".join([part async for part in self])
+ finally:
+ await self.aclose()
+
+
+class AsyncHTTPTransport:
+ """
+ The base interface for sending HTTP requests.
+
+ Concrete implementations should subclass this class, and implement
+ the :meth:`handle_async_request` method, and optionally the :meth:`aclose` method.
+ """
+
+ async def handle_async_request(
+ self,
+ method: bytes,
+ url: URL,
+ headers: Headers,
+ stream: AsyncByteStream,
+ extensions: dict,
+ ) -> Tuple[int, Headers, AsyncByteStream, dict]:
+ """
+ The interface for sending a single HTTP request, and returning a response.
+
+ Parameters
+ ----------
+ method:
+ The HTTP method, such as ``b'GET'``.
+ url:
+ The URL as a 4-tuple of (scheme, host, port, path).
+ headers:
+ Any HTTP headers to send with the request.
+ stream:
+ The body of the HTTP request.
+ extensions:
+ A dictionary of optional extensions.
+
+ Returns
+ -------
+ status_code:
+ The HTTP status code, such as ``200``.
+ headers:
+ Any HTTP headers included on the response.
+ stream:
+ The body of the HTTP response.
+ extensions:
+ A dictionary of optional extensions.
+ """
+ raise NotImplementedError() # pragma: nocover
+
+ async def aclose(self) -> None:
+ """
+ Close the implementation, which should close any outstanding response streams,
+ and any keep alive connections.
+ """
+
+ async def __aenter__(self: T) -> T:
+ return self
+
+ async def __aexit__(
+ self,
+ exc_type: Type[BaseException] = None,
+ exc_value: BaseException = None,
+ traceback: TracebackType = None,
+ ) -> None:
+ await self.aclose()
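
Concrete byte streams only need __aiter__ (plus aclose when something must be released); a small sketch of a custom request-body stream built on this interface (illustrative, the class name and behaviour are assumptions, not part of the patch):

    import asyncio
    import httpcore

    class RepeatingStream(httpcore.AsyncByteStream):
        """Yield the same chunk a fixed number of times as a request body."""

        def __init__(self, chunk: bytes, times: int) -> None:
            self.chunk = chunk
            self.times = times

        async def __aiter__(self):
            for _ in range(self.times):
                yield self.chunk

    body = asyncio.run(RepeatingStream(b"ab", 3).aread())   # -> b"ababab"
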
diff --git a/packages/httpcore/_async/connection.py b/packages/httpcore/_async/connection.py
new file mode 100644
index 000000000..2add4d857
--- /dev/null
+++ b/packages/httpcore/_async/connection.py
@@ -0,0 +1,220 @@
+from ssl import SSLContext
+from typing import List, Optional, Tuple, cast
+
+from .._backends.auto import AsyncBackend, AsyncLock, AsyncSocketStream, AutoBackend
+from .._exceptions import ConnectError, ConnectTimeout
+from .._types import URL, Headers, Origin, TimeoutDict
+from .._utils import exponential_backoff, get_logger, url_to_origin
+from .base import AsyncByteStream, AsyncHTTPTransport, NewConnectionRequired
+from .http import AsyncBaseHTTPConnection
+from .http11 import AsyncHTTP11Connection
+
+logger = get_logger(__name__)
+
+RETRIES_BACKOFF_FACTOR = 0.5 # 0s, 0.5s, 1s, 2s, 4s, etc.
+
+
+class AsyncHTTPConnection(AsyncHTTPTransport):
+ def __init__(
+ self,
+ origin: Origin,
+ http1: bool = True,
+ http2: bool = False,
+ keepalive_expiry: float = None,
+ uds: str = None,
+ ssl_context: SSLContext = None,
+ socket: AsyncSocketStream = None,
+ local_address: str = None,
+ retries: int = 0,
+ backend: AsyncBackend = None,
+ ):
+ self.origin = origin
+ self._http1_enabled = http1
+ self._http2_enabled = http2
+ self._keepalive_expiry = keepalive_expiry
+ self._uds = uds
+ self._ssl_context = SSLContext() if ssl_context is None else ssl_context
+ self.socket = socket
+ self._local_address = local_address
+ self._retries = retries
+
+ alpn_protocols: List[str] = []
+ if http1:
+ alpn_protocols.append("http/1.1")
+ if http2:
+ alpn_protocols.append("h2")
+
+ self._ssl_context.set_alpn_protocols(alpn_protocols)
+
+ self.connection: Optional[AsyncBaseHTTPConnection] = None
+ self._is_http11 = False
+ self._is_http2 = False
+ self._connect_failed = False
+ self._expires_at: Optional[float] = None
+ self._backend = AutoBackend() if backend is None else backend
+
+ def __repr__(self) -> str:
+ return f"<AsyncHTTPConnection [{self.info()}]>"
+
+ def info(self) -> str:
+ if self.connection is None:
+ return "Connection failed" if self._connect_failed else "Connecting"
+ return self.connection.info()
+
+ def should_close(self) -> bool:
+ """
+ Return `True` if the connection is in a state where it should be closed.
+ This occurs when any of the following occur:
+
+ * There are no active requests on an HTTP/1.1 connection, and the underlying
+ socket is readable. The only valid state the socket can be readable in
+ if this occurs is when the b"" EOF marker is about to be returned,
+ indicating a server disconnect.
+ * There are no active requests being made and the keepalive timeout has passed.
+ """
+ if self.connection is None:
+ return False
+ return self.connection.should_close()
+
+ def is_idle(self) -> bool:
+ """
+ Return `True` if the connection is currently idle.
+ """
+ if self.connection is None:
+ return False
+ return self.connection.is_idle()
+
+ def is_closed(self) -> bool:
+ if self.connection is None:
+ return self._connect_failed
+ return self.connection.is_closed()
+
+ def is_available(self) -> bool:
+ """
+ Return `True` if the connection is currently able to accept an outgoing request.
+ This occurs when any of the following occur:
+
+ * The connection has not yet been opened, and HTTP/2 support is enabled.
+ We don't *know* at this point if we'll end up on an HTTP/2 connection or
+ not, but we *might* do, so we indicate availability.
+ * The connection has been opened, and is currently idle.
+ * The connection is open, and is an HTTP/2 connection. The connection must
+ also not currently be exceeding the maximum number of allowable concurrent
+ streams and must not have exhausted the maximum total number of stream IDs.
+ """
+ if self.connection is None:
+ return self._http2_enabled and not self.is_closed()
+ return self.connection.is_available()
+
+ @property
+ def request_lock(self) -> AsyncLock:
+ # We do this lazily, to make sure backend autodetection always
+ # runs within an async context.
+ if not hasattr(self, "_request_lock"):
+ self._request_lock = self._backend.create_lock()
+ return self._request_lock
+
+ async def handle_async_request(
+ self,
+ method: bytes,
+ url: URL,
+ headers: Headers,
+ stream: AsyncByteStream,
+ extensions: dict,
+ ) -> Tuple[int, Headers, AsyncByteStream, dict]:
+ assert url_to_origin(url) == self.origin
+ timeout = cast(TimeoutDict, extensions.get("timeout", {}))
+
+ async with self.request_lock:
+ if self.connection is None:
+ if self._connect_failed:
+ raise NewConnectionRequired()
+ if not self.socket:
+ logger.trace(
+ "open_socket origin=%r timeout=%r", self.origin, timeout
+ )
+ self.socket = await self._open_socket(timeout)
+ self._create_connection(self.socket)
+ elif not self.connection.is_available():
+ raise NewConnectionRequired()
+
+ assert self.connection is not None
+ logger.trace(
+ "connection.handle_async_request method=%r url=%r headers=%r",
+ method,
+ url,
+ headers,
+ )
+ return await self.connection.handle_async_request(
+ method, url, headers, stream, extensions
+ )
+
+ async def _open_socket(self, timeout: TimeoutDict = None) -> AsyncSocketStream:
+ scheme, hostname, port = self.origin
+ timeout = {} if timeout is None else timeout
+ ssl_context = self._ssl_context if scheme == b"https" else None
+
+ retries_left = self._retries
+ delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR)
+
+ while True:
+ try:
+ if self._uds is None:
+ return await self._backend.open_tcp_stream(
+ hostname,
+ port,
+ ssl_context,
+ timeout,
+ local_address=self._local_address,
+ )
+ else:
+ return await self._backend.open_uds_stream(
+ self._uds, hostname, ssl_context, timeout
+ )
+ except (ConnectError, ConnectTimeout):
+ if retries_left <= 0:
+ self._connect_failed = True
+ raise
+ retries_left -= 1
+ delay = next(delays)
+ await self._backend.sleep(delay)
+ except Exception: # noqa: PIE786
+ self._connect_failed = True
+ raise
+
+ def _create_connection(self, socket: AsyncSocketStream) -> None:
+ http_version = socket.get_http_version()
+ logger.trace(
+ "create_connection socket=%r http_version=%r", socket, http_version
+ )
+ if http_version == "HTTP/2" or (
+ self._http2_enabled and not self._http1_enabled
+ ):
+ from .http2 import AsyncHTTP2Connection
+
+ self._is_http2 = True
+ self.connection = AsyncHTTP2Connection(
+ socket=socket,
+ keepalive_expiry=self._keepalive_expiry,
+ backend=self._backend,
+ )
+ else:
+ self._is_http11 = True
+ self.connection = AsyncHTTP11Connection(
+ socket=socket, keepalive_expiry=self._keepalive_expiry
+ )
+
+ async def start_tls(
+ self, hostname: bytes, ssl_context: SSLContext, timeout: TimeoutDict = None
+ ) -> None:
+ if self.connection is not None:
+ logger.trace("start_tls hostname=%r timeout=%r", hostname, timeout)
+ self.socket = await self.connection.start_tls(
+ hostname, ssl_context, timeout
+ )
+ logger.trace("start_tls complete hostname=%r timeout=%r", hostname, timeout)
+
+ async def aclose(self) -> None:
+ async with self.request_lock:
+ if self.connection is not None:
+ await self.connection.aclose()
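
The retry loop in _open_socket consumes a delay schedule matching the RETRIES_BACKOFF_FACTOR comment (0s, 0.5s, 1s, 2s, 4s, ...); a standalone sketch consistent with that comment, not the vendored _utils implementation itself:

    import itertools

    def exponential_backoff(factor: float):
        yield 0.0                       # first retry happens immediately
        for n in itertools.count():
            yield factor * (2 ** n)     # then factor, 2*factor, 4*factor, ...

    assert list(itertools.islice(exponential_backoff(0.5), 6)) == [0.0, 0.5, 1.0, 2.0, 4.0, 8.0]
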
diff --git a/packages/httpcore/_async/connection_pool.py b/packages/httpcore/_async/connection_pool.py
new file mode 100644
index 000000000..f86c2277c
--- /dev/null
+++ b/packages/httpcore/_async/connection_pool.py
@@ -0,0 +1,365 @@
+import warnings
+from ssl import SSLContext
+from typing import (
+ AsyncIterator,
+ Callable,
+ Dict,
+ List,
+ Optional,
+ Set,
+ Tuple,
+ Union,
+ cast,
+)
+
+from .._backends.auto import AsyncBackend, AsyncLock, AsyncSemaphore
+from .._backends.base import lookup_async_backend
+from .._exceptions import LocalProtocolError, PoolTimeout, UnsupportedProtocol
+from .._threadlock import ThreadLock
+from .._types import URL, Headers, Origin, TimeoutDict
+from .._utils import get_logger, origin_to_url_string, url_to_origin
+from .base import AsyncByteStream, AsyncHTTPTransport, NewConnectionRequired
+from .connection import AsyncHTTPConnection
+
+logger = get_logger(__name__)
+
+
+class NullSemaphore(AsyncSemaphore):
+ def __init__(self) -> None:
+ pass
+
+ async def acquire(self, timeout: float = None) -> None:
+ return
+
+ async def release(self) -> None:
+ return
+
+
+class ResponseByteStream(AsyncByteStream):
+ def __init__(
+ self,
+ stream: AsyncByteStream,
+ connection: AsyncHTTPConnection,
+ callback: Callable,
+ ) -> None:
+ """
+ A wrapper around the response stream that we return from
+ `.handle_async_request()`.
+
+ Ensures that when `stream.aclose()` is called, the connection pool
+ is notified via a callback.
+ """
+ self.stream = stream
+ self.connection = connection
+ self.callback = callback
+
+ async def __aiter__(self) -> AsyncIterator[bytes]:
+ async for chunk in self.stream:
+ yield chunk
+
+ async def aclose(self) -> None:
+ try:
+ # Call the underlying stream close callback.
+ # This will be a call to `AsyncHTTP11Connection._response_closed()`
+ # or `AsyncHTTP2Stream._response_closed()`.
+ await self.stream.aclose()
+ finally:
+ # Call the connection pool close callback.
+ # This will be a call to `AsyncConnectionPool._response_closed()`.
+ await self.callback(self.connection)
+
+
+class AsyncConnectionPool(AsyncHTTPTransport):
+ """
+ A connection pool for making HTTP requests.
+
+ Parameters
+ ----------
+ ssl_context:
+ An SSL context to use for verifying connections.
+ max_connections:
+ The maximum number of concurrent connections to allow.
+ max_keepalive_connections:
+ The maximum number of connections to allow before closing keep-alive
+ connections.
+ keepalive_expiry:
+ The maximum time to allow before closing a keep-alive connection.
+ http1:
+ Enable/Disable HTTP/1.1 support. Defaults to True.
+ http2:
+ Enable/Disable HTTP/2 support. Defaults to False.
+ uds:
+ Path to a Unix Domain Socket to use instead of TCP sockets.
+ local_address:
+ Local address to connect from. Can also be used to connect using a particular
+ address family. Using ``local_address="0.0.0.0"`` will connect using an
+ ``AF_INET`` address (IPv4), while using ``local_address="::"`` will connect
+ using an ``AF_INET6`` address (IPv6).
+ retries:
+ The maximum number of retries when trying to establish a connection.
+ backend:
+ A name indicating which concurrency backend to use.
+ """
+
+ def __init__(
+ self,
+ ssl_context: SSLContext = None,
+ max_connections: int = None,
+ max_keepalive_connections: int = None,
+ keepalive_expiry: float = None,
+ http1: bool = True,
+ http2: bool = False,
+ uds: str = None,
+ local_address: str = None,
+ retries: int = 0,
+ max_keepalive: int = None,
+ backend: Union[AsyncBackend, str] = "auto",
+ ):
+ if max_keepalive is not None:
+ warnings.warn(
+ "'max_keepalive' is deprecated. Use 'max_keepalive_connections'.",
+ DeprecationWarning,
+ )
+ max_keepalive_connections = max_keepalive
+
+ if isinstance(backend, str):
+ backend = lookup_async_backend(backend)
+
+ self._ssl_context = SSLContext() if ssl_context is None else ssl_context
+ self._max_connections = max_connections
+ self._max_keepalive_connections = max_keepalive_connections
+ self._keepalive_expiry = keepalive_expiry
+ self._http1 = http1
+ self._http2 = http2
+ self._uds = uds
+ self._local_address = local_address
+ self._retries = retries
+ self._connections: Dict[Origin, Set[AsyncHTTPConnection]] = {}
+ self._thread_lock = ThreadLock()
+ self._backend = backend
+ self._next_keepalive_check = 0.0
+
+ if not (http1 or http2):
+ raise ValueError("Either http1 or http2 must be True.")
+
+ if http2:
+ try:
+ import h2 # noqa: F401
+ except ImportError:
+ raise ImportError(
+ "Attempted to use http2=True, but the 'h2' "
+ "package is not installed. Use 'pip install httpcore[http2]'."
+ )
+
+ @property
+ def _connection_semaphore(self) -> AsyncSemaphore:
+ # We do this lazily, to make sure backend autodetection always
+ # runs within an async context.
+ if not hasattr(self, "_internal_semaphore"):
+ if self._max_connections is not None:
+ self._internal_semaphore = self._backend.create_semaphore(
+ self._max_connections, exc_class=PoolTimeout
+ )
+ else:
+ self._internal_semaphore = NullSemaphore()
+
+ return self._internal_semaphore
+
+ @property
+ def _connection_acquiry_lock(self) -> AsyncLock:
+ if not hasattr(self, "_internal_connection_acquiry_lock"):
+ self._internal_connection_acquiry_lock = self._backend.create_lock()
+ return self._internal_connection_acquiry_lock
+
+ def _create_connection(
+ self,
+ origin: Tuple[bytes, bytes, int],
+ ) -> AsyncHTTPConnection:
+ return AsyncHTTPConnection(
+ origin=origin,
+ http1=self._http1,
+ http2=self._http2,
+ keepalive_expiry=self._keepalive_expiry,
+ uds=self._uds,
+ ssl_context=self._ssl_context,
+ local_address=self._local_address,
+ retries=self._retries,
+ backend=self._backend,
+ )
+
+ async def handle_async_request(
+ self,
+ method: bytes,
+ url: URL,
+ headers: Headers,
+ stream: AsyncByteStream,
+ extensions: dict,
+ ) -> Tuple[int, Headers, AsyncByteStream, dict]:
+ if url[0] not in (b"http", b"https"):
+ scheme = url[0].decode("latin-1")
+ host = url[1].decode("latin-1")
+ if scheme == "":
+ raise UnsupportedProtocol(
+ f"The request to '://{host}/' is missing either an 'http://' \
+ or 'https://' protocol."
+ )
+ else:
+ raise UnsupportedProtocol(
+ f"The request to '{scheme}://{host}' has \
+ an unsupported protocol {scheme!r}"
+ )
+
+ if not url[1]:
+ raise LocalProtocolError("Missing hostname in URL.")
+
+ origin = url_to_origin(url)
+ timeout = cast(TimeoutDict, extensions.get("timeout", {}))
+
+ await self._keepalive_sweep()
+
+ connection: Optional[AsyncHTTPConnection] = None
+ while connection is None:
+ async with self._connection_acquiry_lock:
+ # We get-or-create a connection as an atomic operation, to ensure
+ # that HTTP/2 requests issued in close concurrency will end up
+ # on the same connection.
+ logger.trace("get_connection_from_pool=%r", origin)
+ connection = await self._get_connection_from_pool(origin)
+
+ if connection is None:
+ connection = self._create_connection(origin=origin)
+ logger.trace("created connection=%r", connection)
+ await self._add_to_pool(connection, timeout=timeout)
+ else:
+ logger.trace("reuse connection=%r", connection)
+
+ try:
+ response = await connection.handle_async_request(
+ method, url, headers=headers, stream=stream, extensions=extensions
+ )
+ except NewConnectionRequired:
+ connection = None
+ except BaseException: # noqa: PIE786
+ # See https://github.com/encode/httpcore/pull/305 for motivation
+ # behind catching 'BaseException' rather than 'Exception' here.
+ logger.trace("remove from pool connection=%r", connection)
+ await self._remove_from_pool(connection)
+ raise
+
+ status_code, headers, stream, extensions = response
+ wrapped_stream = ResponseByteStream(
+ stream, connection=connection, callback=self._response_closed
+ )
+ return status_code, headers, wrapped_stream, extensions
+
+ async def _get_connection_from_pool(
+ self, origin: Origin
+ ) -> Optional[AsyncHTTPConnection]:
+ # Determine expired keep alive connections on this origin.
+ reuse_connection = None
+ connections_to_close = set()
+
+ for connection in self._connections_for_origin(origin):
+ if connection.should_close():
+ connections_to_close.add(connection)
+ await self._remove_from_pool(connection)
+ elif connection.is_available():
+ reuse_connection = connection
+
+ # Close any dropped connections.
+ for connection in connections_to_close:
+ await connection.aclose()
+
+ return reuse_connection
+
+ async def _response_closed(self, connection: AsyncHTTPConnection) -> None:
+ remove_from_pool = False
+ close_connection = False
+
+ if connection.is_closed():
+ remove_from_pool = True
+ elif connection.is_idle():
+ num_connections = len(self._get_all_connections())
+ if (
+ self._max_keepalive_connections is not None
+ and num_connections > self._max_keepalive_connections
+ ):
+ remove_from_pool = True
+ close_connection = True
+
+ if remove_from_pool:
+ await self._remove_from_pool(connection)
+
+ if close_connection:
+ await connection.aclose()
+
+ async def _keepalive_sweep(self) -> None:
+ """
+ Remove any IDLE connections that have expired past their keep-alive time.
+ """
+ if self._keepalive_expiry is None:
+ return
+
+ now = await self._backend.time()
+ if now < self._next_keepalive_check:
+ return
+
+ self._next_keepalive_check = now + min(1.0, self._keepalive_expiry)
+ connections_to_close = set()
+
+ for connection in self._get_all_connections():
+ if connection.should_close():
+ connections_to_close.add(connection)
+ await self._remove_from_pool(connection)
+
+ for connection in connections_to_close:
+ await connection.aclose()
+
+ async def _add_to_pool(
+ self, connection: AsyncHTTPConnection, timeout: TimeoutDict
+ ) -> None:
+ logger.trace("adding connection to pool=%r", connection)
+ await self._connection_semaphore.acquire(timeout=timeout.get("pool", None))
+ async with self._thread_lock:
+ self._connections.setdefault(connection.origin, set())
+ self._connections[connection.origin].add(connection)
+
+ async def _remove_from_pool(self, connection: AsyncHTTPConnection) -> None:
+ logger.trace("removing connection from pool=%r", connection)
+ async with self._thread_lock:
+ if connection in self._connections.get(connection.origin, set()):
+ await self._connection_semaphore.release()
+ self._connections[connection.origin].remove(connection)
+ if not self._connections[connection.origin]:
+ del self._connections[connection.origin]
+
+ def _connections_for_origin(self, origin: Origin) -> Set[AsyncHTTPConnection]:
+ return set(self._connections.get(origin, set()))
+
+ def _get_all_connections(self) -> Set[AsyncHTTPConnection]:
+ connections: Set[AsyncHTTPConnection] = set()
+ for connection_set in self._connections.values():
+ connections |= connection_set
+ return connections
+
+ async def aclose(self) -> None:
+ connections = self._get_all_connections()
+ for connection in connections:
+ await self._remove_from_pool(connection)
+
+ # Close all connections
+ for connection in connections:
+ await connection.aclose()
+
+ async def get_connection_info(self) -> Dict[str, List[str]]:
+ """
+ Returns a dict of origin URLs to a list of summary strings for each connection.
+ """
+ await self._keepalive_sweep()
+
+ stats = {}
+ for origin, connections in self._connections.items():
+ stats[origin_to_url_string(origin)] = sorted(
+ [connection.info() for connection in connections]
+ )
+ return stats
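
Pool limits map directly onto the constructor arguments above; a short configuration sketch (the argument values are illustrative, not recommendations):

    import asyncio
    import httpcore

    async def main() -> None:
        pool = httpcore.AsyncConnectionPool(
            max_connections=10,            # enforced via the connection semaphore
            max_keepalive_connections=5,   # surplus idle connections are closed
            keepalive_expiry=30.0,         # idle connections expire after 30 seconds
            http2=False,                   # http2=True additionally requires the 'h2' package
        )
        try:
            # Empty until requests are made; then maps origin URL to connection summaries.
            print(await pool.get_connection_info())
        finally:
            await pool.aclose()

    asyncio.run(main())
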
diff --git a/packages/httpcore/_async/http.py b/packages/httpcore/_async/http.py
new file mode 100644
index 000000000..06270f0f0
--- /dev/null
+++ b/packages/httpcore/_async/http.py
@@ -0,0 +1,42 @@
+from ssl import SSLContext
+
+from .._backends.auto import AsyncSocketStream
+from .._types import TimeoutDict
+from .base import AsyncHTTPTransport
+
+
+class AsyncBaseHTTPConnection(AsyncHTTPTransport):
+ def info(self) -> str:
+ raise NotImplementedError() # pragma: nocover
+
+ def should_close(self) -> bool:
+ """
+ Return `True` if the connection is in a state where it should be closed.
+ """
+ raise NotImplementedError() # pragma: nocover
+
+ def is_idle(self) -> bool:
+ """
+ Return `True` if the connection is currently idle.
+ """
+ raise NotImplementedError() # pragma: nocover
+
+ def is_closed(self) -> bool:
+ """
+ Return `True` if the connection has been closed.
+ """
+ raise NotImplementedError() # pragma: nocover
+
+ def is_available(self) -> bool:
+ """
+ Return `True` if the connection is currently able to accept an outgoing request.
+ """
+ raise NotImplementedError() # pragma: nocover
+
+ async def start_tls(
+ self, hostname: bytes, ssl_context: SSLContext, timeout: TimeoutDict = None
+ ) -> AsyncSocketStream:
+ """
+ Upgrade the underlying socket to TLS.
+ """
+ raise NotImplementedError() # pragma: nocover
diff --git a/packages/httpcore/_async/http11.py b/packages/httpcore/_async/http11.py
new file mode 100644
index 000000000..a265657c6
--- /dev/null
+++ b/packages/httpcore/_async/http11.py
@@ -0,0 +1,269 @@
+import enum
+import time
+from ssl import SSLContext
+from typing import AsyncIterator, List, Optional, Tuple, Union, cast
+
+import h11
+
+from .._backends.auto import AsyncSocketStream
+from .._bytestreams import AsyncIteratorByteStream
+from .._exceptions import LocalProtocolError, RemoteProtocolError, map_exceptions
+from .._types import URL, Headers, TimeoutDict
+from .._utils import get_logger
+from .base import AsyncByteStream, NewConnectionRequired
+from .http import AsyncBaseHTTPConnection
+
+H11Event = Union[
+ h11.Request,
+ h11.Response,
+ h11.InformationalResponse,
+ h11.Data,
+ h11.EndOfMessage,
+ h11.ConnectionClosed,
+]
+
+
+class ConnectionState(enum.IntEnum):
+ NEW = 0
+ ACTIVE = 1
+ IDLE = 2
+ CLOSED = 3
+
+
+logger = get_logger(__name__)
+
+
+class AsyncHTTP11Connection(AsyncBaseHTTPConnection):
+ READ_NUM_BYTES = 64 * 1024
+
+ def __init__(self, socket: AsyncSocketStream, keepalive_expiry: float = None):
+ self.socket = socket
+
+ self._keepalive_expiry: Optional[float] = keepalive_expiry
+ self._should_expire_at: Optional[float] = None
+ self._h11_state = h11.Connection(our_role=h11.CLIENT)
+ self._state = ConnectionState.NEW
+
+ def __repr__(self) -> str:
+ return f"<AsyncHTTP11Connection [{self._state.name}]>"
+
+ def _now(self) -> float:
+ return time.monotonic()
+
+ def _server_disconnected(self) -> bool:
+ """
+ Return True if the connection is idle, and the underlying socket is readable.
+ The only valid state the socket can be readable here is when the b""
+ EOF marker is about to be returned, indicating a server disconnect.
+ """
+ return self._state == ConnectionState.IDLE and self.socket.is_readable()
+
+ def _keepalive_expired(self) -> bool:
+ """
+ Return True if the connection is idle, and has passed its keepalive
+ expiry time.
+ """
+ return (
+ self._state == ConnectionState.IDLE
+ and self._should_expire_at is not None
+ and self._now() >= self._should_expire_at
+ )
+
+ def info(self) -> str:
+ return f"HTTP/1.1, {self._state.name}"
+
+ def should_close(self) -> bool:
+ """
+ Return `True` if the connection is in a state where it should be closed.
+ """
+ return self._server_disconnected() or self._keepalive_expired()
+
+ def is_idle(self) -> bool:
+ """
+ Return `True` if the connection is currently idle.
+ """
+ return self._state == ConnectionState.IDLE
+
+ def is_closed(self) -> bool:
+ """
+ Return `True` if the connection has been closed.
+ """
+ return self._state == ConnectionState.CLOSED
+
+ def is_available(self) -> bool:
+ """
+ Return `True` if the connection is currently able to accept an outgoing request.
+ """
+ return self._state == ConnectionState.IDLE
+
+ async def handle_async_request(
+ self,
+ method: bytes,
+ url: URL,
+ headers: Headers,
+ stream: AsyncByteStream,
+ extensions: dict,
+ ) -> Tuple[int, Headers, AsyncByteStream, dict]:
+ """
+ Send a single HTTP/1.1 request.
+
+ Note that there is no kind of task/thread locking at this layer of interface.
+ Dealing with locking for concurrency is handled by the `AsyncHTTPConnection`.
+ """
+ timeout = cast(TimeoutDict, extensions.get("timeout", {}))
+
+ if self._state in (ConnectionState.NEW, ConnectionState.IDLE):
+ self._state = ConnectionState.ACTIVE
+ self._should_expire_at = None
+ else:
+ raise NewConnectionRequired()
+
+ await self._send_request(method, url, headers, timeout)
+ await self._send_request_body(stream, timeout)
+ (
+ http_version,
+ status_code,
+ reason_phrase,
+ headers,
+ ) = await self._receive_response(timeout)
+ response_stream = AsyncIteratorByteStream(
+ aiterator=self._receive_response_data(timeout),
+ aclose_func=self._response_closed,
+ )
+ extensions = {
+ "http_version": http_version,
+ "reason_phrase": reason_phrase,
+ }
+ return (status_code, headers, response_stream, extensions)
+
+ async def start_tls(
+ self, hostname: bytes, ssl_context: SSLContext, timeout: TimeoutDict = None
+ ) -> AsyncSocketStream:
+ timeout = {} if timeout is None else timeout
+ self.socket = await self.socket.start_tls(hostname, ssl_context, timeout)
+ return self.socket
+
+ async def _send_request(
+ self, method: bytes, url: URL, headers: Headers, timeout: TimeoutDict
+ ) -> None:
+ """
+ Send the request line and headers.
+ """
+ logger.trace("send_request method=%r url=%r headers=%s", method, url, headers)
+ _scheme, _host, _port, target = url
+ with map_exceptions({h11.LocalProtocolError: LocalProtocolError}):
+ event = h11.Request(method=method, target=target, headers=headers)
+ await self._send_event(event, timeout)
+
+ async def _send_request_body(
+ self, stream: AsyncByteStream, timeout: TimeoutDict
+ ) -> None:
+ """
+ Send the request body.
+ """
+ # Send the request body.
+ async for chunk in stream:
+ logger.trace("send_data=Data(<%d bytes>)", len(chunk))
+ event = h11.Data(data=chunk)
+ await self._send_event(event, timeout)
+
+ # Finalize sending the request.
+ event = h11.EndOfMessage()
+ await self._send_event(event, timeout)
+
+ async def _send_event(self, event: H11Event, timeout: TimeoutDict) -> None:
+ """
+ Send a single `h11` event to the network, waiting for the data to
+ drain before returning.
+ """
+ bytes_to_send = self._h11_state.send(event)
+ await self.socket.write(bytes_to_send, timeout)
+
+ async def _receive_response(
+ self, timeout: TimeoutDict
+ ) -> Tuple[bytes, int, bytes, List[Tuple[bytes, bytes]]]:
+ """
+ Read the response status and headers from the network.
+ """
+ while True:
+ event = await self._receive_event(timeout)
+ if isinstance(event, h11.Response):
+ break
+
+ http_version = b"HTTP/" + event.http_version
+
+ # h11 version 0.11+ supports a `raw_items` interface to get the
+ # raw header casing, rather than the enforced lowercase headers.
+ headers = event.headers.raw_items()
+
+ return http_version, event.status_code, event.reason, headers
+
+ async def _receive_response_data(
+ self, timeout: TimeoutDict
+ ) -> AsyncIterator[bytes]:
+ """
+ Read the response data from the network.
+ """
+ while True:
+ event = await self._receive_event(timeout)
+ if isinstance(event, h11.Data):
+ logger.trace("receive_event=Data(<%d bytes>)", len(event.data))
+ yield bytes(event.data)
+ elif isinstance(event, (h11.EndOfMessage, h11.PAUSED)):
+ logger.trace("receive_event=%r", event)
+ break
+
+ async def _receive_event(self, timeout: TimeoutDict) -> H11Event:
+ """
+ Read a single `h11` event, reading more data from the network if needed.
+ """
+ while True:
+ with map_exceptions({h11.RemoteProtocolError: RemoteProtocolError}):
+ event = self._h11_state.next_event()
+
+ if event is h11.NEED_DATA:
+ data = await self.socket.read(self.READ_NUM_BYTES, timeout)
+
+ # If we feed this case through h11 we'll raise an exception like:
+ #
+ # httpcore.RemoteProtocolError: can't handle event type
+ # ConnectionClosed when role=SERVER and state=SEND_RESPONSE
+ #
+ # Which is accurate, but not very informative from an end-user
+ # perspective. Instead we handle messaging for this case distinctly.
+ if data == b"" and self._h11_state.their_state == h11.SEND_RESPONSE:
+ msg = "Server disconnected without sending a response."
+ raise RemoteProtocolError(msg)
+
+ self._h11_state.receive_data(data)
+ else:
+ assert event is not h11.NEED_DATA
+ break
+ return event
+
+ async def _response_closed(self) -> None:
+ logger.trace(
+ "response_closed our_state=%r their_state=%r",
+ self._h11_state.our_state,
+ self._h11_state.their_state,
+ )
+ if (
+ self._h11_state.our_state is h11.DONE
+ and self._h11_state.their_state is h11.DONE
+ ):
+ self._h11_state.start_next_cycle()
+ self._state = ConnectionState.IDLE
+ if self._keepalive_expiry is not None:
+ self._should_expire_at = self._now() + self._keepalive_expiry
+ else:
+ await self.aclose()
+
+ async def aclose(self) -> None:
+ if self._state != ConnectionState.CLOSED:
+ self._state = ConnectionState.CLOSED
+
+ if self._h11_state.our_state is h11.MUST_CLOSE:
+ event = h11.ConnectionClosed()
+ self._h11_state.send(event)
+
+ await self.socket.aclose()
diff --git a/packages/httpcore/_async/http2.py b/packages/httpcore/_async/http2.py
new file mode 100644
index 000000000..35a4e0911
--- /dev/null
+++ b/packages/httpcore/_async/http2.py
@@ -0,0 +1,446 @@
+import enum
+import time
+from ssl import SSLContext
+from typing import AsyncIterator, Dict, List, Optional, Tuple, cast
+
+import h2.connection
+import h2.events
+from h2.config import H2Configuration
+from h2.exceptions import NoAvailableStreamIDError
+from h2.settings import SettingCodes, Settings
+
+from .._backends.auto import AsyncBackend, AsyncLock, AsyncSemaphore, AsyncSocketStream
+from .._bytestreams import AsyncIteratorByteStream
+from .._exceptions import LocalProtocolError, PoolTimeout, RemoteProtocolError
+from .._types import URL, Headers, TimeoutDict
+from .._utils import get_logger
+from .base import AsyncByteStream, NewConnectionRequired
+from .http import AsyncBaseHTTPConnection
+
+logger = get_logger(__name__)
+
+
+class ConnectionState(enum.IntEnum):
+ IDLE = 0
+ ACTIVE = 1
+ CLOSED = 2
+
+
+class AsyncHTTP2Connection(AsyncBaseHTTPConnection):
+ READ_NUM_BYTES = 64 * 1024
+ CONFIG = H2Configuration(validate_inbound_headers=False)
+
+ def __init__(
+ self,
+ socket: AsyncSocketStream,
+ backend: AsyncBackend,
+ keepalive_expiry: float = None,
+ ):
+ self.socket = socket
+
+ self._backend = backend
+ self._h2_state = h2.connection.H2Connection(config=self.CONFIG)
+
+ self._sent_connection_init = False
+ self._streams: Dict[int, AsyncHTTP2Stream] = {}
+ self._events: Dict[int, List[h2.events.Event]] = {}
+
+ self._keepalive_expiry: Optional[float] = keepalive_expiry
+ self._should_expire_at: Optional[float] = None
+ self._state = ConnectionState.ACTIVE
+ self._exhausted_available_stream_ids = False
+
+ def __repr__(self) -> str:
+ return f"<AsyncHTTP2Connection [{self._state.name}]>"
+
+ def info(self) -> str:
+ return f"HTTP/2, {self._state.name}, {len(self._streams)} streams"
+
+ def _now(self) -> float:
+ return time.monotonic()
+
+ def should_close(self) -> bool:
+ """
+ Return `True` if the connection is currently idle, and the keepalive
+ timeout has passed.
+ """
+ return (
+ self._state == ConnectionState.IDLE
+ and self._should_expire_at is not None
+ and self._now() >= self._should_expire_at
+ )
+
+ def is_idle(self) -> bool:
+ """
+ Return `True` if the connection is currently idle.
+ """
+ return self._state == ConnectionState.IDLE
+
+ def is_closed(self) -> bool:
+ """
+ Return `True` if the connection has been closed.
+ """
+ return self._state == ConnectionState.CLOSED
+
+ def is_available(self) -> bool:
+ """
+ Return `True` if the connection is currently able to accept an outgoing request.
+ This occurs when any of the following occur:
+
+ * The connection has not yet been opened, and HTTP/2 support is enabled.
+ We don't *know* at this point if we'll end up on an HTTP/2 connection or
+ not, but we *might* do, so we indicate availability.
+ * The connection has been opened, and is currently idle.
+ * The connection is open, and is an HTTP/2 connection. The connection must
+ also not have exhausted the maximum total number of stream IDs.
+ """
+ return (
+ self._state != ConnectionState.CLOSED
+ and not self._exhausted_available_stream_ids
+ )
+
+ @property
+ def init_lock(self) -> AsyncLock:
+ # We do this lazily, to make sure backend autodetection always
+ # runs within an async context.
+ if not hasattr(self, "_initialization_lock"):
+ self._initialization_lock = self._backend.create_lock()
+ return self._initialization_lock
+
+ @property
+ def read_lock(self) -> AsyncLock:
+ # We do this lazily, to make sure backend autodetection always
+ # runs within an async context.
+ if not hasattr(self, "_read_lock"):
+ self._read_lock = self._backend.create_lock()
+ return self._read_lock
+
+ @property
+ def max_streams_semaphore(self) -> AsyncSemaphore:
+ # We do this lazily, to make sure backend autodetection always
+ # runs within an async context.
+ if not hasattr(self, "_max_streams_semaphore"):
+ max_streams = self._h2_state.local_settings.max_concurrent_streams
+ self._max_streams_semaphore = self._backend.create_semaphore(
+ max_streams, exc_class=PoolTimeout
+ )
+ return self._max_streams_semaphore
+
+ async def start_tls(
+ self, hostname: bytes, ssl_context: SSLContext, timeout: TimeoutDict = None
+ ) -> AsyncSocketStream:
+ raise NotImplementedError("TLS upgrade not supported on HTTP/2 connections.")
+
+ async def handle_async_request(
+ self,
+ method: bytes,
+ url: URL,
+ headers: Headers,
+ stream: AsyncByteStream,
+ extensions: dict,
+ ) -> Tuple[int, Headers, AsyncByteStream, dict]:
+ timeout = cast(TimeoutDict, extensions.get("timeout", {}))
+
+ async with self.init_lock:
+ if not self._sent_connection_init:
+ # The very first stream is responsible for initiating the connection.
+ self._state = ConnectionState.ACTIVE
+ await self.send_connection_init(timeout)
+ self._sent_connection_init = True
+
+ await self.max_streams_semaphore.acquire()
+ try:
+ try:
+ stream_id = self._h2_state.get_next_available_stream_id()
+ except NoAvailableStreamIDError:
+ self._exhausted_available_stream_ids = True
+ raise NewConnectionRequired()
+ else:
+ self._state = ConnectionState.ACTIVE
+ self._should_expire_at = None
+
+ h2_stream = AsyncHTTP2Stream(stream_id=stream_id, connection=self)
+ self._streams[stream_id] = h2_stream
+ self._events[stream_id] = []
+ return await h2_stream.handle_async_request(
+ method, url, headers, stream, extensions
+ )
+ except Exception: # noqa: PIE786
+ await self.max_streams_semaphore.release()
+ raise
+
+ async def send_connection_init(self, timeout: TimeoutDict) -> None:
+ """
+ The HTTP/2 connection requires some initial setup before we can start
+ using individual request/response streams on it.
+ """
+ # Need to set these manually here instead of manipulating via
+ # __setitem__() otherwise the H2Connection will emit SettingsUpdate
+ # frames in addition to sending the undesired defaults.
+ self._h2_state.local_settings = Settings(
+ client=True,
+ initial_values={
+ # Disable PUSH_PROMISE frames from the server since we don't do anything
+ # with them for now. Maybe when we support caching?
+ SettingCodes.ENABLE_PUSH: 0,
+ # These two are taken from h2 for safe defaults
+ SettingCodes.MAX_CONCURRENT_STREAMS: 100,
+ SettingCodes.MAX_HEADER_LIST_SIZE: 65536,
+ },
+ )
+
+ # Some websites (*cough* Yahoo *cough*) balk at this setting being
+ # present in the initial handshake since it's not defined in the original
+ # RFC despite the RFC mandating ignoring settings you don't know about.
+ del self._h2_state.local_settings[
+ h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL
+ ]
+
+ logger.trace("initiate_connection=%r", self)
+ self._h2_state.initiate_connection()
+ self._h2_state.increment_flow_control_window(2 ** 24)
+ data_to_send = self._h2_state.data_to_send()
+ await self.socket.write(data_to_send, timeout)
+
+ def is_socket_readable(self) -> bool:
+ return self.socket.is_readable()
+
+ async def aclose(self) -> None:
+ logger.trace("close_connection=%r", self)
+ if self._state != ConnectionState.CLOSED:
+ self._state = ConnectionState.CLOSED
+
+ await self.socket.aclose()
+
+ async def wait_for_outgoing_flow(self, stream_id: int, timeout: TimeoutDict) -> int:
+ """
+ Returns the maximum allowable outgoing flow for a given stream.
+ If the allowable flow is zero, then waits on the network until
+ WindowUpdated frames have increased the flow rate.
+ https://tools.ietf.org/html/rfc7540#section-6.9
+ """
+ local_flow = self._h2_state.local_flow_control_window(stream_id)
+ connection_flow = self._h2_state.max_outbound_frame_size
+ flow = min(local_flow, connection_flow)
+ while flow == 0:
+ await self.receive_events(timeout)
+ local_flow = self._h2_state.local_flow_control_window(stream_id)
+ connection_flow = self._h2_state.max_outbound_frame_size
+ flow = min(local_flow, connection_flow)
+ return flow
+
+ async def wait_for_event(
+ self, stream_id: int, timeout: TimeoutDict
+ ) -> h2.events.Event:
+ """
+ Returns the next event for a given stream.
+ If no events are available yet, then waits on the network until
+ an event is available.
+ """
+ async with self.read_lock:
+ while not self._events[stream_id]:
+ await self.receive_events(timeout)
+ return self._events[stream_id].pop(0)
+
+ async def receive_events(self, timeout: TimeoutDict) -> None:
+ """
+ Read some data from the network, and update the H2 state.
+ """
+ data = await self.socket.read(self.READ_NUM_BYTES, timeout)
+ if data == b"":
+ raise RemoteProtocolError("Server disconnected")
+
+ events = self._h2_state.receive_data(data)
+ for event in events:
+ event_stream_id = getattr(event, "stream_id", 0)
+ logger.trace("receive_event stream_id=%r event=%s", event_stream_id, event)
+
+ if hasattr(event, "error_code"):
+ raise RemoteProtocolError(event)
+
+ if event_stream_id in self._events:
+ self._events[event_stream_id].append(event)
+
+ data_to_send = self._h2_state.data_to_send()
+ await self.socket.write(data_to_send, timeout)
+
+ async def send_headers(
+ self, stream_id: int, headers: Headers, end_stream: bool, timeout: TimeoutDict
+ ) -> None:
+ logger.trace("send_headers stream_id=%r headers=%r", stream_id, headers)
+ self._h2_state.send_headers(stream_id, headers, end_stream=end_stream)
+ self._h2_state.increment_flow_control_window(2 ** 24, stream_id=stream_id)
+ data_to_send = self._h2_state.data_to_send()
+ await self.socket.write(data_to_send, timeout)
+
+ async def send_data(
+ self, stream_id: int, chunk: bytes, timeout: TimeoutDict
+ ) -> None:
+ logger.trace("send_data stream_id=%r chunk=%r", stream_id, chunk)
+ self._h2_state.send_data(stream_id, chunk)
+ data_to_send = self._h2_state.data_to_send()
+ await self.socket.write(data_to_send, timeout)
+
+ async def end_stream(self, stream_id: int, timeout: TimeoutDict) -> None:
+ logger.trace("end_stream stream_id=%r", stream_id)
+ self._h2_state.end_stream(stream_id)
+ data_to_send = self._h2_state.data_to_send()
+ await self.socket.write(data_to_send, timeout)
+
+ async def acknowledge_received_data(
+ self, stream_id: int, amount: int, timeout: TimeoutDict
+ ) -> None:
+ self._h2_state.acknowledge_received_data(amount, stream_id)
+ data_to_send = self._h2_state.data_to_send()
+ await self.socket.write(data_to_send, timeout)
+
+ async def close_stream(self, stream_id: int) -> None:
+ try:
+ logger.trace("close_stream stream_id=%r", stream_id)
+ del self._streams[stream_id]
+ del self._events[stream_id]
+
+ if not self._streams:
+ if self._state == ConnectionState.ACTIVE:
+ if self._exhausted_available_stream_ids:
+ await self.aclose()
+ else:
+ self._state = ConnectionState.IDLE
+ if self._keepalive_expiry is not None:
+ self._should_expire_at = (
+ self._now() + self._keepalive_expiry
+ )
+ finally:
+ await self.max_streams_semaphore.release()
+
+
+class AsyncHTTP2Stream:
+ def __init__(self, stream_id: int, connection: AsyncHTTP2Connection) -> None:
+ self.stream_id = stream_id
+ self.connection = connection
+
+ async def handle_async_request(
+ self,
+ method: bytes,
+ url: URL,
+ headers: Headers,
+ stream: AsyncByteStream,
+ extensions: dict,
+ ) -> Tuple[int, Headers, AsyncByteStream, dict]:
+ headers = [(k.lower(), v) for (k, v) in headers]
+ timeout = cast(TimeoutDict, extensions.get("timeout", {}))
+
+ # Send the request.
+ seen_headers = set(key for key, value in headers)
+ has_body = (
+ b"content-length" in seen_headers or b"transfer-encoding" in seen_headers
+ )
+
+ await self.send_headers(method, url, headers, has_body, timeout)
+ if has_body:
+ await self.send_body(stream, timeout)
+
+ # Receive the response.
+ status_code, headers = await self.receive_response(timeout)
+ response_stream = AsyncIteratorByteStream(
+ aiterator=self.body_iter(timeout), aclose_func=self._response_closed
+ )
+
+ extensions = {
+ "http_version": b"HTTP/2",
+ }
+ return (status_code, headers, response_stream, extensions)
+
+ async def send_headers(
+ self,
+ method: bytes,
+ url: URL,
+ headers: Headers,
+ has_body: bool,
+ timeout: TimeoutDict,
+ ) -> None:
+ scheme, hostname, port, path = url
+
+ # In HTTP/2 the ':authority' pseudo-header is used instead of 'Host'.
+ # In order to gracefully handle HTTP/1.1 and HTTP/2 we always require
+ # HTTP/1.1 style headers, and map them appropriately if we end up on
+ # an HTTP/2 connection.
+ authority = None
+
+ for k, v in headers:
+ if k == b"host":
+ authority = v
+ break
+
+ if authority is None:
+ # Mirror the same error we'd see with `h11`, so that the behaviour
+ # is consistent. Although we're dealing with an `:authority`
+ # pseudo-header by this point, from an end-user perspective the issue
+ # is that the outgoing request needed to include a `host` header.
+ raise LocalProtocolError("Missing mandatory Host: header")
+
+ headers = [
+ (b":method", method),
+ (b":authority", authority),
+ (b":scheme", scheme),
+ (b":path", path),
+ ] + [
+ (k, v)
+ for k, v in headers
+ if k
+ not in (
+ b"host",
+ b"transfer-encoding",
+ )
+ ]
+ end_stream = not has_body
+
+ await self.connection.send_headers(self.stream_id, headers, end_stream, timeout)
+
+ async def send_body(self, stream: AsyncByteStream, timeout: TimeoutDict) -> None:
+ async for data in stream:
+ while data:
+ max_flow = await self.connection.wait_for_outgoing_flow(
+ self.stream_id, timeout
+ )
+ chunk_size = min(len(data), max_flow)
+ chunk, data = data[:chunk_size], data[chunk_size:]
+ await self.connection.send_data(self.stream_id, chunk, timeout)
+
+ await self.connection.end_stream(self.stream_id, timeout)
+
+ async def receive_response(
+ self, timeout: TimeoutDict
+ ) -> Tuple[int, List[Tuple[bytes, bytes]]]:
+ """
+ Read the response status and headers from the network.
+ """
+ while True:
+ event = await self.connection.wait_for_event(self.stream_id, timeout)
+ if isinstance(event, h2.events.ResponseReceived):
+ break
+
+ status_code = 200
+ headers = []
+ for k, v in event.headers:
+ if k == b":status":
+ status_code = int(v.decode("ascii", errors="ignore"))
+ elif not k.startswith(b":"):
+ headers.append((k, v))
+
+ return (status_code, headers)
+
+ async def body_iter(self, timeout: TimeoutDict) -> AsyncIterator[bytes]:
+ while True:
+ event = await self.connection.wait_for_event(self.stream_id, timeout)
+ if isinstance(event, h2.events.DataReceived):
+ amount = event.flow_controlled_length
+ await self.connection.acknowledge_received_data(
+ self.stream_id, amount, timeout
+ )
+ yield event.data
+ elif isinstance(event, (h2.events.StreamEnded, h2.events.StreamReset)):
+ break
+
+ async def _response_closed(self) -> None:
+ await self.connection.close_stream(self.stream_id)
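
AsyncHTTP2Stream.send_headers() above translates HTTP/1.1-style headers into HTTP/2 pseudo-headers; a plain-Python sketch of that mapping (the variable names and example values here are assumptions, not the module's API):

    request_headers = [(b"host", b"example.org"), (b"accept", b"*/*")]
    scheme, _host, _port, path = (b"https", b"example.org", 443, b"/index.html")

    authority = dict(request_headers)[b"host"]     # ':authority' replaces 'Host'
    h2_headers = [
        (b":method", b"GET"),
        (b":authority", authority),
        (b":scheme", scheme),
        (b":path", path),
    ] + [(k, v) for k, v in request_headers
         if k not in (b"host", b"transfer-encoding")]

    assert (b":authority", b"example.org") in h2_headers
    assert (b"host", b"example.org") not in h2_headers
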
diff --git a/packages/httpcore/_async/http_proxy.py b/packages/httpcore/_async/http_proxy.py
new file mode 100644
index 000000000..275bf214f
--- /dev/null
+++ b/packages/httpcore/_async/http_proxy.py
@@ -0,0 +1,290 @@
+from http import HTTPStatus
+from ssl import SSLContext
+from typing import Tuple, cast
+
+from .._bytestreams import ByteStream
+from .._exceptions import ProxyError
+from .._types import URL, Headers, TimeoutDict
+from .._utils import get_logger, url_to_origin
+from .base import AsyncByteStream
+from .connection import AsyncHTTPConnection
+from .connection_pool import AsyncConnectionPool, ResponseByteStream
+
+logger = get_logger(__name__)
+
+
+def get_reason_phrase(status_code: int) -> str:
+ try:
+ return HTTPStatus(status_code).phrase
+ except ValueError:
+ return ""
+
+
+def merge_headers(
+ default_headers: Headers = None, override_headers: Headers = None
+) -> Headers:
+ """
+ Append default_headers and override_headers, de-duplicating if a key exists in
+ both.
+ """
+ default_headers = [] if default_headers is None else default_headers
+ override_headers = [] if override_headers is None else override_headers
+ has_override = set([key.lower() for key, value in override_headers])
+ default_headers = [
+ (key, value)
+ for key, value in default_headers
+ if key.lower() not in has_override
+ ]
+ return default_headers + override_headers
+
+
+class AsyncHTTPProxy(AsyncConnectionPool):
+ """
+ A connection pool for making HTTP requests via an HTTP proxy.
+
+ Parameters
+ ----------
+ proxy_url:
+ The URL of the proxy service as a 4-tuple of (scheme, host, port, path).
+ proxy_headers:
+ A list of proxy headers to include.
+ proxy_mode:
+ A proxy mode to operate in. May be "DEFAULT", "FORWARD_ONLY", or "TUNNEL_ONLY".
+ ssl_context:
+ An SSL context to use for verifying connections.
+ max_connections:
+ The maximum number of concurrent connections to allow.
+ max_keepalive_connections:
+ The maximum number of connections to allow before closing keep-alive
+ connections.
+ http2:
+ Enable HTTP/2 support.
+ """
+
+ def __init__(
+ self,
+ proxy_url: URL,
+ proxy_headers: Headers = None,
+ proxy_mode: str = "DEFAULT",
+ ssl_context: SSLContext = None,
+ max_connections: int = None,
+ max_keepalive_connections: int = None,
+ keepalive_expiry: float = None,
+ http2: bool = False,
+ backend: str = "auto",
+ # Deprecated argument style:
+ max_keepalive: int = None,
+ ):
+ assert proxy_mode in ("DEFAULT", "FORWARD_ONLY", "TUNNEL_ONLY")
+
+ self.proxy_origin = url_to_origin(proxy_url)
+ self.proxy_headers = [] if proxy_headers is None else proxy_headers
+ self.proxy_mode = proxy_mode
+ super().__init__(
+ ssl_context=ssl_context,
+ max_connections=max_connections,
+ max_keepalive_connections=max_keepalive_connections,
+ keepalive_expiry=keepalive_expiry,
+ http2=http2,
+ backend=backend,
+ max_keepalive=max_keepalive,
+ )
+
+ async def handle_async_request(
+ self,
+ method: bytes,
+ url: URL,
+ headers: Headers,
+ stream: AsyncByteStream,
+ extensions: dict,
+ ) -> Tuple[int, Headers, AsyncByteStream, dict]:
+ if self._keepalive_expiry is not None:
+ await self._keepalive_sweep()
+
+ if (
+ self.proxy_mode == "DEFAULT" and url[0] == b"http"
+ ) or self.proxy_mode == "FORWARD_ONLY":
+ # By default HTTP requests should be forwarded.
+ logger.trace(
+ "forward_request proxy_origin=%r proxy_headers=%r method=%r url=%r",
+ self.proxy_origin,
+ self.proxy_headers,
+ method,
+ url,
+ )
+ return await self._forward_request(
+ method, url, headers=headers, stream=stream, extensions=extensions
+ )
+ else:
+ # By default HTTPS should be tunnelled.
+ logger.trace(
+ "tunnel_request proxy_origin=%r proxy_headers=%r method=%r url=%r",
+ self.proxy_origin,
+ self.proxy_headers,
+ method,
+ url,
+ )
+ return await self._tunnel_request(
+ method, url, headers=headers, stream=stream, extensions=extensions
+ )
+
+ async def _forward_request(
+ self,
+ method: bytes,
+ url: URL,
+ headers: Headers,
+ stream: AsyncByteStream,
+ extensions: dict,
+ ) -> Tuple[int, Headers, AsyncByteStream, dict]:
+ """
+ Forwarded proxy requests include the entire URL as the HTTP target,
+ rather than just the path.
+ """
+ timeout = cast(TimeoutDict, extensions.get("timeout", {}))
+ origin = self.proxy_origin
+ connection = await self._get_connection_from_pool(origin)
+
+ if connection is None:
+ connection = AsyncHTTPConnection(
+ origin=origin,
+ http2=self._http2,
+ keepalive_expiry=self._keepalive_expiry,
+ ssl_context=self._ssl_context,
+ )
+ await self._add_to_pool(connection, timeout)
+
+ # Issue a forwarded proxy request...
+
+ # GET https://www.example.org/path HTTP/1.1
+ # [proxy headers]
+ # [headers]
+ scheme, host, port, path = url
+ if port is None:
+ target = b"%b://%b%b" % (scheme, host, path)
+ else:
+ target = b"%b://%b:%d%b" % (scheme, host, port, path)
+
+ url = self.proxy_origin + (target,)
+ headers = merge_headers(self.proxy_headers, headers)
+
+ (
+ status_code,
+ headers,
+ stream,
+ extensions,
+ ) = await connection.handle_async_request(
+ method, url, headers=headers, stream=stream, extensions=extensions
+ )
+
+ wrapped_stream = ResponseByteStream(
+ stream, connection=connection, callback=self._response_closed
+ )
+
+ return status_code, headers, wrapped_stream, extensions
+
+ async def _tunnel_request(
+ self,
+ method: bytes,
+ url: URL,
+ headers: Headers,
+ stream: AsyncByteStream,
+ extensions: dict,
+ ) -> Tuple[int, Headers, AsyncByteStream, dict]:
+ """
+ Tunnelled proxy requests require an initial CONNECT request to
+ establish the connection, and then send regular requests.
+ """
+ timeout = cast(TimeoutDict, extensions.get("timeout", {}))
+ origin = url_to_origin(url)
+ connection = await self._get_connection_from_pool(origin)
+
+ if connection is None:
+ scheme, host, port = origin
+
+ # First, create a connection to the proxy server
+ proxy_connection = AsyncHTTPConnection(
+ origin=self.proxy_origin,
+ http2=self._http2,
+ keepalive_expiry=self._keepalive_expiry,
+ ssl_context=self._ssl_context,
+ )
+
+ # Issue a CONNECT request...
+
+ # CONNECT www.example.org:80 HTTP/1.1
+ # [proxy-headers]
+ target = b"%b:%d" % (host, port)
+ connect_url = self.proxy_origin + (target,)
+ connect_headers = [(b"Host", target), (b"Accept", b"*/*")]
+ connect_headers = merge_headers(connect_headers, self.proxy_headers)
+
+ try:
+ (
+ proxy_status_code,
+ _,
+ proxy_stream,
+ _,
+ ) = await proxy_connection.handle_async_request(
+ b"CONNECT",
+ connect_url,
+ headers=connect_headers,
+ stream=ByteStream(b""),
+ extensions=extensions,
+ )
+
+ proxy_reason = get_reason_phrase(proxy_status_code)
+ logger.trace(
+ "tunnel_response proxy_status_code=%r proxy_reason=%r ",
+ proxy_status_code,
+ proxy_reason,
+ )
+ # Read the response data without closing the socket
+ async for _ in proxy_stream:
+ pass
+
+ # See if the tunnel was successfully established.
+ if proxy_status_code < 200 or proxy_status_code > 299:
+ msg = "%d %s" % (proxy_status_code, proxy_reason)
+ raise ProxyError(msg)
+
+ # Upgrade to TLS if required
+ # We assume the target speaks TLS on the specified port
+ if scheme == b"https":
+ await proxy_connection.start_tls(host, self._ssl_context, timeout)
+ except Exception as exc:
+ await proxy_connection.aclose()
+ raise ProxyError(exc)
+
+ # The CONNECT request is successful, so we have now SWITCHED PROTOCOLS.
+ # This means the proxy connection is now unusable, and we must create
+ # a new one for regular requests, making sure to use the same socket to
+ # retain the tunnel.
+ connection = AsyncHTTPConnection(
+ origin=origin,
+ http2=self._http2,
+ keepalive_expiry=self._keepalive_expiry,
+ ssl_context=self._ssl_context,
+ socket=proxy_connection.socket,
+ )
+ await self._add_to_pool(connection, timeout)
+
+ # Once the connection has been established we can send requests on
+ # it as normal.
+ (
+ status_code,
+ headers,
+ stream,
+ extensions,
+ ) = await connection.handle_async_request(
+ method,
+ url,
+ headers=headers,
+ stream=stream,
+ extensions=extensions,
+ )
+
+ wrapped_stream = ResponseByteStream(
+ stream, connection=connection, callback=self._response_closed
+ )
+
+ return status_code, headers, wrapped_stream, extensions
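The difference between the two code paths above comes down to the request target put on the wire: a forwarded request uses the absolute URL as the target, while a tunnel first issues a CONNECT with a bare host:port target and only then sends ordinary requests through the established socket. A minimal sketch of the two target forms (sample values are illustrative):

    scheme, host, port, path = b"https", b"www.example.org", 443, b"/path"

    # Absolute-form target used for a forwarded proxy request.
    forward_target = b"%b://%b:%d%b" % (scheme, host, port, path)

    # host:port target used for the initial CONNECT request when tunnelling.
    connect_target = b"%b:%d" % (host, port)

    print(forward_target)  # b'https://www.example.org:443/path'
    print(connect_target)  # b'www.example.org:443'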
diff --git a/packages/httpcore/_backends/__init__.py b/packages/httpcore/_backends/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/packages/httpcore/_backends/anyio.py b/packages/httpcore/_backends/anyio.py
new file mode 100644
index 000000000..b1332a27f
--- /dev/null
+++ b/packages/httpcore/_backends/anyio.py
@@ -0,0 +1,201 @@
+from ssl import SSLContext
+from typing import Optional
+
+import anyio.abc
+from anyio import BrokenResourceError, EndOfStream
+from anyio.abc import ByteStream, SocketAttribute
+from anyio.streams.tls import TLSAttribute, TLSStream
+
+from .._exceptions import (
+ ConnectError,
+ ConnectTimeout,
+ ReadError,
+ ReadTimeout,
+ WriteError,
+ WriteTimeout,
+ map_exceptions,
+)
+from .._types import TimeoutDict
+from .._utils import is_socket_readable
+from .base import AsyncBackend, AsyncLock, AsyncSemaphore, AsyncSocketStream
+
+
+class SocketStream(AsyncSocketStream):
+ def __init__(self, stream: ByteStream) -> None:
+ self.stream = stream
+ self.read_lock = anyio.Lock()
+ self.write_lock = anyio.Lock()
+
+ def get_http_version(self) -> str:
+ alpn_protocol = self.stream.extra(TLSAttribute.alpn_protocol, None)
+ return "HTTP/2" if alpn_protocol == "h2" else "HTTP/1.1"
+
+ async def start_tls(
+ self,
+ hostname: bytes,
+ ssl_context: SSLContext,
+ timeout: TimeoutDict,
+ ) -> "SocketStream":
+ connect_timeout = timeout.get("connect")
+ try:
+ with anyio.fail_after(connect_timeout):
+ ssl_stream = await TLSStream.wrap(
+ self.stream,
+ ssl_context=ssl_context,
+ hostname=hostname.decode("ascii"),
+ standard_compatible=False,
+ )
+ except TimeoutError:
+ raise ConnectTimeout from None
+ except BrokenResourceError as exc:
+ raise ConnectError from exc
+
+ return SocketStream(ssl_stream)
+
+ async def read(self, n: int, timeout: TimeoutDict) -> bytes:
+ read_timeout = timeout.get("read")
+ async with self.read_lock:
+ try:
+ with anyio.fail_after(read_timeout):
+ return await self.stream.receive(n)
+ except TimeoutError:
+ await self.stream.aclose()
+ raise ReadTimeout from None
+ except BrokenResourceError as exc:
+ raise ReadError from exc
+ except EndOfStream:
+ return b""
+
+ async def write(self, data: bytes, timeout: TimeoutDict) -> None:
+ if not data:
+ return
+
+ write_timeout = timeout.get("write")
+ async with self.write_lock:
+ try:
+ with anyio.fail_after(write_timeout):
+ return await self.stream.send(data)
+ except TimeoutError:
+ await self.stream.aclose()
+ raise WriteTimeout from None
+ except BrokenResourceError as exc:
+ raise WriteError from exc
+
+ async def aclose(self) -> None:
+ async with self.write_lock:
+ try:
+ await self.stream.aclose()
+ except BrokenResourceError:
+ pass
+
+ def is_readable(self) -> bool:
+ sock = self.stream.extra(SocketAttribute.raw_socket)
+ return is_socket_readable(sock)
+
+
+class Lock(AsyncLock):
+ def __init__(self) -> None:
+ self._lock = anyio.Lock()
+
+ async def release(self) -> None:
+ self._lock.release()
+
+ async def acquire(self) -> None:
+ await self._lock.acquire()
+
+
+class Semaphore(AsyncSemaphore):
+ def __init__(self, max_value: int, exc_class: type):
+ self.max_value = max_value
+ self.exc_class = exc_class
+
+ @property
+ def semaphore(self) -> anyio.abc.Semaphore:
+ if not hasattr(self, "_semaphore"):
+ self._semaphore = anyio.Semaphore(self.max_value)
+ return self._semaphore
+
+ async def acquire(self, timeout: float = None) -> None:
+ with anyio.move_on_after(timeout):
+ await self.semaphore.acquire()
+ return
+
+ raise self.exc_class()
+
+ async def release(self) -> None:
+ self.semaphore.release()
+
+
+class AnyIOBackend(AsyncBackend):
+ async def open_tcp_stream(
+ self,
+ hostname: bytes,
+ port: int,
+ ssl_context: Optional[SSLContext],
+ timeout: TimeoutDict,
+ *,
+ local_address: Optional[str],
+ ) -> AsyncSocketStream:
+ connect_timeout = timeout.get("connect")
+ unicode_host = hostname.decode("utf-8")
+ exc_map = {
+ TimeoutError: ConnectTimeout,
+ OSError: ConnectError,
+ BrokenResourceError: ConnectError,
+ }
+
+ with map_exceptions(exc_map):
+ with anyio.fail_after(connect_timeout):
+ stream: anyio.abc.ByteStream
+ stream = await anyio.connect_tcp(
+ unicode_host, port, local_host=local_address
+ )
+ if ssl_context:
+ stream = await TLSStream.wrap(
+ stream,
+ hostname=unicode_host,
+ ssl_context=ssl_context,
+ standard_compatible=False,
+ )
+
+ return SocketStream(stream=stream)
+
+ async def open_uds_stream(
+ self,
+ path: str,
+ hostname: bytes,
+ ssl_context: Optional[SSLContext],
+ timeout: TimeoutDict,
+ ) -> AsyncSocketStream:
+ connect_timeout = timeout.get("connect")
+ unicode_host = hostname.decode("utf-8")
+ exc_map = {
+ TimeoutError: ConnectTimeout,
+ OSError: ConnectError,
+ BrokenResourceError: ConnectError,
+ }
+
+ with map_exceptions(exc_map):
+ with anyio.fail_after(connect_timeout):
+ stream: anyio.abc.ByteStream = await anyio.connect_unix(path)
+ if ssl_context:
+ stream = await TLSStream.wrap(
+ stream,
+ hostname=unicode_host,
+ ssl_context=ssl_context,
+ standard_compatible=False,
+ )
+
+ return SocketStream(stream=stream)
+
+ def create_lock(self) -> AsyncLock:
+ return Lock()
+
+ def create_semaphore(self, max_value: int, exc_class: type) -> AsyncSemaphore:
+ return Semaphore(max_value, exc_class=exc_class)
+
+ async def time(self) -> float:
+ return float(anyio.current_time())
+
+ async def sleep(self, seconds: float) -> None:
+ await anyio.sleep(seconds)
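The anyio backend exposes the generic open/read/write/aclose surface that the rest of the library is written against. A rough usage sketch, assuming the vendored httpcore package is importable, anyio is installed, and example.org is reachable (the request line and timeout values are illustrative):

    import anyio
    from httpcore._backends.anyio import AnyIOBackend

    async def main() -> None:
        backend = AnyIOBackend()
        stream = await backend.open_tcp_stream(
            b"example.org", 80, ssl_context=None,
            timeout={"connect": 10.0}, local_address=None,
        )
        request = b"HEAD / HTTP/1.1\r\nHost: example.org\r\nConnection: close\r\n\r\n"
        await stream.write(request, {"write": 10.0})
        print(await stream.read(4096, {"read": 10.0}))  # status line and headers
        await stream.aclose()

    anyio.run(main)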
diff --git a/packages/httpcore/_backends/asyncio.py b/packages/httpcore/_backends/asyncio.py
new file mode 100644
index 000000000..5142072e0
--- /dev/null
+++ b/packages/httpcore/_backends/asyncio.py
@@ -0,0 +1,303 @@
+import asyncio
+import socket
+from ssl import SSLContext
+from typing import Optional
+
+from .._exceptions import (
+ ConnectError,
+ ConnectTimeout,
+ ReadError,
+ ReadTimeout,
+ WriteError,
+ WriteTimeout,
+ map_exceptions,
+)
+from .._types import TimeoutDict
+from .._utils import is_socket_readable
+from .base import AsyncBackend, AsyncLock, AsyncSemaphore, AsyncSocketStream
+
+SSL_MONKEY_PATCH_APPLIED = False
+
+
+def ssl_monkey_patch() -> None:
+ """
+ Monkey-patch for https://bugs.python.org/issue36709
+
+ This prevents console errors when outstanding HTTPS connections
+ still exist at the point of exiting.
+
+ Clients which have been opened using a `with` block, or which have
+ had `close()` called, will not exhibit this issue in the first place.
+ """
+ MonkeyPatch = asyncio.selector_events._SelectorSocketTransport # type: ignore
+
+ _write = MonkeyPatch.write
+
+ def _fixed_write(self, data: bytes) -> None: # type: ignore
+ if self._loop and not self._loop.is_closed():
+ _write(self, data)
+
+ MonkeyPatch.write = _fixed_write
+
+
+async def backport_start_tls(
+ transport: asyncio.BaseTransport,
+ protocol: asyncio.BaseProtocol,
+ ssl_context: SSLContext,
+ *,
+ server_side: bool = False,
+ server_hostname: str = None,
+ ssl_handshake_timeout: float = None,
+) -> asyncio.Transport: # pragma: nocover (Since it's not used on all Python versions.)
+ """
+ Python 3.6 asyncio doesn't have a start_tls() method on the loop
+ so we use this function in place of the loop's start_tls() method.
+ Adapted from this comment:
+ https://github.com/urllib3/urllib3/issues/1323#issuecomment-362494839
+ """
+ import asyncio.sslproto
+
+ loop = asyncio.get_event_loop()
+ waiter = loop.create_future()
+ ssl_protocol = asyncio.sslproto.SSLProtocol(
+ loop,
+ protocol,
+ ssl_context,
+ waiter,
+ server_side=False,
+ server_hostname=server_hostname,
+ call_connection_made=False,
+ )
+
+ transport.set_protocol(ssl_protocol)
+ loop.call_soon(ssl_protocol.connection_made, transport)
+ loop.call_soon(transport.resume_reading) # type: ignore
+
+ await waiter
+ return ssl_protocol._app_transport
+
+
+class SocketStream(AsyncSocketStream):
+ def __init__(
+ self, stream_reader: asyncio.StreamReader, stream_writer: asyncio.StreamWriter
+ ):
+ self.stream_reader = stream_reader
+ self.stream_writer = stream_writer
+ self.read_lock = asyncio.Lock()
+ self.write_lock = asyncio.Lock()
+
+ def get_http_version(self) -> str:
+ ssl_object = self.stream_writer.get_extra_info("ssl_object")
+
+ if ssl_object is None:
+ return "HTTP/1.1"
+
+ ident = ssl_object.selected_alpn_protocol()
+ return "HTTP/2" if ident == "h2" else "HTTP/1.1"
+
+ async def start_tls(
+ self, hostname: bytes, ssl_context: SSLContext, timeout: TimeoutDict
+ ) -> "SocketStream":
+ loop = asyncio.get_event_loop()
+
+ stream_reader = asyncio.StreamReader()
+ protocol = asyncio.StreamReaderProtocol(stream_reader)
+ transport = self.stream_writer.transport
+
+ loop_start_tls = getattr(loop, "start_tls", backport_start_tls)
+
+ exc_map = {asyncio.TimeoutError: ConnectTimeout, OSError: ConnectError}
+
+ with map_exceptions(exc_map):
+ transport = await asyncio.wait_for(
+ loop_start_tls(
+ transport,
+ protocol,
+ ssl_context,
+ server_hostname=hostname.decode("ascii"),
+ ),
+ timeout=timeout.get("connect"),
+ )
+
+ # Initialize the protocol, so it is made aware of being tied to
+ # a TLS connection.
+ # See: https://github.com/encode/httpx/issues/859
+ protocol.connection_made(transport)
+
+ stream_writer = asyncio.StreamWriter(
+ transport=transport, protocol=protocol, reader=stream_reader, loop=loop
+ )
+
+ ssl_stream = SocketStream(stream_reader, stream_writer)
+ # When we return a new SocketStream with new StreamReader/StreamWriter instances
+ # we need to keep references to the old StreamReader/StreamWriter so that they
+ # are not garbage collected and closed while we're still using them.
+ ssl_stream._inner = self # type: ignore
+ return ssl_stream
+
+ async def read(self, n: int, timeout: TimeoutDict) -> bytes:
+ exc_map = {asyncio.TimeoutError: ReadTimeout, OSError: ReadError}
+ async with self.read_lock:
+ with map_exceptions(exc_map):
+ try:
+ return await asyncio.wait_for(
+ self.stream_reader.read(n), timeout.get("read")
+ )
+ except AttributeError as exc: # pragma: nocover
+ if "resume_reading" in str(exc):
+ # Python's asyncio has a bug that can occur when a
+ # connection has been closed, while it is paused.
+ # See: https://github.com/encode/httpx/issues/1213
+ #
+ # Returning an empty byte-string to indicate connection
+ # close will eventually raise an httpcore.RemoteProtocolError
+ # to the user when this goes through our HTTP parsing layer.
+ return b""
+ raise
+
+ async def write(self, data: bytes, timeout: TimeoutDict) -> None:
+ if not data:
+ return
+
+ exc_map = {asyncio.TimeoutError: WriteTimeout, OSError: WriteError}
+ async with self.write_lock:
+ with map_exceptions(exc_map):
+ self.stream_writer.write(data)
+ return await asyncio.wait_for(
+ self.stream_writer.drain(), timeout.get("write")
+ )
+
+ async def aclose(self) -> None:
+ # SSL connections should issue the close and then abort, rather than
+ # waiting for the remote end of the connection to signal the EOF.
+ #
+ # See:
+ #
+ # * https://bugs.python.org/issue39758
+ # * https://github.com/python-trio/trio/blob/
+ # 31e2ae866ad549f1927d45ce073d4f0ea9f12419/trio/_ssl.py#L779-L829
+ #
+ # And related issues caused if we simply omit the 'wait_closed' call,
+ # without first using `.abort()`
+ #
+ # * https://github.com/encode/httpx/issues/825
+ # * https://github.com/encode/httpx/issues/914
+ is_ssl = self.stream_writer.get_extra_info("ssl_object") is not None
+
+ async with self.write_lock:
+ try:
+ self.stream_writer.close()
+ if is_ssl:
+ # Give the connection a chance to write any data in the buffer,
+ # and then forcibly tear down the SSL connection.
+ await asyncio.sleep(0)
+ self.stream_writer.transport.abort() # type: ignore
+ if hasattr(self.stream_writer, "wait_closed"):
+ # Python 3.7+ only.
+ await self.stream_writer.wait_closed() # type: ignore
+ except OSError:
+ pass
+
+ def is_readable(self) -> bool:
+ transport = self.stream_reader._transport # type: ignore
+ sock: Optional[socket.socket] = transport.get_extra_info("socket")
+ return is_socket_readable(sock)
+
+
+class Lock(AsyncLock):
+ def __init__(self) -> None:
+ self._lock = asyncio.Lock()
+
+ async def release(self) -> None:
+ self._lock.release()
+
+ async def acquire(self) -> None:
+ await self._lock.acquire()
+
+
+class Semaphore(AsyncSemaphore):
+ def __init__(self, max_value: int, exc_class: type) -> None:
+ self.max_value = max_value
+ self.exc_class = exc_class
+
+ @property
+ def semaphore(self) -> asyncio.BoundedSemaphore:
+ if not hasattr(self, "_semaphore"):
+ self._semaphore = asyncio.BoundedSemaphore(value=self.max_value)
+ return self._semaphore
+
+ async def acquire(self, timeout: float = None) -> None:
+ try:
+ await asyncio.wait_for(self.semaphore.acquire(), timeout)
+ except asyncio.TimeoutError:
+ raise self.exc_class()
+
+ async def release(self) -> None:
+ self.semaphore.release()
+
+
+class AsyncioBackend(AsyncBackend):
+ def __init__(self) -> None:
+ global SSL_MONKEY_PATCH_APPLIED
+
+ if not SSL_MONKEY_PATCH_APPLIED:
+ ssl_monkey_patch()
+ SSL_MONKEY_PATCH_APPLIED = True
+
+ async def open_tcp_stream(
+ self,
+ hostname: bytes,
+ port: int,
+ ssl_context: Optional[SSLContext],
+ timeout: TimeoutDict,
+ *,
+ local_address: Optional[str],
+ ) -> SocketStream:
+ host = hostname.decode("ascii")
+ connect_timeout = timeout.get("connect")
+ local_addr = None if local_address is None else (local_address, 0)
+
+ exc_map = {asyncio.TimeoutError: ConnectTimeout, OSError: ConnectError}
+ with map_exceptions(exc_map):
+ stream_reader, stream_writer = await asyncio.wait_for(
+ asyncio.open_connection(
+ host, port, ssl=ssl_context, local_addr=local_addr
+ ),
+ connect_timeout,
+ )
+ return SocketStream(
+ stream_reader=stream_reader, stream_writer=stream_writer
+ )
+
+ async def open_uds_stream(
+ self,
+ path: str,
+ hostname: bytes,
+ ssl_context: Optional[SSLContext],
+ timeout: TimeoutDict,
+ ) -> AsyncSocketStream:
+ host = hostname.decode("ascii")
+ connect_timeout = timeout.get("connect")
+ kwargs: dict = {"server_hostname": host} if ssl_context is not None else {}
+ exc_map = {asyncio.TimeoutError: ConnectTimeout, OSError: ConnectError}
+ with map_exceptions(exc_map):
+ stream_reader, stream_writer = await asyncio.wait_for(
+ asyncio.open_unix_connection(path, ssl=ssl_context, **kwargs),
+ connect_timeout,
+ )
+ return SocketStream(
+ stream_reader=stream_reader, stream_writer=stream_writer
+ )
+
+ def create_lock(self) -> AsyncLock:
+ return Lock()
+
+ def create_semaphore(self, max_value: int, exc_class: type) -> AsyncSemaphore:
+ return Semaphore(max_value, exc_class=exc_class)
+
+ async def time(self) -> float:
+ loop = asyncio.get_event_loop()
+ return loop.time()
+
+ async def sleep(self, seconds: float) -> None:
+ await asyncio.sleep(seconds)
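One detail worth noting in the asyncio backend is that the Semaphore wrapper converts an acquire timeout into whatever exception class the connection pool passed in, which is how a pool timeout surfaces to callers. A self-contained sketch of that behaviour (the class names here are for the demo only, not the vendored implementation):

    import asyncio

    class PoolTimeout(Exception):
        pass

    class Semaphore:
        def __init__(self, max_value: int, exc_class: type) -> None:
            self._semaphore = asyncio.BoundedSemaphore(max_value)
            self._exc_class = exc_class

        async def acquire(self, timeout: float = None) -> None:
            try:
                await asyncio.wait_for(self._semaphore.acquire(), timeout)
            except asyncio.TimeoutError:
                raise self._exc_class()

    async def main() -> None:
        sem = Semaphore(1, PoolTimeout)
        await sem.acquire()         # the only slot is granted immediately
        try:
            await sem.acquire(0.1)  # no slots left, so this times out
        except PoolTimeout:
            print("PoolTimeout raised as expected")

    asyncio.run(main())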
diff --git a/packages/httpcore/_backends/auto.py b/packages/httpcore/_backends/auto.py
new file mode 100644
index 000000000..5579ab467
--- /dev/null
+++ b/packages/httpcore/_backends/auto.py
@@ -0,0 +1,67 @@
+from ssl import SSLContext
+from typing import Optional
+
+import sniffio
+
+from .._types import TimeoutDict
+from .base import AsyncBackend, AsyncLock, AsyncSemaphore, AsyncSocketStream
+
+# The following line is imported from the _sync modules
+from .sync import SyncBackend, SyncLock, SyncSemaphore, SyncSocketStream # noqa
+
+
+class AutoBackend(AsyncBackend):
+ @property
+ def backend(self) -> AsyncBackend:
+ if not hasattr(self, "_backend_implementation"):
+ backend = sniffio.current_async_library()
+
+ if backend == "asyncio":
+ from .anyio import AnyIOBackend
+
+ self._backend_implementation: AsyncBackend = AnyIOBackend()
+ elif backend == "trio":
+ from .trio import TrioBackend
+
+ self._backend_implementation = TrioBackend()
+ elif backend == "curio":
+ from .curio import CurioBackend
+
+ self._backend_implementation = CurioBackend()
+ else: # pragma: nocover
+ raise RuntimeError(f"Unsupported concurrency backend {backend!r}")
+ return self._backend_implementation
+
+ async def open_tcp_stream(
+ self,
+ hostname: bytes,
+ port: int,
+ ssl_context: Optional[SSLContext],
+ timeout: TimeoutDict,
+ *,
+ local_address: Optional[str],
+ ) -> AsyncSocketStream:
+ return await self.backend.open_tcp_stream(
+ hostname, port, ssl_context, timeout, local_address=local_address
+ )
+
+ async def open_uds_stream(
+ self,
+ path: str,
+ hostname: bytes,
+ ssl_context: Optional[SSLContext],
+ timeout: TimeoutDict,
+ ) -> AsyncSocketStream:
+ return await self.backend.open_uds_stream(path, hostname, ssl_context, timeout)
+
+ def create_lock(self) -> AsyncLock:
+ return self.backend.create_lock()
+
+ def create_semaphore(self, max_value: int, exc_class: type) -> AsyncSemaphore:
+ return self.backend.create_semaphore(max_value, exc_class=exc_class)
+
+ async def time(self) -> float:
+ return await self.backend.time()
+
+ async def sleep(self, seconds: float) -> None:
+ await self.backend.sleep(seconds)
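AutoBackend only decides which concrete backend to use the first time one is needed, by asking sniffio which async library the current task is running under. A small demonstration of that detection step (requires sniffio; under trio the same call would return 'trio'):

    import asyncio
    import sniffio

    async def show_library() -> None:
        print(sniffio.current_async_library())

    asyncio.run(show_library())  # prints: asyncio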
diff --git a/packages/httpcore/_backends/base.py b/packages/httpcore/_backends/base.py
new file mode 100644
index 000000000..1ca6e31b5
--- /dev/null
+++ b/packages/httpcore/_backends/base.py
@@ -0,0 +1,137 @@
+from ssl import SSLContext
+from types import TracebackType
+from typing import TYPE_CHECKING, Optional, Type
+
+from .._types import TimeoutDict
+
+if TYPE_CHECKING: # pragma: no cover
+ from .sync import SyncBackend
+
+
+def lookup_async_backend(name: str) -> "AsyncBackend":
+ if name == "auto":
+ from .auto import AutoBackend
+
+ return AutoBackend()
+ elif name == "asyncio":
+ from .asyncio import AsyncioBackend
+
+ return AsyncioBackend()
+ elif name == "trio":
+ from .trio import TrioBackend
+
+ return TrioBackend()
+ elif name == "curio":
+ from .curio import CurioBackend
+
+ return CurioBackend()
+ elif name == "anyio":
+ from .anyio import AnyIOBackend
+
+ return AnyIOBackend()
+
+ raise ValueError(f"Invalid backend name {name!r}")
+
+
+def lookup_sync_backend(name: str) -> "SyncBackend":
+ from .sync import SyncBackend
+
+ return SyncBackend()
+
+
+class AsyncSocketStream:
+ """
+ A socket stream with read/write operations. Abstracts away any asyncio-specific
+ interfaces into a more generic base class, that we can use with alternate
+ backends, or for stand-alone test cases.
+ """
+
+ def get_http_version(self) -> str:
+ raise NotImplementedError() # pragma: no cover
+
+ async def start_tls(
+ self, hostname: bytes, ssl_context: SSLContext, timeout: TimeoutDict
+ ) -> "AsyncSocketStream":
+ raise NotImplementedError() # pragma: no cover
+
+ async def read(self, n: int, timeout: TimeoutDict) -> bytes:
+ raise NotImplementedError() # pragma: no cover
+
+ async def write(self, data: bytes, timeout: TimeoutDict) -> None:
+ raise NotImplementedError() # pragma: no cover
+
+ async def aclose(self) -> None:
+ raise NotImplementedError() # pragma: no cover
+
+ def is_readable(self) -> bool:
+ raise NotImplementedError() # pragma: no cover
+
+
+class AsyncLock:
+ """
+ An abstract interface for Lock classes.
+ """
+
+ async def __aenter__(self) -> None:
+ await self.acquire()
+
+ async def __aexit__(
+ self,
+ exc_type: Type[BaseException] = None,
+ exc_value: BaseException = None,
+ traceback: TracebackType = None,
+ ) -> None:
+ await self.release()
+
+ async def release(self) -> None:
+ raise NotImplementedError() # pragma: no cover
+
+ async def acquire(self) -> None:
+ raise NotImplementedError() # pragma: no cover
+
+
+class AsyncSemaphore:
+ """
+ An abstract interface for Semaphore classes.
+ Abstracts away any asyncio-specific interfaces.
+ """
+
+ async def acquire(self, timeout: float = None) -> None:
+ raise NotImplementedError() # pragma: no cover
+
+ async def release(self) -> None:
+ raise NotImplementedError() # pragma: no cover
+
+
+class AsyncBackend:
+ async def open_tcp_stream(
+ self,
+ hostname: bytes,
+ port: int,
+ ssl_context: Optional[SSLContext],
+ timeout: TimeoutDict,
+ *,
+ local_address: Optional[str],
+ ) -> AsyncSocketStream:
+ raise NotImplementedError() # pragma: no cover
+
+ async def open_uds_stream(
+ self,
+ path: str,
+ hostname: bytes,
+ ssl_context: Optional[SSLContext],
+ timeout: TimeoutDict,
+ ) -> AsyncSocketStream:
+ raise NotImplementedError() # pragma: no cover
+
+ def create_lock(self) -> AsyncLock:
+ raise NotImplementedError() # pragma: no cover
+
+ def create_semaphore(self, max_value: int, exc_class: type) -> AsyncSemaphore:
+ raise NotImplementedError() # pragma: no cover
+
+ async def time(self) -> float:
+ raise NotImplementedError() # pragma: no cover
+
+ async def sleep(self, seconds: float) -> None:
+ raise NotImplementedError() # pragma: no cover
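Because these base classes are plain Python rather than wrappers around a specific event loop, a test double only needs to override the methods a test actually exercises. A hypothetical in-memory stream, for illustration only (assumes the vendored httpcore package is importable):

    import asyncio
    from httpcore._backends.base import AsyncSocketStream

    class MockSocketStream(AsyncSocketStream):
        def __init__(self, chunks):
            self._chunks = list(chunks)

        def get_http_version(self) -> str:
            return "HTTP/1.1"

        async def read(self, n, timeout) -> bytes:
            # Hand back one canned chunk per read, then signal EOF with b"".
            return self._chunks.pop(0) if self._chunks else b""

        async def write(self, data, timeout) -> None:
            pass

        async def aclose(self) -> None:
            pass

        def is_readable(self) -> bool:
            return bool(self._chunks)

    stream = MockSocketStream([b"HTTP/1.1 200 OK\r\n\r\n"])
    print(asyncio.run(stream.read(4096, {})))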
diff --git a/packages/httpcore/_backends/curio.py b/packages/httpcore/_backends/curio.py
new file mode 100644
index 000000000..99a7b2cc8
--- /dev/null
+++ b/packages/httpcore/_backends/curio.py
@@ -0,0 +1,206 @@
+from ssl import SSLContext, SSLSocket
+from typing import Optional
+
+import curio
+import curio.io
+
+from .._exceptions import (
+ ConnectError,
+ ConnectTimeout,
+ ReadError,
+ ReadTimeout,
+ WriteError,
+ WriteTimeout,
+ map_exceptions,
+)
+from .._types import TimeoutDict
+from .._utils import get_logger, is_socket_readable
+from .base import AsyncBackend, AsyncLock, AsyncSemaphore, AsyncSocketStream
+
+logger = get_logger(__name__)
+
+ONE_DAY_IN_SECONDS = float(60 * 60 * 24)
+
+
+def convert_timeout(value: Optional[float]) -> float:
+ return value if value is not None else ONE_DAY_IN_SECONDS
+
+
+class Lock(AsyncLock):
+ def __init__(self) -> None:
+ self._lock = curio.Lock()
+
+ async def acquire(self) -> None:
+ await self._lock.acquire()
+
+ async def release(self) -> None:
+ await self._lock.release()
+
+
+class Semaphore(AsyncSemaphore):
+ def __init__(self, max_value: int, exc_class: type) -> None:
+ self.max_value = max_value
+ self.exc_class = exc_class
+
+ @property
+ def semaphore(self) -> curio.Semaphore:
+ if not hasattr(self, "_semaphore"):
+ self._semaphore = curio.Semaphore(value=self.max_value)
+ return self._semaphore
+
+ async def acquire(self, timeout: float = None) -> None:
+ timeout = convert_timeout(timeout)
+
+ try:
+ return await curio.timeout_after(timeout, self.semaphore.acquire())
+ except curio.TaskTimeout:
+ raise self.exc_class()
+
+ async def release(self) -> None:
+ await self.semaphore.release()
+
+
+class SocketStream(AsyncSocketStream):
+ def __init__(self, socket: curio.io.Socket) -> None:
+ self.read_lock = curio.Lock()
+ self.write_lock = curio.Lock()
+ self.socket = socket
+ self.stream = socket.as_stream()
+
+ def get_http_version(self) -> str:
+ if hasattr(self.socket, "_socket"):
+ raw_socket = self.socket._socket
+
+ if isinstance(raw_socket, SSLSocket):
+ ident = raw_socket.selected_alpn_protocol()
+ return "HTTP/2" if ident == "h2" else "HTTP/1.1"
+
+ return "HTTP/1.1"
+
+ async def start_tls(
+ self, hostname: bytes, ssl_context: SSLContext, timeout: TimeoutDict
+ ) -> "AsyncSocketStream":
+ connect_timeout = convert_timeout(timeout.get("connect"))
+ exc_map = {
+ curio.TaskTimeout: ConnectTimeout,
+ curio.CurioError: ConnectError,
+ OSError: ConnectError,
+ }
+
+ with map_exceptions(exc_map):
+ wrapped_sock = curio.io.Socket(
+ ssl_context.wrap_socket(
+ self.socket._socket,
+ do_handshake_on_connect=False,
+ server_hostname=hostname.decode("ascii"),
+ )
+ )
+
+ await curio.timeout_after(
+ connect_timeout,
+ wrapped_sock.do_handshake(),
+ )
+
+ return SocketStream(wrapped_sock)
+
+ async def read(self, n: int, timeout: TimeoutDict) -> bytes:
+ read_timeout = convert_timeout(timeout.get("read"))
+ exc_map = {
+ curio.TaskTimeout: ReadTimeout,
+ curio.CurioError: ReadError,
+ OSError: ReadError,
+ }
+
+ with map_exceptions(exc_map):
+ async with self.read_lock:
+ return await curio.timeout_after(read_timeout, self.stream.read(n))
+
+ async def write(self, data: bytes, timeout: TimeoutDict) -> None:
+ write_timeout = convert_timeout(timeout.get("write"))
+ exc_map = {
+ curio.TaskTimeout: WriteTimeout,
+ curio.CurioError: WriteError,
+ OSError: WriteError,
+ }
+
+ with map_exceptions(exc_map):
+ async with self.write_lock:
+ await curio.timeout_after(write_timeout, self.stream.write(data))
+
+ async def aclose(self) -> None:
+ await self.stream.close()
+ await self.socket.close()
+
+ def is_readable(self) -> bool:
+ return is_socket_readable(self.socket)
+
+
+class CurioBackend(AsyncBackend):
+ async def open_tcp_stream(
+ self,
+ hostname: bytes,
+ port: int,
+ ssl_context: Optional[SSLContext],
+ timeout: TimeoutDict,
+ *,
+ local_address: Optional[str],
+ ) -> AsyncSocketStream:
+ connect_timeout = convert_timeout(timeout.get("connect"))
+ exc_map = {
+ curio.TaskTimeout: ConnectTimeout,
+ curio.CurioError: ConnectError,
+ OSError: ConnectError,
+ }
+ host = hostname.decode("ascii")
+
+ kwargs: dict = {}
+ if ssl_context is not None:
+ kwargs["ssl"] = ssl_context
+ kwargs["server_hostname"] = host
+ if local_address is not None:
+ kwargs["source_addr"] = (local_address, 0)
+
+ with map_exceptions(exc_map):
+ sock: curio.io.Socket = await curio.timeout_after(
+ connect_timeout,
+ curio.open_connection(hostname, port, **kwargs),
+ )
+
+ return SocketStream(sock)
+
+ async def open_uds_stream(
+ self,
+ path: str,
+ hostname: bytes,
+ ssl_context: Optional[SSLContext],
+ timeout: TimeoutDict,
+ ) -> AsyncSocketStream:
+ connect_timeout = convert_timeout(timeout.get("connect"))
+ exc_map = {
+ curio.TaskTimeout: ConnectTimeout,
+ curio.CurioError: ConnectError,
+ OSError: ConnectError,
+ }
+ host = hostname.decode("ascii")
+ kwargs = (
+ {} if ssl_context is None else {"ssl": ssl_context, "server_hostname": host}
+ )
+
+ with map_exceptions(exc_map):
+ sock: curio.io.Socket = await curio.timeout_after(
+ connect_timeout, curio.open_unix_connection(path, **kwargs)
+ )
+
+ return SocketStream(sock)
+
+ def create_lock(self) -> AsyncLock:
+ return Lock()
+
+ def create_semaphore(self, max_value: int, exc_class: type) -> AsyncSemaphore:
+ return Semaphore(max_value, exc_class)
+
+ async def time(self) -> float:
+ return await curio.clock()
+
+ async def sleep(self, seconds: float) -> None:
+ await curio.sleep(seconds)
diff --git a/packages/httpcore/_backends/sync.py b/packages/httpcore/_backends/sync.py
new file mode 100644
index 000000000..968aead9e
--- /dev/null
+++ b/packages/httpcore/_backends/sync.py
@@ -0,0 +1,182 @@
+import socket
+import threading
+import time
+from ssl import SSLContext
+from types import TracebackType
+from typing import Optional, Type
+
+from .._exceptions import (
+ ConnectError,
+ ConnectTimeout,
+ ReadError,
+ ReadTimeout,
+ WriteError,
+ WriteTimeout,
+ map_exceptions,
+)
+from .tcp_keep_alive import enable_tcp_keep_alive
+from .._types import TimeoutDict
+from .._utils import is_socket_readable
+
+
+class SyncSocketStream:
+ """
+ A socket stream with read/write operations. Abstracts away any asyncio-specific
+ interfaces into a more generic base class, that we can use with alternate
+ backends, or for stand-alone test cases.
+ """
+
+ def __init__(self, sock: socket.socket) -> None:
+ self.sock = sock
+ self.read_lock = threading.Lock()
+ self.write_lock = threading.Lock()
+
+ def get_http_version(self) -> str:
+ selected_alpn_protocol = getattr(self.sock, "selected_alpn_protocol", None)
+ if selected_alpn_protocol is not None:
+ ident = selected_alpn_protocol()
+ return "HTTP/2" if ident == "h2" else "HTTP/1.1"
+ return "HTTP/1.1"
+
+ def start_tls(
+ self, hostname: bytes, ssl_context: SSLContext, timeout: TimeoutDict
+ ) -> "SyncSocketStream":
+ connect_timeout = timeout.get("connect")
+ exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError}
+
+ with map_exceptions(exc_map):
+ self.sock.settimeout(connect_timeout)
+ wrapped = ssl_context.wrap_socket(
+ self.sock, server_hostname=hostname.decode("ascii")
+ )
+
+ return SyncSocketStream(wrapped)
+
+ def read(self, n: int, timeout: TimeoutDict) -> bytes:
+ read_timeout = timeout.get("read")
+ exc_map = {socket.timeout: ReadTimeout, socket.error: ReadError}
+
+ with self.read_lock:
+ with map_exceptions(exc_map):
+ self.sock.settimeout(read_timeout)
+ return self.sock.recv(n)
+
+ def write(self, data: bytes, timeout: TimeoutDict) -> None:
+ write_timeout = timeout.get("write")
+ exc_map = {socket.timeout: WriteTimeout, socket.error: WriteError}
+
+ with self.write_lock:
+ with map_exceptions(exc_map):
+ while data:
+ self.sock.settimeout(write_timeout)
+ n = self.sock.send(data)
+ data = data[n:]
+
+ def close(self) -> None:
+ with self.write_lock:
+ try:
+ self.sock.close()
+ except socket.error:
+ pass
+
+ def is_readable(self) -> bool:
+ return is_socket_readable(self.sock)
+
+
+class SyncLock:
+ def __init__(self) -> None:
+ self._lock = threading.Lock()
+
+ def __enter__(self) -> None:
+ self.acquire()
+
+ def __exit__(
+ self,
+ exc_type: Type[BaseException] = None,
+ exc_value: BaseException = None,
+ traceback: TracebackType = None,
+ ) -> None:
+ self.release()
+
+ def release(self) -> None:
+ self._lock.release()
+
+ def acquire(self) -> None:
+ self._lock.acquire()
+
+
+class SyncSemaphore:
+ def __init__(self, max_value: int, exc_class: type) -> None:
+ self.max_value = max_value
+ self.exc_class = exc_class
+ self._semaphore = threading.Semaphore(max_value)
+
+ def acquire(self, timeout: float = None) -> None:
+ if not self._semaphore.acquire(timeout=timeout): # type: ignore
+ raise self.exc_class()
+
+ def release(self) -> None:
+ self._semaphore.release()
+
+
+class SyncBackend:
+ def open_tcp_stream(
+ self,
+ hostname: bytes,
+ port: int,
+ ssl_context: Optional[SSLContext],
+ timeout: TimeoutDict,
+ *,
+ local_address: Optional[str],
+ ) -> SyncSocketStream:
+ address = (hostname.decode("ascii"), port)
+ connect_timeout = timeout.get("connect")
+ source_address = None if local_address is None else (local_address, 0)
+ exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError}
+
+ with map_exceptions(exc_map):
+ sock = socket.create_connection(
+ address, connect_timeout, source_address=source_address # type: ignore
+ )
+ # Enable TCP Keep-Alive
+ enable_tcp_keep_alive(sock)
+
+ if ssl_context is not None:
+ sock = ssl_context.wrap_socket(
+ sock, server_hostname=hostname.decode("ascii")
+ )
+ return SyncSocketStream(sock=sock)
+
+ def open_uds_stream(
+ self,
+ path: str,
+ hostname: bytes,
+ ssl_context: Optional[SSLContext],
+ timeout: TimeoutDict,
+ ) -> SyncSocketStream:
+ connect_timeout = timeout.get("connect")
+ exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError}
+
+ with map_exceptions(exc_map):
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ sock.settimeout(connect_timeout)
+ sock.connect(path)
+
+ if ssl_context is not None:
+ sock = ssl_context.wrap_socket(
+ sock, server_hostname=hostname.decode("ascii")
+ )
+
+ return SyncSocketStream(sock=sock)
+
+ def create_lock(self) -> SyncLock:
+ return SyncLock()
+
+ def create_semaphore(self, max_value: int, exc_class: type) -> SyncSemaphore:
+ return SyncSemaphore(max_value, exc_class=exc_class)
+
+ def time(self) -> float:
+ return time.monotonic()
+
+ def sleep(self, seconds: float) -> None:
+ time.sleep(seconds)
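The synchronous backend mirrors the async ones but uses blocking socket calls, so it can be exercised directly without an event loop. A rough sketch, assuming the vendored httpcore package is importable and example.org is reachable (the request line and timeout values are illustrative):

    from httpcore._backends.sync import SyncBackend

    backend = SyncBackend()
    stream = backend.open_tcp_stream(
        b"example.org", 80, ssl_context=None,
        timeout={"connect": 10.0}, local_address=None,
    )
    request = b"HEAD / HTTP/1.1\r\nHost: example.org\r\nConnection: close\r\n\r\n"
    stream.write(request, {"write": 10.0})
    print(stream.read(4096, {"read": 10.0}))  # status line and headers
    stream.close()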
diff --git a/packages/httpcore/_backends/tcp_keep_alive.py b/packages/httpcore/_backends/tcp_keep_alive.py
new file mode 100644
index 000000000..90a08c7f6
--- /dev/null
+++ b/packages/httpcore/_backends/tcp_keep_alive.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+"""
+ Copyright (C) 2021 Stefano Gottardo - @CastagnaIT
+ Helper to enable TCP Keep Alive
+
+ SPDX-License-Identifier: MIT
+ See LICENSES/MIT.md for more information.
+"""
+import socket
+import sys
+
+TCP_KEEP_IDLE = 45
+TCP_KEEPALIVE_INTERVAL = 10
+TCP_KEEP_CNT = 6
+
+
+def enable_tcp_keep_alive(sock):
+ """Enable TCP Keep-Alive (by default disabled)"""
+ # More info on PR: https://github.com/CastagnaIT/plugin.video.netflix/pull/1065
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
+ if sys.platform == 'linux':
+ # TCP Keep Alive Probes for Linux/Android
+ if hasattr(socket, 'TCP_KEEPIDLE'):
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, TCP_KEEP_IDLE)
+ if hasattr(socket, 'TCP_KEEPINTVL'):
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, TCP_KEEPALIVE_INTERVAL)
+ if hasattr(socket, 'TCP_KEEPCNT'):
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, TCP_KEEP_CNT)
+ elif sys.platform == 'darwin':
+ # TCP Keep Alive Probes for macOS
+ # NOTE: The socket constants from macOS netinet/tcp.h are not exported by Python's socket module.
+ # The macOS TCP_KEEPALIVE (0x10) constant is the equivalent of the Linux TCP_KEEPIDLE constant.
+ sock.setsockopt(socket.IPPROTO_TCP, getattr(socket, 'TCP_KEEPIDLE', 0x10), TCP_KEEP_IDLE)
+ sock.setsockopt(socket.IPPROTO_TCP, getattr(socket, 'TCP_KEEPINTVL', 0x101), TCP_KEEPALIVE_INTERVAL)
+ sock.setsockopt(socket.IPPROTO_TCP, getattr(socket, 'TCP_KEEPCNT', 0x102), TCP_KEEP_CNT)
+ elif sys.platform == 'win32':
+ # TCP Keep Alive Probes for Windows
+ sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, TCP_KEEP_IDLE * 1000, TCP_KEEPALIVE_INTERVAL * 1000))
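This helper is invoked right after the TCP socket is created in SyncBackend.open_tcp_stream (before any TLS wrapping), so sockets opened through the sync backend carry keep-alive probes. It can also be applied to any already-connected socket in the same way; a short sketch (example.org is a placeholder host):

    import socket
    from httpcore._backends.tcp_keep_alive import enable_tcp_keep_alive

    sock = socket.create_connection(("example.org", 80), timeout=10)
    enable_tcp_keep_alive(sock)  # idle 45s, probe every 10s, 6 unanswered probes drop the link
    print(sock.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE))  # 1 -> enabled
    sock.close()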
diff --git a/packages/httpcore/_backends/trio.py b/packages/httpcore/_backends/trio.py
new file mode 100644
index 000000000..d6e67c2e3
--- /dev/null
+++ b/packages/httpcore/_backends/trio.py
@@ -0,0 +1,212 @@
+from ssl import SSLContext
+from typing import Optional
+
+import trio
+
+from .._exceptions import (
+ ConnectError,
+ ConnectTimeout,
+ ReadError,
+ ReadTimeout,
+ WriteError,
+ WriteTimeout,
+ map_exceptions,
+)
+from .._types import TimeoutDict
+from .base import AsyncBackend, AsyncLock, AsyncSemaphore, AsyncSocketStream
+
+
+def none_as_inf(value: Optional[float]) -> float:
+ return value if value is not None else float("inf")
+
+
+class SocketStream(AsyncSocketStream):
+ def __init__(self, stream: trio.abc.Stream) -> None:
+ self.stream = stream
+ self.read_lock = trio.Lock()
+ self.write_lock = trio.Lock()
+
+ def get_http_version(self) -> str:
+ if not isinstance(self.stream, trio.SSLStream):
+ return "HTTP/1.1"
+
+ ident = self.stream.selected_alpn_protocol()
+ return "HTTP/2" if ident == "h2" else "HTTP/1.1"
+
+ async def start_tls(
+ self, hostname: bytes, ssl_context: SSLContext, timeout: TimeoutDict
+ ) -> "SocketStream":
+ connect_timeout = none_as_inf(timeout.get("connect"))
+ exc_map = {
+ trio.TooSlowError: ConnectTimeout,
+ trio.BrokenResourceError: ConnectError,
+ }
+ ssl_stream = trio.SSLStream(
+ self.stream,
+ ssl_context=ssl_context,
+ server_hostname=hostname.decode("ascii"),
+ )
+
+ with map_exceptions(exc_map):
+ with trio.fail_after(connect_timeout):
+ await ssl_stream.do_handshake()
+ return SocketStream(ssl_stream)
+
+ async def read(self, n: int, timeout: TimeoutDict) -> bytes:
+ read_timeout = none_as_inf(timeout.get("read"))
+ exc_map = {trio.TooSlowError: ReadTimeout, trio.BrokenResourceError: ReadError}
+
+ async with self.read_lock:
+ with map_exceptions(exc_map):
+ try:
+ with trio.fail_after(read_timeout):
+ return await self.stream.receive_some(max_bytes=n)
+ except trio.TooSlowError as exc:
+ await self.stream.aclose()
+ raise exc
+
+ async def write(self, data: bytes, timeout: TimeoutDict) -> None:
+ if not data:
+ return
+
+ write_timeout = none_as_inf(timeout.get("write"))
+ exc_map = {
+ trio.TooSlowError: WriteTimeout,
+ trio.BrokenResourceError: WriteError,
+ }
+
+ async with self.write_lock:
+ with map_exceptions(exc_map):
+ try:
+ with trio.fail_after(write_timeout):
+ return await self.stream.send_all(data)
+ except trio.TooSlowError as exc:
+ await self.stream.aclose()
+ raise exc
+
+ async def aclose(self) -> None:
+ async with self.write_lock:
+ try:
+ await self.stream.aclose()
+ except trio.BrokenResourceError:
+ pass
+
+ def is_readable(self) -> bool:
+ # Adapted from: https://github.com/encode/httpx/pull/143#issuecomment-515202982
+ stream = self.stream
+
+ # Peek through any SSLStream wrappers to get the underlying SocketStream.
+ while isinstance(stream, trio.SSLStream):
+ stream = stream.transport_stream
+ assert isinstance(stream, trio.SocketStream)
+
+ return stream.socket.is_readable()
+
+
+class Lock(AsyncLock):
+ def __init__(self) -> None:
+ self._lock = trio.Lock()
+
+ async def release(self) -> None:
+ self._lock.release()
+
+ async def acquire(self) -> None:
+ await self._lock.acquire()
+
+
+class Semaphore(AsyncSemaphore):
+ def __init__(self, max_value: int, exc_class: type):
+ self.max_value = max_value
+ self.exc_class = exc_class
+
+ @property
+ def semaphore(self) -> trio.Semaphore:
+ if not hasattr(self, "_semaphore"):
+ self._semaphore = trio.Semaphore(self.max_value, max_value=self.max_value)
+ return self._semaphore
+
+ async def acquire(self, timeout: float = None) -> None:
+ timeout = none_as_inf(timeout)
+
+ with trio.move_on_after(timeout):
+ await self.semaphore.acquire()
+ return
+
+ raise self.exc_class()
+
+ async def release(self) -> None:
+ self.semaphore.release()
+
+
+class TrioBackend(AsyncBackend):
+ async def open_tcp_stream(
+ self,
+ hostname: bytes,
+ port: int,
+ ssl_context: Optional[SSLContext],
+ timeout: TimeoutDict,
+ *,
+ local_address: Optional[str],
+ ) -> AsyncSocketStream:
+ connect_timeout = none_as_inf(timeout.get("connect"))
+ # Trio will support local_address from 0.16.1 onwards.
+ # We only include the keyword argument if a local_address
+ # argument has been passed.
+ kwargs: dict = {} if local_address is None else {"local_address": local_address}
+ exc_map = {
+ OSError: ConnectError,
+ trio.TooSlowError: ConnectTimeout,
+ trio.BrokenResourceError: ConnectError,
+ }
+
+ with map_exceptions(exc_map):
+ with trio.fail_after(connect_timeout):
+ stream: trio.abc.Stream = await trio.open_tcp_stream(
+ hostname, port, **kwargs
+ )
+
+ if ssl_context is not None:
+ stream = trio.SSLStream(
+ stream, ssl_context, server_hostname=hostname.decode("ascii")
+ )
+ await stream.do_handshake()
+
+ return SocketStream(stream=stream)
+
+ async def open_uds_stream(
+ self,
+ path: str,
+ hostname: bytes,
+ ssl_context: Optional[SSLContext],
+ timeout: TimeoutDict,
+ ) -> AsyncSocketStream:
+ connect_timeout = none_as_inf(timeout.get("connect"))
+ exc_map = {
+ OSError: ConnectError,
+ trio.TooSlowError: ConnectTimeout,
+ trio.BrokenResourceError: ConnectError,
+ }
+
+ with map_exceptions(exc_map):
+ with trio.fail_after(connect_timeout):
+ stream: trio.abc.Stream = await trio.open_unix_socket(path)
+
+ if ssl_context is not None:
+ stream = trio.SSLStream(
+ stream, ssl_context, server_hostname=hostname.decode("ascii")
+ )
+ await stream.do_handshake()
+
+ return SocketStream(stream=stream)
+
+ def create_lock(self) -> AsyncLock:
+ return Lock()
+
+ def create_semaphore(self, max_value: int, exc_class: type) -> AsyncSemaphore:
+ return Semaphore(max_value, exc_class=exc_class)
+
+ async def time(self) -> float:
+ return trio.current_time()
+
+ async def sleep(self, seconds: float) -> None:
+ await trio.sleep(seconds)
diff --git a/packages/httpcore/_bytestreams.py b/packages/httpcore/_bytestreams.py
new file mode 100644
index 000000000..317f41103
--- /dev/null
+++ b/packages/httpcore/_bytestreams.py
@@ -0,0 +1,96 @@
+from typing import AsyncIterator, Callable, Iterator
+
+from ._async.base import AsyncByteStream
+from ._sync.base import SyncByteStream
+
+
+class ByteStream(AsyncByteStream, SyncByteStream):
+ """
+ A concrete implementation for either sync or async byte streams.
+
+ Example::
+
+ stream = httpcore.ByteStream(b"123")
+
+ Parameters
+ ----------
+ content:
+ A plain byte string used as the content of the stream.
+ """
+
+ def __init__(self, content: bytes) -> None:
+ self._content = content
+
+ def __iter__(self) -> Iterator[bytes]:
+ yield self._content
+
+ async def __aiter__(self) -> AsyncIterator[bytes]:
+ yield self._content
+
+
+class IteratorByteStream(SyncByteStream):
+ """
+ A concrete implementation for sync byte streams.
+
+ Example::
+
+ def generate_content():
+ yield b"Hello, world!"
+ ...
+
+ stream = httpcore.IteratorByteStream(generate_content())
+
+ Parameters
+ ----------
+ iterator:
+ A sync byte iterator, used as the content of the stream.
+ close_func:
+ An optional function called when closing the stream.
+ """
+
+ def __init__(self, iterator: Iterator[bytes], close_func: Callable = None) -> None:
+ self._iterator = iterator
+ self._close_func = close_func
+
+ def __iter__(self) -> Iterator[bytes]:
+ for chunk in self._iterator:
+ yield chunk
+
+ def close(self) -> None:
+ if self._close_func is not None:
+ self._close_func()
+
+
+class AsyncIteratorByteStream(AsyncByteStream):
+ """
+ A concrete implementation for async byte streams.
+
+ Example::
+
+ async def generate_content():
+ yield b"Hello, world!"
+ ...
+
+ stream = httpcore.AsyncIteratorByteStream(generate_content())
+
+ Parameters
+ ----------
+ aiterator:
+ An async byte iterator, used as the content of the stream.
+ aclose_func:
+ An optional async function called when closing the stream.
+ """
+
+ def __init__(
+ self, aiterator: AsyncIterator[bytes], aclose_func: Callable = None
+ ) -> None:
+ self._aiterator = aiterator
+ self._aclose_func = aclose_func
+
+ async def __aiter__(self) -> AsyncIterator[bytes]:
+ async for chunk in self._aiterator:
+ yield chunk
+
+ async def aclose(self) -> None:
+ if self._aclose_func is not None:
+ await self._aclose_func()
diff --git a/packages/httpcore/_exceptions.py b/packages/httpcore/_exceptions.py
new file mode 100644
index 000000000..ba5682999
--- /dev/null
+++ b/packages/httpcore/_exceptions.py
@@ -0,0 +1,79 @@
+import contextlib
+from typing import Dict, Iterator, Type
+
+
+@contextlib.contextmanager
+def map_exceptions(map: Dict[Type[Exception], Type[Exception]]) -> Iterator[None]:
+ try:
+ yield
+ except Exception as exc: # noqa: PIE786
+ for from_exc, to_exc in map.items():
+ if isinstance(exc, from_exc):
+ raise to_exc(exc) from None
+ raise
+
+
+class UnsupportedProtocol(Exception):
+ pass
+
+
+class ProtocolError(Exception):
+ pass
+
+
+class RemoteProtocolError(ProtocolError):
+ pass
+
+
+class LocalProtocolError(ProtocolError):
+ pass
+
+
+class ProxyError(Exception):
+ pass
+
+
+# Timeout errors
+
+
+class TimeoutException(Exception):
+ pass
+
+
+class PoolTimeout(TimeoutException):
+ pass
+
+
+class ConnectTimeout(TimeoutException):
+ pass
+
+
+class ReadTimeout(TimeoutException):
+ pass
+
+
+class WriteTimeout(TimeoutException):
+ pass
+
+
+# Network errors
+
+
+class NetworkError(Exception):
+ pass
+
+
+class ConnectError(NetworkError):
+ pass
+
+
+class ReadError(NetworkError):
+ pass
+
+
+class WriteError(NetworkError):
+ pass
+
+
+class CloseError(NetworkError):
+ pass
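map_exceptions is the glue between backend-specific errors and this exception hierarchy: anything raised inside the with-block whose type matches a key in the mapping is re-raised as the mapped httpcore exception, and everything else propagates unchanged. A small standalone check (assumes the vendored httpcore package is importable):

    from httpcore._exceptions import ConnectError, map_exceptions

    exc_map = {OSError: ConnectError}

    try:
        with map_exceptions(exc_map):
            raise OSError("connection refused")
    except ConnectError as exc:
        print(f"re-raised as ConnectError: {exc}")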
diff --git a/packages/httpcore/_sync/__init__.py b/packages/httpcore/_sync/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/packages/httpcore/_sync/base.py b/packages/httpcore/_sync/base.py
new file mode 100644
index 000000000..45ef4abfc
--- /dev/null
+++ b/packages/httpcore/_sync/base.py
@@ -0,0 +1,122 @@
+import enum
+from types import TracebackType
+from typing import Iterator, Tuple, Type
+
+from .._types import URL, Headers, T
+
+
+class NewConnectionRequired(Exception):
+ pass
+
+
+class ConnectionState(enum.IntEnum):
+ """
+ PENDING READY
+ | | ^
+ v V |
+ ACTIVE |
+ | | |
+ | V |
+ V IDLE-+
+ FULL |
+ | |
+ V V
+ CLOSED
+ """
+
+ PENDING = 0 # Connection not yet acquired.
+ READY = 1 # Re-acquired from pool, about to send a request.
+ ACTIVE = 2 # Active requests.
+ FULL = 3 # Active requests, no more stream IDs available.
+ IDLE = 4 # No active requests.
+ CLOSED = 5 # Connection closed.
+
+
+class SyncByteStream:
+ """
+ The base interface for request and response bodies.
+
+ Concrete implementations should subclass this class, and implement
+ the :meth:`__iter__` method, and optionally the :meth:`close` method.
+ """
+
+ def __iter__(self) -> Iterator[bytes]:
+ """
+ Yield bytes representing the request or response body.
+ """
+ yield b"" # pragma: nocover
+
+ def close(self) -> None:
+ """
+ Must be called by the client to indicate that the stream has been closed.
+ """
+ pass # pragma: nocover
+
+ def read(self) -> bytes:
+ try:
+ return b"".join([part for part in self])
+ finally:
+ self.close()
+
+
+class SyncHTTPTransport:
+ """
+ The base interface for sending HTTP requests.
+
+ Concrete implementations should subclass this class, and implement
+ the :meth:`handle_request` method, and optionally the :meth:`close` method.
+ """
+
+ def handle_request(
+ self,
+ method: bytes,
+ url: URL,
+ headers: Headers,
+ stream: SyncByteStream,
+ extensions: dict,
+ ) -> Tuple[int, Headers, SyncByteStream, dict]:
+ """
+ The interface for sending a single HTTP request, and returning a response.
+
+ Parameters
+ ----------
+ method:
+ The HTTP method, such as ``b'GET'``.
+ url:
+ The URL as a 4-tuple of (scheme, host, port, path).
+ headers:
+ Any HTTP headers to send with the request.
+ stream:
+ The body of the HTTP request.
+ extensions:
+ A dictionary of optional extensions.
+
+ Returns
+ -------
+ status_code:
+ The HTTP status code, such as ``200``.
+ headers:
+ Any HTTP headers included on the response.
+ stream:
+ The body of the HTTP response.
+ extensions:
+ A dictionary of optional extensions.
+ """
+ raise NotImplementedError() # pragma: nocover
+
+ def close(self) -> None:
+ """
+ Close the implementation, which should close any outstanding response streams,
+ and any keep alive connections.
+ """
+
+ def __enter__(self: T) -> T:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Type[BaseException] = None,
+ exc_value: BaseException = None,
+ traceback: TracebackType = None,
+ ) -> None:
+ self.close()
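The read() helper above is the pattern response bodies rely on: it drains the iterator into a single bytes object and always closes the stream, even if iteration fails. A tiny illustration using a throwaway subclass (the names are illustrative):

    from httpcore._sync.base import SyncByteStream

    class ListStream(SyncByteStream):
        def __init__(self, parts):
            self._parts = parts
            self.closed = False

        def __iter__(self):
            yield from self._parts

        def close(self) -> None:
            self.closed = True

    stream = ListStream([b"Hello, ", b"world!"])
    print(stream.read())   # b'Hello, world!'
    print(stream.closed)   # True -- read() closes the stream when it finishes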
diff --git a/packages/httpcore/_sync/connection.py b/packages/httpcore/_sync/connection.py
new file mode 100644
index 000000000..382a4f9f6
--- /dev/null
+++ b/packages/httpcore/_sync/connection.py
@@ -0,0 +1,220 @@
+from ssl import SSLContext
+from typing import List, Optional, Tuple, cast
+
+from .._backends.sync import SyncBackend, SyncLock, SyncSocketStream, SyncBackend
+from .._exceptions import ConnectError, ConnectTimeout
+from .._types import URL, Headers, Origin, TimeoutDict
+from .._utils import exponential_backoff, get_logger, url_to_origin
+from .base import SyncByteStream, SyncHTTPTransport, NewConnectionRequired
+from .http import SyncBaseHTTPConnection
+from .http11 import SyncHTTP11Connection
+
+logger = get_logger(__name__)
+
+RETRIES_BACKOFF_FACTOR = 0.5 # 0s, 0.5s, 1s, 2s, 4s, etc.
+
+
+class SyncHTTPConnection(SyncHTTPTransport):
+ def __init__(
+ self,
+ origin: Origin,
+ http1: bool = True,
+ http2: bool = False,
+ keepalive_expiry: float = None,
+ uds: str = None,
+ ssl_context: SSLContext = None,
+ socket: SyncSocketStream = None,
+ local_address: str = None,
+ retries: int = 0,
+ backend: SyncBackend = None,
+ ):
+ self.origin = origin
+ self._http1_enabled = http1
+ self._http2_enabled = http2
+ self._keepalive_expiry = keepalive_expiry
+ self._uds = uds
+ self._ssl_context = SSLContext() if ssl_context is None else ssl_context
+ self.socket = socket
+ self._local_address = local_address
+ self._retries = retries
+
+ alpn_protocols: List[str] = []
+ if http1:
+ alpn_protocols.append("http/1.1")
+ if http2:
+ alpn_protocols.append("h2")
+
+ self._ssl_context.set_alpn_protocols(alpn_protocols)
+
+ self.connection: Optional[SyncBaseHTTPConnection] = None
+ self._is_http11 = False
+ self._is_http2 = False
+ self._connect_failed = False
+ self._expires_at: Optional[float] = None
+ self._backend = SyncBackend() if backend is None else backend
+
+ def __repr__(self) -> str:
+ return f""
+
+ def info(self) -> str:
+ if self.connection is None:
+ return "Connection failed" if self._connect_failed else "Connecting"
+ return self.connection.info()
+
+ def should_close(self) -> bool:
+ """
+ Return `True` if the connection is in a state where it should be closed.
+ This occurs when any of the following occur:
+
+ * There are no active requests on an HTTP/1.1 connection, and the underlying
+ socket is readable. The only valid state in which the socket can be
+ readable here is when the b"" EOF marker is about to be returned,
+ indicating a server disconnect.
+ * There are no active requests being made and the keepalive timeout has passed.
+ """
+ if self.connection is None:
+ return False
+ return self.connection.should_close()
+
+ def is_idle(self) -> bool:
+ """
+ Return `True` if the connection is currently idle.
+ """
+ if self.connection is None:
+ return False
+ return self.connection.is_idle()
+
+ def is_closed(self) -> bool:
+ if self.connection is None:
+ return self._connect_failed
+ return self.connection.is_closed()
+
+ def is_available(self) -> bool:
+ """
+ Return `True` if the connection is currently able to accept an outgoing request.
+ This occurs when any of the following occur:
+
+ * The connection has not yet been opened, and HTTP/2 support is enabled.
+ We don't *know* at this point if we'll end up on an HTTP/2 connection or
+ not, but we *might* do, so we indicate availability.
+ * The connection has been opened, and is currently idle.
+ * The connection is open, and is an HTTP/2 connection. The connection must
+ also not currently be exceeding the maximum number of allowable concurrent
+ streams and must not have exhausted the maximum total number of stream IDs.
+ """
+ if self.connection is None:
+ return self._http2_enabled and not self.is_closed
+ return self.connection.is_available()
+
+ @property
+ def request_lock(self) -> SyncLock:
+ # We do this lazily, to make sure backend autodetection always
+ # runs within an async context.
+ if not hasattr(self, "_request_lock"):
+ self._request_lock = self._backend.create_lock()
+ return self._request_lock
+
+ def handle_request(
+ self,
+ method: bytes,
+ url: URL,
+ headers: Headers,
+ stream: SyncByteStream,
+ extensions: dict,
+ ) -> Tuple[int, Headers, SyncByteStream, dict]:
+ assert url_to_origin(url) == self.origin
+ timeout = cast(TimeoutDict, extensions.get("timeout", {}))
+
+ with self.request_lock:
+ if self.connection is None:
+ if self._connect_failed:
+ raise NewConnectionRequired()
+ if not self.socket:
+ logger.trace(
+ "open_socket origin=%r timeout=%r", self.origin, timeout
+ )
+ self.socket = self._open_socket(timeout)
+ self._create_connection(self.socket)
+ elif not self.connection.is_available():
+ raise NewConnectionRequired()
+
+ assert self.connection is not None
+ logger.trace(
+ "connection.handle_request method=%r url=%r headers=%r",
+ method,
+ url,
+ headers,
+ )
+ return self.connection.handle_request(
+ method, url, headers, stream, extensions
+ )
+
+ def _open_socket(self, timeout: TimeoutDict = None) -> SyncSocketStream:
+ scheme, hostname, port = self.origin
+ timeout = {} if timeout is None else timeout
+ ssl_context = self._ssl_context if scheme == b"https" else None
+
+ retries_left = self._retries
+ delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR)
+
+ while True:
+ try:
+ if self._uds is None:
+ return self._backend.open_tcp_stream(
+ hostname,
+ port,
+ ssl_context,
+ timeout,
+ local_address=self._local_address,
+ )
+ else:
+ return self._backend.open_uds_stream(
+ self._uds, hostname, ssl_context, timeout
+ )
+ except (ConnectError, ConnectTimeout):
+ if retries_left <= 0:
+ self._connect_failed = True
+ raise
+ retries_left -= 1
+ delay = next(delays)
+ self._backend.sleep(delay)
+ except Exception: # noqa: PIE786
+ self._connect_failed = True
+ raise
+
+ def _create_connection(self, socket: SyncSocketStream) -> None:
+ http_version = socket.get_http_version()
+ logger.trace(
+ "create_connection socket=%r http_version=%r", socket, http_version
+ )
+ if http_version == "HTTP/2" or (
+ self._http2_enabled and not self._http1_enabled
+ ):
+ from .http2 import SyncHTTP2Connection
+
+ self._is_http2 = True
+ self.connection = SyncHTTP2Connection(
+ socket=socket,
+ keepalive_expiry=self._keepalive_expiry,
+ backend=self._backend,
+ )
+ else:
+ self._is_http11 = True
+ self.connection = SyncHTTP11Connection(
+ socket=socket, keepalive_expiry=self._keepalive_expiry
+ )
+
+ def start_tls(
+ self, hostname: bytes, ssl_context: SSLContext, timeout: TimeoutDict = None
+ ) -> None:
+ if self.connection is not None:
+ logger.trace("start_tls hostname=%r timeout=%r", hostname, timeout)
+ self.socket = self.connection.start_tls(
+ hostname, ssl_context, timeout
+ )
+ logger.trace("start_tls complete hostname=%r timeout=%r", hostname, timeout)
+
+ def close(self) -> None:
+ with self.request_lock:
+ if self.connection is not None:
+ self.connection.close()
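
A minimal usage sketch for this class, assuming the library is importable as `httpcore`
(in this add-on it is vendored under `packages/`) and using the tuple-based types from the
signatures above; most callers go through the connection pool in the next file instead:

    from httpcore._bytestreams import ByteStream
    from httpcore._sync.connection import SyncHTTPConnection

    conn = SyncHTTPConnection(origin=(b"https", b"example.org", 443), retries=1)
    status, headers, stream, ext = conn.handle_request(
        method=b"GET",
        url=(b"https", b"example.org", 443, b"/"),
        headers=[(b"host", b"example.org")],
        stream=ByteStream(b""),  # empty request body
        extensions={},
    )
    body = b"".join(stream)  # drain the response body
    stream.close()
    conn.close()
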
diff --git a/packages/httpcore/_sync/connection_pool.py b/packages/httpcore/_sync/connection_pool.py
new file mode 100644
index 000000000..22ca98fa4
--- /dev/null
+++ b/packages/httpcore/_sync/connection_pool.py
@@ -0,0 +1,365 @@
+import warnings
+from ssl import SSLContext
+from typing import (
+ Iterator,
+ Callable,
+ Dict,
+ List,
+ Optional,
+ Set,
+ Tuple,
+ Union,
+ cast,
+)
+
+from .._backends.sync import SyncBackend, SyncLock, SyncSemaphore
+from .._backends.base import lookup_sync_backend
+from .._exceptions import LocalProtocolError, PoolTimeout, UnsupportedProtocol
+from .._threadlock import ThreadLock
+from .._types import URL, Headers, Origin, TimeoutDict
+from .._utils import get_logger, origin_to_url_string, url_to_origin
+from .base import SyncByteStream, SyncHTTPTransport, NewConnectionRequired
+from .connection import SyncHTTPConnection
+
+logger = get_logger(__name__)
+
+
+class NullSemaphore(SyncSemaphore):
+ def __init__(self) -> None:
+ pass
+
+ def acquire(self, timeout: float = None) -> None:
+ return
+
+ def release(self) -> None:
+ return
+
+
+class ResponseByteStream(SyncByteStream):
+ def __init__(
+ self,
+ stream: SyncByteStream,
+ connection: SyncHTTPConnection,
+ callback: Callable,
+ ) -> None:
+ """
+ A wrapper around the response stream that we return from
+ `.handle_request()`.
+
+ Ensures that when `stream.close()` is called, the connection pool
+ is notified via a callback.
+ """
+ self.stream = stream
+ self.connection = connection
+ self.callback = callback
+
+ def __iter__(self) -> Iterator[bytes]:
+ for chunk in self.stream:
+ yield chunk
+
+ def close(self) -> None:
+ try:
+ # Call the underlying stream close callback.
+ # This will be a call to `SyncHTTP11Connection._response_closed()`
+ # or `SyncHTTP2Stream._response_closed()`.
+ self.stream.close()
+ finally:
+ # Call the connection pool close callback.
+ # This will be a call to `SyncConnectionPool._response_closed()`.
+ self.callback(self.connection)
+
+
+class SyncConnectionPool(SyncHTTPTransport):
+ """
+ A connection pool for making HTTP requests.
+
+ Parameters
+ ----------
+ ssl_context:
+ An SSL context to use for verifying connections.
+ max_connections:
+ The maximum number of concurrent connections to allow.
+ max_keepalive_connections:
+ The maximum number of connections to allow before closing keep-alive
+ connections.
+ keepalive_expiry:
+ The maximum time to allow before closing a keep-alive connection.
+ http1:
+ Enable/Disable HTTP/1.1 support. Defaults to True.
+ http2:
+ Enable/Disable HTTP/2 support. Defaults to False.
+ uds:
+ Path to a Unix Domain Socket to use instead of TCP sockets.
+ local_address:
+ Local address to connect from. Can also be used to connect using a particular
+ address family. Using ``local_address="0.0.0.0"`` will connect using an
+ ``AF_INET`` address (IPv4), while using ``local_address="::"`` will connect
+ using an ``AF_INET6`` address (IPv6).
+ retries:
+ The maximum number of retries when trying to establish a connection.
+ backend:
+ A name indicating which concurrency backend to use.
+ """
+
+ def __init__(
+ self,
+ ssl_context: SSLContext = None,
+ max_connections: int = None,
+ max_keepalive_connections: int = None,
+ keepalive_expiry: float = None,
+ http1: bool = True,
+ http2: bool = False,
+ uds: str = None,
+ local_address: str = None,
+ retries: int = 0,
+ max_keepalive: int = None,
+ backend: Union[SyncBackend, str] = "sync",
+ ):
+ if max_keepalive is not None:
+ warnings.warn(
+ "'max_keepalive' is deprecated. Use 'max_keepalive_connections'.",
+ DeprecationWarning,
+ )
+ max_keepalive_connections = max_keepalive
+
+ if isinstance(backend, str):
+ backend = lookup_sync_backend(backend)
+
+ self._ssl_context = SSLContext() if ssl_context is None else ssl_context
+ self._max_connections = max_connections
+ self._max_keepalive_connections = max_keepalive_connections
+ self._keepalive_expiry = keepalive_expiry
+ self._http1 = http1
+ self._http2 = http2
+ self._uds = uds
+ self._local_address = local_address
+ self._retries = retries
+ self._connections: Dict[Origin, Set[SyncHTTPConnection]] = {}
+ self._thread_lock = ThreadLock()
+ self._backend = backend
+ self._next_keepalive_check = 0.0
+
+ if not (http1 or http2):
+ raise ValueError("Either http1 or http2 must be True.")
+
+ if http2:
+ try:
+ import h2 # noqa: F401
+ except ImportError:
+ raise ImportError(
+ "Attempted to use http2=True, but the 'h2' "
+ "package is not installed. Use 'pip install httpcore[http2]'."
+ )
+
+ @property
+ def _connection_semaphore(self) -> SyncSemaphore:
+ # We do this lazily, to make sure backend autodetection always
+ # runs within an async context.
+ if not hasattr(self, "_internal_semaphore"):
+ if self._max_connections is not None:
+ self._internal_semaphore = self._backend.create_semaphore(
+ self._max_connections, exc_class=PoolTimeout
+ )
+ else:
+ self._internal_semaphore = NullSemaphore()
+
+ return self._internal_semaphore
+
+ @property
+ def _connection_acquiry_lock(self) -> SyncLock:
+ if not hasattr(self, "_internal_connection_acquiry_lock"):
+ self._internal_connection_acquiry_lock = self._backend.create_lock()
+ return self._internal_connection_acquiry_lock
+
+ def _create_connection(
+ self,
+ origin: Tuple[bytes, bytes, int],
+ ) -> SyncHTTPConnection:
+ return SyncHTTPConnection(
+ origin=origin,
+ http1=self._http1,
+ http2=self._http2,
+ keepalive_expiry=self._keepalive_expiry,
+ uds=self._uds,
+ ssl_context=self._ssl_context,
+ local_address=self._local_address,
+ retries=self._retries,
+ backend=self._backend,
+ )
+
+ def handle_request(
+ self,
+ method: bytes,
+ url: URL,
+ headers: Headers,
+ stream: SyncByteStream,
+ extensions: dict,
+ ) -> Tuple[int, Headers, SyncByteStream, dict]:
+ if url[0] not in (b"http", b"https"):
+ scheme = url[0].decode("latin-1")
+ host = url[1].decode("latin-1")
+ if scheme == "":
+ raise UnsupportedProtocol(
+ f"The request to '://{host}/' is missing either an 'http://' \
+ or 'https://' protocol."
+ )
+ else:
+ raise UnsupportedProtocol(
+ f"The request to '{scheme}://{host}' has \
+ an unsupported protocol {scheme!r}"
+ )
+
+ if not url[1]:
+ raise LocalProtocolError("Missing hostname in URL.")
+
+ origin = url_to_origin(url)
+ timeout = cast(TimeoutDict, extensions.get("timeout", {}))
+
+ self._keepalive_sweep()
+
+ connection: Optional[SyncHTTPConnection] = None
+ while connection is None:
+ with self._connection_acquiry_lock:
+ # We get-or-create a connection as an atomic operation, to ensure
+ # that HTTP/2 requests issued in close concurrency will end up
+ # on the same connection.
+ logger.trace("get_connection_from_pool=%r", origin)
+ connection = self._get_connection_from_pool(origin)
+
+ if connection is None:
+ connection = self._create_connection(origin=origin)
+ logger.trace("created connection=%r", connection)
+ self._add_to_pool(connection, timeout=timeout)
+ else:
+ logger.trace("reuse connection=%r", connection)
+
+ try:
+ response = connection.handle_request(
+ method, url, headers=headers, stream=stream, extensions=extensions
+ )
+ except NewConnectionRequired:
+ connection = None
+ except BaseException: # noqa: PIE786
+ # See https://github.com/encode/httpcore/pull/305 for motivation
+ # behind catching 'BaseException' rather than 'Exception' here.
+ logger.trace("remove from pool connection=%r", connection)
+ self._remove_from_pool(connection)
+ raise
+
+ status_code, headers, stream, extensions = response
+ wrapped_stream = ResponseByteStream(
+ stream, connection=connection, callback=self._response_closed
+ )
+ return status_code, headers, wrapped_stream, extensions
+
+ def _get_connection_from_pool(
+ self, origin: Origin
+ ) -> Optional[SyncHTTPConnection]:
+ # Determine expired keep alive connections on this origin.
+ reuse_connection = None
+ connections_to_close = set()
+
+ for connection in self._connections_for_origin(origin):
+ if connection.should_close():
+ connections_to_close.add(connection)
+ self._remove_from_pool(connection)
+ elif connection.is_available():
+ reuse_connection = connection
+
+ # Close any dropped connections.
+ for connection in connections_to_close:
+ connection.close()
+
+ return reuse_connection
+
+ def _response_closed(self, connection: SyncHTTPConnection) -> None:
+ remove_from_pool = False
+ close_connection = False
+
+ if connection.is_closed():
+ remove_from_pool = True
+ elif connection.is_idle():
+ num_connections = len(self._get_all_connections())
+ if (
+ self._max_keepalive_connections is not None
+ and num_connections > self._max_keepalive_connections
+ ):
+ remove_from_pool = True
+ close_connection = True
+
+ if remove_from_pool:
+ self._remove_from_pool(connection)
+
+ if close_connection:
+ connection.close()
+
+ def _keepalive_sweep(self) -> None:
+ """
+ Remove any IDLE connections that have expired past their keep-alive time.
+ """
+ if self._keepalive_expiry is None:
+ return
+
+ now = self._backend.time()
+ if now < self._next_keepalive_check:
+ return
+
+ self._next_keepalive_check = now + min(1.0, self._keepalive_expiry)
+ connections_to_close = set()
+
+ for connection in self._get_all_connections():
+ if connection.should_close():
+ connections_to_close.add(connection)
+ self._remove_from_pool(connection)
+
+ for connection in connections_to_close:
+ connection.close()
+
+ def _add_to_pool(
+ self, connection: SyncHTTPConnection, timeout: TimeoutDict
+ ) -> None:
+ logger.trace("adding connection to pool=%r", connection)
+ self._connection_semaphore.acquire(timeout=timeout.get("pool", None))
+ with self._thread_lock:
+ self._connections.setdefault(connection.origin, set())
+ self._connections[connection.origin].add(connection)
+
+ def _remove_from_pool(self, connection: SyncHTTPConnection) -> None:
+ logger.trace("removing connection from pool=%r", connection)
+ with self._thread_lock:
+ if connection in self._connections.get(connection.origin, set()):
+ self._connection_semaphore.release()
+ self._connections[connection.origin].remove(connection)
+ if not self._connections[connection.origin]:
+ del self._connections[connection.origin]
+
+ def _connections_for_origin(self, origin: Origin) -> Set[SyncHTTPConnection]:
+ return set(self._connections.get(origin, set()))
+
+ def _get_all_connections(self) -> Set[SyncHTTPConnection]:
+ connections: Set[SyncHTTPConnection] = set()
+ for connection_set in self._connections.values():
+ connections |= connection_set
+ return connections
+
+ def close(self) -> None:
+ connections = self._get_all_connections()
+ for connection in connections:
+ self._remove_from_pool(connection)
+
+ # Close all connections
+ for connection in connections:
+ connection.close()
+
+ def get_connection_info(self) -> Dict[str, List[str]]:
+ """
+ Returns a dict of origin URLs to a list of summary strings for each connection.
+ """
+ self._keepalive_sweep()
+
+ stats = {}
+ for origin, connections in self._connections.items():
+ stats[origin_to_url_string(origin)] = sorted(
+ [connection.info() for connection in connections]
+ )
+ return stats
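
A short sketch of driving the pool directly (upstream exposes this class as
`httpcore.SyncConnectionPool`; the request signature is the same tuple-based one used above):

    from httpcore import SyncConnectionPool
    from httpcore._bytestreams import ByteStream

    pool = SyncConnectionPool(max_connections=10, keepalive_expiry=5.0)
    try:
        status, headers, stream, ext = pool.handle_request(
            method=b"GET",
            url=(b"https", b"example.org", 443, b"/"),
            headers=[(b"host", b"example.org")],
            stream=ByteStream(b""),
            extensions={},
        )
        body = b"".join(stream)  # iterating the ResponseByteStream reads the body
        stream.close()           # notifies the pool via _response_closed()
    finally:
        pool.close()
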
diff --git a/packages/httpcore/_sync/http.py b/packages/httpcore/_sync/http.py
new file mode 100644
index 000000000..c128a96b2
--- /dev/null
+++ b/packages/httpcore/_sync/http.py
@@ -0,0 +1,42 @@
+from ssl import SSLContext
+
+from .._backends.sync import SyncSocketStream
+from .._types import TimeoutDict
+from .base import SyncHTTPTransport
+
+
+class SyncBaseHTTPConnection(SyncHTTPTransport):
+ def info(self) -> str:
+ raise NotImplementedError() # pragma: nocover
+
+ def should_close(self) -> bool:
+ """
+ Return `True` if the connection is in a state where it should be closed.
+ """
+ raise NotImplementedError() # pragma: nocover
+
+ def is_idle(self) -> bool:
+ """
+ Return `True` if the connection is currently idle.
+ """
+ raise NotImplementedError() # pragma: nocover
+
+ def is_closed(self) -> bool:
+ """
+ Return `True` if the connection has been closed.
+ """
+ raise NotImplementedError() # pragma: nocover
+
+ def is_available(self) -> bool:
+ """
+ Return `True` if the connection is currently able to accept an outgoing request.
+ """
+ raise NotImplementedError() # pragma: nocover
+
+ def start_tls(
+ self, hostname: bytes, ssl_context: SSLContext, timeout: TimeoutDict = None
+ ) -> SyncSocketStream:
+ """
+ Upgrade the underlying socket to TLS.
+ """
+ raise NotImplementedError() # pragma: nocover
diff --git a/packages/httpcore/_sync/http11.py b/packages/httpcore/_sync/http11.py
new file mode 100644
index 000000000..5dbb42e02
--- /dev/null
+++ b/packages/httpcore/_sync/http11.py
@@ -0,0 +1,269 @@
+import enum
+import time
+from ssl import SSLContext
+from typing import Iterator, List, Optional, Tuple, Union, cast
+
+import h11
+
+from .._backends.sync import SyncSocketStream
+from .._bytestreams import IteratorByteStream
+from .._exceptions import LocalProtocolError, RemoteProtocolError, map_exceptions
+from .._types import URL, Headers, TimeoutDict
+from .._utils import get_logger
+from .base import SyncByteStream, NewConnectionRequired
+from .http import SyncBaseHTTPConnection
+
+H11Event = Union[
+ h11.Request,
+ h11.Response,
+ h11.InformationalResponse,
+ h11.Data,
+ h11.EndOfMessage,
+ h11.ConnectionClosed,
+]
+
+
+class ConnectionState(enum.IntEnum):
+ NEW = 0
+ ACTIVE = 1
+ IDLE = 2
+ CLOSED = 3
+
+
+logger = get_logger(__name__)
+
+
+class SyncHTTP11Connection(SyncBaseHTTPConnection):
+ READ_NUM_BYTES = 64 * 1024
+
+ def __init__(self, socket: SyncSocketStream, keepalive_expiry: float = None):
+ self.socket = socket
+
+ self._keepalive_expiry: Optional[float] = keepalive_expiry
+ self._should_expire_at: Optional[float] = None
+ self._h11_state = h11.Connection(our_role=h11.CLIENT)
+ self._state = ConnectionState.NEW
+
+ def __repr__(self) -> str:
+ return f""
+
+ def _now(self) -> float:
+ return time.monotonic()
+
+ def _server_disconnected(self) -> bool:
+ """
+ Return True if the connection is idle, and the underlying socket is readable.
+ The only valid state in which the socket can be readable here is when the
+ b"" EOF marker is about to be returned, indicating a server disconnect.
+ """
+ return self._state == ConnectionState.IDLE and self.socket.is_readable()
+
+ def _keepalive_expired(self) -> bool:
+ """
+ Return True if the connection is idle, and has passed its keepalive
+ expiry time.
+ """
+ return (
+ self._state == ConnectionState.IDLE
+ and self._should_expire_at is not None
+ and self._now() >= self._should_expire_at
+ )
+
+ def info(self) -> str:
+ return f"HTTP/1.1, {self._state.name}"
+
+ def should_close(self) -> bool:
+ """
+ Return `True` if the connection is in a state where it should be closed.
+ """
+ return self._server_disconnected() or self._keepalive_expired()
+
+ def is_idle(self) -> bool:
+ """
+ Return `True` if the connection is currently idle.
+ """
+ return self._state == ConnectionState.IDLE
+
+ def is_closed(self) -> bool:
+ """
+ Return `True` if the connection has been closed.
+ """
+ return self._state == ConnectionState.CLOSED
+
+ def is_available(self) -> bool:
+ """
+ Return `True` if the connection is currently able to accept an outgoing request.
+ """
+ return self._state == ConnectionState.IDLE
+
+ def handle_request(
+ self,
+ method: bytes,
+ url: URL,
+ headers: Headers,
+ stream: SyncByteStream,
+ extensions: dict,
+ ) -> Tuple[int, Headers, SyncByteStream, dict]:
+ """
+ Send a single HTTP/1.1 request.
+
+ Note that there is no kind of task/thread locking at this layer of interface.
+ Dealing with locking for concurrency is handled by the `SyncHTTPConnection`.
+ """
+ timeout = cast(TimeoutDict, extensions.get("timeout", {}))
+
+ if self._state in (ConnectionState.NEW, ConnectionState.IDLE):
+ self._state = ConnectionState.ACTIVE
+ self._should_expire_at = None
+ else:
+ raise NewConnectionRequired()
+
+ self._send_request(method, url, headers, timeout)
+ self._send_request_body(stream, timeout)
+ (
+ http_version,
+ status_code,
+ reason_phrase,
+ headers,
+ ) = self._receive_response(timeout)
+ response_stream = IteratorByteStream(
+ iterator=self._receive_response_data(timeout),
+ close_func=self._response_closed,
+ )
+ extensions = {
+ "http_version": http_version,
+ "reason_phrase": reason_phrase,
+ }
+ return (status_code, headers, response_stream, extensions)
+
+ def start_tls(
+ self, hostname: bytes, ssl_context: SSLContext, timeout: TimeoutDict = None
+ ) -> SyncSocketStream:
+ timeout = {} if timeout is None else timeout
+ self.socket = self.socket.start_tls(hostname, ssl_context, timeout)
+ return self.socket
+
+ def _send_request(
+ self, method: bytes, url: URL, headers: Headers, timeout: TimeoutDict
+ ) -> None:
+ """
+ Send the request line and headers.
+ """
+ logger.trace("send_request method=%r url=%r headers=%s", method, url, headers)
+ _scheme, _host, _port, target = url
+ with map_exceptions({h11.LocalProtocolError: LocalProtocolError}):
+ event = h11.Request(method=method, target=target, headers=headers)
+ self._send_event(event, timeout)
+
+ def _send_request_body(
+ self, stream: SyncByteStream, timeout: TimeoutDict
+ ) -> None:
+ """
+ Send the request body.
+ """
+ # Send the request body.
+ for chunk in stream:
+ logger.trace("send_data=Data(<%d bytes>)", len(chunk))
+ event = h11.Data(data=chunk)
+ self._send_event(event, timeout)
+
+ # Finalize sending the request.
+ event = h11.EndOfMessage()
+ self._send_event(event, timeout)
+
+ def _send_event(self, event: H11Event, timeout: TimeoutDict) -> None:
+ """
+ Send a single `h11` event to the network, waiting for the data to
+ drain before returning.
+ """
+ bytes_to_send = self._h11_state.send(event)
+ self.socket.write(bytes_to_send, timeout)
+
+ def _receive_response(
+ self, timeout: TimeoutDict
+ ) -> Tuple[bytes, int, bytes, List[Tuple[bytes, bytes]]]:
+ """
+ Read the response status and headers from the network.
+ """
+ while True:
+ event = self._receive_event(timeout)
+ if isinstance(event, h11.Response):
+ break
+
+ http_version = b"HTTP/" + event.http_version
+
+ # h11 version 0.11+ supports a `raw_items` interface to get the
+ # raw header casing, rather than the enforced lowercase headers.
+ headers = event.headers.raw_items()
+
+ return http_version, event.status_code, event.reason, headers
+
+ def _receive_response_data(
+ self, timeout: TimeoutDict
+ ) -> Iterator[bytes]:
+ """
+ Read the response data from the network.
+ """
+ while True:
+ event = self._receive_event(timeout)
+ if isinstance(event, h11.Data):
+ logger.trace("receive_event=Data(<%d bytes>)", len(event.data))
+ yield bytes(event.data)
+ elif isinstance(event, (h11.EndOfMessage, h11.PAUSED)):
+ logger.trace("receive_event=%r", event)
+ break
+
+ def _receive_event(self, timeout: TimeoutDict) -> H11Event:
+ """
+ Read a single `h11` event, reading more data from the network if needed.
+ """
+ while True:
+ with map_exceptions({h11.RemoteProtocolError: RemoteProtocolError}):
+ event = self._h11_state.next_event()
+
+ if event is h11.NEED_DATA:
+ data = self.socket.read(self.READ_NUM_BYTES, timeout)
+
+ # If we feed this case through h11 we'll raise an exception like:
+ #
+ # httpcore.RemoteProtocolError: can't handle event type
+ # ConnectionClosed when role=SERVER and state=SEND_RESPONSE
+ #
+ # Which is accurate, but not very informative from an end-user
+ # perspective. Instead we handle messaging for this case distinctly.
+ if data == b"" and self._h11_state.their_state == h11.SEND_RESPONSE:
+ msg = "Server disconnected without sending a response."
+ raise RemoteProtocolError(msg)
+
+ self._h11_state.receive_data(data)
+ else:
+ assert event is not h11.NEED_DATA
+ break
+ return event
+
+ def _response_closed(self) -> None:
+ logger.trace(
+ "response_closed our_state=%r their_state=%r",
+ self._h11_state.our_state,
+ self._h11_state.their_state,
+ )
+ if (
+ self._h11_state.our_state is h11.DONE
+ and self._h11_state.their_state is h11.DONE
+ ):
+ self._h11_state.start_next_cycle()
+ self._state = ConnectionState.IDLE
+ if self._keepalive_expiry is not None:
+ self._should_expire_at = self._now() + self._keepalive_expiry
+ else:
+ self.close()
+
+ def close(self) -> None:
+ if self._state != ConnectionState.CLOSED:
+ self._state = ConnectionState.CLOSED
+
+ if self._h11_state.our_state is h11.MUST_CLOSE:
+ event = h11.ConnectionClosed()
+ self._h11_state.send(event)
+
+ self.socket.close()
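
The connection above is a thin wrapper over `h11`'s event API. A self-contained sketch of
the same exchange, using canned response bytes instead of a socket (the request and
response values are illustrative only):

    import h11

    # Send side, as in _send_request() / _send_request_body() / _send_event():
    conn = h11.Connection(our_role=h11.CLIENT)
    to_wire = b"".join([
        conn.send(h11.Request(method=b"GET", target=b"/",
                              headers=[(b"host", b"example.org")])),
        conn.send(h11.EndOfMessage()),
    ])

    # Receive side, as in _receive_event() / _receive_response() / _receive_response_data():
    conn.receive_data(b"HTTP/1.1 200 OK\r\ncontent-length: 2\r\n\r\nok")
    while True:
        event = conn.next_event()
        if event is h11.NEED_DATA or isinstance(event, h11.EndOfMessage):
            break
        # h11.Response and h11.Data events arrive here.
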
diff --git a/packages/httpcore/_sync/http2.py b/packages/httpcore/_sync/http2.py
new file mode 100644
index 000000000..90caf5faf
--- /dev/null
+++ b/packages/httpcore/_sync/http2.py
@@ -0,0 +1,446 @@
+import enum
+import time
+from ssl import SSLContext
+from typing import Iterator, Dict, List, Optional, Tuple, cast
+
+import h2.connection
+import h2.events
+from h2.config import H2Configuration
+from h2.exceptions import NoAvailableStreamIDError
+from h2.settings import SettingCodes, Settings
+
+from .._backends.sync import SyncBackend, SyncLock, SyncSemaphore, SyncSocketStream
+from .._bytestreams import IteratorByteStream
+from .._exceptions import LocalProtocolError, PoolTimeout, RemoteProtocolError
+from .._types import URL, Headers, TimeoutDict
+from .._utils import get_logger
+from .base import SyncByteStream, NewConnectionRequired
+from .http import SyncBaseHTTPConnection
+
+logger = get_logger(__name__)
+
+
+class ConnectionState(enum.IntEnum):
+ IDLE = 0
+ ACTIVE = 1
+ CLOSED = 2
+
+
+class SyncHTTP2Connection(SyncBaseHTTPConnection):
+ READ_NUM_BYTES = 64 * 1024
+ CONFIG = H2Configuration(validate_inbound_headers=False)
+
+ def __init__(
+ self,
+ socket: SyncSocketStream,
+ backend: SyncBackend,
+ keepalive_expiry: float = None,
+ ):
+ self.socket = socket
+
+ self._backend = backend
+ self._h2_state = h2.connection.H2Connection(config=self.CONFIG)
+
+ self._sent_connection_init = False
+ self._streams: Dict[int, SyncHTTP2Stream] = {}
+ self._events: Dict[int, List[h2.events.Event]] = {}
+
+ self._keepalive_expiry: Optional[float] = keepalive_expiry
+ self._should_expire_at: Optional[float] = None
+ self._state = ConnectionState.ACTIVE
+ self._exhausted_available_stream_ids = False
+
+ def __repr__(self) -> str:
+ return f""
+
+ def info(self) -> str:
+ return f"HTTP/2, {self._state.name}, {len(self._streams)} streams"
+
+ def _now(self) -> float:
+ return time.monotonic()
+
+ def should_close(self) -> bool:
+ """
+ Return `True` if the connection is currently idle, and the keepalive
+ timeout has passed.
+ """
+ return (
+ self._state == ConnectionState.IDLE
+ and self._should_expire_at is not None
+ and self._now() >= self._should_expire_at
+ )
+
+ def is_idle(self) -> bool:
+ """
+ Return `True` if the connection is currently idle.
+ """
+ return self._state == ConnectionState.IDLE
+
+ def is_closed(self) -> bool:
+ """
+ Return `True` if the connection has been closed.
+ """
+ return self._state == ConnectionState.CLOSED
+
+ def is_available(self) -> bool:
+ """
+ Return `True` if the connection is currently able to accept an outgoing request.
+ This occurs when any of the following occur:
+
+ * The connection has not yet been opened, and HTTP/2 support is enabled.
+ We don't *know* at this point if we'll end up on an HTTP/2 connection or
+ not, but we *might* do, so we indicate availability.
+ * The connection has been opened, and is currently idle.
+ * The connection is open, and is an HTTP/2 connection. The connection must
+ also not have exhausted the maximum total number of stream IDs.
+ """
+ return (
+ self._state != ConnectionState.CLOSED
+ and not self._exhausted_available_stream_ids
+ )
+
+ @property
+ def init_lock(self) -> SyncLock:
+ # We do this lazily, to make sure backend autodetection always
+ # runs within an async context.
+ if not hasattr(self, "_initialization_lock"):
+ self._initialization_lock = self._backend.create_lock()
+ return self._initialization_lock
+
+ @property
+ def read_lock(self) -> SyncLock:
+ # We do this lazily, to make sure backend autodetection always
+ # runs within an async context.
+ if not hasattr(self, "_read_lock"):
+ self._read_lock = self._backend.create_lock()
+ return self._read_lock
+
+ @property
+ def max_streams_semaphore(self) -> SyncSemaphore:
+ # We do this lazily, to make sure backend autodetection always
+ # runs within an async context.
+ if not hasattr(self, "_max_streams_semaphore"):
+ max_streams = self._h2_state.local_settings.max_concurrent_streams
+ self._max_streams_semaphore = self._backend.create_semaphore(
+ max_streams, exc_class=PoolTimeout
+ )
+ return self._max_streams_semaphore
+
+ def start_tls(
+ self, hostname: bytes, ssl_context: SSLContext, timeout: TimeoutDict = None
+ ) -> SyncSocketStream:
+ raise NotImplementedError("TLS upgrade not supported on HTTP/2 connections.")
+
+ def handle_request(
+ self,
+ method: bytes,
+ url: URL,
+ headers: Headers,
+ stream: SyncByteStream,
+ extensions: dict,
+ ) -> Tuple[int, Headers, SyncByteStream, dict]:
+ timeout = cast(TimeoutDict, extensions.get("timeout", {}))
+
+ with self.init_lock:
+ if not self._sent_connection_init:
+ # The very first stream is responsible for initiating the connection.
+ self._state = ConnectionState.ACTIVE
+ self.send_connection_init(timeout)
+ self._sent_connection_init = True
+
+ self.max_streams_semaphore.acquire()
+ try:
+ try:
+ stream_id = self._h2_state.get_next_available_stream_id()
+ except NoAvailableStreamIDError:
+ self._exhausted_available_stream_ids = True
+ raise NewConnectionRequired()
+ else:
+ self._state = ConnectionState.ACTIVE
+ self._should_expire_at = None
+
+ h2_stream = SyncHTTP2Stream(stream_id=stream_id, connection=self)
+ self._streams[stream_id] = h2_stream
+ self._events[stream_id] = []
+ return h2_stream.handle_request(
+ method, url, headers, stream, extensions
+ )
+ except Exception: # noqa: PIE786
+ self.max_streams_semaphore.release()
+ raise
+
+ def send_connection_init(self, timeout: TimeoutDict) -> None:
+ """
+ The HTTP/2 connection requires some initial setup before we can start
+ using individual request/response streams on it.
+ """
+ # Need to set these manually here instead of manipulating via
+ # __setitem__() otherwise the H2Connection will emit SettingsUpdate
+ # frames in addition to sending the undesired defaults.
+ self._h2_state.local_settings = Settings(
+ client=True,
+ initial_values={
+ # Disable PUSH_PROMISE frames from the server since we don't do anything
+ # with them for now. Maybe when we support caching?
+ SettingCodes.ENABLE_PUSH: 0,
+ # These two are taken from h2 for safe defaults
+ SettingCodes.MAX_CONCURRENT_STREAMS: 100,
+ SettingCodes.MAX_HEADER_LIST_SIZE: 65536,
+ },
+ )
+
+ # Some websites (*cough* Yahoo *cough*) balk at this setting being
+ # present in the initial handshake since it's not defined in the original
+ # RFC despite the RFC mandating ignoring settings you don't know about.
+ del self._h2_state.local_settings[
+ h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL
+ ]
+
+ logger.trace("initiate_connection=%r", self)
+ self._h2_state.initiate_connection()
+ self._h2_state.increment_flow_control_window(2 ** 24)
+ data_to_send = self._h2_state.data_to_send()
+ self.socket.write(data_to_send, timeout)
+
+ def is_socket_readable(self) -> bool:
+ return self.socket.is_readable()
+
+ def close(self) -> None:
+ logger.trace("close_connection=%r", self)
+ if self._state != ConnectionState.CLOSED:
+ self._state = ConnectionState.CLOSED
+
+ self.socket.close()
+
+ def wait_for_outgoing_flow(self, stream_id: int, timeout: TimeoutDict) -> int:
+ """
+ Returns the maximum allowable outgoing flow for a given stream.
+ If the allowable flow is zero, then waits on the network until
+ WindowUpdated frames have increased the flow rate.
+ https://tools.ietf.org/html/rfc7540#section-6.9
+ """
+ local_flow = self._h2_state.local_flow_control_window(stream_id)
+ connection_flow = self._h2_state.max_outbound_frame_size
+ flow = min(local_flow, connection_flow)
+ while flow == 0:
+ self.receive_events(timeout)
+ local_flow = self._h2_state.local_flow_control_window(stream_id)
+ connection_flow = self._h2_state.max_outbound_frame_size
+ flow = min(local_flow, connection_flow)
+ return flow
+
+ def wait_for_event(
+ self, stream_id: int, timeout: TimeoutDict
+ ) -> h2.events.Event:
+ """
+ Returns the next event for a given stream.
+ If no events are available yet, then waits on the network until
+ an event is available.
+ """
+ with self.read_lock:
+ while not self._events[stream_id]:
+ self.receive_events(timeout)
+ return self._events[stream_id].pop(0)
+
+ def receive_events(self, timeout: TimeoutDict) -> None:
+ """
+ Read some data from the network, and update the H2 state.
+ """
+ data = self.socket.read(self.READ_NUM_BYTES, timeout)
+ if data == b"":
+ raise RemoteProtocolError("Server disconnected")
+
+ events = self._h2_state.receive_data(data)
+ for event in events:
+ event_stream_id = getattr(event, "stream_id", 0)
+ logger.trace("receive_event stream_id=%r event=%s", event_stream_id, event)
+
+ if hasattr(event, "error_code"):
+ raise RemoteProtocolError(event)
+
+ if event_stream_id in self._events:
+ self._events[event_stream_id].append(event)
+
+ data_to_send = self._h2_state.data_to_send()
+ self.socket.write(data_to_send, timeout)
+
+ def send_headers(
+ self, stream_id: int, headers: Headers, end_stream: bool, timeout: TimeoutDict
+ ) -> None:
+ logger.trace("send_headers stream_id=%r headers=%r", stream_id, headers)
+ self._h2_state.send_headers(stream_id, headers, end_stream=end_stream)
+ self._h2_state.increment_flow_control_window(2 ** 24, stream_id=stream_id)
+ data_to_send = self._h2_state.data_to_send()
+ self.socket.write(data_to_send, timeout)
+
+ def send_data(
+ self, stream_id: int, chunk: bytes, timeout: TimeoutDict
+ ) -> None:
+ logger.trace("send_data stream_id=%r chunk=%r", stream_id, chunk)
+ self._h2_state.send_data(stream_id, chunk)
+ data_to_send = self._h2_state.data_to_send()
+ self.socket.write(data_to_send, timeout)
+
+ def end_stream(self, stream_id: int, timeout: TimeoutDict) -> None:
+ logger.trace("end_stream stream_id=%r", stream_id)
+ self._h2_state.end_stream(stream_id)
+ data_to_send = self._h2_state.data_to_send()
+ self.socket.write(data_to_send, timeout)
+
+ def acknowledge_received_data(
+ self, stream_id: int, amount: int, timeout: TimeoutDict
+ ) -> None:
+ self._h2_state.acknowledge_received_data(amount, stream_id)
+ data_to_send = self._h2_state.data_to_send()
+ self.socket.write(data_to_send, timeout)
+
+ def close_stream(self, stream_id: int) -> None:
+ try:
+ logger.trace("close_stream stream_id=%r", stream_id)
+ del self._streams[stream_id]
+ del self._events[stream_id]
+
+ if not self._streams:
+ if self._state == ConnectionState.ACTIVE:
+ if self._exhausted_available_stream_ids:
+ self.close()
+ else:
+ self._state = ConnectionState.IDLE
+ if self._keepalive_expiry is not None:
+ self._should_expire_at = (
+ self._now() + self._keepalive_expiry
+ )
+ finally:
+ self.max_streams_semaphore.release()
+
+
+class SyncHTTP2Stream:
+ def __init__(self, stream_id: int, connection: SyncHTTP2Connection) -> None:
+ self.stream_id = stream_id
+ self.connection = connection
+
+ def handle_request(
+ self,
+ method: bytes,
+ url: URL,
+ headers: Headers,
+ stream: SyncByteStream,
+ extensions: dict,
+ ) -> Tuple[int, Headers, SyncByteStream, dict]:
+ headers = [(k.lower(), v) for (k, v) in headers]
+ timeout = cast(TimeoutDict, extensions.get("timeout", {}))
+
+ # Send the request.
+ seen_headers = set(key for key, value in headers)
+ has_body = (
+ b"content-length" in seen_headers or b"transfer-encoding" in seen_headers
+ )
+
+ self.send_headers(method, url, headers, has_body, timeout)
+ if has_body:
+ self.send_body(stream, timeout)
+
+ # Receive the response.
+ status_code, headers = self.receive_response(timeout)
+ response_stream = IteratorByteStream(
+ iterator=self.body_iter(timeout), close_func=self._response_closed
+ )
+
+ extensions = {
+ "http_version": b"HTTP/2",
+ }
+ return (status_code, headers, response_stream, extensions)
+
+ def send_headers(
+ self,
+ method: bytes,
+ url: URL,
+ headers: Headers,
+ has_body: bool,
+ timeout: TimeoutDict,
+ ) -> None:
+ scheme, hostname, port, path = url
+
+ # In HTTP/2 the ':authority' pseudo-header is used instead of 'Host'.
+ # In order to gracefully handle HTTP/1.1 and HTTP/2 we always require
+ # HTTP/1.1 style headers, and map them appropriately if we end up on
+ # an HTTP/2 connection.
+ authority = None
+
+ for k, v in headers:
+ if k == b"host":
+ authority = v
+ break
+
+ if authority is None:
+ # Mirror the same error we'd see with `h11`, so that the behaviour
+ # is consistent. Although we're dealing with an `:authority`
+ # pseudo-header by this point, from an end-user perspective the issue
+ # is that the outgoing request needed to include a `host` header.
+ raise LocalProtocolError("Missing mandatory Host: header")
+
+ headers = [
+ (b":method", method),
+ (b":authority", authority),
+ (b":scheme", scheme),
+ (b":path", path),
+ ] + [
+ (k, v)
+ for k, v in headers
+ if k
+ not in (
+ b"host",
+ b"transfer-encoding",
+ )
+ ]
+ end_stream = not has_body
+
+ self.connection.send_headers(self.stream_id, headers, end_stream, timeout)
+
+ def send_body(self, stream: SyncByteStream, timeout: TimeoutDict) -> None:
+ for data in stream:
+ while data:
+ max_flow = self.connection.wait_for_outgoing_flow(
+ self.stream_id, timeout
+ )
+ chunk_size = min(len(data), max_flow)
+ chunk, data = data[:chunk_size], data[chunk_size:]
+ self.connection.send_data(self.stream_id, chunk, timeout)
+
+ self.connection.end_stream(self.stream_id, timeout)
+
+ def receive_response(
+ self, timeout: TimeoutDict
+ ) -> Tuple[int, List[Tuple[bytes, bytes]]]:
+ """
+ Read the response status and headers from the network.
+ """
+ while True:
+ event = self.connection.wait_for_event(self.stream_id, timeout)
+ if isinstance(event, h2.events.ResponseReceived):
+ break
+
+ status_code = 200
+ headers = []
+ for k, v in event.headers:
+ if k == b":status":
+ status_code = int(v.decode("ascii", errors="ignore"))
+ elif not k.startswith(b":"):
+ headers.append((k, v))
+
+ return (status_code, headers)
+
+ def body_iter(self, timeout: TimeoutDict) -> Iterator[bytes]:
+ while True:
+ event = self.connection.wait_for_event(self.stream_id, timeout)
+ if isinstance(event, h2.events.DataReceived):
+ amount = event.flow_controlled_length
+ self.connection.acknowledge_received_data(
+ self.stream_id, amount, timeout
+ )
+ yield event.data
+ elif isinstance(event, (h2.events.StreamEnded, h2.events.StreamReset)):
+ break
+
+ def _response_closed(self) -> None:
+ self.connection.close_stream(self.stream_id)
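
A worked example of the header mapping performed by `SyncHTTP2Stream.send_headers` above:
the HTTP/1.1-style `host` header becomes the `:authority` pseudo-header, and `host` /
`transfer-encoding` are dropped from the remaining list (values chosen for illustration):

    method, scheme, authority, path = b"GET", b"https", b"example.org", b"/"
    headers = [(b"host", b"example.org"), (b"accept", b"*/*")]
    h2_headers = [
        (b":method", method),
        (b":authority", authority),
        (b":scheme", scheme),
        (b":path", path),
    ] + [(k, v) for k, v in headers if k not in (b"host", b"transfer-encoding")]
    # -> [(b":method", b"GET"), (b":authority", b"example.org"), (b":scheme", b"https"),
    #     (b":path", b"/"), (b"accept", b"*/*")]
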
diff --git a/packages/httpcore/_sync/http_proxy.py b/packages/httpcore/_sync/http_proxy.py
new file mode 100644
index 000000000..78c02e29b
--- /dev/null
+++ b/packages/httpcore/_sync/http_proxy.py
@@ -0,0 +1,290 @@
+from http import HTTPStatus
+from ssl import SSLContext
+from typing import Tuple, cast
+
+from .._bytestreams import ByteStream
+from .._exceptions import ProxyError
+from .._types import URL, Headers, TimeoutDict
+from .._utils import get_logger, url_to_origin
+from .base import SyncByteStream
+from .connection import SyncHTTPConnection
+from .connection_pool import SyncConnectionPool, ResponseByteStream
+
+logger = get_logger(__name__)
+
+
+def get_reason_phrase(status_code: int) -> str:
+ try:
+ return HTTPStatus(status_code).phrase
+ except ValueError:
+ return ""
+
+
+def merge_headers(
+ default_headers: Headers = None, override_headers: Headers = None
+) -> Headers:
+ """
+ Append default_headers and override_headers, de-duplicating any key that
+ exists in both.
+ """
+ default_headers = [] if default_headers is None else default_headers
+ override_headers = [] if override_headers is None else override_headers
+ has_override = set([key.lower() for key, value in override_headers])
+ default_headers = [
+ (key, value)
+ for key, value in default_headers
+ if key.lower() not in has_override
+ ]
+ return default_headers + override_headers
+
+
+class SyncHTTPProxy(SyncConnectionPool):
+ """
+ A connection pool for making HTTP requests via an HTTP proxy.
+
+ Parameters
+ ----------
+ proxy_url:
+ The URL of the proxy service as a 4-tuple of (scheme, host, port, path).
+ proxy_headers:
+ A list of proxy headers to include.
+ proxy_mode:
+ A proxy mode to operate in. May be "DEFAULT", "FORWARD_ONLY", or "TUNNEL_ONLY".
+ ssl_context:
+ An SSL context to use for verifying connections.
+ max_connections:
+ The maximum number of concurrent connections to allow.
+ max_keepalive_connections:
+ The maximum number of connections to allow before closing keep-alive
+ connections.
+ http2:
+ Enable HTTP/2 support.
+ """
+
+ def __init__(
+ self,
+ proxy_url: URL,
+ proxy_headers: Headers = None,
+ proxy_mode: str = "DEFAULT",
+ ssl_context: SSLContext = None,
+ max_connections: int = None,
+ max_keepalive_connections: int = None,
+ keepalive_expiry: float = None,
+ http2: bool = False,
+ backend: str = "sync",
+ # Deprecated argument style:
+ max_keepalive: int = None,
+ ):
+ assert proxy_mode in ("DEFAULT", "FORWARD_ONLY", "TUNNEL_ONLY")
+
+ self.proxy_origin = url_to_origin(proxy_url)
+ self.proxy_headers = [] if proxy_headers is None else proxy_headers
+ self.proxy_mode = proxy_mode
+ super().__init__(
+ ssl_context=ssl_context,
+ max_connections=max_connections,
+ max_keepalive_connections=max_keepalive_connections,
+ keepalive_expiry=keepalive_expiry,
+ http2=http2,
+ backend=backend,
+ max_keepalive=max_keepalive,
+ )
+
+ def handle_request(
+ self,
+ method: bytes,
+ url: URL,
+ headers: Headers,
+ stream: SyncByteStream,
+ extensions: dict,
+ ) -> Tuple[int, Headers, SyncByteStream, dict]:
+ if self._keepalive_expiry is not None:
+ self._keepalive_sweep()
+
+ if (
+ self.proxy_mode == "DEFAULT" and url[0] == b"http"
+ ) or self.proxy_mode == "FORWARD_ONLY":
+ # By default HTTP requests should be forwarded.
+ logger.trace(
+ "forward_request proxy_origin=%r proxy_headers=%r method=%r url=%r",
+ self.proxy_origin,
+ self.proxy_headers,
+ method,
+ url,
+ )
+ return self._forward_request(
+ method, url, headers=headers, stream=stream, extensions=extensions
+ )
+ else:
+ # By default HTTPS should be tunnelled.
+ logger.trace(
+ "tunnel_request proxy_origin=%r proxy_headers=%r method=%r url=%r",
+ self.proxy_origin,
+ self.proxy_headers,
+ method,
+ url,
+ )
+ return self._tunnel_request(
+ method, url, headers=headers, stream=stream, extensions=extensions
+ )
+
+ def _forward_request(
+ self,
+ method: bytes,
+ url: URL,
+ headers: Headers,
+ stream: SyncByteStream,
+ extensions: dict,
+ ) -> Tuple[int, Headers, SyncByteStream, dict]:
+ """
+ Forwarded proxy requests include the entire URL as the HTTP target,
+ rather than just the path.
+ """
+ timeout = cast(TimeoutDict, extensions.get("timeout", {}))
+ origin = self.proxy_origin
+ connection = self._get_connection_from_pool(origin)
+
+ if connection is None:
+ connection = SyncHTTPConnection(
+ origin=origin,
+ http2=self._http2,
+ keepalive_expiry=self._keepalive_expiry,
+ ssl_context=self._ssl_context,
+ )
+ self._add_to_pool(connection, timeout)
+
+ # Issue a forwarded proxy request...
+
+ # GET https://www.example.org/path HTTP/1.1
+ # [proxy headers]
+ # [headers]
+ scheme, host, port, path = url
+ if port is None:
+ target = b"%b://%b%b" % (scheme, host, path)
+ else:
+ target = b"%b://%b:%d%b" % (scheme, host, port, path)
+
+ url = self.proxy_origin + (target,)
+ headers = merge_headers(self.proxy_headers, headers)
+
+ (
+ status_code,
+ headers,
+ stream,
+ extensions,
+ ) = connection.handle_request(
+ method, url, headers=headers, stream=stream, extensions=extensions
+ )
+
+ wrapped_stream = ResponseByteStream(
+ stream, connection=connection, callback=self._response_closed
+ )
+
+ return status_code, headers, wrapped_stream, extensions
+
+ def _tunnel_request(
+ self,
+ method: bytes,
+ url: URL,
+ headers: Headers,
+ stream: SyncByteStream,
+ extensions: dict,
+ ) -> Tuple[int, Headers, SyncByteStream, dict]:
+ """
+ Tunnelled proxy requests require an initial CONNECT request to
+ establish the connection, and then send regular requests.
+ """
+ timeout = cast(TimeoutDict, extensions.get("timeout", {}))
+ origin = url_to_origin(url)
+ connection = self._get_connection_from_pool(origin)
+
+ if connection is None:
+ scheme, host, port = origin
+
+ # First, create a connection to the proxy server
+ proxy_connection = SyncHTTPConnection(
+ origin=self.proxy_origin,
+ http2=self._http2,
+ keepalive_expiry=self._keepalive_expiry,
+ ssl_context=self._ssl_context,
+ )
+
+ # Issue a CONNECT request...
+
+ # CONNECT www.example.org:80 HTTP/1.1
+ # [proxy-headers]
+ target = b"%b:%d" % (host, port)
+ connect_url = self.proxy_origin + (target,)
+ connect_headers = [(b"Host", target), (b"Accept", b"*/*")]
+ connect_headers = merge_headers(connect_headers, self.proxy_headers)
+
+ try:
+ (
+ proxy_status_code,
+ _,
+ proxy_stream,
+ _,
+ ) = proxy_connection.handle_request(
+ b"CONNECT",
+ connect_url,
+ headers=connect_headers,
+ stream=ByteStream(b""),
+ extensions=extensions,
+ )
+
+ proxy_reason = get_reason_phrase(proxy_status_code)
+ logger.trace(
+ "tunnel_response proxy_status_code=%r proxy_reason=%r ",
+ proxy_status_code,
+ proxy_reason,
+ )
+ # Read the response data without closing the socket
+ for _ in proxy_stream:
+ pass
+
+ # See if the tunnel was successfully established.
+ if proxy_status_code < 200 or proxy_status_code > 299:
+ msg = "%d %s" % (proxy_status_code, proxy_reason)
+ raise ProxyError(msg)
+
+ # Upgrade to TLS if required
+ # We assume the target speaks TLS on the specified port
+ if scheme == b"https":
+ proxy_connection.start_tls(host, self._ssl_context, timeout)
+ except Exception as exc:
+ proxy_connection.close()
+ raise ProxyError(exc)
+
+ # The CONNECT request is successful, so we have now SWITCHED PROTOCOLS.
+ # This means the proxy connection is now unusable, and we must create
+ # a new one for regular requests, making sure to use the same socket to
+ # retain the tunnel.
+ connection = SyncHTTPConnection(
+ origin=origin,
+ http2=self._http2,
+ keepalive_expiry=self._keepalive_expiry,
+ ssl_context=self._ssl_context,
+ socket=proxy_connection.socket,
+ )
+ self._add_to_pool(connection, timeout)
+
+ # Once the connection has been established we can send requests on
+ # it as normal.
+ (
+ status_code,
+ headers,
+ stream,
+ extensions,
+ ) = connection.handle_request(
+ method,
+ url,
+ headers=headers,
+ stream=stream,
+ extensions=extensions,
+ )
+
+ wrapped_stream = ResponseByteStream(
+ stream, connection=connection, callback=self._response_closed
+ )
+
+ return status_code, headers, wrapped_stream, extensions
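
A minimal sketch of the two behaviours described above, assuming the upstream
`httpcore.SyncHTTPProxy` export and an example proxy at localhost:8080: plain `http://`
requests are forwarded through the proxy, while `https://` requests open a CONNECT tunnel
first and then upgrade to TLS:

    from httpcore import SyncHTTPProxy
    from httpcore._bytestreams import ByteStream

    proxy = SyncHTTPProxy(proxy_url=(b"http", b"localhost", 8080, b"/"))

    # Forwarded: the absolute-form target is sent to the proxy itself.
    proxy.handle_request(b"GET", (b"http", b"example.org", 80, b"/"),
                         [(b"host", b"example.org")], ByteStream(b""), {})

    # Tunnelled: CONNECT example.org:443, then TLS over the established tunnel.
    proxy.handle_request(b"GET", (b"https", b"example.org", 443, b"/"),
                         [(b"host", b"example.org")], ByteStream(b""), {})

    proxy.close()
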
diff --git a/packages/httpcore/_threadlock.py b/packages/httpcore/_threadlock.py
new file mode 100644
index 000000000..2ff2bc378
--- /dev/null
+++ b/packages/httpcore/_threadlock.py
@@ -0,0 +1,35 @@
+import threading
+from types import TracebackType
+from typing import Type
+
+
+class ThreadLock:
+ """
+ Provides thread safety when used as a sync context manager, or a
+ no-op when used as an async context manager.
+ """
+
+ def __init__(self) -> None:
+ self.lock = threading.Lock()
+
+ def __enter__(self) -> None:
+ self.lock.acquire()
+
+ def __exit__(
+ self,
+ exc_type: Type[BaseException] = None,
+ exc_value: BaseException = None,
+ traceback: TracebackType = None,
+ ) -> None:
+ self.lock.release()
+
+ async def __aenter__(self) -> None:
+ pass
+
+ async def __aexit__(
+ self,
+ exc_type: Type[BaseException] = None,
+ exc_value: BaseException = None,
+ traceback: TracebackType = None,
+ ) -> None:
+ pass
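
A short sketch of the dual behaviour: the same object guards a critical section in sync
code and is a deliberate no-op under `async with`:

    import asyncio
    from httpcore._threadlock import ThreadLock

    lock = ThreadLock()

    with lock:        # acquires/releases the underlying threading.Lock
        pass

    async def main() -> None:
        async with lock:   # no-op in async code
            pass

    asyncio.run(main())
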
diff --git a/packages/httpcore/_types.py b/packages/httpcore/_types.py
new file mode 100644
index 000000000..2f9eeba7f
--- /dev/null
+++ b/packages/httpcore/_types.py
@@ -0,0 +1,12 @@
+"""
+Type definitions for type checking purposes.
+"""
+
+from typing import List, Mapping, Optional, Tuple, TypeVar, Union
+
+T = TypeVar("T")
+StrOrBytes = Union[str, bytes]
+Origin = Tuple[bytes, bytes, int]
+URL = Tuple[bytes, bytes, Optional[int], bytes]
+Headers = List[Tuple[bytes, bytes]]
+TimeoutDict = Mapping[str, Optional[float]]
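
Concrete values for these aliases, as used by the sync code above (the timeout keys shown
are the ones conventionally passed via the "timeout" extension and are illustrative):

    from httpcore._types import URL, Headers, Origin, TimeoutDict

    origin: Origin = (b"https", b"example.org", 443)
    url: URL = (b"https", b"example.org", None, b"/path?q=1")  # port None -> default port
    headers: Headers = [(b"host", b"example.org"), (b"accept", b"*/*")]
    timeout: TimeoutDict = {"connect": 5.0, "read": 5.0, "write": 5.0, "pool": 5.0}
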
diff --git a/packages/httpcore/_utils.py b/packages/httpcore/_utils.py
new file mode 100644
index 000000000..978b87a27
--- /dev/null
+++ b/packages/httpcore/_utils.py
@@ -0,0 +1,105 @@
+import itertools
+import logging
+import os
+import select
+import socket
+import sys
+import typing
+
+from ._types import URL, Origin
+
+_LOGGER_INITIALIZED = False
+TRACE_LOG_LEVEL = 5
+DEFAULT_PORTS = {b"http": 80, b"https": 443}
+
+
+class Logger(logging.Logger):
+ # Stub for type checkers.
+ def trace(self, message: str, *args: typing.Any, **kwargs: typing.Any) -> None:
+ ... # pragma: nocover
+
+
+def get_logger(name: str) -> Logger:
+ """
+ Get a `logging.Logger` instance, and optionally
+ set up debug logging based on the HTTPCORE_LOG_LEVEL or HTTPX_LOG_LEVEL
+ environment variables.
+ """
+ global _LOGGER_INITIALIZED
+ if not _LOGGER_INITIALIZED:
+ _LOGGER_INITIALIZED = True
+ logging.addLevelName(TRACE_LOG_LEVEL, "TRACE")
+
+ log_level = os.environ.get(
+ "HTTPCORE_LOG_LEVEL", os.environ.get("HTTPX_LOG_LEVEL", "")
+ ).upper()
+ if log_level in ("DEBUG", "TRACE"):
+ logger = logging.getLogger("httpcore")
+ logger.setLevel(logging.DEBUG if log_level == "DEBUG" else TRACE_LOG_LEVEL)
+ handler = logging.StreamHandler(sys.stderr)
+ handler.setFormatter(
+ logging.Formatter(
+ fmt="%(levelname)s [%(asctime)s] %(name)s - %(message)s",
+ datefmt="%Y-%m-%d %H:%M:%S",
+ )
+ )
+ logger.addHandler(handler)
+
+ logger = logging.getLogger(name)
+
+ def trace(message: str, *args: typing.Any, **kwargs: typing.Any) -> None:
+ logger.log(TRACE_LOG_LEVEL, message, *args, **kwargs)
+
+ logger.trace = trace # type: ignore
+
+ return typing.cast(Logger, logger)
+
+
+def url_to_origin(url: URL) -> Origin:
+ scheme, host, explicit_port = url[:3]
+ default_port = DEFAULT_PORTS[scheme]
+ port = default_port if explicit_port is None else explicit_port
+ return scheme, host, port
+
+
+def origin_to_url_string(origin: Origin) -> str:
+ scheme, host, explicit_port = origin
+ port = f":{explicit_port}" if explicit_port != DEFAULT_PORTS[scheme] else ""
+ return f"{scheme.decode('ascii')}://{host.decode('ascii')}{port}"
+
+
+def exponential_backoff(factor: float) -> typing.Iterator[float]:
+ yield 0
+ for n in itertools.count(2):
+ yield factor * (2 ** (n - 2))
+
+
+def is_socket_readable(sock: typing.Optional[socket.socket]) -> bool:
+ """
+ Return whether a socket, as identified by its file descriptor, is readable.
+
+ "A socket is readable" means that the read buffer isn't empty, i.e. that calling
+ .recv() on it would immediately return some data.
+ """
+ # NOTE: we want to check for readability without actually attempting to read, because
+ # we don't want to block forever if it's not readable.
+
+ # In the case that the socket no longer exists, or cannot return a file
+ # descriptor, we treat it as being readable, as if the next read operation
+ # on it were ready to return the terminating `b""`.
+ sock_fd = None if sock is None else sock.fileno()
+ if sock_fd is None or sock_fd < 0:
+ return True
+
+ # The implementation below was stolen from:
+ # https://github.com/python-trio/trio/blob/20ee2b1b7376db637435d80e266212a35837ddcc/trio/_socket.py#L471-L478
+ # See also: https://github.com/encode/httpcore/pull/193#issuecomment-703129316
+
+ # Use select.select on Windows or when poll is unavailable, and select.poll
+ # everywhere else. (E.g. when eventlet is in use. See #327)
+ if sys.platform == "win32" or getattr(select, "poll", None) is None:
+ rready, _, _ = select.select([sock_fd], [], [], 0)
+ return bool(rready)
+ p = select.poll()
+ p.register(sock_fd, select.POLLIN)
+ return bool(p.poll(0))
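
The retry delays used by `SyncHTTPConnection._open_socket` come from `exponential_backoff`;
with the 0.5 factor this produces the "0s, 0.5s, 1s, 2s, 4s" sequence noted next to
`RETRIES_BACKOFF_FACTOR`:

    import itertools
    from httpcore._utils import exponential_backoff

    delays = exponential_backoff(factor=0.5)
    print(list(itertools.islice(delays, 6)))  # [0, 0.5, 1.0, 2.0, 4.0, 8.0]
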
diff --git a/packages/httpcore/py.typed b/packages/httpcore/py.typed
new file mode 100644
index 000000000..e69de29bb
diff --git a/packages/httpx/__init__.py b/packages/httpx/__init__.py
new file mode 100644
index 000000000..4af3904fd
--- /dev/null
+++ b/packages/httpx/__init__.py
@@ -0,0 +1,124 @@
+from .__version__ import __description__, __title__, __version__
+from ._api import delete, get, head, options, patch, post, put, request, stream
+from ._auth import Auth, BasicAuth, DigestAuth
+from ._client import USE_CLIENT_DEFAULT, AsyncClient, Client
+from ._config import Limits, Proxy, Timeout, create_ssl_context
+from ._content import ByteStream
+from ._exceptions import (
+ CloseError,
+ ConnectError,
+ ConnectTimeout,
+ CookieConflict,
+ DecodingError,
+ HTTPError,
+ HTTPStatusError,
+ InvalidURL,
+ LocalProtocolError,
+ NetworkError,
+ PoolTimeout,
+ ProtocolError,
+ ProxyError,
+ ReadError,
+ ReadTimeout,
+ RemoteProtocolError,
+ RequestError,
+ RequestNotRead,
+ ResponseNotRead,
+ StreamClosed,
+ StreamConsumed,
+ StreamError,
+ TimeoutException,
+ TooManyRedirects,
+ TransportError,
+ UnsupportedProtocol,
+ WriteError,
+ WriteTimeout,
+)
+from ._models import URL, Cookies, Headers, QueryParams, Request, Response
+from ._status_codes import codes
+from ._transports.asgi import ASGITransport
+from ._transports.base import (
+ AsyncBaseTransport,
+ AsyncByteStream,
+ BaseTransport,
+ SyncByteStream,
+)
+from ._transports.default import AsyncHTTPTransport, HTTPTransport
+from ._transports.mock import MockTransport
+from ._transports.wsgi import WSGITransport
+
+__all__ = [
+ "__description__",
+ "__title__",
+ "__version__",
+ "ASGITransport",
+ "AsyncBaseTransport",
+ "AsyncByteStream",
+ "AsyncClient",
+ "AsyncHTTPTransport",
+ "Auth",
+ "BaseTransport",
+ "BasicAuth",
+ "ByteStream",
+ "Client",
+ "CloseError",
+ "codes",
+ "ConnectError",
+ "ConnectTimeout",
+ "CookieConflict",
+ "Cookies",
+ "create_ssl_context",
+ "DecodingError",
+ "delete",
+ "DigestAuth",
+ "get",
+ "head",
+ "Headers",
+ "HTTPError",
+ "HTTPStatusError",
+ "HTTPTransport",
+ "InvalidURL",
+ "Limits",
+ "LocalProtocolError",
+ "MockTransport",
+ "NetworkError",
+ "options",
+ "patch",
+ "PoolTimeout",
+ "post",
+ "ProtocolError",
+ "Proxy",
+ "ProxyError",
+ "put",
+ "QueryParams",
+ "ReadError",
+ "ReadTimeout",
+ "RemoteProtocolError",
+ "request",
+ "Request",
+ "RequestError",
+ "RequestNotRead",
+ "Response",
+ "ResponseNotRead",
+ "stream",
+ "StreamClosed",
+ "StreamConsumed",
+ "StreamError",
+ "SyncByteStream",
+ "Timeout",
+ "TimeoutException",
+ "TooManyRedirects",
+ "TransportError",
+ "UnsupportedProtocol",
+ "URL",
+ "USE_CLIENT_DEFAULT",
+ "WriteError",
+ "WriteTimeout",
+ "WSGITransport",
+]
+
+
+__locals = locals()
+for __name in __all__:
+ if not __name.startswith("__"):
+ setattr(__locals[__name], "__module__", "httpx") # noqa
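
The loop above rewrites the reported module of every public name so that reprs and
tracebacks point at `httpx` rather than its private submodules, for example:

    import httpx

    assert httpx.Client.__module__ == "httpx"
    assert httpx.ConnectTimeout.__module__ == "httpx"
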
diff --git a/packages/httpx/__version__.py b/packages/httpx/__version__.py
new file mode 100644
index 000000000..cc8296544
--- /dev/null
+++ b/packages/httpx/__version__.py
@@ -0,0 +1,3 @@
+__title__ = "httpx"
+__description__ = "A next generation HTTP client, for Python 3."
+__version__ = "0.18.2"
diff --git a/packages/httpx/_api.py b/packages/httpx/_api.py
new file mode 100644
index 000000000..da8185388
--- /dev/null
+++ b/packages/httpx/_api.py
@@ -0,0 +1,445 @@
+import typing
+from contextlib import contextmanager
+
+from ._client import Client
+from ._config import DEFAULT_TIMEOUT_CONFIG
+from ._models import Response
+from ._types import (
+ AuthTypes,
+ CertTypes,
+ CookieTypes,
+ HeaderTypes,
+ ProxiesTypes,
+ QueryParamTypes,
+ RequestContent,
+ RequestData,
+ RequestFiles,
+ TimeoutTypes,
+ URLTypes,
+ VerifyTypes,
+)
+
+
+def request(
+ method: str,
+ url: URLTypes,
+ *,
+ params: QueryParamTypes = None,
+ content: RequestContent = None,
+ data: RequestData = None,
+ files: RequestFiles = None,
+ json: typing.Any = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: AuthTypes = None,
+ proxies: ProxiesTypes = None,
+ timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,
+ allow_redirects: bool = True,
+ verify: VerifyTypes = True,
+ cert: CertTypes = None,
+ trust_env: bool = True,
+) -> Response:
+ """
+ Sends an HTTP request.
+
+ **Parameters:**
+
+ * **method** - HTTP method for the new `Request` object: `GET`, `OPTIONS`,
+ `HEAD`, `POST`, `PUT`, `PATCH`, or `DELETE`.
+ * **url** - URL for the new `Request` object.
+ * **params** - *(optional)* Query parameters to include in the URL, as a
+ string, dictionary, or sequence of two-tuples.
+ * **content** - *(optional)* Binary content to include in the body of the
+ request, as bytes or a byte iterator.
+ * **data** - *(optional)* Form data to include in the body of the request,
+ as a dictionary.
+ * **files** - *(optional)* A dictionary of upload files to include in the
+ body of the request.
+ * **json** - *(optional)* A JSON serializable object to include in the body
+ of the request.
+ * **headers** - *(optional)* Dictionary of HTTP headers to include in the
+ request.
+ * **cookies** - *(optional)* Dictionary of Cookie items to include in the
+ request.
+ * **auth** - *(optional)* An authentication class to use when sending the
+ request.
+ * **proxies** - *(optional)* A dictionary mapping proxy keys to proxy URLs.
+ * **timeout** - *(optional)* The timeout configuration to use when sending
+ the request.
+ * **allow_redirects** - *(optional)* Enables or disables HTTP redirects.
+ * **verify** - *(optional)* SSL certificates (a.k.a CA bundle) used to
+ verify the identity of requested hosts. Either `True` (default CA bundle),
+ a path to an SSL certificate file, an `ssl.SSLContext`, or `False`
+ (which will disable verification).
+ * **cert** - *(optional)* An SSL certificate used by the requested host
+ to authenticate the client. Either a path to an SSL certificate file, or
+ two-tuple of (certificate file, key file), or a three-tuple of (certificate
+ file, key file, password).
+ * **trust_env** - *(optional)* Enables or disables usage of environment
+ variables for configuration.
+
+ **Returns:** `Response`
+
+ Usage:
+
+ ```
+ >>> import httpx
+ >>> response = httpx.request('GET', 'https://httpbin.org/get')
+ >>> response
+    <Response [200 OK]>
+ ```
+ """
+ with Client(
+ cookies=cookies,
+ proxies=proxies,
+ cert=cert,
+ verify=verify,
+ timeout=timeout,
+ trust_env=trust_env,
+ ) as client:
+ return client.request(
+ method=method,
+ url=url,
+ content=content,
+ data=data,
+ files=files,
+ json=json,
+ params=params,
+ headers=headers,
+ auth=auth,
+ allow_redirects=allow_redirects,
+ )
+
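+# A minimal usage sketch (illustrative only; the URL and values are assumptions).
+# The top-level helper opens a short-lived Client for each call, so it suits
+# one-off requests rather than connection reuse:
+#
+#     import httpx
+#     response = httpx.request(
+#         "GET",
+#         "https://httpbin.org/get",
+#         params={"q": "kodi"},
+#         headers={"Accept": "application/json"},
+#         timeout=10.0,
+#     )
+#     response.raise_for_status()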
+
+@contextmanager
+def stream(
+ method: str,
+ url: URLTypes,
+ *,
+ params: QueryParamTypes = None,
+ content: RequestContent = None,
+ data: RequestData = None,
+ files: RequestFiles = None,
+ json: typing.Any = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: AuthTypes = None,
+ proxies: ProxiesTypes = None,
+ timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,
+ allow_redirects: bool = True,
+ verify: VerifyTypes = True,
+ cert: CertTypes = None,
+ trust_env: bool = True,
+) -> typing.Iterator[Response]:
+ """
+ Alternative to `httpx.request()` that streams the response body
+ instead of loading it into memory at once.
+
+ **Parameters**: See `httpx.request`.
+
+ See also: [Streaming Responses][0]
+
+ [0]: /quickstart#streaming-responses
+ """
+ with Client(
+ cookies=cookies,
+ proxies=proxies,
+ cert=cert,
+ verify=verify,
+ timeout=timeout,
+ trust_env=trust_env,
+ ) as client:
+ with client.stream(
+ method=method,
+ url=url,
+ content=content,
+ data=data,
+ files=files,
+ json=json,
+ params=params,
+ headers=headers,
+ auth=auth,
+ allow_redirects=allow_redirects,
+ ) as response:
+ yield response
+
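+# A minimal streaming sketch (the URL is illustrative); `iter_bytes()` consumes
+# the body in chunks instead of loading it into memory at once:
+#
+#     import httpx
+#     with httpx.stream("GET", "https://example.org/large-file") as response:
+#         for chunk in response.iter_bytes():
+#             ...  # process each chunk incrementally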
+
+def get(
+ url: URLTypes,
+ *,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: AuthTypes = None,
+ proxies: ProxiesTypes = None,
+ allow_redirects: bool = True,
+ cert: CertTypes = None,
+ verify: VerifyTypes = True,
+ timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,
+ trust_env: bool = True,
+) -> Response:
+ """
+ Sends a `GET` request.
+
+ **Parameters**: See `httpx.request`.
+
+ Note that the `data`, `files`, and `json` parameters are not available on
+ this function, as `GET` requests should not include a request body.
+ """
+ return request(
+ "GET",
+ url,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ auth=auth,
+ proxies=proxies,
+ allow_redirects=allow_redirects,
+ cert=cert,
+ verify=verify,
+ timeout=timeout,
+ trust_env=trust_env,
+ )
+
+
+def options(
+ url: URLTypes,
+ *,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: AuthTypes = None,
+ proxies: ProxiesTypes = None,
+ allow_redirects: bool = True,
+ cert: CertTypes = None,
+ verify: VerifyTypes = True,
+ timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,
+ trust_env: bool = True,
+) -> Response:
+ """
+ Sends an `OPTIONS` request.
+
+ **Parameters**: See `httpx.request`.
+
+ Note that the `data`, `files`, and `json` parameters are not available on
+ this function, as `OPTIONS` requests should not include a request body.
+ """
+ return request(
+ "OPTIONS",
+ url,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ auth=auth,
+ proxies=proxies,
+ allow_redirects=allow_redirects,
+ cert=cert,
+ verify=verify,
+ timeout=timeout,
+ trust_env=trust_env,
+ )
+
+
+def head(
+ url: URLTypes,
+ *,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: AuthTypes = None,
+ proxies: ProxiesTypes = None,
+ allow_redirects: bool = True,
+ cert: CertTypes = None,
+ verify: VerifyTypes = True,
+ timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,
+ trust_env: bool = True,
+) -> Response:
+ """
+ Sends a `HEAD` request.
+
+ **Parameters**: See `httpx.request`.
+
+ Note that the `data`, `files`, and `json` parameters are not available on
+ this function, as `HEAD` requests should not include a request body.
+ """
+ return request(
+ "HEAD",
+ url,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ auth=auth,
+ proxies=proxies,
+ allow_redirects=allow_redirects,
+ cert=cert,
+ verify=verify,
+ timeout=timeout,
+ trust_env=trust_env,
+ )
+
+
+def post(
+ url: URLTypes,
+ *,
+ content: RequestContent = None,
+ data: RequestData = None,
+ files: RequestFiles = None,
+ json: typing.Any = None,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: AuthTypes = None,
+ proxies: ProxiesTypes = None,
+ allow_redirects: bool = True,
+ cert: CertTypes = None,
+ verify: VerifyTypes = True,
+ timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,
+ trust_env: bool = True,
+) -> Response:
+ """
+ Sends a `POST` request.
+
+ **Parameters**: See `httpx.request`.
+ """
+ return request(
+ "POST",
+ url,
+ content=content,
+ data=data,
+ files=files,
+ json=json,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ auth=auth,
+ proxies=proxies,
+ allow_redirects=allow_redirects,
+ cert=cert,
+ verify=verify,
+ timeout=timeout,
+ trust_env=trust_env,
+ )
+
+
+def put(
+ url: URLTypes,
+ *,
+ content: RequestContent = None,
+ data: RequestData = None,
+ files: RequestFiles = None,
+ json: typing.Any = None,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: AuthTypes = None,
+ proxies: ProxiesTypes = None,
+ allow_redirects: bool = True,
+ cert: CertTypes = None,
+ verify: VerifyTypes = True,
+ timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,
+ trust_env: bool = True,
+) -> Response:
+ """
+ Sends a `PUT` request.
+
+ **Parameters**: See `httpx.request`.
+ """
+ return request(
+ "PUT",
+ url,
+ content=content,
+ data=data,
+ files=files,
+ json=json,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ auth=auth,
+ proxies=proxies,
+ allow_redirects=allow_redirects,
+ cert=cert,
+ verify=verify,
+ timeout=timeout,
+ trust_env=trust_env,
+ )
+
+
+def patch(
+ url: URLTypes,
+ *,
+ content: RequestContent = None,
+ data: RequestData = None,
+ files: RequestFiles = None,
+ json: typing.Any = None,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: AuthTypes = None,
+ proxies: ProxiesTypes = None,
+ allow_redirects: bool = True,
+ cert: CertTypes = None,
+ verify: VerifyTypes = True,
+ timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,
+ trust_env: bool = True,
+) -> Response:
+ """
+ Sends a `PATCH` request.
+
+ **Parameters**: See `httpx.request`.
+ """
+ return request(
+ "PATCH",
+ url,
+ content=content,
+ data=data,
+ files=files,
+ json=json,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ auth=auth,
+ proxies=proxies,
+ allow_redirects=allow_redirects,
+ cert=cert,
+ verify=verify,
+ timeout=timeout,
+ trust_env=trust_env,
+ )
+
+
+def delete(
+ url: URLTypes,
+ *,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: AuthTypes = None,
+ proxies: ProxiesTypes = None,
+ allow_redirects: bool = True,
+ cert: CertTypes = None,
+ verify: VerifyTypes = True,
+ timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,
+ trust_env: bool = True,
+) -> Response:
+ """
+ Sends a `DELETE` request.
+
+ **Parameters**: See `httpx.request`.
+
+ Note that the `data`, `files`, and `json` parameters are not available on
+ this function, as `DELETE` requests should not include a request body.
+ """
+ return request(
+ "DELETE",
+ url,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ auth=auth,
+ proxies=proxies,
+ allow_redirects=allow_redirects,
+ cert=cert,
+ verify=verify,
+ timeout=timeout,
+ trust_env=trust_env,
+ )
diff --git a/packages/httpx/_auth.py b/packages/httpx/_auth.py
new file mode 100644
index 000000000..343f9cdd1
--- /dev/null
+++ b/packages/httpx/_auth.py
@@ -0,0 +1,304 @@
+import hashlib
+import os
+import re
+import time
+import typing
+from base64 import b64encode
+from urllib.request import parse_http_list
+
+from ._exceptions import ProtocolError
+from ._models import Request, Response
+from ._utils import to_bytes, to_str, unquote
+
+
+class Auth:
+ """
+ Base class for all authentication schemes.
+
+ To implement a custom authentication scheme, subclass `Auth` and override
+ the `.auth_flow()` method.
+
+ If the authentication scheme does I/O such as disk access or network calls, or uses
+ synchronization primitives such as locks, you should override `.sync_auth_flow()`
+ and/or `.async_auth_flow()` instead of `.auth_flow()` to provide specialized
+ implementations that will be used by `Client` and `AsyncClient` respectively.
+ """
+
+ requires_request_body = False
+ requires_response_body = False
+
+ def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:
+ """
+ Execute the authentication flow.
+
+ To dispatch a request, `yield` it:
+
+ ```
+ yield request
+ ```
+
+ The client will `.send()` the response back into the flow generator. You can
+ access it like so:
+
+ ```
+ response = yield request
+ ```
+
+ A `return` (or reaching the end of the generator) will result in the
+ client returning the last response obtained from the server.
+
+ You can dispatch as many requests as is necessary.
+ """
+ yield request
+
+ def sync_auth_flow(
+ self, request: Request
+ ) -> typing.Generator[Request, Response, None]:
+ """
+ Execute the authentication flow synchronously.
+
+ By default, this defers to `.auth_flow()`. You should override this method
+ when the authentication scheme does I/O and/or uses concurrency primitives.
+ """
+ if self.requires_request_body:
+ request.read()
+
+ flow = self.auth_flow(request)
+ request = next(flow)
+
+ while True:
+ response = yield request
+ if self.requires_response_body:
+ response.read()
+
+ try:
+ request = flow.send(response)
+ except StopIteration:
+ break
+
+ async def async_auth_flow(
+ self, request: Request
+ ) -> typing.AsyncGenerator[Request, Response]:
+ """
+ Execute the authentication flow asynchronously.
+
+ By default, this defers to `.auth_flow()`. You should override this method
+ when the authentication scheme does I/O and/or uses concurrency primitives.
+ """
+ if self.requires_request_body:
+ await request.aread()
+
+ flow = self.auth_flow(request)
+ request = next(flow)
+
+ while True:
+ response = yield request
+ if self.requires_response_body:
+ await response.aread()
+
+ try:
+ request = flow.send(response)
+ except StopIteration:
+ break
+
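+# A minimal sketch of the custom-scheme pattern described above, assuming a
+# hypothetical bearer token value:
+#
+#     class TokenAuth(Auth):
+#         def __init__(self, token):
+#             self.token = token
+#
+#         def auth_flow(self, request):
+#             request.headers["Authorization"] = f"Bearer {self.token}"
+#             yield request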
+
+class FunctionAuth(Auth):
+ """
+ Allows the 'auth' argument to be passed as a simple callable function
+ that takes the request and returns a new, modified request.
+ """
+
+ def __init__(self, func: typing.Callable[[Request], Request]) -> None:
+ self._func = func
+
+ def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:
+ yield self._func(request)
+
+
+class BasicAuth(Auth):
+ """
+ Allows the 'auth' argument to be passed as a (username, password) pair,
+ and uses HTTP Basic authentication.
+ """
+
+ def __init__(
+ self, username: typing.Union[str, bytes], password: typing.Union[str, bytes]
+ ):
+ self._auth_header = self._build_auth_header(username, password)
+
+ def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:
+ request.headers["Authorization"] = self._auth_header
+ yield request
+
+ def _build_auth_header(
+ self, username: typing.Union[str, bytes], password: typing.Union[str, bytes]
+ ) -> str:
+ userpass = b":".join((to_bytes(username), to_bytes(password)))
+ token = b64encode(userpass).decode()
+ return f"Basic {token}"
+
+
+class DigestAuth(Auth):
+ _ALGORITHM_TO_HASH_FUNCTION: typing.Dict[str, typing.Callable] = {
+ "MD5": hashlib.md5,
+ "MD5-SESS": hashlib.md5,
+ "SHA": hashlib.sha1,
+ "SHA-SESS": hashlib.sha1,
+ "SHA-256": hashlib.sha256,
+ "SHA-256-SESS": hashlib.sha256,
+ "SHA-512": hashlib.sha512,
+ "SHA-512-SESS": hashlib.sha512,
+ }
+
+ def __init__(
+ self, username: typing.Union[str, bytes], password: typing.Union[str, bytes]
+ ) -> None:
+ self._username = to_bytes(username)
+ self._password = to_bytes(password)
+
+ def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:
+ response = yield request
+
+ if response.status_code != 401 or "www-authenticate" not in response.headers:
+ # If the response is not a 401 then we don't
+ # need to build an authenticated request.
+ return
+
+ for auth_header in response.headers.get_list("www-authenticate"):
+ if auth_header.lower().startswith("digest "):
+ break
+ else:
+ # If the response does not include a 'WWW-Authenticate: Digest ...'
+ # header, then we don't need to build an authenticated request.
+ return
+
+ challenge = self._parse_challenge(request, response, auth_header)
+ request.headers["Authorization"] = self._build_auth_header(request, challenge)
+ yield request
+
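+ # A hedged usage sketch (the URL is illustrative): passing an instance as the
+ # `auth` argument lets the challenge/response exchange above run automatically.
+ #
+ #     client = httpx.Client()
+ #     response = client.get("https://example.org/protected",
+ #                           auth=httpx.DigestAuth("user", "pass"))
+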
+ def _parse_challenge(
+ self, request: Request, response: Response, auth_header: str
+ ) -> "_DigestAuthChallenge":
+ """
+ Returns a challenge from a Digest WWW-Authenticate header.
+ These take the form of:
+ `Digest realm="realm@host.com",qop="auth,auth-int",nonce="abc",opaque="xyz"`
+ """
+ scheme, _, fields = auth_header.partition(" ")
+
+ # This method should only ever have been called with a Digest auth header.
+ assert scheme.lower() == "digest"
+
+ header_dict: typing.Dict[str, str] = {}
+ for field in parse_http_list(fields):
+ key, value = field.strip().split("=", 1)
+ header_dict[key] = unquote(value)
+
+ try:
+ realm = header_dict["realm"].encode()
+ nonce = header_dict["nonce"].encode()
+ algorithm = header_dict.get("algorithm", "MD5")
+ opaque = header_dict["opaque"].encode() if "opaque" in header_dict else None
+ qop = header_dict["qop"].encode() if "qop" in header_dict else None
+ return _DigestAuthChallenge(
+ realm=realm, nonce=nonce, algorithm=algorithm, opaque=opaque, qop=qop
+ )
+ except KeyError as exc:
+ message = "Malformed Digest WWW-Authenticate header"
+ raise ProtocolError(message, request=request) from exc
+
+ def _build_auth_header(
+ self, request: Request, challenge: "_DigestAuthChallenge"
+ ) -> str:
+ hash_func = self._ALGORITHM_TO_HASH_FUNCTION[challenge.algorithm]
+
+ def digest(data: bytes) -> bytes:
+ return hash_func(data).hexdigest().encode()
+
+ A1 = b":".join((self._username, challenge.realm, self._password))
+
+ path = request.url.raw_path
+ A2 = b":".join((request.method.encode(), path))
+ # TODO: implement auth-int
+ HA2 = digest(A2)
+
+ nonce_count = 1 # TODO: implement nonce counting
+ nc_value = b"%08x" % nonce_count
+ cnonce = self._get_client_nonce(nonce_count, challenge.nonce)
+
+ HA1 = digest(A1)
+ if challenge.algorithm.lower().endswith("-sess"):
+ HA1 = digest(b":".join((HA1, challenge.nonce, cnonce)))
+
+ qop = self._resolve_qop(challenge.qop, request=request)
+ if qop is None:
+ digest_data = [HA1, challenge.nonce, HA2]
+ else:
+ digest_data = [challenge.nonce, nc_value, cnonce, qop, HA2]
+ key_digest = b":".join(digest_data)
+
+ format_args = {
+ "username": self._username,
+ "realm": challenge.realm,
+ "nonce": challenge.nonce,
+ "uri": path,
+ "response": digest(b":".join((HA1, key_digest))),
+ "algorithm": challenge.algorithm.encode(),
+ }
+ if challenge.opaque:
+ format_args["opaque"] = challenge.opaque
+ if qop:
+ format_args["qop"] = b"auth"
+ format_args["nc"] = nc_value
+ format_args["cnonce"] = cnonce
+
+ return "Digest " + self._get_header_value(format_args)
+
+ def _get_client_nonce(self, nonce_count: int, nonce: bytes) -> bytes:
+ s = str(nonce_count).encode()
+ s += nonce
+ s += time.ctime().encode()
+ s += os.urandom(8)
+
+ return hashlib.sha1(s).hexdigest()[:16].encode()
+
+ def _get_header_value(self, header_fields: typing.Dict[str, bytes]) -> str:
+ NON_QUOTED_FIELDS = ("algorithm", "qop", "nc")
+ QUOTED_TEMPLATE = '{}="{}"'
+ NON_QUOTED_TEMPLATE = "{}={}"
+
+ header_value = ""
+ for i, (field, value) in enumerate(header_fields.items()):
+ if i > 0:
+ header_value += ", "
+ template = (
+ QUOTED_TEMPLATE
+ if field not in NON_QUOTED_FIELDS
+ else NON_QUOTED_TEMPLATE
+ )
+ header_value += template.format(field, to_str(value))
+
+ return header_value
+
+ def _resolve_qop(
+ self, qop: typing.Optional[bytes], request: Request
+ ) -> typing.Optional[bytes]:
+ if qop is None:
+ return None
+ qops = re.split(b", ?", qop)
+ if b"auth" in qops:
+ return b"auth"
+
+ if qops == [b"auth-int"]:
+ raise NotImplementedError("Digest auth-int support is not yet implemented")
+
+ message = f'Unexpected qop value "{qop!r}" in digest auth'
+ raise ProtocolError(message, request=request)
+
+
+class _DigestAuthChallenge(typing.NamedTuple):
+ realm: bytes
+ nonce: bytes
+ algorithm: str
+ opaque: typing.Optional[bytes]
+ qop: typing.Optional[bytes]
diff --git a/packages/httpx/_client.py b/packages/httpx/_client.py
new file mode 100644
index 000000000..c6e1efbe6
--- /dev/null
+++ b/packages/httpx/_client.py
@@ -0,0 +1,1982 @@
+import datetime
+import enum
+import typing
+import warnings
+from contextlib import contextmanager
+from types import TracebackType
+
+from .__version__ import __version__
+from ._auth import Auth, BasicAuth, FunctionAuth
+from ._compat import asynccontextmanager
+from ._config import (
+ DEFAULT_LIMITS,
+ DEFAULT_MAX_REDIRECTS,
+ DEFAULT_TIMEOUT_CONFIG,
+ Limits,
+ Proxy,
+ Timeout,
+)
+from ._decoders import SUPPORTED_DECODERS
+from ._exceptions import (
+ InvalidURL,
+ RemoteProtocolError,
+ TooManyRedirects,
+ request_context,
+)
+from ._models import URL, Cookies, Headers, QueryParams, Request, Response
+from ._status_codes import codes
+from ._transports.asgi import ASGITransport
+from ._transports.base import (
+ AsyncBaseTransport,
+ AsyncByteStream,
+ BaseTransport,
+ SyncByteStream,
+)
+from ._transports.default import AsyncHTTPTransport, HTTPTransport
+from ._transports.wsgi import WSGITransport
+from ._types import (
+ AuthTypes,
+ CertTypes,
+ CookieTypes,
+ HeaderTypes,
+ ProxiesTypes,
+ QueryParamTypes,
+ RequestContent,
+ RequestData,
+ RequestFiles,
+ TimeoutTypes,
+ URLTypes,
+ VerifyTypes,
+)
+from ._utils import (
+ NetRCInfo,
+ Timer,
+ URLPattern,
+ get_environment_proxies,
+ get_logger,
+ same_origin,
+)
+
+# The type annotation for @classmethod and context managers here follows PEP 484
+# https://www.python.org/dev/peps/pep-0484/#annotating-instance-and-class-methods
+T = typing.TypeVar("T", bound="Client")
+U = typing.TypeVar("U", bound="AsyncClient")
+
+
+class UseClientDefault:
+ """
+ For some parameters such as `auth=...` and `timeout=...` we need to be able
+ to indicate the default "unset" state, in a way that is distinctly different
+ to using `None`.
+
+ The default "unset" state indicates that whatever default is set on the
+ client should be used. This is different to setting `None`, which
+ explicitly disables the parameter, possibly overriding a client default.
+
+ For example, we use `timeout=USE_CLIENT_DEFAULT` in the `request()` signature.
+ Omitting the `timeout` parameter will send a request using whatever default
+ timeout has been configured on the client. Including `timeout=None` will
+ ensure no timeout is used.
+
+ Note that user code shouldn't need to use the `USE_CLIENT_DEFAULT` constant,
+ but it is used internally when a parameter is not included.
+ """
+
+ pass # pragma: nocover
+
+
+USE_CLIENT_DEFAULT = UseClientDefault()
+
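+# A hedged illustration of the distinction described above: omitting `timeout`
+# falls back to the client default, while `timeout=None` disables it entirely.
+#
+#     client = httpx.Client(timeout=5.0)
+#     client.get("https://example.org")                # uses the 5s client default
+#     client.get("https://example.org", timeout=None)  # explicitly no timeout
+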
+
+logger = get_logger(__name__)
+
+USER_AGENT = f"python-httpx/{__version__}"
+ACCEPT_ENCODING = ", ".join(
+ [key for key in SUPPORTED_DECODERS.keys() if key != "identity"]
+)
+
+
+class ClientState(enum.Enum):
+ # UNOPENED:
+ # The client has been instantiated, but has not been used to send a request,
+ # or been opened by entering the context of a `with` block.
+ UNOPENED = 1
+ # OPENED:
+ # The client has either sent a request, or is within a `with` block.
+ OPENED = 2
+ # CLOSED:
+ # The client has either exited the `with` block, or `close()` has
+ # been called explicitly.
+ CLOSED = 3
+
+
+class BoundSyncStream(SyncByteStream):
+ """
+ A byte stream that is bound to a given response instance, and that
+ ensures the `response.elapsed` is set once the response is closed.
+ """
+
+ def __init__(
+ self, stream: SyncByteStream, response: Response, timer: Timer
+ ) -> None:
+ self._stream = stream
+ self._response = response
+ self._timer = timer
+
+ def __iter__(self) -> typing.Iterator[bytes]:
+ for chunk in self._stream:
+ yield chunk
+
+ def close(self) -> None:
+ seconds = self._timer.sync_elapsed()
+ self._response.elapsed = datetime.timedelta(seconds=seconds)
+ self._stream.close()
+
+
+class BoundAsyncStream(AsyncByteStream):
+ """
+ An async byte stream that is bound to a given response instance, and that
+ ensures the `response.elapsed` is set once the response is closed.
+ """
+
+ def __init__(
+ self, stream: AsyncByteStream, response: Response, timer: Timer
+ ) -> None:
+ self._stream = stream
+ self._response = response
+ self._timer = timer
+
+ async def __aiter__(self) -> typing.AsyncIterator[bytes]:
+ async for chunk in self._stream:
+ yield chunk
+
+ async def aclose(self) -> None:
+ seconds = await self._timer.async_elapsed()
+ self._response.elapsed = datetime.timedelta(seconds=seconds)
+ await self._stream.aclose()
+
+
+class BaseClient:
+ def __init__(
+ self,
+ *,
+ auth: AuthTypes = None,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,
+ max_redirects: int = DEFAULT_MAX_REDIRECTS,
+ event_hooks: typing.Mapping[str, typing.List[typing.Callable]] = None,
+ base_url: URLTypes = "",
+ trust_env: bool = True,
+ ):
+ event_hooks = {} if event_hooks is None else event_hooks
+
+ self._base_url = self._enforce_trailing_slash(URL(base_url))
+
+ self._auth = self._build_auth(auth)
+ self._params = QueryParams(params)
+ self.headers = Headers(headers)
+ self._cookies = Cookies(cookies)
+ self._timeout = Timeout(timeout)
+ self.max_redirects = max_redirects
+ self._event_hooks = {
+ "request": list(event_hooks.get("request", [])),
+ "response": list(event_hooks.get("response", [])),
+ }
+ self._trust_env = trust_env
+ self._netrc = NetRCInfo()
+ self._state = ClientState.UNOPENED
+
+ @property
+ def is_closed(self) -> bool:
+ """
+ Check if the client has been closed.
+ """
+ return self._state == ClientState.CLOSED
+
+ @property
+ def trust_env(self) -> bool:
+ return self._trust_env
+
+ def _enforce_trailing_slash(self, url: URL) -> URL:
+ if url.raw_path.endswith(b"/"):
+ return url
+ return url.copy_with(raw_path=url.raw_path + b"/")
+
+ def _get_proxy_map(
+ self, proxies: typing.Optional[ProxiesTypes], allow_env_proxies: bool
+ ) -> typing.Dict[str, typing.Optional[Proxy]]:
+ if proxies is None:
+ if allow_env_proxies:
+ return {
+ key: None if url is None else Proxy(url=url)
+ for key, url in get_environment_proxies().items()
+ }
+ return {}
+ if isinstance(proxies, dict):
+ new_proxies = {}
+ for key, value in proxies.items():
+ proxy = Proxy(url=value) if isinstance(value, (str, URL)) else value
+ new_proxies[str(key)] = proxy
+ return new_proxies
+ else:
+ proxy = Proxy(url=proxies) if isinstance(proxies, (str, URL)) else proxies
+ return {"all://": proxy}
+
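+ # A hedged sketch of the mapping form handled above (the proxy URLs are
+ # illustrative assumptions):
+ #
+ #     client = httpx.Client(proxies={
+ #         "http://": "http://localhost:8030",
+ #         "https://": "http://localhost:8031",
+ #     })
+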
+ @property
+ def timeout(self) -> Timeout:
+ return self._timeout
+
+ @timeout.setter
+ def timeout(self, timeout: TimeoutTypes) -> None:
+ self._timeout = Timeout(timeout)
+
+ @property
+ def event_hooks(self) -> typing.Dict[str, typing.List[typing.Callable]]:
+ return self._event_hooks
+
+ @event_hooks.setter
+ def event_hooks(
+ self, event_hooks: typing.Dict[str, typing.List[typing.Callable]]
+ ) -> None:
+ self._event_hooks = {
+ "request": list(event_hooks.get("request", [])),
+ "response": list(event_hooks.get("response", [])),
+ }
+
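+ # A minimal sketch of wiring up hooks, assuming hypothetical logging callables:
+ #
+ #     def log_request(request):
+ #         print(f"{request.method} {request.url}")
+ #
+ #     def log_response(response):
+ #         print(f"{response.status_code} for {response.request.url}")
+ #
+ #     client = httpx.Client(event_hooks={"request": [log_request],
+ #                                        "response": [log_response]})
+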
+ @property
+ def auth(self) -> typing.Optional[Auth]:
+ """
+ Authentication class used when none is passed at the request-level.
+
+ See also [Authentication][0].
+
+ [0]: /quickstart/#authentication
+ """
+ return self._auth
+
+ @auth.setter
+ def auth(self, auth: AuthTypes) -> None:
+ self._auth = self._build_auth(auth)
+
+ @property
+ def base_url(self) -> URL:
+ """
+ Base URL to use when sending requests with relative URLs.
+ """
+ return self._base_url
+
+ @base_url.setter
+ def base_url(self, url: URLTypes) -> None:
+ self._base_url = self._enforce_trailing_slash(URL(url))
+
+ @property
+ def headers(self) -> Headers:
+ """
+ HTTP headers to include when sending requests.
+ """
+ return self._headers
+
+ @headers.setter
+ def headers(self, headers: HeaderTypes) -> None:
+ client_headers = Headers(
+ {
+ b"Accept": b"*/*",
+ b"Accept-Encoding": ACCEPT_ENCODING.encode("ascii"),
+ b"Connection": b"keep-alive",
+ b"User-Agent": USER_AGENT.encode("ascii"),
+ }
+ )
+ client_headers.update(headers)
+ self._headers = client_headers
+
+ @property
+ def cookies(self) -> Cookies:
+ """
+ Cookie values to include when sending requests.
+ """
+ return self._cookies
+
+ @cookies.setter
+ def cookies(self, cookies: CookieTypes) -> None:
+ self._cookies = Cookies(cookies)
+
+ @property
+ def params(self) -> QueryParams:
+ """
+ Query parameters to include in the URL when sending requests.
+ """
+ return self._params
+
+ @params.setter
+ def params(self, params: QueryParamTypes) -> None:
+ self._params = QueryParams(params)
+
+ def build_request(
+ self,
+ method: str,
+ url: URLTypes,
+ *,
+ content: RequestContent = None,
+ data: RequestData = None,
+ files: RequestFiles = None,
+ json: typing.Any = None,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ ) -> Request:
+ """
+ Build and return a request instance.
+
+ * The `params`, `headers` and `cookies` arguments
+ are merged with any values set on the client.
+ * The `url` argument is merged with any `base_url` set on the client.
+
+ See also: [Request instances][0]
+
+ [0]: /advanced/#request-instances
+ """
+ url = self._merge_url(url)
+ headers = self._merge_headers(headers)
+ cookies = self._merge_cookies(cookies)
+ params = self._merge_queryparams(params)
+ return Request(
+ method,
+ url,
+ content=content,
+ data=data,
+ files=files,
+ json=json,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ )
+
+ def _merge_url(self, url: URLTypes) -> URL:
+ """
+ Merge a URL argument together with any 'base_url' on the client,
+ to create the URL used for the outgoing request.
+ """
+ merge_url = URL(url)
+ if merge_url.is_relative_url:
+ # To merge URLs we always append to the base URL. To get this
+ # behaviour correct we always ensure the base URL ends in a '/'
+ # separator, and strip any leading '/' from the merge URL.
+ #
+ # So, eg...
+ #
+ # >>> client = Client(base_url="https://www.example.com/subpath")
+ # >>> client.base_url
+ # URL('https://www.example.com/subpath/')
+ # >>> client.build_request("GET", "/path").url
+ # URL('https://www.example.com/subpath/path')
+ merge_raw_path = self.base_url.raw_path + merge_url.raw_path.lstrip(b"/")
+ return self.base_url.copy_with(raw_path=merge_raw_path)
+ return merge_url
+
+ def _merge_cookies(
+ self, cookies: CookieTypes = None
+ ) -> typing.Optional[CookieTypes]:
+ """
+ Merge a cookies argument together with any cookies on the client,
+ to create the cookies used for the outgoing request.
+ """
+ if cookies or self.cookies:
+ merged_cookies = Cookies(self.cookies)
+ merged_cookies.update(cookies)
+ return merged_cookies
+ return cookies
+
+ def _merge_headers(
+ self, headers: HeaderTypes = None
+ ) -> typing.Optional[HeaderTypes]:
+ """
+ Merge a headers argument together with any headers on the client,
+ to create the headers used for the outgoing request.
+ """
+ merged_headers = Headers(self.headers)
+ merged_headers.update(headers)
+ return merged_headers
+
+ def _merge_queryparams(
+ self, params: QueryParamTypes = None
+ ) -> typing.Optional[QueryParamTypes]:
+ """
+ Merge a queryparams argument together with any queryparams on the client,
+ to create the queryparams used for the outgoing request.
+ """
+ if params or self.params:
+ merged_queryparams = QueryParams(self.params)
+ merged_queryparams = merged_queryparams.merge(params)
+ return merged_queryparams
+ return params
+
+ def _build_auth(self, auth: AuthTypes) -> typing.Optional[Auth]:
+ if auth is None:
+ return None
+ elif isinstance(auth, tuple):
+ return BasicAuth(username=auth[0], password=auth[1])
+ elif isinstance(auth, Auth):
+ return auth
+ elif callable(auth):
+ return FunctionAuth(func=auth)
+ else:
+ raise TypeError(f'Invalid "auth" argument: {auth!r}')
+
+ def _build_request_auth(
+ self,
+ request: Request,
+ auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ ) -> Auth:
+ auth = (
+ self._auth if isinstance(auth, UseClientDefault) else self._build_auth(auth)
+ )
+
+ if auth is not None:
+ return auth
+
+ username, password = request.url.username, request.url.password
+ if username or password:
+ return BasicAuth(username=username, password=password)
+
+ if self.trust_env and "Authorization" not in request.headers:
+ credentials = self._netrc.get_credentials(request.url.host)
+ if credentials is not None:
+ return BasicAuth(username=credentials[0], password=credentials[1])
+
+ return Auth()
+
+ def _build_redirect_request(self, request: Request, response: Response) -> Request:
+ """
+ Given a request and a redirect response, return a new request that
+ should be used to effect the redirect.
+ """
+ method = self._redirect_method(request, response)
+ url = self._redirect_url(request, response)
+ headers = self._redirect_headers(request, url, method)
+ stream = self._redirect_stream(request, method)
+ cookies = Cookies(self.cookies)
+ return Request(
+ method=method, url=url, headers=headers, cookies=cookies, stream=stream
+ )
+
+ def _redirect_method(self, request: Request, response: Response) -> str:
+ """
+ When being redirected we may want to change the method of the request
+ based on certain specs or browser behavior.
+ """
+ method = request.method
+
+ # https://tools.ietf.org/html/rfc7231#section-6.4.4
+ if response.status_code == codes.SEE_OTHER and method != "HEAD":
+ method = "GET"
+
+ # Do what the browsers do, despite standards...
+ # Turn 302s into GETs.
+ if response.status_code == codes.FOUND and method != "HEAD":
+ method = "GET"
+
+ # If a POST is responded to with a 301, turn it into a GET.
+ # This bizarre behaviour is explained in 'requests' issue 1704.
+ if response.status_code == codes.MOVED_PERMANENTLY and method == "POST":
+ method = "GET"
+
+ return method
+
+ def _redirect_url(self, request: Request, response: Response) -> URL:
+ """
+ Return the URL for the redirect to follow.
+ """
+ location = response.headers["Location"]
+
+ try:
+ url = URL(location)
+ except InvalidURL as exc:
+ raise RemoteProtocolError(
+ f"Invalid URL in location header: {exc}.", request=request
+ ) from None
+
+ # Handle malformed 'Location' headers that are in "absolute" form but have no host.
+ # See: https://github.com/encode/httpx/issues/771
+ if url.scheme and not url.host:
+ url = url.copy_with(host=request.url.host)
+
+ # Facilitate relative 'Location' headers, as allowed by RFC 7231.
+ # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
+ if url.is_relative_url:
+ url = request.url.join(url)
+
+ # Attach previous fragment if needed (RFC 7231 7.1.2)
+ if request.url.fragment and not url.fragment:
+ url = url.copy_with(fragment=request.url.fragment)
+
+ return url
+
+ def _redirect_headers(self, request: Request, url: URL, method: str) -> Headers:
+ """
+ Return the headers that should be used for the redirect request.
+ """
+ headers = Headers(request.headers)
+
+ if not same_origin(url, request.url):
+ # Strip Authorization headers when responses are redirected away from
+ # the origin.
+ headers.pop("Authorization", None)
+
+ # Update the Host header.
+ headers["Host"] = url.netloc.decode("ascii")
+
+ if method != request.method and method == "GET":
+ # If we've switched to a 'GET' request, then strip any headers which
+ # are only relevant to the request body.
+ headers.pop("Content-Length", None)
+ headers.pop("Transfer-Encoding", None)
+
+ # We should use the client cookie store to determine any cookie header,
+ # rather than whatever was on the original outgoing request.
+ headers.pop("Cookie", None)
+
+ return headers
+
+ def _redirect_stream(
+ self, request: Request, method: str
+ ) -> typing.Optional[typing.Union[SyncByteStream, AsyncByteStream]]:
+ """
+ Return the body that should be used for the redirect request.
+ """
+ if method != request.method and method == "GET":
+ return None
+
+ return request.stream
+
+
+class Client(BaseClient):
+ """
+ An HTTP client, with connection pooling, HTTP/2, redirects, cookie persistence, etc.
+
+ Usage:
+
+ ```python
+ >>> client = httpx.Client()
+ >>> response = client.get('https://example.org')
+ ```
+
+ **Parameters:**
+
+ * **auth** - *(optional)* An authentication class to use when sending
+ requests.
+ * **params** - *(optional)* Query parameters to include in request URLs, as
+ a string, dictionary, or sequence of two-tuples.
+ * **headers** - *(optional)* Dictionary of HTTP headers to include when
+ sending requests.
+ * **cookies** - *(optional)* Dictionary of Cookie items to include when
+ sending requests.
+ * **verify** - *(optional)* SSL certificates (a.k.a CA bundle) used to
+ verify the identity of requested hosts. Either `True` (default CA bundle),
+ a path to an SSL certificate file, an `ssl.SSLContext`, or `False`
+ (which will disable verification).
+ * **cert** - *(optional)* An SSL certificate used by the requested host
+ to authenticate the client. Either a path to an SSL certificate file, or
+ two-tuple of (certificate file, key file), or a three-tuple of (certificate
+ file, key file, password).
+ * **proxies** - *(optional)* A dictionary mapping proxy keys to proxy
+ URLs.
+ * **timeout** - *(optional)* The timeout configuration to use when sending
+ requests.
+ * **limits** - *(optional)* The limits configuration to use.
+ * **max_redirects** - *(optional)* The maximum number of redirect responses
+ that should be followed.
+ * **base_url** - *(optional)* A URL to use as the base when building
+ request URLs.
+ * **transport** - *(optional)* A transport class to use for sending requests
+ over the network.
+ * **app** - *(optional)* A WSGI application to send requests to,
+ rather than sending actual network requests.
+ * **trust_env** - *(optional)* Enables or disables usage of environment
+ variables for configuration.
+ """
+
+ def __init__(
+ self,
+ *,
+ auth: AuthTypes = None,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ verify: VerifyTypes = True,
+ cert: CertTypes = None,
+ http1: bool = True,
+ http2: bool = False,
+ proxies: ProxiesTypes = None,
+ mounts: typing.Mapping[str, BaseTransport] = None,
+ timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,
+ limits: Limits = DEFAULT_LIMITS,
+ max_redirects: int = DEFAULT_MAX_REDIRECTS,
+ event_hooks: typing.Mapping[str, typing.List[typing.Callable]] = None,
+ base_url: URLTypes = "",
+ transport: BaseTransport = None,
+ app: typing.Callable = None,
+ trust_env: bool = True,
+ ):
+ super().__init__(
+ auth=auth,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ timeout=timeout,
+ max_redirects=max_redirects,
+ event_hooks=event_hooks,
+ base_url=base_url,
+ trust_env=trust_env,
+ )
+
+ if http2:
+ try:
+ import h2 # noqa
+ except ImportError: # pragma: nocover
+ raise ImportError(
+ "Using http2=True, but the 'h2' package is not installed. "
+ "Make sure to install httpx using `pip install httpx[http2]`."
+ ) from None
+
+ allow_env_proxies = trust_env and app is None and transport is None
+ proxy_map = self._get_proxy_map(proxies, allow_env_proxies)
+
+ self._transport = self._init_transport(
+ verify=verify,
+ cert=cert,
+ http1=http1,
+ http2=http2,
+ limits=limits,
+ transport=transport,
+ app=app,
+ trust_env=trust_env,
+ )
+ self._mounts: typing.Dict[URLPattern, typing.Optional[BaseTransport]] = {
+ URLPattern(key): None
+ if proxy is None
+ else self._init_proxy_transport(
+ proxy,
+ verify=verify,
+ cert=cert,
+ http1=http1,
+ http2=http2,
+ limits=limits,
+ trust_env=trust_env,
+ )
+ for key, proxy in proxy_map.items()
+ }
+ if mounts is not None:
+ self._mounts.update(
+ {URLPattern(key): transport for key, transport in mounts.items()}
+ )
+
+ self._mounts = dict(sorted(self._mounts.items()))
+
+ def _init_transport(
+ self,
+ verify: VerifyTypes = True,
+ cert: CertTypes = None,
+ http1: bool = True,
+ http2: bool = False,
+ limits: Limits = DEFAULT_LIMITS,
+ transport: BaseTransport = None,
+ app: typing.Callable = None,
+ trust_env: bool = True,
+ ) -> BaseTransport:
+ if transport is not None:
+ return transport
+
+ if app is not None:
+ return WSGITransport(app=app)
+
+ return HTTPTransport(
+ verify=verify,
+ cert=cert,
+ http1=http1,
+ http2=http2,
+ limits=limits,
+ trust_env=trust_env,
+ )
+
+ def _init_proxy_transport(
+ self,
+ proxy: Proxy,
+ verify: VerifyTypes = True,
+ cert: CertTypes = None,
+ http1: bool = True,
+ http2: bool = False,
+ limits: Limits = DEFAULT_LIMITS,
+ trust_env: bool = True,
+ ) -> BaseTransport:
+ return HTTPTransport(
+ verify=verify,
+ cert=cert,
+ http1=http1,
+ http2=http2,
+ limits=limits,
+ trust_env=trust_env,
+ proxy=proxy,
+ )
+
+ def _transport_for_url(self, url: URL) -> BaseTransport:
+ """
+ Returns the transport instance that should be used for a given URL.
+ This will either be the standard connection pool, or a proxy.
+ """
+ for pattern, transport in self._mounts.items():
+ if pattern.matches(url):
+ return self._transport if transport is None else transport
+
+ return self._transport
+
+ def request(
+ self,
+ method: str,
+ url: URLTypes,
+ *,
+ content: RequestContent = None,
+ data: RequestData = None,
+ files: RequestFiles = None,
+ json: typing.Any = None,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ allow_redirects: bool = True,
+ timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ ) -> Response:
+ """
+ Build and send a request.
+
+ Equivalent to:
+
+ ```python
+ request = client.build_request(...)
+ response = client.send(request, ...)
+ ```
+
+ See `Client.build_request()`, `Client.send()` and
+ [Merging of configuration][0] for how the various parameters
+ are merged with client-level configuration.
+
+ [0]: /advanced/#merging-of-configuration
+ """
+ if cookies is not None:
+ message = (
+ "Setting per-request cookies=<...> is being deprecated, because "
+ "the expected behaviour on cookie persistence is ambiguous. Set "
+ "cookies directly on the client instance instead."
+ )
+ warnings.warn(message, DeprecationWarning)
+
+ request = self.build_request(
+ method=method,
+ url=url,
+ content=content,
+ data=data,
+ files=files,
+ json=json,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ )
+ return self.send(
+ request, auth=auth, allow_redirects=allow_redirects, timeout=timeout
+ )
+
+ @contextmanager
+ def stream(
+ self,
+ method: str,
+ url: URLTypes,
+ *,
+ content: RequestContent = None,
+ data: RequestData = None,
+ files: RequestFiles = None,
+ json: typing.Any = None,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ allow_redirects: bool = True,
+ timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ ) -> typing.Iterator[Response]:
+ """
+ Alternative to `httpx.request()` that streams the response body
+ instead of loading it into memory at once.
+
+ **Parameters**: See `httpx.request`.
+
+ See also: [Streaming Responses][0]
+
+ [0]: /quickstart#streaming-responses
+ """
+ request = self.build_request(
+ method=method,
+ url=url,
+ content=content,
+ data=data,
+ files=files,
+ json=json,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ )
+ response = self.send(
+ request=request,
+ auth=auth,
+ allow_redirects=allow_redirects,
+ timeout=timeout,
+ stream=True,
+ )
+ try:
+ yield response
+ finally:
+ response.close()
+
+ def send(
+ self,
+ request: Request,
+ *,
+ stream: bool = False,
+ auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ allow_redirects: bool = True,
+ timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ ) -> Response:
+ """
+ Send a request.
+
+ The request is sent as-is, unmodified.
+
+ Typically you'll want to build one with `Client.build_request()`
+ so that any client-level configuration is merged into the request,
+ but passing an explicit `httpx.Request()` is supported as well.
+
+ See also: [Request instances][0]
+
+ [0]: /advanced/#request-instances
+ """
+ if self._state == ClientState.CLOSED:
+ raise RuntimeError("Cannot send a request, as the client has been closed.")
+
+ self._state = ClientState.OPENED
+ timeout = (
+ self.timeout if isinstance(timeout, UseClientDefault) else Timeout(timeout)
+ )
+
+ auth = self._build_request_auth(request, auth)
+
+ response = self._send_handling_auth(
+ request,
+ auth=auth,
+ timeout=timeout,
+ allow_redirects=allow_redirects,
+ history=[],
+ )
+ try:
+ if not stream:
+ response.read()
+
+ for hook in self._event_hooks["response"]:
+ hook(response)
+
+ return response
+
+ except Exception as exc:
+ response.close()
+ raise exc
+
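+ # A short sketch of the explicit build/send flow described above (the URL is
+ # illustrative):
+ #
+ #     request = client.build_request("GET", "https://example.org")
+ #     response = client.send(request)
+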
+ def _send_handling_auth(
+ self,
+ request: Request,
+ auth: Auth,
+ timeout: Timeout,
+ allow_redirects: bool,
+ history: typing.List[Response],
+ ) -> Response:
+ auth_flow = auth.sync_auth_flow(request)
+ try:
+ request = next(auth_flow)
+
+ for hook in self._event_hooks["request"]:
+ hook(request)
+
+ while True:
+ response = self._send_handling_redirects(
+ request,
+ timeout=timeout,
+ allow_redirects=allow_redirects,
+ history=history,
+ )
+ try:
+ try:
+ next_request = auth_flow.send(response)
+ except StopIteration:
+ return response
+
+ response.history = list(history)
+ response.read()
+ request = next_request
+ history.append(response)
+
+ except Exception as exc:
+ response.close()
+ raise exc
+ finally:
+ auth_flow.close()
+
+ def _send_handling_redirects(
+ self,
+ request: Request,
+ timeout: Timeout,
+ allow_redirects: bool,
+ history: typing.List[Response],
+ ) -> Response:
+ while True:
+ if len(history) > self.max_redirects:
+ raise TooManyRedirects(
+ "Exceeded maximum allowed redirects.", request=request
+ )
+
+ response = self._send_single_request(request, timeout)
+ try:
+ response.history = list(history)
+
+ if not response.is_redirect:
+ return response
+
+ request = self._build_redirect_request(request, response)
+ history = history + [response]
+
+ if allow_redirects:
+ response.read()
+ else:
+ response.next_request = request
+ return response
+
+ except Exception as exc:
+ response.close()
+ raise exc
+
+ def _send_single_request(self, request: Request, timeout: Timeout) -> Response:
+ """
+ Sends a single request, without handling any redirections.
+ """
+ transport = self._transport_for_url(request.url)
+ timer = Timer()
+ timer.sync_start()
+
+ if not isinstance(request.stream, SyncByteStream):
+ raise RuntimeError(
+ "Attempted to send an async request with a sync Client instance."
+ )
+
+ with request_context(request=request):
+ (status_code, headers, stream, extensions) = transport.handle_request(
+ request.method.encode(),
+ request.url.raw,
+ headers=request.headers.raw,
+ stream=request.stream,
+ extensions={"timeout": timeout.as_dict()},
+ )
+
+ response = Response(
+ status_code,
+ headers=headers,
+ stream=stream,
+ extensions=extensions,
+ request=request,
+ )
+
+ response.stream = BoundSyncStream(stream, response=response, timer=timer)
+ self.cookies.extract_cookies(response)
+
+ status = f"{response.status_code} {response.reason_phrase}"
+ response_line = f"{response.http_version} {status}"
+ logger.debug(f'HTTP Request: {request.method} {request.url} "{response_line}"')
+
+ return response
+
+ def get(
+ self,
+ url: URLTypes,
+ *,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ allow_redirects: bool = True,
+ timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ ) -> Response:
+ """
+ Send a `GET` request.
+
+ **Parameters**: See `httpx.request`.
+ """
+ return self.request(
+ "GET",
+ url,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ auth=auth,
+ allow_redirects=allow_redirects,
+ timeout=timeout,
+ )
+
+ def options(
+ self,
+ url: URLTypes,
+ *,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ allow_redirects: bool = True,
+ timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ ) -> Response:
+ """
+ Send an `OPTIONS` request.
+
+ **Parameters**: See `httpx.request`.
+ """
+ return self.request(
+ "OPTIONS",
+ url,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ auth=auth,
+ allow_redirects=allow_redirects,
+ timeout=timeout,
+ )
+
+ def head(
+ self,
+ url: URLTypes,
+ *,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ allow_redirects: bool = True,
+ timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ ) -> Response:
+ """
+ Send a `HEAD` request.
+
+ **Parameters**: See `httpx.request`.
+ """
+ return self.request(
+ "HEAD",
+ url,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ auth=auth,
+ allow_redirects=allow_redirects,
+ timeout=timeout,
+ )
+
+ def post(
+ self,
+ url: URLTypes,
+ *,
+ content: RequestContent = None,
+ data: RequestData = None,
+ files: RequestFiles = None,
+ json: typing.Any = None,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ allow_redirects: bool = True,
+ timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ ) -> Response:
+ """
+ Send a `POST` request.
+
+ **Parameters**: See `httpx.request`.
+ """
+ return self.request(
+ "POST",
+ url,
+ content=content,
+ data=data,
+ files=files,
+ json=json,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ auth=auth,
+ allow_redirects=allow_redirects,
+ timeout=timeout,
+ )
+
+ def put(
+ self,
+ url: URLTypes,
+ *,
+ content: RequestContent = None,
+ data: RequestData = None,
+ files: RequestFiles = None,
+ json: typing.Any = None,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ allow_redirects: bool = True,
+ timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ ) -> Response:
+ """
+ Send a `PUT` request.
+
+ **Parameters**: See `httpx.request`.
+ """
+ return self.request(
+ "PUT",
+ url,
+ content=content,
+ data=data,
+ files=files,
+ json=json,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ auth=auth,
+ allow_redirects=allow_redirects,
+ timeout=timeout,
+ )
+
+ def patch(
+ self,
+ url: URLTypes,
+ *,
+ content: RequestContent = None,
+ data: RequestData = None,
+ files: RequestFiles = None,
+ json: typing.Any = None,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ allow_redirects: bool = True,
+ timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ ) -> Response:
+ """
+ Send a `PATCH` request.
+
+ **Parameters**: See `httpx.request`.
+ """
+ return self.request(
+ "PATCH",
+ url,
+ content=content,
+ data=data,
+ files=files,
+ json=json,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ auth=auth,
+ allow_redirects=allow_redirects,
+ timeout=timeout,
+ )
+
+ def delete(
+ self,
+ url: URLTypes,
+ *,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ allow_redirects: bool = True,
+ timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ ) -> Response:
+ """
+ Send a `DELETE` request.
+
+ **Parameters**: See `httpx.request`.
+ """
+ return self.request(
+ "DELETE",
+ url,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ auth=auth,
+ allow_redirects=allow_redirects,
+ timeout=timeout,
+ )
+
+ def close(self) -> None:
+ """
+ Close transport and proxies.
+ """
+ if self._state != ClientState.CLOSED:
+ self._state = ClientState.CLOSED
+
+ self._transport.close()
+ for transport in self._mounts.values():
+ if transport is not None:
+ transport.close()
+
+ def __enter__(self: T) -> T:
+ self._state = ClientState.OPENED
+
+ self._transport.__enter__()
+ for transport in self._mounts.values():
+ if transport is not None:
+ transport.__enter__()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: typing.Type[BaseException] = None,
+ exc_value: BaseException = None,
+ traceback: TracebackType = None,
+ ) -> None:
+ self._state = ClientState.CLOSED
+
+ self._transport.__exit__(exc_type, exc_value, traceback)
+ for transport in self._mounts.values():
+ if transport is not None:
+ transport.__exit__(exc_type, exc_value, traceback)
+
+ def __del__(self) -> None:
+ # We use 'getattr' here, to manage the case where '__del__()' is called
+ # on a partially initialized instance that raised an exception during
+ # the call to '__init__()'.
+ if getattr(self, "_state", None) == ClientState.OPENED: # noqa: B009
+ self.close()
+
+
+class AsyncClient(BaseClient):
+ """
+ An asynchronous HTTP client, with connection pooling, HTTP/2, redirects,
+ cookie persistence, etc.
+
+ Usage:
+
+ ```python
+ >>> async with httpx.AsyncClient() as client:
+ >>> response = await client.get('https://example.org')
+ ```
+
+ **Parameters:**
+
+ * **auth** - *(optional)* An authentication class to use when sending
+ requests.
+ * **params** - *(optional)* Query parameters to include in request URLs, as
+ a string, dictionary, or sequence of two-tuples.
+ * **headers** - *(optional)* Dictionary of HTTP headers to include when
+ sending requests.
+ * **cookies** - *(optional)* Dictionary of Cookie items to include when
+ sending requests.
+ * **verify** - *(optional)* SSL certificates (a.k.a CA bundle) used to
+ verify the identity of requested hosts. Either `True` (default CA bundle),
+ a path to an SSL certificate file, or `False` (disable verification).
+ * **cert** - *(optional)* An SSL certificate used by the requested host
+ to authenticate the client. Either a path to an SSL certificate file, or
+ two-tuple of (certificate file, key file), or a three-tuple of (certificate
+ file, key file, password).
+ * **http2** - *(optional)* A boolean indicating if HTTP/2 support should be
+ enabled. Defaults to `False`.
+ * **proxies** - *(optional)* A dictionary mapping HTTP protocols to proxy
+ URLs.
+ * **timeout** - *(optional)* The timeout configuration to use when sending
+ requests.
+ * **limits** - *(optional)* The limits configuration to use.
+ * **max_redirects** - *(optional)* The maximum number of redirect responses
+ that should be followed.
+ * **base_url** - *(optional)* A URL to use as the base when building
+ request URLs.
+ * **transport** - *(optional)* A transport class to use for sending requests
+ over the network.
+ * **app** - *(optional)* An ASGI application to send requests to,
+ rather than sending actual network requests.
+ * **trust_env** - *(optional)* Enables or disables usage of environment
+ variables for configuration.
+ """
+
+ def __init__(
+ self,
+ *,
+ auth: AuthTypes = None,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ verify: VerifyTypes = True,
+ cert: CertTypes = None,
+ http1: bool = True,
+ http2: bool = False,
+ proxies: ProxiesTypes = None,
+ mounts: typing.Mapping[str, AsyncBaseTransport] = None,
+ timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,
+ limits: Limits = DEFAULT_LIMITS,
+ max_redirects: int = DEFAULT_MAX_REDIRECTS,
+ event_hooks: typing.Mapping[str, typing.List[typing.Callable]] = None,
+ base_url: URLTypes = "",
+ transport: AsyncBaseTransport = None,
+ app: typing.Callable = None,
+ trust_env: bool = True,
+ ):
+ super().__init__(
+ auth=auth,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ timeout=timeout,
+ max_redirects=max_redirects,
+ event_hooks=event_hooks,
+ base_url=base_url,
+ trust_env=trust_env,
+ )
+
+ if http2:
+ try:
+ import h2 # noqa
+ except ImportError: # pragma: nocover
+ raise ImportError(
+ "Using http2=True, but the 'h2' package is not installed. "
+ "Make sure to install httpx using `pip install httpx[http2]`."
+ ) from None
+
+ allow_env_proxies = trust_env and app is None and transport is None
+ proxy_map = self._get_proxy_map(proxies, allow_env_proxies)
+
+ self._transport = self._init_transport(
+ verify=verify,
+ cert=cert,
+ http1=http1,
+ http2=http2,
+ limits=limits,
+ transport=transport,
+ app=app,
+ trust_env=trust_env,
+ )
+
+ self._mounts: typing.Dict[URLPattern, typing.Optional[AsyncBaseTransport]] = {
+ URLPattern(key): None
+ if proxy is None
+ else self._init_proxy_transport(
+ proxy,
+ verify=verify,
+ cert=cert,
+ http1=http1,
+ http2=http2,
+ limits=limits,
+ trust_env=trust_env,
+ )
+ for key, proxy in proxy_map.items()
+ }
+ if mounts is not None:
+ self._mounts.update(
+ {URLPattern(key): transport for key, transport in mounts.items()}
+ )
+ self._mounts = dict(sorted(self._mounts.items()))
+
+ def _init_transport(
+ self,
+ verify: VerifyTypes = True,
+ cert: CertTypes = None,
+ http1: bool = True,
+ http2: bool = False,
+ limits: Limits = DEFAULT_LIMITS,
+ transport: AsyncBaseTransport = None,
+ app: typing.Callable = None,
+ trust_env: bool = True,
+ ) -> AsyncBaseTransport:
+ if transport is not None:
+ return transport
+
+ if app is not None:
+ return ASGITransport(app=app)
+
+ return AsyncHTTPTransport(
+ verify=verify,
+ cert=cert,
+ http1=http1,
+ http2=http2,
+ limits=limits,
+ trust_env=trust_env,
+ )
+
+ def _init_proxy_transport(
+ self,
+ proxy: Proxy,
+ verify: VerifyTypes = True,
+ cert: CertTypes = None,
+ http1: bool = True,
+ http2: bool = False,
+ limits: Limits = DEFAULT_LIMITS,
+ trust_env: bool = True,
+ ) -> AsyncBaseTransport:
+ return AsyncHTTPTransport(
+ verify=verify,
+ cert=cert,
+ http2=http2,
+ limits=limits,
+ trust_env=trust_env,
+ proxy=proxy,
+ )
+
+ def _transport_for_url(self, url: URL) -> AsyncBaseTransport:
+ """
+ Returns the transport instance that should be used for a given URL.
+ This will either be the standard connection pool, or a proxy.
+ """
+ for pattern, transport in self._mounts.items():
+ if pattern.matches(url):
+ return self._transport if transport is None else transport
+
+ return self._transport
+
+ async def request(
+ self,
+ method: str,
+ url: URLTypes,
+ *,
+ content: RequestContent = None,
+ data: RequestData = None,
+ files: RequestFiles = None,
+ json: typing.Any = None,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ allow_redirects: bool = True,
+ timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ ) -> Response:
+ """
+ Build and send a request.
+
+ Equivalent to:
+
+ ```python
+ request = client.build_request(...)
+ response = await client.send(request, ...)
+ ```
+
+ See `AsyncClient.build_request()`, `AsyncClient.send()`
+ and [Merging of configuration][0] for how the various parameters
+ are merged with client-level configuration.
+
+ [0]: /advanced/#merging-of-configuration
+ """
+ request = self.build_request(
+ method=method,
+ url=url,
+ content=content,
+ data=data,
+ files=files,
+ json=json,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ )
+ response = await self.send(
+ request, auth=auth, allow_redirects=allow_redirects, timeout=timeout
+ )
+ return response
+
+ @asynccontextmanager
+ async def stream(
+ self,
+ method: str,
+ url: URLTypes,
+ *,
+ content: RequestContent = None,
+ data: RequestData = None,
+ files: RequestFiles = None,
+ json: typing.Any = None,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ allow_redirects: bool = True,
+ timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ ) -> typing.AsyncIterator[Response]:
+ """
+ Alternative to `httpx.request()` that streams the response body
+ instead of loading it into memory at once.
+
+ **Parameters**: See `httpx.request`.
+
+ See also: [Streaming Responses][0]
+
+ [0]: /quickstart#streaming-responses
+ """
+ request = self.build_request(
+ method=method,
+ url=url,
+ content=content,
+ data=data,
+ files=files,
+ json=json,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ )
+ response = await self.send(
+ request=request,
+ auth=auth,
+ allow_redirects=allow_redirects,
+ timeout=timeout,
+ stream=True,
+ )
+ try:
+ yield response
+ finally:
+ await response.aclose()
+
+ async def send(
+ self,
+ request: Request,
+ *,
+ stream: bool = False,
+ auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ allow_redirects: bool = True,
+ timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ ) -> Response:
+ """
+ Send a request.
+
+ The request is sent as-is, unmodified.
+
+ Typically you'll want to build one with `AsyncClient.build_request()`
+ so that any client-level configuration is merged into the request,
+ but passing an explicit `httpx.Request()` is supported as well.
+
+ See also: [Request instances][0]
+
+ [0]: /advanced/#request-instances
+ """
+ if self._state == ClientState.CLOSED:
+ raise RuntimeError("Cannot send a request, as the client has been closed.")
+
+ self._state = ClientState.OPENED
+ timeout = (
+ self.timeout if isinstance(timeout, UseClientDefault) else Timeout(timeout)
+ )
+
+ auth = self._build_request_auth(request, auth)
+
+ response = await self._send_handling_auth(
+ request,
+ auth=auth,
+ timeout=timeout,
+ allow_redirects=allow_redirects,
+ history=[],
+ )
+ try:
+ if not stream:
+ await response.aread()
+
+ for hook in self._event_hooks["response"]:
+ await hook(response)
+
+ return response
+
+ except Exception as exc:
+ await response.aclose()
+ raise exc
+
+ async def _send_handling_auth(
+ self,
+ request: Request,
+ auth: Auth,
+ timeout: Timeout,
+ allow_redirects: bool,
+ history: typing.List[Response],
+ ) -> Response:
+ auth_flow = auth.async_auth_flow(request)
+ try:
+ request = await auth_flow.__anext__()
+
+ for hook in self._event_hooks["request"]:
+ await hook(request)
+
+ while True:
+ response = await self._send_handling_redirects(
+ request,
+ timeout=timeout,
+ allow_redirects=allow_redirects,
+ history=history,
+ )
+ try:
+ try:
+ next_request = await auth_flow.asend(response)
+ except StopAsyncIteration:
+ return response
+
+ response.history = list(history)
+ await response.aread()
+ request = next_request
+ history.append(response)
+
+ except Exception as exc:
+ await response.aclose()
+ raise exc
+ finally:
+ await auth_flow.aclose()
+
+ async def _send_handling_redirects(
+ self,
+ request: Request,
+ timeout: Timeout,
+ allow_redirects: bool,
+ history: typing.List[Response],
+ ) -> Response:
+ while True:
+ if len(history) > self.max_redirects:
+ raise TooManyRedirects(
+ "Exceeded maximum allowed redirects.", request=request
+ )
+
+ response = await self._send_single_request(request, timeout)
+ try:
+ response.history = list(history)
+
+ if not response.is_redirect:
+ return response
+
+ request = self._build_redirect_request(request, response)
+ history = history + [response]
+
+ if allow_redirects:
+ await response.aread()
+ else:
+ response.next_request = request
+ return response
+
+ except Exception as exc:
+ await response.aclose()
+ raise exc
+
+ async def _send_single_request(
+ self, request: Request, timeout: Timeout
+ ) -> Response:
+ """
+ Sends a single request, without handling any redirections.
+ """
+ transport = self._transport_for_url(request.url)
+ timer = Timer()
+ await timer.async_start()
+
+ if not isinstance(request.stream, AsyncByteStream):
+ raise RuntimeError(
+ "Attempted to send an sync request with an AsyncClient instance."
+ )
+
+ with request_context(request=request):
+ (
+ status_code,
+ headers,
+ stream,
+ extensions,
+ ) = await transport.handle_async_request(
+ request.method.encode(),
+ request.url.raw,
+ headers=request.headers.raw,
+ stream=request.stream,
+ extensions={"timeout": timeout.as_dict()},
+ )
+
+ response = Response(
+ status_code,
+ headers=headers,
+ stream=stream,
+ extensions=extensions,
+ request=request,
+ )
+
+ response.stream = BoundAsyncStream(stream, response=response, timer=timer)
+ self.cookies.extract_cookies(response)
+
+ status = f"{response.status_code} {response.reason_phrase}"
+ response_line = f"{response.http_version} {status}"
+ logger.debug(f'HTTP Request: {request.method} {request.url} "{response_line}"')
+
+ return response
+
+ async def get(
+ self,
+ url: URLTypes,
+ *,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ allow_redirects: bool = True,
+ timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ ) -> Response:
+ """
+ Send a `GET` request.
+
+ **Parameters**: See `httpx.request`.
+ """
+ return await self.request(
+ "GET",
+ url,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ auth=auth,
+ allow_redirects=allow_redirects,
+ timeout=timeout,
+ )
+
+ async def options(
+ self,
+ url: URLTypes,
+ *,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ allow_redirects: bool = True,
+ timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ ) -> Response:
+ """
+ Send an `OPTIONS` request.
+
+ **Parameters**: See `httpx.request`.
+ """
+ return await self.request(
+ "OPTIONS",
+ url,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ auth=auth,
+ allow_redirects=allow_redirects,
+ timeout=timeout,
+ )
+
+ async def head(
+ self,
+ url: URLTypes,
+ *,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ allow_redirects: bool = True,
+ timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ ) -> Response:
+ """
+ Send a `HEAD` request.
+
+ **Parameters**: See `httpx.request`.
+ """
+ return await self.request(
+ "HEAD",
+ url,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ auth=auth,
+ allow_redirects=allow_redirects,
+ timeout=timeout,
+ )
+
+ async def post(
+ self,
+ url: URLTypes,
+ *,
+ content: RequestContent = None,
+ data: RequestData = None,
+ files: RequestFiles = None,
+ json: typing.Any = None,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ allow_redirects: bool = True,
+ timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ ) -> Response:
+ """
+ Send a `POST` request.
+
+ **Parameters**: See `httpx.request`.
+ """
+ return await self.request(
+ "POST",
+ url,
+ content=content,
+ data=data,
+ files=files,
+ json=json,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ auth=auth,
+ allow_redirects=allow_redirects,
+ timeout=timeout,
+ )
+
+ async def put(
+ self,
+ url: URLTypes,
+ *,
+ content: RequestContent = None,
+ data: RequestData = None,
+ files: RequestFiles = None,
+ json: typing.Any = None,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ allow_redirects: bool = True,
+ timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ ) -> Response:
+ """
+ Send a `PUT` request.
+
+ **Parameters**: See `httpx.request`.
+ """
+ return await self.request(
+ "PUT",
+ url,
+ content=content,
+ data=data,
+ files=files,
+ json=json,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ auth=auth,
+ allow_redirects=allow_redirects,
+ timeout=timeout,
+ )
+
+ async def patch(
+ self,
+ url: URLTypes,
+ *,
+ content: RequestContent = None,
+ data: RequestData = None,
+ files: RequestFiles = None,
+ json: typing.Any = None,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ allow_redirects: bool = True,
+ timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ ) -> Response:
+ """
+ Send a `PATCH` request.
+
+ **Parameters**: See `httpx.request`.
+ """
+ return await self.request(
+ "PATCH",
+ url,
+ content=content,
+ data=data,
+ files=files,
+ json=json,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ auth=auth,
+ allow_redirects=allow_redirects,
+ timeout=timeout,
+ )
+
+ async def delete(
+ self,
+ url: URLTypes,
+ *,
+ params: QueryParamTypes = None,
+ headers: HeaderTypes = None,
+ cookies: CookieTypes = None,
+ auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ allow_redirects: bool = True,
+ timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
+ ) -> Response:
+ """
+ Send a `DELETE` request.
+
+ **Parameters**: See `httpx.request`.
+ """
+ return await self.request(
+ "DELETE",
+ url,
+ params=params,
+ headers=headers,
+ cookies=cookies,
+ auth=auth,
+ allow_redirects=allow_redirects,
+ timeout=timeout,
+ )
+
+ async def aclose(self) -> None:
+ """
+ Close transport and proxies.
+ """
+ if self._state != ClientState.CLOSED:
+ self._state = ClientState.CLOSED
+
+ await self._transport.aclose()
+ for proxy in self._mounts.values():
+ if proxy is not None:
+ await proxy.aclose()
+
+ async def __aenter__(self: U) -> U:
+ self._state = ClientState.OPENED
+
+ await self._transport.__aenter__()
+ for proxy in self._mounts.values():
+ if proxy is not None:
+ await proxy.__aenter__()
+ return self
+
+ async def __aexit__(
+ self,
+ exc_type: typing.Type[BaseException] = None,
+ exc_value: BaseException = None,
+ traceback: TracebackType = None,
+ ) -> None:
+ self._state = ClientState.CLOSED
+
+ await self._transport.__aexit__(exc_type, exc_value, traceback)
+ for proxy in self._mounts.values():
+ if proxy is not None:
+ await proxy.__aexit__(exc_type, exc_value, traceback)
+
+ def __del__(self) -> None:
+ # We use 'getattr' here, to manage the case where '__del__()' is called
+ # on a partially initialized instance that raised an exception during
+ # the call to '__init__()'.
+ if getattr(self, "_state", None) == ClientState.OPENED: # noqa: B009
+ # Unlike the sync case, we cannot silently close the client when
+ # it is garbage collected, because `.aclose()` is an async operation,
+ # but `__del__` is not.
+ #
+ # For this reason we require explicit close management for
+ # `AsyncClient`, and issue a warning on unclosed clients.
+ #
+ # The context managed style is usually preferable, because it neatly
+ # ensures proper resource cleanup:
+ #
+ # async with httpx.AsyncClient() as client:
+ # ...
+ #
+ # However, an explicit call to `aclose()` is also sufficient:
+ #
+ # client = httpx.AsyncClient()
+ # try:
+ # ...
+ # finally:
+ # await client.aclose()
+ warnings.warn(
+ f"Unclosed {self!r}. "
+ "See https://www.python-httpx.org/async/#opening-and-closing-clients "
+ "for details."
+ )
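For reference, here is a minimal usage sketch of the `AsyncClient` defined above (illustrative only, not part of the patch): it assumes the vendored `packages/` directory is on `sys.path` so that `import httpx` resolves to this copy, and the URLs are placeholders. It exercises `get()`, the `stream()` context manager, and the explicit close that the `__del__` warning asks for.

```python
import asyncio

import httpx  # assumption: packages/ is on sys.path, so this is the vendored copy


async def main() -> None:
    # Preferred style: the async context manager guarantees aclose() is awaited.
    async with httpx.AsyncClient() as client:
        response = await client.get("https://example.org/")  # placeholder URL
        response.raise_for_status()

        # Stream a large body instead of loading it into memory at once.
        async with client.stream("GET", "https://example.org/large") as stream_response:
            async for chunk in stream_response.aiter_bytes():
                pass  # process each chunk here

    # Alternative: explicit close, as the __del__ warning above recommends.
    client = httpx.AsyncClient()
    try:
        await client.get("https://example.org/")
    finally:
        await client.aclose()


asyncio.run(main())
```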
diff --git a/packages/httpx/_compat.py b/packages/httpx/_compat.py
new file mode 100644
index 000000000..98a3e37b8
--- /dev/null
+++ b/packages/httpx/_compat.py
@@ -0,0 +1,25 @@
+"""
+The _compat module is used for code which requires branching between different
+Python environments. It is excluded from the code coverage checks.
+"""
+import ssl
+import sys
+
+# `contextlib.asynccontextmanager` exists from Python 3.7 onwards.
+# For 3.6 we require the `async_generator` package for a backported version.
+try:
+ from contextlib import asynccontextmanager # type: ignore
+except ImportError:
+ from async_generator import asynccontextmanager # type: ignore # noqa
+
+
+def set_minimum_tls_version_1_2(context: ssl.SSLContext) -> None:
+ if sys.version_info >= (3, 10):
+ context.minimum_version = ssl.TLSVersion.TLSv1_2
+ else:
+ # These become deprecated in favor of 'context.minimum_version'
+ # from Python 3.10 onwards.
+ context.options |= ssl.OP_NO_SSLv2
+ context.options |= ssl.OP_NO_SSLv3
+ context.options |= ssl.OP_NO_TLSv1
+ context.options |= ssl.OP_NO_TLSv1_1
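A quick illustration of the helper above (not part of the patch; the import assumes the vendored `packages/` directory is importable so that `httpx._compat` resolves to this module):

```python
import ssl

from httpx._compat import set_minimum_tls_version_1_2  # the helper defined above

# Build a client-side context and enforce a TLS 1.2 floor: on Python 3.10+ via
# minimum_version, on older interpreters via the OP_NO_* option flags.
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
set_minimum_tls_version_1_2(context)
```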
diff --git a/packages/httpx/_config.py b/packages/httpx/_config.py
new file mode 100644
index 000000000..9d29f9f2f
--- /dev/null
+++ b/packages/httpx/_config.py
@@ -0,0 +1,358 @@
+import os
+import ssl
+import typing
+from base64 import b64encode
+from pathlib import Path
+
+import certifi
+
+from ._compat import set_minimum_tls_version_1_2
+from ._models import URL, Headers
+from ._types import CertTypes, HeaderTypes, TimeoutTypes, URLTypes, VerifyTypes
+from ._utils import get_ca_bundle_from_env, get_logger
+
+DEFAULT_CIPHERS = ":".join(
+ [
+ "ECDHE+AESGCM",
+ "ECDHE+CHACHA20",
+ "DHE+AESGCM",
+ "DHE+CHACHA20",
+ "ECDH+AESGCM",
+ "DH+AESGCM",
+ "ECDH+AES",
+ "DH+AES",
+ "RSA+AESGCM",
+ "RSA+AES",
+ "!aNULL",
+ "!eNULL",
+ "!MD5",
+ "!DSS",
+ ]
+)
+
+
+logger = get_logger(__name__)
+
+
+class UnsetType:
+ pass # pragma: nocover
+
+
+UNSET = UnsetType()
+
+
+def create_ssl_context(
+ cert: CertTypes = None,
+ verify: VerifyTypes = True,
+ trust_env: bool = True,
+ http2: bool = False,
+) -> ssl.SSLContext:
+ return SSLConfig(
+ cert=cert, verify=verify, trust_env=trust_env, http2=http2
+ ).ssl_context
+
+
+class SSLConfig:
+ """
+ SSL Configuration.
+ """
+
+ DEFAULT_CA_BUNDLE_PATH = Path(certifi.where())
+
+ def __init__(
+ self,
+ *,
+ cert: CertTypes = None,
+ verify: VerifyTypes = True,
+ trust_env: bool = True,
+ http2: bool = False,
+ ):
+ self.cert = cert
+ self.verify = verify
+ self.trust_env = trust_env
+ self.http2 = http2
+ self.ssl_context = self.load_ssl_context()
+
+ def load_ssl_context(self) -> ssl.SSLContext:
+ logger.trace(
+ f"load_ssl_context "
+ f"verify={self.verify!r} "
+ f"cert={self.cert!r} "
+ f"trust_env={self.trust_env!r} "
+ f"http2={self.http2!r}"
+ )
+
+ if self.verify:
+ return self.load_ssl_context_verify()
+ return self.load_ssl_context_no_verify()
+
+ def load_ssl_context_no_verify(self) -> ssl.SSLContext:
+ """
+ Return an SSL context for unverified connections.
+ """
+ context = self._create_default_ssl_context()
+ context.check_hostname = False
+ context.verify_mode = ssl.CERT_NONE
+ self._load_client_certs(context)
+ return context
+
+ def load_ssl_context_verify(self) -> ssl.SSLContext:
+ """
+ Return an SSL context for verified connections.
+ """
+ if self.trust_env and self.verify is True:
+ ca_bundle = get_ca_bundle_from_env()
+ if ca_bundle is not None:
+ self.verify = ca_bundle
+
+ if isinstance(self.verify, ssl.SSLContext):
+ # Allow passing in our own SSLContext object that's pre-configured.
+ context = self.verify
+ self._load_client_certs(context)
+ return context
+ elif isinstance(self.verify, bool):
+ ca_bundle_path = self.DEFAULT_CA_BUNDLE_PATH
+ elif Path(self.verify).exists():
+ ca_bundle_path = Path(self.verify)
+ else:
+ raise IOError(
+ "Could not find a suitable TLS CA certificate bundle, "
+ "invalid path: {}".format(self.verify)
+ )
+
+ context = self._create_default_ssl_context()
+ context.verify_mode = ssl.CERT_REQUIRED
+ context.check_hostname = True
+
+ # Signal support for post-handshake authentication (PHA) in TLS 1.3.
+ # Assignment raises an AttributeError if the attribute is read-only.
+ try:
+ context.post_handshake_auth = True # type: ignore
+ except AttributeError: # pragma: nocover
+ pass
+
+ # Disable using 'commonName' for SSLContext.check_hostname
+ # when the 'subjectAltName' extension isn't available.
+ try:
+ context.hostname_checks_common_name = False # type: ignore
+ except AttributeError: # pragma: nocover
+ pass
+
+ if ca_bundle_path.is_file():
+ logger.trace(f"load_verify_locations cafile={ca_bundle_path!s}")
+ context.load_verify_locations(cafile=str(ca_bundle_path))
+ elif ca_bundle_path.is_dir():
+ logger.trace(f"load_verify_locations capath={ca_bundle_path!s}")
+ context.load_verify_locations(capath=str(ca_bundle_path))
+
+ self._load_client_certs(context)
+
+ return context
+
+ def _create_default_ssl_context(self) -> ssl.SSLContext:
+ """
+ Creates the default SSLContext object that's used for both verified
+ and unverified connections.
+ """
+ context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
+ set_minimum_tls_version_1_2(context)
+ context.options |= ssl.OP_NO_COMPRESSION
+ context.set_ciphers(DEFAULT_CIPHERS)
+
+ if ssl.HAS_ALPN:
+ alpn_idents = ["http/1.1", "h2"] if self.http2 else ["http/1.1"]
+ context.set_alpn_protocols(alpn_idents)
+
+ if hasattr(context, "keylog_filename"): # pragma: nocover (Available in 3.8+)
+ keylogfile = os.environ.get("SSLKEYLOGFILE")
+ if keylogfile and self.trust_env:
+ context.keylog_filename = keylogfile # type: ignore
+
+ return context
+
+ def _load_client_certs(self, ssl_context: ssl.SSLContext) -> None:
+ """
+ Loads client certificates into our SSLContext object
+ """
+ if self.cert is not None:
+ if isinstance(self.cert, str):
+ ssl_context.load_cert_chain(certfile=self.cert)
+ elif isinstance(self.cert, tuple) and len(self.cert) == 2:
+ ssl_context.load_cert_chain(certfile=self.cert[0], keyfile=self.cert[1])
+ elif isinstance(self.cert, tuple) and len(self.cert) == 3:
+ ssl_context.load_cert_chain(
+ certfile=self.cert[0],
+ keyfile=self.cert[1],
+ password=self.cert[2], # type: ignore
+ )
+
+
+class Timeout:
+ """
+ Timeout configuration.
+
+ **Usage**:
+
+ Timeout(None) # No timeouts.
+ Timeout(5.0) # 5s timeout on all operations.
+ Timeout(None, connect=5.0) # 5s timeout on connect, no other timeouts.
+ Timeout(5.0, connect=10.0) # 10s timeout on connect. 5s timeout elsewhere.
+ Timeout(5.0, pool=None) # No timeout on acquiring connection from pool.
+ # 5s timeout elsewhere.
+ """
+
+ def __init__(
+ self,
+ timeout: typing.Union[TimeoutTypes, UnsetType] = UNSET,
+ *,
+ connect: typing.Union[None, float, UnsetType] = UNSET,
+ read: typing.Union[None, float, UnsetType] = UNSET,
+ write: typing.Union[None, float, UnsetType] = UNSET,
+ pool: typing.Union[None, float, UnsetType] = UNSET,
+ ):
+ if isinstance(timeout, Timeout):
+ # Passed as a single explicit Timeout.
+ assert connect is UNSET
+ assert read is UNSET
+ assert write is UNSET
+ assert pool is UNSET
+ self.connect = timeout.connect # type: typing.Optional[float]
+ self.read = timeout.read # type: typing.Optional[float]
+ self.write = timeout.write # type: typing.Optional[float]
+ self.pool = timeout.pool # type: typing.Optional[float]
+ elif isinstance(timeout, tuple):
+ # Passed as a tuple.
+ self.connect = timeout[0]
+ self.read = timeout[1]
+ self.write = None if len(timeout) < 3 else timeout[2]
+ self.pool = None if len(timeout) < 4 else timeout[3]
+ elif not (
+ isinstance(connect, UnsetType)
+ or isinstance(read, UnsetType)
+ or isinstance(write, UnsetType)
+ or isinstance(pool, UnsetType)
+ ):
+ self.connect = connect
+ self.read = read
+ self.write = write
+ self.pool = pool
+ else:
+ if isinstance(timeout, UnsetType):
+ raise ValueError(
+ "httpx.Timeout must either include a default, or set all "
+ "four parameters explicitly."
+ )
+ self.connect = timeout if isinstance(connect, UnsetType) else connect
+ self.read = timeout if isinstance(read, UnsetType) else read
+ self.write = timeout if isinstance(write, UnsetType) else write
+ self.pool = timeout if isinstance(pool, UnsetType) else pool
+
+ def as_dict(self) -> typing.Dict[str, typing.Optional[float]]:
+ return {
+ "connect": self.connect,
+ "read": self.read,
+ "write": self.write,
+ "pool": self.pool,
+ }
+
+ def __eq__(self, other: typing.Any) -> bool:
+ return (
+ isinstance(other, self.__class__)
+ and self.connect == other.connect
+ and self.read == other.read
+ and self.write == other.write
+ and self.pool == other.pool
+ )
+
+ def __repr__(self) -> str:
+ class_name = self.__class__.__name__
+ if len({self.connect, self.read, self.write, self.pool}) == 1:
+ return f"{class_name}(timeout={self.connect})"
+ return (
+ f"{class_name}(connect={self.connect}, "
+ f"read={self.read}, write={self.write}, pool={self.pool})"
+ )
+
+
+class Limits:
+ """
+ Configuration for limits to various client behaviors.
+
+ **Parameters:**
+
+ * **max_connections** - The maximum number of concurrent connections that may be
+ established.
+ * **max_keepalive_connections** - The maximum number of keep-alive connections
+ the pool may maintain. Should be less than or equal to `max_connections`.
+ * **keepalive_expiry** - Time limit on idle keep-alive connections, in seconds.
+ """
+
+ def __init__(
+ self,
+ *,
+ max_connections: int = None,
+ max_keepalive_connections: int = None,
+ keepalive_expiry: typing.Optional[float] = 5.0,
+ ):
+ self.max_connections = max_connections
+ self.max_keepalive_connections = max_keepalive_connections
+ self.keepalive_expiry = keepalive_expiry
+
+ def __eq__(self, other: typing.Any) -> bool:
+ return (
+ isinstance(other, self.__class__)
+ and self.max_connections == other.max_connections
+ and self.max_keepalive_connections == other.max_keepalive_connections
+ and self.keepalive_expiry == other.keepalive_expiry
+ )
+
+ def __repr__(self) -> str:
+ class_name = self.__class__.__name__
+ return (
+ f"{class_name}(max_connections={self.max_connections}, "
+ f"max_keepalive_connections={self.max_keepalive_connections}, "
+ f"keepalive_expiry={self.keepalive_expiry})"
+ )
+
+
+class Proxy:
+ def __init__(
+ self, url: URLTypes, *, headers: HeaderTypes = None, mode: str = "DEFAULT"
+ ):
+ url = URL(url)
+ headers = Headers(headers)
+
+ if url.scheme not in ("http", "https"):
+ raise ValueError(f"Unknown scheme for proxy URL {url!r}")
+ if mode not in ("DEFAULT", "FORWARD_ONLY", "TUNNEL_ONLY"):
+ raise ValueError(f"Unknown proxy mode {mode!r}")
+
+ if url.username or url.password:
+ headers.setdefault(
+ "Proxy-Authorization",
+ self._build_auth_header(url.username, url.password),
+ )
+ # Remove userinfo from the URL authority, e.g.:
+ # 'username:password@proxy_host:proxy_port' -> 'proxy_host:proxy_port'
+ url = url.copy_with(username=None, password=None)
+
+ self.url = url
+ self.headers = headers
+ self.mode = mode
+
+ def _build_auth_header(self, username: str, password: str) -> str:
+ userpass = (username.encode("utf-8"), password.encode("utf-8"))
+ token = b64encode(b":".join(userpass)).decode()
+ return f"Basic {token}"
+
+ def __repr__(self) -> str:
+ return (
+ f"Proxy(url={str(self.url)!r}, "
+ f"headers={dict(self.headers)!r}, "
+ f"mode={self.mode!r})"
+ )
+
+
+DEFAULT_TIMEOUT_CONFIG = Timeout(timeout=5.0)
+DEFAULT_LIMITS = Limits(max_connections=100, max_keepalive_connections=20)
+DEFAULT_MAX_REDIRECTS = 20
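A brief sketch of how the configuration classes above fit together (illustrative only, not part of the patch; the proxy host is a placeholder and the import assumes the vendored `packages/` directory is on `sys.path`):

```python
import httpx  # assumption: resolves to the vendored copy under packages/

# 10s to establish a connection; 5s for read, write and pool acquisition.
timeout = httpx.Timeout(5.0, connect=10.0)

# At most 10 concurrent connections, 5 of which may be kept alive between requests.
limits = httpx.Limits(max_connections=10, max_keepalive_connections=5)

# Userinfo in the proxy URL becomes a Proxy-Authorization header and is stripped
# from the URL, exactly as Proxy.__init__ above does.
proxy = httpx.Proxy("http://user:secret@proxy.local:3128")  # placeholder proxy

client = httpx.AsyncClient(timeout=timeout, limits=limits, proxies=proxy)
```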
diff --git a/packages/httpx/_content.py b/packages/httpx/_content.py
new file mode 100644
index 000000000..86f3c7c25
--- /dev/null
+++ b/packages/httpx/_content.py
@@ -0,0 +1,207 @@
+import inspect
+import warnings
+from json import dumps as json_dumps
+from typing import (
+ Any,
+ AsyncIterable,
+ AsyncIterator,
+ Dict,
+ Iterable,
+ Iterator,
+ Tuple,
+ Union,
+)
+from urllib.parse import urlencode
+
+from ._exceptions import StreamClosed, StreamConsumed
+from ._multipart import MultipartStream
+from ._transports.base import AsyncByteStream, SyncByteStream
+from ._types import RequestContent, RequestData, RequestFiles, ResponseContent
+from ._utils import peek_filelike_length, primitive_value_to_str
+
+
+class ByteStream(AsyncByteStream, SyncByteStream):
+ def __init__(self, stream: bytes) -> None:
+ self._stream = stream
+
+ def __iter__(self) -> Iterator[bytes]:
+ yield self._stream
+
+ async def __aiter__(self) -> AsyncIterator[bytes]:
+ yield self._stream
+
+
+class IteratorByteStream(SyncByteStream):
+ def __init__(self, stream: Iterable[bytes]):
+ self._stream = stream
+ self._is_stream_consumed = False
+ self._is_generator = inspect.isgenerator(stream)
+
+ def __iter__(self) -> Iterator[bytes]:
+ if self._is_stream_consumed and self._is_generator:
+ raise StreamConsumed()
+
+ self._is_stream_consumed = True
+ for part in self._stream:
+ yield part
+
+
+class AsyncIteratorByteStream(AsyncByteStream):
+ def __init__(self, stream: AsyncIterable[bytes]):
+ self._stream = stream
+ self._is_stream_consumed = False
+ self._is_generator = inspect.isasyncgen(stream)
+
+ async def __aiter__(self) -> AsyncIterator[bytes]:
+ if self._is_stream_consumed and self._is_generator:
+ raise StreamConsumed()
+
+ self._is_stream_consumed = True
+ async for part in self._stream:
+ yield part
+
+
+class UnattachedStream(AsyncByteStream, SyncByteStream):
+ """
+ If a request or response is serialized using pickle, then it is no longer
+ attached to a stream for I/O purposes. Any stream operations should result
+ in `httpx.StreamClosed`.
+ """
+
+ def __iter__(self) -> Iterator[bytes]:
+ raise StreamClosed()
+
+ async def __aiter__(self) -> AsyncIterator[bytes]:
+ raise StreamClosed()
+ yield b"" # pragma: nocover
+
+
+def encode_content(
+ content: Union[str, bytes, Iterable[bytes], AsyncIterable[bytes]]
+) -> Tuple[Dict[str, str], Union[SyncByteStream, AsyncByteStream]]:
+
+ if isinstance(content, (bytes, str)):
+ body = content.encode("utf-8") if isinstance(content, str) else content
+ content_length = len(body)
+ headers = {"Content-Length": str(content_length)} if body else {}
+ return headers, ByteStream(body)
+
+ elif isinstance(content, Iterable):
+ content_length_or_none = peek_filelike_length(content)
+
+ if content_length_or_none is None:
+ headers = {"Transfer-Encoding": "chunked"}
+ else:
+ headers = {"Content-Length": str(content_length_or_none)}
+ return headers, IteratorByteStream(content) # type: ignore
+
+ elif isinstance(content, AsyncIterable):
+ headers = {"Transfer-Encoding": "chunked"}
+ return headers, AsyncIteratorByteStream(content)
+
+ raise TypeError(f"Unexpected type for 'content', {type(content)!r}")
+
+
+def encode_urlencoded_data(
+ data: dict,
+) -> Tuple[Dict[str, str], ByteStream]:
+ plain_data = []
+ for key, value in data.items():
+ if isinstance(value, (list, tuple)):
+ plain_data.extend([(key, primitive_value_to_str(item)) for item in value])
+ else:
+ plain_data.append((key, primitive_value_to_str(value)))
+ body = urlencode(plain_data, doseq=True).encode("utf-8")
+ content_length = str(len(body))
+ content_type = "application/x-www-form-urlencoded"
+ headers = {"Content-Length": content_length, "Content-Type": content_type}
+ return headers, ByteStream(body)
+
+
+def encode_multipart_data(
+ data: dict, files: RequestFiles, boundary: bytes = None
+) -> Tuple[Dict[str, str], MultipartStream]:
+ multipart = MultipartStream(data=data, files=files, boundary=boundary)
+ headers = multipart.get_headers()
+ return headers, multipart
+
+
+def encode_text(text: str) -> Tuple[Dict[str, str], ByteStream]:
+ body = text.encode("utf-8")
+ content_length = str(len(body))
+ content_type = "text/plain; charset=utf-8"
+ headers = {"Content-Length": content_length, "Content-Type": content_type}
+ return headers, ByteStream(body)
+
+
+def encode_html(html: str) -> Tuple[Dict[str, str], ByteStream]:
+ body = html.encode("utf-8")
+ content_length = str(len(body))
+ content_type = "text/html; charset=utf-8"
+ headers = {"Content-Length": content_length, "Content-Type": content_type}
+ return headers, ByteStream(body)
+
+
+def encode_json(json: Any) -> Tuple[Dict[str, str], ByteStream]:
+ body = json_dumps(json).encode("utf-8")
+ content_length = str(len(body))
+ content_type = "application/json"
+ headers = {"Content-Length": content_length, "Content-Type": content_type}
+ return headers, ByteStream(body)
+
+
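Each encoder above returns a `(headers, stream)` pair; a quick sanity check (illustrative only, not part of the patch; the imports assume the vendored `httpx` package is importable):

```python
from httpx._content import encode_json, encode_urlencoded_data

headers, stream = encode_json({"answer": 42})
# json_dumps produces 14 bytes here, so Content-Length is "14".
assert headers == {"Content-Length": "14", "Content-Type": "application/json"}

headers, stream = encode_urlencoded_data({"a": "1", "b": ["2", "3"]})
# doseq=True expands list values: a=1&b=2&b=3
assert b"".join(stream) == b"a=1&b=2&b=3"
```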
+def encode_request(
+ content: RequestContent = None,
+ data: RequestData = None,
+ files: RequestFiles = None,
+ json: Any = None,
+ boundary: bytes = None,
+) -> Tuple[Dict[str, str], Union[SyncByteStream, AsyncByteStream]]:
+ """
+ Handles encoding the given `content`, `data`, `files`, and `json`,
+ returning a two-tuple of (headers, stream).
+ """
+ if data is not None and not isinstance(data, dict):
+ # We prefer to separate `content=`
+ # for raw request content, and `data=