Skip to content


Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
tree: aad4ef3d73
Fetching contributors…

Cannot retrieve contributors at this time

executable file 149 lines (115 sloc) 4.502 kb
# Checks whether or not a domain is a primary authority for BrowserID
# Return codes:
# 0 - domain is correctly setup as a primary
# 1 - domain doesn't advertise itself as a primary
# 2 - domain configuration problem
# 3 - tool error
from common import stringify_time
import httplib2
import json
import socket
import sys
import time
# System CA bundle used to verify the domain's TLS certificate
# (Debian/Ubuntu path — adjust for other distributions).
CACERTS = '/etc/ssl/certs/ca-certificates.crt'
URL_TIMEOUT = 5 # per-request timeout, in seconds
# Well-known path where a BrowserID primary publishes its support document.
WELLKNOWN_FILENAME = '/.well-known/browserid'
def fetch_url(domain, path):
    """Fetch https://<domain><path> over verified TLS.

    Returns a (response, content, error) tuple: response/content are
    httplib2's values (None when the request never completed) and error
    is True when the connection or request raised an exception.
    """
    url = 'https://%s%s' % (domain, path)
    client = httplib2.Http(timeout=URL_TIMEOUT, ca_certs=CACERTS)
    response = content = None
    error = False
    try:
        response, content = client.request(url, 'GET')
    except socket.error as e:
        # Connection-level failure (refused, timed out, no route, ...).
        print("%s doesn't listen on port 443: %s" % (domain, e))
        error = True
    except httplib2.HttpLib2Error as e:
        # HTTP-level failure reported by httplib2 (e.g. redirect errors).
        print('Error while trying to retrieve the well-known file: %s' % e)
        error = True
    return (response, content, error)
def parse_wellknown_file(domain, wellknown_content):
    """Validate the body of a domain's /.well-known/browserid document.

    Parses the JSON, verifies that any advertised authentication and
    provisioning pages respond with HTTP 200 over HTTPS, and checks that
    a public key is present.  Returns 0 when the document is consistent
    (primary or secondary), 2 on any configuration problem.
    """
    try:
        details = json.loads(wellknown_content)
    except Exception as e:
        print("Error parsing well-known file: %s" % e)
        return 2
    # TODO: check for 'authority' handle delegation to that other domain
    auth_page = None
    if 'authentication' in details:
        auth_page = details['authentication']
        (response, content, error) = fetch_url(domain, auth_page)
        if error or int(response.status) != 200:
            print("%s points to an invalid authentication page: https://%s%s" % (domain, domain, auth_page))
            return 2
    prov_page = None
    if 'provisioning' in details:
        prov_page = details['provisioning']
        (response, content, error) = fetch_url(domain, prov_page)
        if error or int(response.status) != 200:
            print("%s points to an invalid provisioning page: https://%s%s" % (domain, domain, prov_page))
            # BUG FIX: the original `return error` returned False (== 0,
            # success) when the page answered with a non-200 status; treat
            # it as a configuration problem like the authentication branch.
            return 2
    if 'public-key' not in details:
        print("%s is missing a public key." % domain)
        return 2
    if auth_page and prov_page:
        print('%s is a primary authority for BrowserID.' % domain)
        print(' Authentication: https://%s%s' % (domain, auth_page))
        print(' Provisioning: https://%s%s' % (domain, prov_page))
    elif not auth_page and not prov_page:
        print('%s looks like a secondary authority for BrowserID.' % domain)
    elif prov_page:
        # Advertising only one of the two pages is inconsistent.
        print('%s has a provisioning page but no authentication page.' % domain)
        return 2
    elif auth_page:
        print('%s has an authentication page but no provisioning page.' % domain)
        return 2
    # Typo fix: the original printed "algorightm".
    print(' Public key: (algorithm=%s)' % details['public-key']['algorithm'])
    return 0
def parse_response_headers(headers):
    """Report when the well-known document expires, based on its HTTP
    caching headers (Cache-Control max-age, then Expires).  Returns 0.

    NOTE: if Cache-Control is present but carries no max-age directive,
    the Expires header is deliberately not consulted (elif chain).
    """
    if 'cache-control' in headers:
        directives = headers['cache-control'].split(', ')
        for directive in directives:
            if not directive.lower().startswith('max-age='):
                continue
            seconds = int(directive[len('max-age='):])
            # stringify_time() expects milliseconds since the epoch.
            expiry_ms = (time.time() + seconds) * 1000
            print(' Expiry: %s' % stringify_time(expiry_ms))
            return 0
    elif 'expires' in headers:
        # TODO: parse this format and then use stringify_time()
        print(' Expiry: %s' % headers['expires'])
        return 0
    print(' Expiry: unknown')
    return 0
def check_domain(domain):
    """Check whether `domain` is configured as a BrowserID primary.

    Returns the tool's exit code: 0 when correctly set up, 1 when the
    domain does not advertise itself as a primary (no well-known file),
    2 on a configuration or connectivity problem.
    """
    (response, content, error) = fetch_url(domain, WELLKNOWN_FILENAME)
    if error:
        # BUG FIX: the original returned the boolean `error` (True == 1,
        # i.e. "not a primary"); a connection failure is a configuration
        # problem, so report 2 instead.
        return 2
    if 404 == int(response['status']):
        print("No '%s' file available on %s." % (WELLKNOWN_FILENAME, domain))
        return 1
    if int(response['status']) != 200:
        print('Received a %s status code while trying to retrieve the well-known file.' % response['status'])
        return 2
    # httplib2's Response object is a dict of headers; use .get() so a
    # server that omits Content-Type entirely doesn't raise a KeyError.
    if 'application/json' not in response.get('content-type', ''):
        print("Received a '%s' response (instead of 'application/json') while trying to retrieve the well-known file." % response.get('content-type'))
        return 2
    exit_code = parse_wellknown_file(domain, content)
    if exit_code != 0:
        return exit_code
    return parse_response_headers(response)
def main(argv=None):
if argv is None:
argv = sys.argv
domain = None
if len(argv) > 1:
domain = argv[1]
print "Usage: %s <domain>" % argv[0]
return 3
return check_domain(domain)
if __name__ == "__main__":
    # Body lost in this copy; conventional form — propagate main()'s
    # return value as the process exit code.
    sys.exit(main())
Jump to Line
Something went wrong with that request. Please try again.