diff --git a/granary/atom.py b/granary/atom.py
index 0888b2d8..1f9aa019 100644
--- a/granary/atom.py
+++ b/granary/atom.py
@@ -6,7 +6,7 @@
 import collections
 import os
 import re
-import urlparse
+import urllib.parse
 import xml.sax.saxutils
 
 import jinja2
@@ -14,8 +14,8 @@
 import mf2util
 from oauth_dropins.webutil import util
 
-import microformats2
-import source
+from . import microformats2
+from . import source
 
 ATOM_TEMPLATE_FILE = 'user_feed.atom'
 # stolen from django.utils.html
@@ -87,10 +87,10 @@ class Defaulter(collections.defaultdict):
   def __init__(self, **kwargs):
     super(Defaulter, self).__init__(Defaulter, **{
       k: (Defaulter(**v) if isinstance(v, dict) else v)
-      for k, v in kwargs.items()})
+      for k, v in list(kwargs.items())})
 
   def __unicode__(self):
-    return super(Defaulter, self).__unicode__() if self else u''
+    return super(Defaulter, self).__unicode__() if self else ''
 
 env = jinja2.Environment(loader=jinja2.PackageLoader(__package__, 'templates'),
                          autoescape=True)
@@ -127,6 +127,6 @@ def html_to_atom(html, url=None, **kwargs):
 
 def _remove_query_params(url):
-  parsed = list(urlparse.urlparse(url))
+  parsed = list(urllib.parse.urlparse(url))
   parsed[4] = ''
-  return urlparse.urlunparse(parsed)
+  return urllib.parse.urlunparse(parsed)
diff --git a/granary/facebook.py b/granary/facebook.py
index 90fe09fb..c638b644 100644
--- a/granary/facebook.py
+++ b/granary/facebook.py
@@ -48,14 +48,14 @@
 import json
 import logging
 import re
-import urllib
-import urllib2
-import urlparse
+import urllib.request, urllib.parse, urllib.error
+import urllib.request, urllib.error, urllib.parse
+import urllib.parse
 
 import mf2util
-import appengine_config
+from . import appengine_config
 from oauth_dropins.webutil import util
-import source
+from . import source
 
 # Since API v2.4, we need to explicitly ask for the fields we want from most API
 # endpoints with ?fields=...
@@ -166,11 +166,11 @@
   'rsvp-interested': API_PUBLISH_RSVP_INTERESTED,
 }
 
 REACTION_CONTENT = {
-  'LOVE': u'❤️',
-  'WOW': u'😮',
-  'HAHA': u'😆',
-  'SAD': u'😢',
-  'ANGRY': u'😡',
+  'LOVE': '❤️',
+  'WOW': '😮',
+  'HAHA': '😆',
+  'SAD': '😢',
+  'ANGRY': '😡',
   # nothing for LIKE (it's a like :P) or for NONE
 }
@@ -286,7 +286,7 @@ def get_activities_response(self, user_id=None, group_id=None, app_id=None,
         resp = self.urlopen(url, headers=headers, _as=None)
         etag = resp.info().get('ETag')
         posts = self._as(list, json.loads(resp.read()))
-      except urllib2.HTTPError, e:
+      except urllib.error.HTTPError as e:
         if e.code == 304:  # Not Modified, from a matching ETag
           posts = []
         else:
@@ -334,7 +334,7 @@ def get_activities_response(self, user_id=None, group_id=None, app_id=None,
       # some sharedposts requests 400, not sure why.
       # https://github.com/snarfed/bridgy/issues/348
      with util.ignore_http_4xx_error():
-        for id, shares in self._split_id_requests(API_SHARES, fetch_shares_ids).items():
+        for id, shares in list(self._split_id_requests(API_SHARES, fetch_shares_ids).items()):
          activity = id_to_activity.get(id)
          if activity:
            activity['object'].setdefault('tags', []).extend(
@@ -343,8 +343,8 @@ def get_activities_response(self, user_id=None, group_id=None, app_id=None,
     if fetch_replies and fetch_comments_ids:
       # some comments requests 400, not sure why.
      with util.ignore_http_4xx_error():
-        for id, comments in self._split_id_requests(API_COMMENTS_ALL,
-                                                    fetch_comments_ids).items():
+        for id, comments in list(self._split_id_requests(API_COMMENTS_ALL,
+                                                         fetch_comments_ids).items()):
          activity = id_to_activity.get(id)
          if activity:
            replies = activity['object'].setdefault('replies', {}
@@ -408,7 +408,7 @@ def _merge_photos(self, posts):
         photo['privacy'] = 'custom'  # ie unknown
 
     return ([p for p in posts if not p.get('object_id')] +
-            posts_by_obj_id.values() + photos)
+            list(posts_by_obj_id.values()) + photos)
 
   def _split_id_requests(self, api_call, ids):
     """Splits an API call into multiple to stay under the MAX_IDS limit per call.
@@ -424,7 +424,7 @@ def _split_id_requests(self, api_call, ids):
     results = {}
     for i in range(0, len(ids), MAX_IDS):
       resp = self.urlopen(api_call % ','.join(ids[i:i + MAX_IDS]))
-      for id, objs in resp.items():
+      for id, objs in list(resp.items()):
         # objs is usually a dict but sometimes a boolean. (oh FB, never change!)
         results.setdefault(id, []).extend(self._as(dict, objs).get('data', []))
@@ -486,7 +486,7 @@ def get_comment(self, comment_id, activity_id=None, activity_author_id=None,
     """
     try:
       resp = self.urlopen(API_COMMENT % comment_id)
-    except urllib2.HTTPError, e:
+    except urllib.error.HTTPError as e:
       if e.code == 400 and '_' in comment_id:
         # Facebook may want us to ask for this without the other prefixed id(s)
         resp = self.urlopen(API_COMMENT % comment_id.split('_')[-1])
@@ -636,7 +636,7 @@ def _create(self, obj, preview=None, include_link=source.OMIT_LINK,
     name = obj.get('displayName')
     if name and mf2util.is_name_a_title(name, content):
-      content = name + u"\n\n" + content
+      content = name + "\n\n" + content
 
     people = self._get_person_tags(obj)
@@ -677,7 +677,7 @@ def _create(self, obj, preview=None, include_link=source.OMIT_LINK,
         if image_url:
           msg_data['attachment_url'] = image_url
         resp = self.urlopen(API_PUBLISH_COMMENT % base_id,
-                            data=urllib.urlencode(msg_data))
+                            data=urllib.parse.urlencode(msg_data))
         url = self.comment_url(base_id, resp['id'],
                                post_author_id=base_obj.get('author', {}).get('id'))
         resp.update({'url': url, 'type': 'comment'})
@@ -772,7 +772,7 @@ def _create(self, obj, preview=None, include_link=source.OMIT_LINK,
         # https://developers.facebook.com/docs/graph-api/reference/user/feed#pubfields
         msg_data['tags'] = ','.join(tag['id'] for tag in people)
 
-      resp = self.urlopen(api_call, data=urllib.urlencode(msg_data))
+      resp = self.urlopen(api_call, data=urllib.parse.urlencode(msg_data))
       resp.update({'url': self.post_url(resp), 'type': 'post'})
       if video_url and not resp.get('success', True):
         msg = 'Video upload failed.'
@@ -817,7 +817,7 @@ def _get_person_tags(self, obj):
         tag['id'] = id
         people[id] = tag
 
-    return people.values()
+    return list(people.values())
 
   def create_notification(self, user_id, text, link):
     """Sends the authenticated user a notification.
@@ -843,7 +843,7 @@ def create_notification(self, user_id, text, link):
                          appengine_config.FACEBOOK_APP_SECRET),
     }
     url = API_BASE + API_NOTIFICATION % user_id
-    resp = util.urlopen(urllib2.Request(url, data=urllib.urlencode(params)))
+    resp = util.urlopen(urllib.request.Request(url, data=urllib.parse.urlencode(params)))
     logging.debug('Response: %s %s', resp.getcode(), resp.read())
 
   def post_url(self, post):
@@ -904,8 +904,8 @@ def base_object(self, obj, verb=None, resolve_numeric_id=False):
         base_obj = self.user_to_actor(self.urlopen(base_id))
 
     try:
-      parsed = urlparse.urlparse(url)
-      params = urlparse.parse_qs(parsed.query)
+      parsed = urllib.parse.urlparse(url)
+      params = urllib.parse.parse_qs(parsed.query)
       assert parsed.path.startswith('/')
       path = parsed.path.strip('/')
       path_parts = path.split('/')
@@ -920,7 +920,7 @@ def base_object(self, obj, verb=None, resolve_numeric_id=False):
         # populate image, displayName, etc.
         if not base_obj.get('username') and not util.is_int(base_id):
           base_obj['username'] = base_id
-        base_obj.update({k: v for k, v in self.user_to_actor(base_obj).items()
+        base_obj.update({k: v for k, v in list(self.user_to_actor(base_obj).items())
                          if k not in base_obj})
 
       elif len(path_parts) >= 3 and path_parts[1] == 'posts':
@@ -955,7 +955,7 @@ def base_object(self, obj, verb=None, resolve_numeric_id=False):
           # add author user id prefix. https://github.com/snarfed/bridgy/issues/229
           base_obj['id'] = '%s_%s' % (author['numeric_id'], base_id)
 
-    except BaseException, e:
+    except BaseException as e:
       logging.error(
         "Couldn't parse object URL %s : %s. Falling back to default logic.",
         url, e)
@@ -1074,7 +1074,7 @@ def post_to_object(self, post, is_comment=False):
     # types, e.g. comments.
     message_tags = post.get('message_tags', [])
     if isinstance(message_tags, dict):
-      message_tags = sum(message_tags.values(), [])  # flatten
+      message_tags = sum(list(message_tags.values()), [])  # flatten
     elif not isinstance(message_tags, list):
       message_tags = list(message_tags)  # fingers crossed! :P
@@ -1366,7 +1366,7 @@ def event_to_object(self, event, rsvps=None):
       for rsvp in event.get(field, {}).get('data', []):
         rsvp = self.rsvp_to_object(rsvp, type=field, event=event)
         id_to_rsvp[rsvp['id']] = rsvp
-    self.add_rsvps_to_event(obj, id_to_rsvp.values())
+    self.add_rsvps_to_event(obj, list(id_to_rsvp.values()))
 
     return self.postprocess_object(obj)
@@ -1684,7 +1684,7 @@ def urlopen(self, url, _as=dict, **kwargs):
     log_url = url
     if self.access_token:
       url = util.add_query_params(url, [('access_token', self.access_token)])
-    resp = util.urlopen(urllib2.Request(url, **kwargs))
+    resp = util.urlopen(urllib.request.Request(url, **kwargs))
     if _as is None:
       return resp
@@ -1745,7 +1745,7 @@ def urlopen_batch(self, urls):
       code = int(resp.get('code', 0))
       body = resp.get('body')
       if code / 100 in (4, 5):
-        raise urllib2.HTTPError(url, code, body, resp.get('headers'), None)
+        raise urllib.error.HTTPError(url, code, body, resp.get('headers'), None)
       bodies.append(body)
 
     return bodies
@@ -1786,7 +1786,7 @@ def urlopen_batch_full(self, requests):
       req['method'] = 'GET'
       if 'headers' in req:
         req['headers'] = [{'name': n, 'value': v}
-                          for n, v in req['headers'].items()]
+                          for n, v in list(req['headers'].items())]
 
     data = 'batch=' + json.dumps(util.trim_nulls(requests),
                                  separators=(',', ':'))  # no whitespace
diff --git a/granary/flickr.py b/granary/flickr.py
index 66cda78c..cfc3a9bf 100644
--- a/granary/flickr.py
+++ b/granary/flickr.py
@@ -20,14 +20,14 @@
 import json
 import logging
 import requests
-import source
+from . import source
 import sys
 
 import mf2py
 import mf2util
-import urllib2
-import urlparse
+import urllib.request, urllib.error, urllib.parse
+import urllib.parse
 
-import appengine_config
+from . import appengine_config
 from oauth_dropins.webutil import util
 from oauth_dropins import flickr_auth
@@ -312,7 +312,7 @@ def _get_person_tags(self, obj):
         tag = copy.copy(tag)
         tag['id'] = id
         people[id] = tag
-    return people.values()
+    return list(people.values())
 
   def get_activities_response(self, user_id=None, group_id=None, app_id=None,
                               activity_id=None, start_index=0, count=0,
@@ -442,7 +442,7 @@ def user_to_actor(self, resp):
        obj['url'] = next(
          (u for u in urls if not u.startswith('https://www.flickr.com/')), None)
-      except urllib2.URLError, e:
+      except urllib.error.URLError as e:
        logging.warning('could not fetch user homepage %s', profile_url)
 
     return self.postprocess_object(obj)
@@ -512,7 +512,7 @@ def photo_to_activity(self, photo):
       'url': photo_permalink,
       'id': self.tag_uri(photo.get('id')),
       'image': {
-        'url': u'https://farm{}.staticflickr.com/{}/{}_{}_{}.jpg'.format(
+        'url': 'https://farm{}.staticflickr.com/{}/{}_{}_{}.jpg'.format(
          photo.get('farm'), photo.get('server'),
          photo.get('id'), photo.get('secret'), 'b'),
       },
@@ -555,14 +555,14 @@ def photo_to_activity(self, photo):
       activity['object']['tags'] = [{
         'objectType': 'hashtag',
         'id': self.tag_uri(tag.get('id')),
-        'url': u'https://www.flickr.com/search?tags={}'.format(
+        'url': 'https://www.flickr.com/search?tags={}'.format(
          tag.get('_content')),
         'displayName': tag.get('raw'),
       } for tag in photo.get('tags', {}).get('tag', [])]
-    elif isinstance(photo.get('tags'), basestring):
+    elif isinstance(photo.get('tags'), str):
       activity['object']['tags'] = [{
         'objectType': 'hashtag',
-        'url': u'https://www.flickr.com/search?tags={}'.format(
+        'url': 'https://www.flickr.com/search?tags={}'.format(
          tag.strip()),
         'displayName': tag.strip(),
       } for tag in photo.get('tags').split(' ') if tag.strip()]
@@ -606,10 +606,10 @@ def like_to_object(self, person, photo_activity):
         },
       },
       'created': util.maybe_timestamp_to_rfc3339(photo_activity.get('favedate')),
-      'url': u'{}#liked-by-{}'.format(
+      'url': '{}#liked-by-{}'.format(
        photo_activity.get('url'), person.get('nsid')),
       'object': {'url': photo_activity.get('url')},
-      'id': self.tag_uri(u'{}_liked_by_{}'.format(
+      'id': self.tag_uri('{}_liked_by_{}'.format(
        photo_activity.get('flickr_id'), person.get('nsid'))),
       'objectType': 'activity',
       'verb': 'like',
@@ -656,8 +656,8 @@ def get_user_image(self, farm, server, author):
     ref: https://www.flickr.com/services/api/misc.buddyicons.html
     """
     if server == 0:
-      return u'https://www.flickr.com/images/buddyicon.gif'
-    return u'https://farm{}.staticflickr.com/{}/buddyicons/{}.jpg'.format(
+      return 'https://www.flickr.com/images/buddyicon.gif'
+    return 'https://farm{}.staticflickr.com/{}/buddyicons/{}.jpg'.format(
       farm, server, author)
 
   def user_id(self):
@@ -710,13 +710,13 @@ def photo_url(self, user_id, photo_id):
     Returns:
       string, the photo URL
     """
-    return u'https://www.flickr.com/photos/%s/%s/' % (user_id, photo_id)
+    return 'https://www.flickr.com/photos/%s/%s/' % (user_id, photo_id)
 
   @classmethod
   def post_id(cls, url):
     """Used when publishing comments or favorites.
 
     Flickr photo ID is the 3rd path component rather than the first.
""" - parts = urlparse.urlparse(url).path.split('/') + parts = urllib.parse.urlparse(url).path.split('/') if len(parts) >= 4 and parts[1] == 'photos': return parts[3] diff --git a/granary/googleplus.py b/granary/googleplus.py index 4a2d5ec0..95e606dd 100644 --- a/granary/googleplus.py +++ b/granary/googleplus.py @@ -13,8 +13,8 @@ import json import re -import appengine_config -import source +from . import appengine_config +from . import source from apiclient.errors import HttpError from apiclient.http import BatchHttpRequest @@ -128,7 +128,7 @@ def request_with_etag(*args, **kwargs): resp = call.execute(http) activities = resp.get('items', []) etag = resp.get('etag') - except HttpError, e: + except HttpError as e: if e.resp.status == 304: # Not Modified, from a matching ETag activities = [] else: @@ -328,7 +328,7 @@ def html_to_activities(self, html): html = re.sub(r'([,[])\s*([],])', r'\1null\2', html) data = json.loads(html)[1][7][1:] - data = [d[6].values()[0] for d in data if len(d) >= 7 and d[6]] + data = [list(d[6].values())[0] for d in data if len(d) >= 7 and d[6]] activities = [] for d in data: @@ -392,7 +392,7 @@ def html_to_activities(self, html): 'image': {'url': att[1]}, 'displayName': att[2], 'content': att[3], - } for att in attachments.values()] + } for att in list(attachments.values())] self.postprocess_object(activity['object']) activities.append(super(GooglePlus, self).postprocess_activity(activity)) diff --git a/granary/instagram.py b/granary/instagram.py index 054ce2dd..6845d364 100644 --- a/granary/instagram.py +++ b/granary/instagram.py @@ -15,15 +15,15 @@ import operator import re import string -import urllib -import urllib2 -import urlparse +import urllib.request, urllib.parse, urllib.error +import urllib.request, urllib.error, urllib.parse +import urllib.parse import xml.sax.saxutils -import appengine_config +from . import appengine_config from oauth_dropins.webutil import util import requests -import source +from . import source # Maps Instagram media type to ActivityStreams objectType. OBJECT_TYPES = {'image': 'photo', 'video': 'video'} @@ -91,7 +91,7 @@ def urlopen(self, url, **kwargs): if self.access_token: # TODO add access_token to the data parameter for POST requests url = util.add_query_params(url, [('access_token', self.access_token)]) - resp = util.urlopen(urllib2.Request(url, **kwargs)) + resp = util.urlopen(urllib.request.Request(url, **kwargs)) return resp if kwargs.get('data') else json.loads(resp.read()).get('data') @classmethod @@ -210,7 +210,7 @@ def get_activities_response(self, user_id=None, group_id=None, app_id=None, activities += [self.like_to_object(user, l['id'], l['link']) for l in liked] - except urllib2.HTTPError, e: + except urllib.error.HTTPError as e: code, body = util.interpret_http_exception(e) # instagram api should give us back a json block describing the # error. 
@@ -403,7 +403,7 @@ def _create(self, obj, include_link=source.OMIT_LINK, preview=None,
         description='comment on '
                     'this post:\n%s' % (base_url, self.embed_post(base_obj)))
-      self.urlopen(API_COMMENT_URL % base_id, data=urllib.urlencode({
+      self.urlopen(API_COMMENT_URL % base_id, data=urllib.parse.urlencode({
         'access_token': self.access_token,
         'text': content,
       }))
@@ -437,7 +437,7 @@ def _create(self, obj, include_link=source.OMIT_LINK, preview=None,
       logging.info('posting like for media id id=%s, url=%s',
                    base_id, base_url)
       # no response other than success/failure
-      self.urlopen(API_MEDIA_LIKES_URL % base_id, data=urllib.urlencode({
+      self.urlopen(API_MEDIA_LIKES_URL % base_id, data=urllib.parse.urlencode({
         'access_token': self.access_token
       }))
       # TODO use the stored user_json rather than looking it up each time.
@@ -507,14 +507,14 @@ def media_to_object(self, media):
         # ActivityStreams 2.0 allows image to be a JSON array.
         # http://jasnell.github.io/w3c-socialwg-activitystreams/activitystreams2.html#link
         'image': sorted(
-          media.get('images', {}).values(),
+          list(media.get('images', {}).values()),
           # sort by size, descending, since atom.py
           # uses the first image in the list.
           key=operator.itemgetter('width'), reverse=True),
         # video object defined in
         # http://activitystrea.ms/head/activity-schema.html#rfc.section.4.18
         'stream': sorted(
-          media.get('videos', {}).values(),
+          list(media.get('videos', {}).values()),
           key=operator.itemgetter('width'), reverse=True),
       }],
       # comments go in the replies field, according to the "Responses for
@@ -682,7 +682,7 @@ def id_to_shortcode(id):
   if not id:
     return None
 
-  if isinstance(id, basestring):
+  if isinstance(id, str):
     parts = id.split('_')
     if not util.is_int(parts[0]):
       return id
diff --git a/granary/microformats2.py b/granary/microformats2.py
index ac5259ee..864fae77 100644
--- a/granary/microformats2.py
+++ b/granary/microformats2.py
@@ -7,7 +7,7 @@
 import copy
 import itertools
 import logging
-import urlparse
+import urllib.parse
 import string
 import re
 import xml.sax.saxutils
@@ -15,7 +15,7 @@
 import mf2py
 import mf2util
 from oauth_dropins.webutil import util
-import source
+from . import source
 
 HENTRY = string.Template("""\
@@ -61,7 +61,7 @@ def get_string_urls(objs):
   """
   urls = []
   for item in objs:
-    if isinstance(item, basestring):
+    if isinstance(item, str):
       urls.append(item)
     else:
       itemtype = [x for x in item.get('type', []) if x.startswith('h-')]
@@ -258,7 +258,7 @@ def json_to_object(mf2):
   def absolute_urls(prop):
     return [{'url': url} for url in get_string_urls(props.get(prop, []))
             # filter out relative and invalid URLs (mf2py gives absolute urls)
-            if urlparse.urlparse(url).netloc]
+            if urllib.parse.urlparse(url).netloc]
 
   urls = props.get('url') and get_string_urls(props.get('url'))
@@ -278,7 +278,7 @@ def absolute_urls(prop):
     'location': json_to_object(prop.get('location')),
     'replies': {'items': [json_to_object(c) for c in props.get('comment', [])]},
     'tags': [{'objectType': 'hashtag', 'displayName': cat}
-             if isinstance(cat, basestring)
+             if isinstance(cat, str)
             else json_to_object(cat)
             for cat in props.get('category', [])],
   }
@@ -425,7 +425,7 @@ def json_to_html(obj, parent_props=None):
   for mftype in ['like', 'repost']:
     # having like-of or repost-of makes this a like or repost.
     for target in props.get(mftype + '-of', []):
-      if isinstance(target, basestring):
+      if isinstance(target, str):
         children.append('' % (mftype, target))
       else:
         children.append(json_to_html(target, ['u-' + mftype + '-of']))
@@ -641,7 +641,7 @@ def render_content(obj, include_location=True, synthesize_content=True):
         target.get('url', '#'), author.get('username'))
     else:
       # image looks bad in the simplified rendering
-      author = {k: v for k, v in author.iteritems() if k != 'image'}
+      author = {k: v for k, v in author.items() if k != 'image'}
       content += '%s %s by %s' % (
         verb, target.get('url', '#'),
         target.get('displayName', target.get('title', 'a post')),
@@ -668,7 +668,7 @@ def render_content(obj, include_location=True, synthesize_content=True):
   # render the rest
   content += tags_to_html(tags.pop('hashtag', []), 'p-category')
   content += tags_to_html(tags.pop('mention', []), 'u-mention')
-  content += tags_to_html(sum(tags.values(), []), 'tag')
+  content += tags_to_html(sum(list(tags.values()), []), 'tag')
 
   return content
diff --git a/granary/source.py b/granary/source.py
index 3ec94c84..72e417a1 100644
--- a/granary/source.py
+++ b/granary/source.py
@@ -17,13 +17,13 @@
 import logging
 import mimetypes
 import re
-import urlparse
+import urllib.parse
 
 import html2text
 from bs4 import BeautifulSoup
 import requests
 
-import appengine_config
+from . import appengine_config
 from oauth_dropins.webutil import util
 
 ME = '@me'
@@ -132,7 +132,7 @@ def __new__(meta, name, bases, class_dict):
     return cls
 
 
-class Source(object):
+class Source(object, metaclass=SourceMeta):
   """Abstract base class for a source (e.g. Facebook, Twitter).
 
   Concrete subclasses must override the class constants below and implement
@@ -148,7 +148,6 @@ class Source(object):
   placeholder for the post URL and (optionally) a %(content)s placeholder for
   the post content.
   """
-  __metaclass__ = SourceMeta
 
   RESPONSE_CACHE_TIME = 5 * 60  # 5m
@@ -561,10 +560,9 @@ def original_post_discovery(activity, domains=None, cache=None,
     candidates += [match.expand(r'http://\1/\2') for match in
                    Source._PERMASHORTCITATION_RE.finditer(content)]
 
-  candidates = set(filter(None,
-                          (util.clean_url(url) for url in candidates
+  candidates = set([_f for _f in (util.clean_url(url) for url in candidates
     # heuristic: ellipsized URLs are probably incomplete, so omit them.
-    if url and not url.endswith('...') and not url.endswith(u'…'))))
+    if url and not url.endswith('...') and not url.endswith('…')) if _f])
 
   # check for redirect and add their final urls
   redirects = {}  # maps final URL to original URL for redirects
@@ -579,7 +577,7 @@ def original_post_discovery(activity, domains=None, cache=None,
   originals = set()
   mentions = set()
   for url in util.dedupe_urls(candidates):
-    if url in redirects.values():
+    if url in list(redirects.values()):
       # this is a redirected original URL. postpone and handle it when we hit
       # its final URL so that we know the final domain.
       continue
@@ -659,7 +657,7 @@ def get_rsvps_from_event(event):
   author = event.get('author')
 
   rsvps = []
-  for verb, field in RSVP_TO_EVENT.items():
+  for verb, field in list(RSVP_TO_EVENT.items()):
     for actor in event.get(field, []):
       rsvp = {'objectType': 'activity',
               'verb': verb,
@@ -792,7 +790,7 @@ def post_id(cls, url):
     Returns:
       string, or None
     """
-    return urlparse.urlparse(url).path.rstrip('/').rsplit('/', 1)[-1] or None
+    return urllib.parse.urlparse(url).path.rstrip('/').rsplit('/', 1)[-1] or None
 
   def _content_for_create(self, obj, ignore_formatting=False, prefer_name=False,
                           strip_first_video_tag=False):
@@ -833,4 +831,4 @@ def _content_for_create(self, obj, ignore_formatting=False, prefer_name=False,
     return summary or (
       (name or content) if prefer_name
       else (content or name)
-    ) or u''
+    ) or ''
diff --git a/granary/test/test_atom.py b/granary/test/test_atom.py
index 49bd7f5f..a6b7dd56 100644
--- a/granary/test/test_atom.py
+++ b/granary/test/test_atom.py
@@ -6,9 +6,9 @@
 
 from granary import atom
 
-import test_facebook
-import test_instagram
-import test_twitter
+from . import test_facebook
+from . import test_instagram
+from . import test_twitter
 
 
 class AtomTest(testutil.HandlerTest):
diff --git a/granary/test/test_facebook.py b/granary/test/test_facebook.py
index e12cac33..3b33b274 100644
--- a/granary/test/test_facebook.py
+++ b/granary/test/test_facebook.py
@@ -6,8 +6,8 @@
 import copy
 import json
-import urllib
-import urllib2
+import urllib.request, urllib.parse, urllib.error
+import urllib.request, urllib.error, urllib.parse
 
 from oauth_dropins.webutil import testutil
 from oauth_dropins.webutil import util
@@ -416,7 +416,7 @@ def tag_uri(name):
   'url': 'https://www.facebook.com/212038/posts/10100176064482163#haha-by-100005',
   'objectType': 'activity',
   'verb': 'react',
-  'content': u'😆',
+  'content': '😆',
   'object': {'url': 'https://www.facebook.com/212038/posts/10100176064482163'},
   'author': {
     'objectType': 'person',
@@ -431,7 +431,7 @@ def tag_uri(name):
   'url': 'https://www.facebook.com/212038/posts/10100176064482163#sad-by-100006',
   'objectType': 'activity',
   'verb': 'react',
-  'content': u'😢',
+  'content': '😢',
   'object': {'url': 'https://www.facebook.com/212038/posts/10100176064482163'},
   'author': {
     'objectType': 'person',
@@ -552,7 +552,7 @@ def tag_uri(name):
   'objectType': 'note',
   'url': 'https://www.facebook.com/212038/posts/222',
   'content': 'Stopped in to grab coffee and saw this table topper. Wow. Just...wow.',
-  'image': {'url': u'https://fbcdn-photos-b-a.akamaihd.net/pic_s.jpg'},
+  'image': {'url': 'https://fbcdn-photos-b-a.akamaihd.net/pic_s.jpg'},
   'published': '2014-04-09T20:44:26+00:00',
   'author': POST_OBJ['author'],
   'to': [{'alias': '@public', 'objectType': 'group'}],
@@ -604,7 +604,7 @@ def tag_uri(name):
   'url': 'https://www.facebook.com/212038/posts/222#wow-by-777',
   'objectType': 'activity',
   'verb': 'react',
-  'content': u'😮',
+  'content': '😮',
   'object': {'url': 'https://www.facebook.com/212038/posts/222'},
   'author': {
     'objectType': 'person',
@@ -863,7 +863,7 @@ def tag_uri(name):
   'count': 2,
   'cover_photo': '1520050698319836',
   'from': {
-    'name': u'Snoøpy Barrett',
+    'name': 'Snoøpy Barrett',
     'id': '1407574399567467'
   },
   'link': 'https://www.facebook.com/album.php?fbid=1520022318322674&id=1407574399567467&aid=1073741827',
@@ -882,7 +882,7 @@ def tag_uri(name):
     'objectType': 'person',
     'id': tag_uri('1407574399567467'),
     'numeric_id': '1407574399567467',
-    'displayName': u'Snoøpy Barrett',
+    'displayName': 'Snoøpy Barrett',
     'image': {'url': 'https://graph.facebook.com/v2.6/1407574399567467/picture?type=large'},
     'url': 'https://www.facebook.com/1407574399567467',
   },
@@ -1014,7 +1014,7 @@ def expect_batch_req(self, url, response, status=200, headers={},
     batch.append({
       'method': 'GET',
       'relative_url': url,
-      'headers': [{'name': n, 'value': v} for n, v in headers.items()],
+      'headers': [{'name': n, 'value': v} for n, v in list(headers.items())],
     })
     batch_responses.append(util.trim_nulls({
       'code': status,
@@ -1160,7 +1160,7 @@ def test_get_activities_self_merge_photos(self):
       {'fb_id': '5'},
       {'fb_id': '66', 'to': [{'objectType':'group', 'alias':'@public'}]},
       {'fb_id': '77', 'to': [{'objectType': 'unknown'}]},
-    ], [{k: v for k, v in activity['object'].items() if k in ('fb_id', 'to')}
+    ], [{k: v for k, v in list(activity['object'].items()) if k in ('fb_id', 'to')}
        for activity in self.fb.get_activities(group_id=source.SELF)])
 
   def test_get_activities_self_photos_returns_list(self):
@@ -1247,13 +1247,13 @@ def test_get_activities_activity_id_with_underscore(self):
     self.expect_urlopen(API_OBJECT % ('12', '34'), {'id': '123'})
     self.mox.ReplayAll()
     obj = self.fb.get_activities(activity_id='12_34')[0]['object']
-    self.assertEquals('123', obj['fb_id'])
+    self.assertEqual('123', obj['fb_id'])
 
   def test_get_activities_activity_id_with_user_id(self):
     self.expect_urlopen(API_OBJECT % ('12', '34'), {'id': '123'})
     self.mox.ReplayAll()
     obj = self.fb.get_activities(activity_id='34', user_id='12')[0]['object']
-    self.assertEquals('123', obj['fb_id'])
+    self.assertEqual('123', obj['fb_id'])
 
   def test_get_activities_activity_id_no_underscore_or_user_id(self):
     with self.assertRaises(NotImplementedError):
@@ -1511,7 +1511,7 @@ def test_get_comment_400s_id_without_underscore(self):
       '123?fields=id,message,from,created_time,message_tags,parent,attachment',
       {}, status=400)
     self.mox.ReplayAll()
-    self.assertRaises(urllib2.HTTPError, self.fb.get_comment, '123')
+    self.assertRaises(urllib.error.HTTPError, self.fb.get_comment, '123')
 
   def test_get_comment_with_activity(self):
     # still makes the API call, since the comment might be paged out or nested
@@ -1552,7 +1552,7 @@ def test_get_share_obj_400s(self):
   def test_get_share_500s(self):
     self.expect_urlopen(API_SHARES % '1_2', {}, status=500)
     self.mox.ReplayAll()
-    self.assertRaises(urllib2.HTTPError, self.fb.get_share, '1', '2', '_')
+    self.assertRaises(urllib.error.HTTPError, self.fb.get_share, '1', '2', '_')
 
   def test_get_share_with_activity(self):
     self.expect_urlopen(API_SHARES % '1_2', {'1_2': {'data': [{'id': SHARE['id']}]}})
@@ -1714,7 +1714,7 @@ def test_post_to_object_with_comment_unknown_id_format(self):
   def test_post_to_object_message_tags_list(self):
     post = copy.copy(POST)
-    tags = post['message_tags'].values()
+    tags = list(post['message_tags'].values())
     post['message_tags'] = tags[0] + tags[1]  # both lists
     self.assert_equals(POST_OBJ, self.fb.post_to_object(post))
@@ -1863,8 +1863,8 @@ def test_user_to_actor_multiple_urls(self):
 http://b http://c""",
       'link': 'http://x',  # website overrides link
     })
-    self.assertEquals('http://a', actor['url'])
-    self.assertEquals(
+    self.assertEqual('http://a', actor['url'])
+    self.assertEqual(
       [{'value': 'http://a'}, {'value': 'http://b'}, {'value': 'http://c'}],
       actor['urls'])
@@ -1872,8 +1872,8 @@ def test_user_to_actor_multiple_urls(self):
       'id': '123',
       'link': 'http://b http://c http://a',
     })
-    self.assertEquals('http://b', actor['url'])
-    self.assertEquals(
+    self.assertEqual('http://b', actor['url'])
+    self.assertEqual(
       [{'value': 'http://b'}, {'value': 'http://c'}, {'value': 'http://a'}],
       actor['urls'])
@@ -2119,7 +2119,7 @@ def test_album_to_object_full(self):
     self.assert_equals(ALBUM_OBJ, self.fb.album_to_object(ALBUM))
 
   def test_create_post(self):
-    self.expect_urlopen(API_PUBLISH_POST, {'id': '123_456'}, data=urllib.urlencode({
+    self.expect_urlopen(API_PUBLISH_POST, {'id': '123_456'}, data=urllib.parse.urlencode({
       'message': 'my msg',
       'tags': '234,345,456',
     }))
@@ -2138,11 +2138,11 @@ def test_create_post(self):
     }, self.fb.create(obj).content)
 
     preview = self.fb.preview_create(obj)
-    self.assertEquals('post:', preview.description)
-    self.assertEquals('my msg\n\nwith Friend 1, Friend 2, Friend 3', preview.content)
+    self.assertEqual('post:', preview.description)
+    self.assertEqual('my msg\n\nwith Friend 1, Friend 2, Friend 3', preview.content)
 
   def test_create_post_include_link(self):
-    self.expect_urlopen(API_PUBLISH_POST, {}, data=urllib.urlencode({
+    self.expect_urlopen(API_PUBLISH_POST, {}, data=urllib.parse.urlencode({
       'message': 'my content\n\n(Originally published at: http://obj.co)',
     }))
     self.mox.ReplayAll()
@@ -2159,12 +2159,12 @@ def test_create_post_include_link(self):
     })
     self.fb.create(obj, include_link=source.INCLUDE_LINK)
     preview = self.fb.preview_create(obj, include_link=source.INCLUDE_LINK)
-    self.assertEquals(
+    self.assertEqual(
       'my content\n\n(Originally published at: http://obj.co)',
       preview.content)
 
   def test_create_post_with_title(self):
-    self.expect_urlopen(API_PUBLISH_POST, {}, data=urllib.urlencode({
+    self.expect_urlopen(API_PUBLISH_POST, {}, data=urllib.parse.urlencode({
       'message': 'my title\n\nmy content\n\n(Originally published at: http://obj.co)',
     }))
     self.mox.ReplayAll()
@@ -2181,12 +2181,12 @@ def test_create_post_with_title(self):
     })
     self.fb.create(obj, include_link=source.INCLUDE_LINK)
     preview = self.fb.preview_create(obj, include_link=source.INCLUDE_LINK)
-    self.assertEquals(
+    self.assertEqual(
       'my title\n\nmy content\n\n(Originally published at: http://obj.co)',
       preview.content)
 
   def test_create_post_with_no_title(self):
-    self.expect_urlopen(API_PUBLISH_POST, {}, data=urllib.urlencode({
+    self.expect_urlopen(API_PUBLISH_POST, {}, data=urllib.parse.urlencode({
      'message': 'my\ncontent\n\n(Originally published at: http://obj.co)',
     }))
     self.mox.ReplayAll()
@@ -2203,7 +2203,7 @@ def test_create_post_with_no_title(self):
     })
     self.fb.create(obj, include_link=source.INCLUDE_LINK)
     preview = self.fb.preview_create(obj, include_link=source.INCLUDE_LINK)
-    self.assertEquals(
+    self.assertEqual(
       'my\ncontent\n\n(Originally published at: http://obj.co)',
       preview.content)
@@ -2222,7 +2222,7 @@ def test_create_comment(self):
     }, self.fb.create(obj).content)
 
     preview = self.fb.preview_create(obj)
-    self.assertEquals('my cmt', preview.content)
+    self.assertEqual('my cmt', preview.content)
     self.assertIn('comment on this post:', preview.description)
     self.assertIn('\n', preview.description)
@@ -2252,7 +2252,7 @@ def test_create_comment_on_post_urls(self):
     self.mox.ReplayAll()
 
     obj = copy.deepcopy(COMMENT_OBJS[0])
-    for post_url, cmt_url in urls.items():
+    for post_url, cmt_url in list(urls.items()):
       obj.update({
         'inReplyTo': [{'url': post_url}],
         'content': 'my cmt',
@@ -2266,7 +2266,7 @@ def test_create_comment_with_photo(self):
     self.expect_urlopen(
       '547822715231468/comments', {'id': '456_789'},
-      data=urllib.urlencode({'message': 'cc Sam G, Michael M',
+      data=urllib.parse.urlencode({'message': 'cc Sam G, Michael M',
                              'attachment_url': 'http://pict/ure'}))
     self.mox.ReplayAll()
@@ -2282,7 +2282,7 @@ def test_create_comment_with_photo(self):
     }, self.fb.create(obj).content)
 
     preview = self.fb.preview_create(obj)
-    self.assertEquals('cc Sam G, Michael M\n\n',
+    self.assertEqual('cc Sam G, Michael M\n\n',
                      preview.content)
     self.assertIn('comment on this post:', preview.description)
     self.assertIn('\n', preview.description)
@@ -2379,7 +2379,7 @@ def test_create_rsvp(self):
                         '%s\n%s' % (created.content, rsvp))
 
       preview = self.fb.preview_create(rsvp)
-      self.assertEquals('RSVP interested to '
+      self.assertEqual('RSVP interested to '
                         'this event.', preview.description)
@@ -2427,8 +2427,8 @@ def test_create_with_photo(self):
     # test preview
     preview = self.fb.preview_create(obj)
-    self.assertEquals('post:', preview.description)
-    self.assertEquals('my caption\n\n',
+    self.assertEqual('post:', preview.description)
+    self.assertEqual('my caption\n\n',
                      preview.content)
 
     # test create
@@ -2452,7 +2452,7 @@ def test_create_with_photo_uses_timeline_photos_album(self):
       {'id': '1', 'name': 'foo bar'},
       {'id': '2', 'type': 'wall'},
     ]})
-    self.expect_urlopen('2/photos', {}, data=urllib.urlencode({
+    self.expect_urlopen('2/photos', {}, data=urllib.parse.urlencode({
       'url': 'http://my/picture', 'message': ''}))
     self.mox.ReplayAll()
     self.assert_equals({'type': 'post', 'url': None}, self.fb.create(obj).content)
@@ -2473,7 +2473,7 @@ def test_create_with_photo_and_person_tags(self):
     # test preview
     preview = self.fb.preview_create(obj)
-    self.assertEquals(
+    self.assertEqual(



with ' 'Foo, ' 'User 345', @@ -2482,7 +2482,7 @@ def test_create_with_photo_and_person_tags(self): # test create self.expect_urlopen(API_ALBUMS % 'me', {'data': []}) self.expect_urlopen( - API_PUBLISH_PHOTO, {'id': '123_456'}, data=urllib.urlencode({ + API_PUBLISH_PHOTO, {'id': '123_456'}, data=urllib.parse.urlencode({ 'url': 'http://my/picture', 'message': '', 'tags': json.dumps([{'tag_uid': '234'}, {'tag_uid': '345'}]), @@ -2504,13 +2504,13 @@ def test_create_with_video(self): # test preview preview = self.fb.preview_create(obj) - self.assertEquals('post:', preview.description) - self.assertEquals('my\ncaption