Commit cf78fbb
tentative port to python 3
snarfed committed Aug 8, 2016
1 parent 9235863 commit cf78fbb
Showing 17 changed files with 364 additions and 366 deletions.
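Most of the changes below are the standard mechanical Python 2 → 3 transforms (largely what 2to3 emits): urlparse/urllib/urllib2 collapse into urllib.parse, urllib.request, and urllib.error; implicit relative imports become explicit `from . import ...`; `except Exc, e` becomes `except Exc as e`; `u''` literals and `basestring` go away because every Python 3 str is unicode; and dict `.items()`/`.values()` calls get wrapped in `list()` because they now return views. A minimal, runnable sketch of those idioms (illustrative only, not code from this commit):

import urllib.parse          # py2: import urlparse
import urllib.request        # py2: import urllib / urllib2
import urllib.error

# urlparse.urlparse -> urllib.parse.urlparse
parsed = urllib.parse.urlparse('https://example.com/a?b=1')
assert parsed.query == 'b=1'

# "except Exc, e" is a syntax error in py3; "as" is required
try:
    raise ValueError('boom')
except ValueError as e:
    assert str(e) == 'boom'

# u'...' is redundant and basestring is gone: every py3 str is unicode
assert isinstance('❤️', str)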
14 changes: 7 additions & 7 deletions granary/atom.py
@@ -6,16 +6,16 @@
import collections
import os
import re
-import urlparse
+import urllib.parse
import xml.sax.saxutils

import jinja2
import mf2py
import mf2util
from oauth_dropins.webutil import util

-import microformats2
-import source
+from . import microformats2
+from . import source

ATOM_TEMPLATE_FILE = 'user_feed.atom'
# stolen from django.utils.html
@@ -87,10 +87,10 @@ class Defaulter(collections.defaultdict):
def __init__(self, **kwargs):
super(Defaulter, self).__init__(Defaulter, **{
k: (Defaulter(**v) if isinstance(v, dict) else v)
-for k, v in kwargs.items()})
+for k, v in list(kwargs.items())})

def __unicode__(self):
-return super(Defaulter, self).__unicode__() if self else u''
+return super(Defaulter, self).__unicode__() if self else ''

env = jinja2.Environment(loader=jinja2.PackageLoader(__package__, 'templates'),
autoescape=True)
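One loose end in the hunk above: Python 3 never calls `__unicode__`, so after the port that method is dead code; the py3 hook is `__str__`. A sketch of the equivalent (hypothetical, not part of this commit):

import collections

class Defaulter(collections.defaultdict):
    # Missing keys default to nested Defaulters, so template lookups
    # like obj['author']['image']['url'] never raise KeyError.
    def __init__(self, **kwargs):
        super(Defaulter, self).__init__(Defaulter, **{
            k: (Defaulter(**v) if isinstance(v, dict) else v)
            for k, v in kwargs.items()})

    def __str__(self):  # py3 equivalent of the py2 __unicode__ hook
        return super(Defaulter, self).__str__() if self else ''

d = Defaulter(author={'name': 'snarfed'})
assert d['author']['name'] == 'snarfed'
assert str(d['no']['such']['key']) == ''  # empty Defaulter renders as ''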
@@ -127,6 +127,6 @@ def html_to_atom(html, url=None, **kwargs):


def _remove_query_params(url):
-parsed = list(urlparse.urlparse(url))
+parsed = list(urllib.parse.urlparse(url))
parsed[4] = ''
-return urlparse.urlunparse(parsed)
+return urllib.parse.urlunparse(parsed)
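For reference, index 4 in the parse tuple is the query component (scheme, netloc, path, params, query, fragment). On Python 3 the same function can avoid the magic index entirely, since the parse result is a namedtuple — an alternative sketch, not what the commit does:

import urllib.parse

def _remove_query_params(url):
    # ParseResult is a namedtuple; _replace beats assigning to parsed[4]
    return urllib.parse.urlparse(url)._replace(query='').geturl()

assert _remove_query_params('https://example.com/a?b=1#c') == 'https://example.com/a#c'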
62 changes: 31 additions & 31 deletions granary/facebook.py
@@ -48,14 +48,14 @@
import json
import logging
import re
-import urllib
-import urllib2
-import urlparse
+import urllib.request, urllib.parse, urllib.error
+import urllib.request, urllib.error, urllib.parse
+import urllib.parse
import mf2util

-import appengine_config
+from . import appengine_config
from oauth_dropins.webutil import util
-import source
+from . import source

# Since API v2.4, we need to explicitly ask for the fields we want from most API
# endpoints with ?fields=...
@@ -166,11 +166,11 @@
'rsvp-interested': API_PUBLISH_RSVP_INTERESTED,
}
REACTION_CONTENT = {
-'LOVE': u'❤️',
-'WOW': u'😮',
-'HAHA': u'😆',
-'SAD': u'😢',
-'ANGRY': u'😡',
+'LOVE': '❤️',
+'WOW': '😮',
+'HAHA': '😆',
+'SAD': '😢',
+'ANGRY': '😡',
# nothing for LIKE (it's a like :P) or for NONE
}

@@ -286,7 +286,7 @@ def get_activities_response(self, user_id=None, group_id=None, app_id=None,
resp = self.urlopen(url, headers=headers, _as=None)
etag = resp.info().get('ETag')
posts = self._as(list, json.loads(resp.read()))
-except urllib2.HTTPError, e:
+except urllib.error.HTTPError as e:
if e.code == 304: # Not Modified, from a matching ETag
posts = []
else:
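For context on the 304 branch: the fetch sends the caller's ETag as If-None-Match, and when nothing has changed the Graph API answers 304 Not Modified, which urllib surfaces as an HTTPError. The general shape of the pattern (a sketch; the URL and ETag are placeholders, and a real request needs an access token):

import urllib.request, urllib.error

req = urllib.request.Request('https://graph.facebook.com/me/feed',
                             headers={'If-None-Match': '"placeholder-etag"'})
try:
    resp = urllib.request.urlopen(req)
    posts = resp.read()
    etag = resp.info().get('ETag')  # remember for the next poll
except urllib.error.HTTPError as e:
    if e.code == 304:  # Not Modified: the ETag still matches
        posts = []
    else:
        raise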
@@ -334,7 +334,7 @@ def get_activities_response(self, user_id=None, group_id=None, app_id=None,
# some sharedposts requests 400, not sure why.
# https://github.com/snarfed/bridgy/issues/348
with util.ignore_http_4xx_error():
-for id, shares in self._split_id_requests(API_SHARES, fetch_shares_ids).items():
+for id, shares in list(self._split_id_requests(API_SHARES, fetch_shares_ids).items()):
activity = id_to_activity.get(id)
if activity:
activity['object'].setdefault('tags', []).extend(
@@ -343,8 +343,8 @@ def get_activities_response(self, user_id=None, group_id=None, app_id=None,
if fetch_replies and fetch_comments_ids:
# some comments requests 400, not sure why.
with util.ignore_http_4xx_error():
-for id, comments in self._split_id_requests(API_COMMENTS_ALL,
-fetch_comments_ids).items():
+for id, comments in list(self._split_id_requests(API_COMMENTS_ALL,
+fetch_comments_ids).items()):
activity = id_to_activity.get(id)
if activity:
replies = activity['object'].setdefault('replies', {}
@@ -408,7 +408,7 @@ def _merge_photos(self, posts):
photo['privacy'] = 'custom' # ie unknown

return ([p for p in posts if not p.get('object_id')] +
-posts_by_obj_id.values() + photos)
+list(posts_by_obj_id.values()) + photos)

def _split_id_requests(self, api_call, ids):
"""Splits an API call into multiple to stay under the MAX_IDS limit per call.
@@ -424,7 +424,7 @@ def _split_id_requests(self, api_call, ids):
results = {}
for i in range(0, len(ids), MAX_IDS):
resp = self.urlopen(api_call % ','.join(ids[i:i + MAX_IDS]))
-for id, objs in resp.items():
+for id, objs in list(resp.items()):
# objs is usually a dict but sometimes a boolean. (oh FB, never change!)
results.setdefault(id, []).extend(self._as(dict, objs).get('data', []))

@@ -486,7 +486,7 @@ def get_comment(self, comment_id, activity_id=None, activity_author_id=None,
"""
try:
resp = self.urlopen(API_COMMENT % comment_id)
-except urllib2.HTTPError, e:
+except urllib.error.HTTPError as e:
if e.code == 400 and '_' in comment_id:
# Facebook may want us to ask for this without the other prefixed id(s)
resp = self.urlopen(API_COMMENT % comment_id.split('_')[-1])
@@ -636,7 +636,7 @@ def _create(self, obj, preview=None, include_link=source.OMIT_LINK,

name = obj.get('displayName')
if name and mf2util.is_name_a_title(name, content):
-content = name + u"\n\n" + content
+content = name + "\n\n" + content

people = self._get_person_tags(obj)

@@ -677,7 +677,7 @@ def _create(self, obj, preview=None, include_link=source.OMIT_LINK,
if image_url:
msg_data['attachment_url'] = image_url
resp = self.urlopen(API_PUBLISH_COMMENT % base_id,
-data=urllib.urlencode(msg_data))
+data=urllib.parse.urlencode(msg_data))
url = self.comment_url(base_id, resp['id'],
post_author_id=base_obj.get('author', {}).get('id'))
resp.update({'url': url, 'type': 'comment'})
@@ -772,7 +772,7 @@ def _create(self, obj, preview=None, include_link=source.OMIT_LINK,
# https://developers.facebook.com/docs/graph-api/reference/user/feed#pubfields
msg_data['tags'] = ','.join(tag['id'] for tag in people)

-resp = self.urlopen(api_call, data=urllib.urlencode(msg_data))
+resp = self.urlopen(api_call, data=urllib.parse.urlencode(msg_data))
resp.update({'url': self.post_url(resp), 'type': 'post'})
if video_url and not resp.get('success', True):
msg = 'Video upload failed.'
@@ -817,7 +817,7 @@ def _get_person_tags(self, obj):
tag['id'] = id
people[id] = tag

-return people.values()
+return list(people.values())

def create_notification(self, user_id, text, link):
"""Sends the authenticated user a notification.
@@ -843,7 +843,7 @@ def create_notification(self, user_id, text, link):
appengine_config.FACEBOOK_APP_SECRET),
}
url = API_BASE + API_NOTIFICATION % user_id
-resp = util.urlopen(urllib2.Request(url, data=urllib.urlencode(params)))
+resp = util.urlopen(urllib.request.Request(url, data=urllib.parse.urlencode(params)))
logging.debug('Response: %s %s', resp.getcode(), resp.read())

def post_url(self, post):
@@ -904,8 +904,8 @@ def base_object(self, obj, verb=None, resolve_numeric_id=False):
base_obj = self.user_to_actor(self.urlopen(base_id))

try:
-parsed = urlparse.urlparse(url)
-params = urlparse.parse_qs(parsed.query)
+parsed = urllib.parse.urlparse(url)
+params = urllib.parse.parse_qs(parsed.query)
assert parsed.path.startswith('/')
path = parsed.path.strip('/')
path_parts = path.split('/')
@@ -920,7 +920,7 @@ def base_object(self, obj, verb=None, resolve_numeric_id=False):
# populate image, displayName, etc.
if not base_obj.get('username') and not util.is_int(base_id):
base_obj['username'] = base_id
-base_obj.update({k: v for k, v in self.user_to_actor(base_obj).items()
+base_obj.update({k: v for k, v in list(self.user_to_actor(base_obj).items())
if k not in base_obj})

elif len(path_parts) >= 3 and path_parts[1] == 'posts':
@@ -955,7 +955,7 @@ def base_object(self, obj, verb=None, resolve_numeric_id=False):
# add author user id prefix. https://github.com/snarfed/bridgy/issues/229
base_obj['id'] = '%s_%s' % (author['numeric_id'], base_id)

-except BaseException, e:
+except BaseException as e:
logging.error(
"Couldn't parse object URL %s : %s. Falling back to default logic.",
url, e)
@@ -1074,7 +1074,7 @@ def post_to_object(self, post, is_comment=False):
# types, e.g. comments.
message_tags = post.get('message_tags', [])
if isinstance(message_tags, dict):
-message_tags = sum(message_tags.values(), []) # flatten
+message_tags = sum(list(message_tags.values()), []) # flatten
elif not isinstance(message_tags, list):
message_tags = list(message_tags) # fingers crossed! :P

@@ -1366,7 +1366,7 @@ def event_to_object(self, event, rsvps=None):
for rsvp in event.get(field, {}).get('data', []):
rsvp = self.rsvp_to_object(rsvp, type=field, event=event)
id_to_rsvp[rsvp['id']] = rsvp
-self.add_rsvps_to_event(obj, id_to_rsvp.values())
+self.add_rsvps_to_event(obj, list(id_to_rsvp.values()))

return self.postprocess_object(obj)

@@ -1684,7 +1684,7 @@ def urlopen(self, url, _as=dict, **kwargs):
log_url = url
if self.access_token:
url = util.add_query_params(url, [('access_token', self.access_token)])
-resp = util.urlopen(urllib2.Request(url, **kwargs))
+resp = util.urlopen(urllib.request.Request(url, **kwargs))

if _as is None:
return resp
@@ -1745,7 +1745,7 @@ def urlopen_batch(self, urls):
code = int(resp.get('code', 0))
body = resp.get('body')
if code / 100 in (4, 5):
-raise urllib2.HTTPError(url, code, body, resp.get('headers'), None)
+raise urllib.error.HTTPError(url, code, body, resp.get('headers'), None)
bodies.append(body)

return bodies
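Worth flagging for a "tentative" port: the unchanged line `if code / 100 in (4, 5):` above relied on Python 2 integer division. Under Python 3, 404 / 100 is 4.04, so the membership test never fires and batch errors silently stop being raised; floor division keeps the old semantics:

code = 404
assert code / 100 not in (4, 5)  # py3 true division: 4.04, check silently fails
assert code // 100 in (4, 5)     # floor division preserves the py2 behavior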
@@ -1786,7 +1786,7 @@ def urlopen_batch_full(self, requests):
req['method'] = 'GET'
if 'headers' in req:
req['headers'] = [{'name': n, 'value': v}
-for n, v in req['headers'].items()]
+for n, v in list(req['headers'].items())]

data = 'batch=' + json.dumps(util.trim_nulls(requests),
separators=(',', ':')) # no whitespace
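The headers reshaping in this hunk targets the Graph API batch format, which expects each sub-request's headers as a list of name/value objects and the whole batch as compact JSON in a batch= form parameter. A sketch of that transform (request shape inferred from the surrounding code):

import json

req = {'relative_url': 'me/feed', 'headers': {'If-None-Match': '"abc"'}}
req.setdefault('method', 'GET')
req['headers'] = [{'name': n, 'value': v}
                  for n, v in req['headers'].items()]

# separators=(',', ':') strips whitespace to keep the payload small
data = 'batch=' + json.dumps([req], separators=(',', ':'))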
32 changes: 16 additions & 16 deletions granary/flickr.py
@@ -20,14 +20,14 @@
import json
import logging
import requests
-import source
+from . import source
import sys
import mf2py
import mf2util
-import urllib2
-import urlparse
+import urllib.request, urllib.error, urllib.parse
+import urllib.parse

-import appengine_config
+from . import appengine_config
from oauth_dropins.webutil import util
from oauth_dropins import flickr_auth

@@ -312,7 +312,7 @@ def _get_person_tags(self, obj):
tag = copy.copy(tag)
tag['id'] = id
people[id] = tag
-return people.values()
+return list(people.values())

def get_activities_response(self, user_id=None, group_id=None, app_id=None,
activity_id=None, start_index=0, count=0,
@@ -442,7 +442,7 @@ def user_to_actor(self, resp):
obj['url'] = next(
(u for u in urls if not u.startswith('https://www.flickr.com/')),
None)
-except urllib2.URLError, e:
+except urllib.error.URLError as e:
logging.warning('could not fetch user homepage %s', profile_url)

return self.postprocess_object(obj)
@@ -512,7 +512,7 @@ def photo_to_activity(self, photo):
'url': photo_permalink,
'id': self.tag_uri(photo.get('id')),
'image': {
-'url': u'https://farm{}.staticflickr.com/{}/{}_{}_{}.jpg'.format(
+'url': 'https://farm{}.staticflickr.com/{}/{}_{}_{}.jpg'.format(
photo.get('farm'), photo.get('server'),
photo.get('id'), photo.get('secret'), 'b'),
},
@@ -555,14 +555,14 @@ def photo_to_activity(self, photo):
activity['object']['tags'] = [{
'objectType': 'hashtag',
'id': self.tag_uri(tag.get('id')),
-'url': u'https://www.flickr.com/search?tags={}'.format(
+'url': 'https://www.flickr.com/search?tags={}'.format(
tag.get('_content')),
'displayName': tag.get('raw'),
} for tag in photo.get('tags', {}).get('tag', [])]
-elif isinstance(photo.get('tags'), basestring):
+elif isinstance(photo.get('tags'), str):
activity['object']['tags'] = [{
'objectType': 'hashtag',
-'url': u'https://www.flickr.com/search?tags={}'.format(
+'url': 'https://www.flickr.com/search?tags={}'.format(
tag.strip()),
'displayName': tag.strip(),
} for tag in photo.get('tags').split(' ') if tag.strip()]
@@ -606,10 +606,10 @@ def like_to_object(self, person, photo_activity):
},
},
'created': util.maybe_timestamp_to_rfc3339(photo_activity.get('favedate')),
-'url': u'{}#liked-by-{}'.format(
+'url': '{}#liked-by-{}'.format(
photo_activity.get('url'), person.get('nsid')),
'object': {'url': photo_activity.get('url')},
-'id': self.tag_uri(u'{}_liked_by_{}'.format(
+'id': self.tag_uri('{}_liked_by_{}'.format(
photo_activity.get('flickr_id'), person.get('nsid'))),
'objectType': 'activity',
'verb': 'like',
@@ -656,8 +656,8 @@ def get_user_image(self, farm, server, author):
ref: https://www.flickr.com/services/api/misc.buddyicons.html
"""
if server == 0:
-return u'https://www.flickr.com/images/buddyicon.gif'
-return u'https://farm{}.staticflickr.com/{}/buddyicons/{}.jpg'.format(
+return 'https://www.flickr.com/images/buddyicon.gif'
+return 'https://farm{}.staticflickr.com/{}/buddyicons/{}.jpg'.format(
farm, server, author)

def user_id(self):
@@ -710,13 +710,13 @@ def photo_url(self, user_id, photo_id):
Returns:
string, the photo URL
"""
-return u'https://www.flickr.com/photos/%s/%s/' % (user_id, photo_id)
+return 'https://www.flickr.com/photos/%s/%s/' % (user_id, photo_id)

@classmethod
def post_id(cls, url):
"""Used when publishing comments or favorites. Flickr photo ID is the
3rd path component rather than the first.
"""
-parts = urlparse.urlparse(url).path.split('/')
+parts = urllib.parse.urlparse(url).path.split('/')
if len(parts) >= 4 and parts[1] == 'photos':
return parts[3]
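A quick sanity check of that path logic: for a URL like https://www.flickr.com/photos/{user}/{photo-id}/, splitting the path on '/' yields a leading empty string, so the photo ID lands at index 3 (sketch):

import urllib.parse

def post_id(url):
    parts = urllib.parse.urlparse(url).path.split('/')
    # ['', 'photos', '<user>', '<photo-id>', ...]
    if len(parts) >= 4 and parts[1] == 'photos':
        return parts[3]

assert post_id('https://www.flickr.com/photos/someuser/12345678901/') == '12345678901'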
10 changes: 5 additions & 5 deletions granary/googleplus.py
@@ -13,8 +13,8 @@
import json
import re

-import appengine_config
-import source
+from . import appengine_config
+from . import source

from apiclient.errors import HttpError
from apiclient.http import BatchHttpRequest
@@ -128,7 +128,7 @@ def request_with_etag(*args, **kwargs):
resp = call.execute(http)
activities = resp.get('items', [])
etag = resp.get('etag')
-except HttpError, e:
+except HttpError as e:
if e.resp.status == 304: # Not Modified, from a matching ETag
activities = []
else:
@@ -328,7 +328,7 @@ def html_to_activities(self, html):
html = re.sub(r'([,[])\s*([],])', r'\1null\2', html)

data = json.loads(html)[1][7][1:]
-data = [d[6].values()[0] for d in data if len(d) >= 7 and d[6]]
+data = [list(d[6].values())[0] for d in data if len(d) >= 7 and d[6]]

activities = []
for d in data:
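The list() wrapper here is load-bearing: Python 3 dict views can't be subscripted, so the `d[6].values()[0]` that worked on Python 2 now raises TypeError. A minimal demonstration:

d = {'key': 'value'}
try:
    d.values()[0]          # py2: fine (list); py3: TypeError (view)
except TypeError:
    pass
assert list(d.values())[0] == 'value'
# next(iter(d.values())) gets the same element without building a list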
@@ -392,7 +392,7 @@ def html_to_activities(self, html):
'image': {'url': att[1]},
'displayName': att[2],
'content': att[3],
-} for att in attachments.values()]
+} for att in list(attachments.values())]

self.postprocess_object(activity['object'])
activities.append(super(GooglePlus, self).postprocess_activity(activity))