Skip to content

Commit

Permalink
Fixed #177 pagination for all types
Browse files Browse the repository at this point in the history
  • Loading branch information
wolph committed Feb 13, 2017
1 parent ecf2c1b commit 589dba6
Show file tree
Hide file tree
Showing 2 changed files with 38 additions and 57 deletions.
51 changes: 0 additions & 51 deletions digitalocean/Manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,57 +21,6 @@ class Manager(BaseAPI):
def __init__(self, *args, **kwargs):
    """Initialize the Manager by delegating straight to BaseAPI.__init__."""
    super(Manager, self).__init__(*args, **kwargs)

def get_data(self, url, *args, **kwargs):
    """
    Customized version of get_data that handles pagination.

    The default amount of elements per page is 200, as explained here:
    https://github.com/koalalorenzo/python-digitalocean/pull/78

    :param url: API endpoint path. Made an explicit parameter so calls
        that pass it as a keyword (``get_data(url=...)``) work; the old
        ``args[0]`` lookup raised IndexError for those.
    :return: the (possibly merged, multi-page) response payload.
    """
    # Work on a copy so the injected defaults ('per_page', and 'page'
    # added later during pagination) never leak into the caller's dict.
    params = dict(kwargs.get("params") or {})
    params.setdefault("per_page", 200)
    kwargs["params"] = params

    data = super(Manager, self).get_data(url, *args, **kwargs)

    # BaseAPI.get_data returns True for empty (204) responses; guard
    # before treating the payload as a dict. If more elements exist
    # (meta.total) than fit on one page, fetch the remaining pages.
    if isinstance(data, dict) \
            and data.get('meta', {}).get('total', 0) > params['per_page']:
        return self.__deal_with_pagination(url, data, params)

    return data

def __deal_with_pagination(self, url, data, params):
    """
    Perform multiple calls in order to have a full list of elements
    when the API response is "paginated" (the content list is divided
    over more than one page).

    :param url: endpoint path of the original request
    :param data: decoded JSON payload of the first page (merged into
        in place and returned)
    :param params: query parameters; mutated — 'page' is advanced here
    :return: dict with the per-page content lists concatenated
    """
    try:
        lastpage_url = data['links']['pages']['last']
        pages = int(parse_qs(urlparse(lastpage_url).query)['page'][0])
        for page in range(2, pages + 1):
            params['page'] = page
            new_data = super(Manager, self).get_data(url, params=params)

            # Merge by key: concatenate list payloads, overwrite the
            # rest. The previous popitem()-based merge popped an
            # *arbitrary* key on pre-3.7 dicts, so it could grab
            # 'links' or 'meta' instead of the content list (#177).
            for key, value in new_data.items():
                if isinstance(value, list) and key in data:
                    data[key] += value
                else:
                    data[key] = value
    except KeyError:  # No pages.
        pass

    return data

def get_account(self):
"""
Returns an Account object.
Expand Down
44 changes: 38 additions & 6 deletions digitalocean/baseapi.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,9 @@
import logging
import requests
try:
from urlparse import urljoin
import urlparse
except ImportError:
from urllib.parse import urljoin
from urllib import parse as urlparse


GET = 'GET'
Expand Down Expand Up @@ -65,7 +65,7 @@ def __perform_request(self, url, type=GET, params=None):
if not self.token:
raise TokenError("No token provided. Please use a valid token")

url = urljoin(self.end_point, url)
url = urlparse.urljoin(self.end_point, url)

# lookup table to find out the appropriate requests method,
# headers and payload type (json or query parameters)
Expand Down Expand Up @@ -97,6 +97,26 @@ def __perform_request(self, url, type=GET, params=None):

return requests_method(url, **kwargs)

def __deal_with_pagination(self, url, method, params, data):
    """
    Follow the 'next' page links of a paginated API response and
    return the payload with all pages merged into one dict.

    :param url: endpoint of the original request (superseded by the
        'next' links found in the payload)
    :param method: HTTP method to repeat for each page
    :param params: query parameters forwarded to every page request
    :param data: decoded JSON payload of the first page (merged into
        in place)
    :return: dict containing the merged payload of every page
    """
    merged = data
    next_url = data.get('links', {}).get('pages', {}).get('next')

    while next_url:
        page = self.__perform_request(next_url, method, params).json()

        # Merge by key: concatenate list payloads, overwrite the rest
        # (so 'links'/'meta' always reflect the latest page fetched).
        for key, value in page.items():
            if isinstance(value, list) and key in merged:
                merged[key] += value
            else:
                merged[key] = value

        next_url = page.get('links', {}).get('pages', {}).get('next')

    return merged

def get_timeout(self):
"""
Checks if any timeout for the requests to DigitalOcean is required.
Expand All @@ -113,15 +133,20 @@ def get_timeout(self):
timeout_str)
return None

def get_data(self, url, type=GET, params=None):
def get_data(self, url, type=GET, params=None,):
"""
This method is a basic implementation of __call_api that checks
errors too. In cas of success the method will return True or the
errors too. In case of success the method will return True or the
content of the response to the request.
Pagination is automatically detected and handled accordingly
"""
if params is None:
params = dict()

# If per_page is not set, make sure it has a sane default
params.setdefault('per_page', 200)

req = self.__perform_request(url, type, params)
if req.status_code == 204:
return True
Expand All @@ -140,7 +165,14 @@ def get_data(self, url, type=GET, params=None):
msg = [data[m] for m in ("id", "message") if m in data][1]
raise DataReadError(msg)

return data
# If there are more elements available (total) than the elements per
# page, try to deal with pagination. Note: Breaking the logic on
# multiple pages,
pages = data.get('links', {}).get('pages', {})
if pages.get('next') and 'page' not in params:
return self.__deal_with_pagination(url, type, params, data)
else:
return data

def __str__(self):
    # e.g. "<Manager>" — the concrete subclass name in angle brackets.
    return "<%s>" % type(self).__name__
Expand Down

0 comments on commit 589dba6

Please sign in to comment.