Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 7 additions & 2 deletions src/pyff/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,8 +116,13 @@ def _fmt(data, accepter):
raise exc.exception_response(406)


def call(entry):
requests.post('{}/api/call/{}'.format(config.base_url, entry))
def call(entry: str) -> None:
    """Invoke the pyFF API 'call' endpoint for *entry* via HTTP POST.

    Failures (any 3xx+ response) are logged, never raised.
    """
    endpoint = f'{config.base_url}/api/call/{entry}'
    log.debug(f'Calling API endpoint at {endpoint}')
    response = requests.post(endpoint)
    if response.status_code >= 300:
        log.error(f'POST request to API endpoint at {endpoint} failed: {response.status_code} {response.reason}')
    return None


def request_handler(request):
Expand Down
14 changes: 7 additions & 7 deletions src/pyff/builtins.py
Original file line number Diff line number Diff line change
Expand Up @@ -596,7 +596,7 @@ def load(req, *opts):
- max_workers <5> : Number of parallel threads to use for loading MD files
- timeout <120> : Socket timeout when downloading files
- validate <True*|False> : When true downloaded metadata files are validated (schema validation)
- fail_on_error <True|False*> : Control whether an error during download, parsing or (optional)validatation of a MD file
- fail_on_error <True|False*> : Control whether an error during download, parsing or (optional)validation of a MD file
does not abort processing of the pipeline. When true a failure aborts and causes pyff
to exit with a non zero exit code. Otherwise errors are logged but ignored.
- filter_invalid <True*|False> : Controls validation behaviour. When true Entities that fail validation are filtered
Expand Down Expand Up @@ -713,7 +713,7 @@ def select(req, *opts):
This would select all SPs

Select statements are not cumulative - a select followed by another select in the plumbing resets the
working douments to the result of the second select.
working documents to the result of the second select.

Most statements except local and remote depend on having a select somewhere in your plumbing and will
stop the plumbing if the current working document is empty. For instance, running
Expand Down Expand Up @@ -799,7 +799,7 @@ def _match(q, elt):
raise PipeException("empty select - stop")

if req.plumbing.id != name:
log.debug("storing synthentic collection {}".format(name))
log.debug("storing synthetic collection {}".format(name))
req.store.update(ot, name)

return ot
Expand Down Expand Up @@ -886,7 +886,7 @@ def first(req, *opts):
:return: returns the first entity descriptor if the working document only contains one

Sometimes (eg when running an MDX pipeline) it is usually expected that if a single EntityDescriptor is being returned
then the outer EntitiesDescriptor is stripped. This method does exactly that:
then the outer EntitiesDescriptor is stripped. This method does exactly that.

"""
if req.t is None:
Expand Down Expand Up @@ -918,7 +918,7 @@ def _discojson(req, *opts):
cache & converted to data: URIs

:param req: The request
:param opts: Options (unusued)
:param opts: Options (unused)
:return: returns a JSON array

"""
Expand Down Expand Up @@ -1453,10 +1453,10 @@ def finalize(req, *opts):
:return: returns the working document with @Name, @cacheDuration and @validUntil set

Set Name, ID, cacheDuration and validUntil on the toplevel EntitiesDescriptor element of the working document.
Unlessexplicit provided the @Name is set from the request URI if the pipeline is executed in the pyFF server. The
Unless explicitly provided the @Name is set from the request URI if the pipeline is executed in the pyFF server. The
@ID is set to a string representing the current date/time and will be prefixed with the string provided, which
defaults to '_'. The @cacheDuration element must be a valid xsd duration (eg PT5H for 5 hrs) and @validUntil can
be either an absolute ISO 8601 time string or (more comonly) a relative time on the form
be either an absolute ISO 8601 time string or (more commonly) a relative time in the form

.. code-block:: none

Expand Down
4 changes: 2 additions & 2 deletions src/pyff/locks.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ def acquireRead(self, blocking=True, timeout=None):
finally:
self.__condition.release()

@property
@property # type: ignore
@contextmanager
def readlock(self):
"""Yields a read lock"""
Expand All @@ -116,7 +116,7 @@ def readlock(self):
finally:
self.release()

@property
@property # type: ignore
@contextmanager
def writelock(self):
"""Yields a write lock"""
Expand Down
2 changes: 1 addition & 1 deletion src/pyff/resource.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
"""

An abstraction layer for metadata fetchers. Supports both syncronous and asyncronous fetchers with cache.
An abstraction layer for metadata fetchers. Supports both synchronous and asynchronous fetchers with cache.

"""

Expand Down
31 changes: 14 additions & 17 deletions src/pyff/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,13 +24,12 @@
from itertools import chain
from threading import local
from time import gmtime, strftime
from typing import AnyStr, Optional, Union

import iso8601
import pkg_resources
import requests
import six
import xmlsec
import yaml
from _collections_abc import Mapping, MutableMapping
from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.jobstores.memory import MemoryJobStore
Expand All @@ -43,22 +42,13 @@
from requests.structures import CaseInsensitiveDict
from requests_cache import CachedSession
from requests_file import FileAdapter
from six.moves.urllib_parse import quote_plus, urlparse
from six.moves.urllib_parse import urlparse

from . import __version__
from .constants import NS, config
from .exceptions import *
from .logs import get_log

try:
from redis import StrictRedis
except ImportError as ex:
StrictRedis = None

try:
from PIL import Image
except ImportError as ex:
Image = None

etree.set_default_parser(etree.XMLParser(resolve_entities=False))

Expand Down Expand Up @@ -244,7 +234,9 @@ def redis():
if not hasattr(thread_data, 'redis'):
thread_data.redis = None

if StrictRedis is None:
try:
from redis import StrictRedis
except ImportError:
raise ValueError("redis_py missing from dependencies")

if thread_data.redis is None:
Expand Down Expand Up @@ -727,27 +719,32 @@ def url_get(url):
return r


def safe_b64e(data):
if not isinstance(data, six.binary_type):
def safe_b64e(data: Union[str, bytes]) -> str:
    """Base64-encode *data* (str is UTF-8 encoded first) and return an ASCII str."""
    raw = data if isinstance(data, bytes) else data.encode("utf-8")
    return base64.b64encode(raw).decode('ascii')


def safe_b64d(s):
def safe_b64d(s: str) -> bytes:
    """Decode a base64-encoded string and return the raw bytes."""
    decoded = base64.b64decode(s)
    return decoded


# data:&lt;class 'type'&gt;;base64,
# data:<class 'type'>;base64,


def img_to_data(data, content_type):
def img_to_data(data: bytes, content_type: str) -> Optional[str]:
"""Convert a file (specified by a path) into a data URI."""
mime_type, options = cgi.parse_header(content_type)
data64 = None
if len(data) > config.icon_maxsize:
return None

try:
from PIL import Image
except ImportError:
Image = None

if Image is not None:
try:
im = Image.open(io.BytesIO(data))
Expand Down