diff --git a/model/configuration.py b/model/configuration.py
index a033d7353..2ec0bba00 100644
--- a/model/configuration.py
+++ b/model/configuration.py
@@ -182,6 +182,7 @@ class ExternalIntegration(Base, HasFullTableCache):
     FEEDBOOKS = DataSourceConstants.FEEDBOOKS
     LCP = DataSourceConstants.LCP
     MANUAL = DataSourceConstants.MANUAL
+    PROQUEST = DataSourceConstants.PROQUEST

     # These protocols were used on the Content Server when mirroring
     # content from a given directory or directly from Project
@@ -524,6 +525,7 @@ def key(setting):
             lines.append(explanation)
         return lines

+
 class ConfigurationSetting(Base, HasFullTableCache):
     """An extra piece of site configuration.
     A ConfigurationSetting may be associated with an
@@ -882,6 +884,8 @@ class ConfigurationAttributeType(Enum):
     TEXTAREA = 'textarea'
     SELECT = 'select'
     NUMBER = 'number'
+    LIST = 'list'
+    MENU = 'menu'

     def to_control_type(self):
         """Converts the value to a attribute type understandable by circulation-web
@@ -909,6 +913,7 @@ class ConfigurationAttribute(Enum):
     DEFAULT = 'default'
     OPTIONS = 'options'
     CATEGORY = 'category'
+    FORMAT = 'format'


 class ConfigurationOption(object):
@@ -1035,7 +1040,9 @@ def __init__(
             default=None,
             options=None,
             category=None,
-            index=None):
+            format=None,
+            index=None
+    ):
         """Initializes a new instance of ConfigurationMetadata class

         :param key: Setting's key
@@ -1070,6 +1077,7 @@ def __init__(
         self._default = default
         self._options = options
         self._category = category
+        self._format = format

         if index is not None:
             self._index = index
@@ -1187,6 +1195,15 @@ def category(self):
         """
         return self._category

+    @property
+    def format(self):
+        """Returns the setting's format
+
+        :return: Setting's format
+        :rtype: string
+        """
+        return self._format
+
     @property
     def index(self):
         return self._index
@@ -1224,7 +1241,8 @@ def to_settings(self):
                 [option.to_settings() for option in self.options]
                 if self.options
                 else None,
-            ConfigurationAttribute.CATEGORY.value: self.category
+            ConfigurationAttribute.CATEGORY.value: self.category,
+            ConfigurationAttribute.FORMAT.value: self.format
         }

@@ -1288,6 +1306,7 @@ def to_settings(cls):
             default_attribute = getattr(member, ConfigurationAttribute.DEFAULT.value, None)
             options_attribute = getattr(member, ConfigurationAttribute.OPTIONS.value, None)
             category_attribute = getattr(member, ConfigurationAttribute.CATEGORY.value, None)
+            format_attribute = getattr(member, ConfigurationAttribute.FORMAT.value, None)

             settings.append({
                 ConfigurationAttribute.KEY.value: key_attribute,
@@ -1300,7 +1319,8 @@ def to_settings(cls):
                     [option.to_settings() for option in options_attribute]
                     if options_attribute
                     else None,
-                ConfigurationAttribute.CATEGORY.value: category_attribute
+                ConfigurationAttribute.CATEGORY.value: category_attribute,
+                ConfigurationAttribute.FORMAT.value: format_attribute
             })

         return settings
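# ---------------------------------------------------------------------------
# Usage sketch (not part of the patch): how the new FORMAT attribute is
# expected to surface in the settings dictionary produced by
# ConfigurationMetadata.to_settings().  The key/label/description values and
# the "language-code" format string are hypothetical; the leading arguments
# follow the existing ConfigurationMetadata signature.  Imports assume this
# repository is importable as `core`.
from core.model.configuration import (
    ConfigurationAttribute,
    ConfigurationAttributeType,
    ConfigurationMetadata,
)

hypothetical_setting = ConfigurationMetadata(
    key="hypothetical_language_setting",
    label="Allowed languages",
    description="Languages to include when importing the feed.",
    type=ConfigurationAttributeType.LIST,
    default=["eng"],
    format="language-code",   # the new attribute introduced by this patch
)

settings = hypothetical_setting.to_settings()
assert settings[ConfigurationAttribute.FORMAT.value] == "language-code"
# ---------------------------------------------------------------------------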
OVERDRIVE_ID = u"Overdrive ID" @@ -163,6 +165,7 @@ class IdentifierConstants(object): BIBBLIO_CONTENT_ITEM_ID = u"Bibblio Content Item ID" ENKI_ID = u"Enki ID" SUDOC_CALL_NUMBER = u"SuDoc Call Number" + PROQUEST_ID = u"ProQuest Doc ID" DEPRECATED_NAMES = { u"3M ID" : BIBLIOTHECA_ID, diff --git a/model/credential.py b/model/credential.py index 6b5bea84b..ea1932feb 100644 --- a/model/credential.py +++ b/model/credential.py @@ -1,14 +1,10 @@ # encoding: utf-8 # Credential, DRMDeviceIdentifier, DelegatedPatronIdentifier -from nose.tools import set_trace - -from . import ( - Base, - get_one, - get_one_or_create, -) - import datetime +import uuid + +import sqlalchemy +from nose.tools import set_trace from sqlalchemy import ( Column, DateTime, @@ -18,13 +14,14 @@ String, UniqueConstraint, ) -from sqlalchemy.orm import ( - backref, - relationship, -) +from sqlalchemy.orm import backref, relationship from sqlalchemy.orm.session import Session from sqlalchemy.sql.expression import and_ -import uuid + +from ..util import is_session +from ..util.string_helpers import is_string +from . import Base, get_one, get_one_or_create + class Credential(Base): """A place to store credentials for external services.""" @@ -89,14 +86,40 @@ class Credential(Base): IDENTIFIER_FROM_REMOTE_SERVICE = "Identifier Received From Remote Service" @classmethod - def lookup(self, _db, data_source, type, patron, refresher_method, + def _filter_invalid_credential(cls, credential, allow_persistent_token): + """Filter out invalid credentials based on their expiration time and persistence. + + :param credential: Credential object + :type credential: Credential + + :param allow_persistent_token: Boolean value indicating whether persistent tokens are allowed + :type allow_persistent_token: bool + """ + if not credential: + # No matching token. + return None + + if not credential.expires: + if allow_persistent_token: + return credential + else: + # It's an error that this token never expires. It's invalid. + return None + elif credential.expires > datetime.datetime.utcnow(): + return credential + else: + # Token has expired. + return None + + @classmethod + def lookup(cls, _db, data_source, token_type, patron, refresher_method, allow_persistent_token=False, allow_empty_token=False, collection=None, force_refresh=False): from datasource import DataSource - if isinstance(data_source, basestring): + if is_string(data_source): data_source = DataSource.lookup(_db, data_source) credential, is_new = get_one_or_create( - _db, Credential, data_source=data_source, type=type, patron=patron, collection=collection) + _db, Credential, data_source=data_source, type=token_type, patron=patron, collection=collection) if (is_new or force_refresh or (not credential.expires and not allow_persistent_token) @@ -108,32 +131,88 @@ def lookup(self, _db, data_source, type, patron, refresher_method, return credential @classmethod - def lookup_by_token(self, _db, data_source, type, token, - allow_persistent_token=False): + def lookup_by_token( + cls, + _db, + data_source, + token_type, + token, + allow_persistent_token=False + ): """Look up a unique token. Lookup will fail on expired tokens. Unless persistent tokens are specifically allowed, lookup will fail on persistent tokens. """ credential = get_one( - _db, Credential, data_source=data_source, type=type, + _db, Credential, data_source=data_source, type=token_type, credential=token) - if not credential: - # No matching token. 
+
+    @classmethod
+    def lookup_by_patron(
+        cls,
+        _db,
+        data_source_name,
+        token_type,
+        patron,
+        allow_persistent_token=False,
+        auto_create_datasource=True
+    ):
+        """Look up a unique token.
+        Lookup will fail on expired tokens. Unless persistent tokens
+        are specifically allowed, lookup will fail on persistent tokens.
+
+        :param _db: Database session
+        :type _db: sqlalchemy.orm.session.Session
+
+        :param data_source_name: Name of the data source
+        :type data_source_name: str
+
+        :param token_type: Token type
+        :type token_type: str
+
+        :param patron: Patron object
+        :type patron: core.model.patron.Patron
+
+        :param allow_persistent_token: Boolean value indicating whether persistent tokens are allowed or not
+        :type allow_persistent_token: bool
+
+        :param auto_create_datasource: Boolean value indicating whether
+            a data source should be created in case it does not exist
+        :type auto_create_datasource: bool
+        """
+        from patron import Patron
+
+        if not is_session(_db):
+            raise ValueError('"_db" argument must be a valid SQLAlchemy session')
+        if not is_string(data_source_name) or not data_source_name:
+            raise ValueError('"data_source_name" argument must be a non-empty string')
+        if not is_string(token_type) or not token_type:
+            raise ValueError('"token_type" argument must be a non-empty string')
+        if not isinstance(patron, Patron):
+            raise ValueError('"patron" argument must be an instance of Patron class')
+        if not isinstance(allow_persistent_token, bool):
+            raise ValueError('"allow_persistent_token" argument must be boolean')
+        if not isinstance(auto_create_datasource, bool):
+            raise ValueError('"auto_create_datasource" argument must be boolean')
+
+        from datasource import DataSource
+        data_source = DataSource.lookup(
+            _db,
+            data_source_name,
+            autocreate=auto_create_datasource
+        )
+        credential = get_one(
+            _db,
+            Credential,
+            data_source=data_source,
+            type=token_type,
+            patron=patron
+        )
+
+        return cls._filter_invalid_credential(credential, allow_persistent_token)

     @classmethod
     def lookup_and_expire_temporary_token(cls, _db, data_source, type, token):
@@ -147,7 +226,13 @@ def lookup_and_expire_temporary_token(cls, _db, data_source, type, token):

     @classmethod
     def temporary_token_create(
-        self, _db, data_source, type, patron, duration, value=None
+        cls,
+        _db,
+        data_source,
+        token_type,
+        patron,
+        duration,
+        value=None
     ):
         """Create a temporary token for the given data_source/type/patron.
         The token will be good for the specified `duration`.
@@ -155,7 +240,7 @@ def temporary_token_create(
         expires = datetime.datetime.utcnow() + duration
         token_string = value or str(uuid.uuid1())
         credential, is_new = get_one_or_create(
-            _db, Credential, data_source=data_source, type=type, patron=patron)
+            _db, Credential, data_source=data_source, type=token_type, patron=patron)
         # If there was already a token of this type for this patron,
         # the new one overwrites the old one.
         credential.credential=token_string
@@ -223,6 +308,7 @@ class DRMDeviceIdentifier(Base):
     credential_id = Column(Integer, ForeignKey('credentials.id'), index=True)
     device_identifier = Column(String(255), index=True)

+
 class DelegatedPatronIdentifier(Base):
     """This library is in charge of coming up with, and storing,
     identifiers associated with the patrons of some other library.
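# ---------------------------------------------------------------------------
# Usage sketch (not part of the patch): lookup_by_patron() differs from
# lookup_by_token() in that it takes a data source *name* and a Patron row,
# validates its arguments up front (raising ValueError instead of failing
# later), and can auto-create the data source.  The token type string is
# hypothetical; imports assume this repository is importable as `core`.
def example_lookup_by_patron(_db, patron):
    from core.model.constants import DataSourceConstants
    from core.model.credential import Credential

    return Credential.lookup_by_patron(
        _db,
        data_source_name=DataSourceConstants.PROQUEST,
        token_type="Hypothetical ProQuest JWT Token",
        patron=patron,
        allow_persistent_token=False,
        auto_create_datasource=True,
    )
# ---------------------------------------------------------------------------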
diff --git a/model/datasource.py b/model/datasource.py
index cb63f1f3a..2dd550ad7 100644
--- a/model/datasource.py
+++ b/model/datasource.py
@@ -222,7 +222,8 @@ def well_known_sources(cls, _db):
                 (cls.INTERNAL_PROCESSING, False, False, None, None),
                 (cls.FEEDBOOKS, True, False, IdentifierConstants.URI, None),
                 (cls.BIBBLIO, False, True, IdentifierConstants.BIBBLIO_CONTENT_ITEM_ID, None),
-                (cls.ENKI, True, False, IdentifierConstants.ENKI_ID, None)
+                (cls.ENKI, True, False, IdentifierConstants.ENKI_ID, None),
+                (cls.PROQUEST, True, False, IdentifierConstants.PROQUEST_ID, None)
         ):

             obj = DataSource.lookup(
diff --git a/model/identifier.py b/model/identifier.py
index 29753ee15..2f99eca0a 100644
--- a/model/identifier.py
+++ b/model/identifier.py
@@ -1,59 +1,58 @@
 # encoding: utf-8
 # Identifier, Equivalency
-from nose.tools import set_trace
-
-from . import (
-    Base,
-    create,
-    get_one,
-    get_one_or_create,
-    PresentationCalculationPolicy
-)
-from coverage import CoverageRecord
-from classification import (
-    Classification,
-    Subject,
-)
-from constants import (
-    IdentifierConstants,
-    LinkRelations,
-)
-from datasource import DataSource
-from licensing import (
-    LicensePoolDeliveryMechanism,
-    RightsStatus,
-)
-from measurement import Measurement
-
-from collections import defaultdict
 import datetime
-from functools import total_ordering
-import isbnlib
 import logging
 import random
+import urllib
+from abc import ABCMeta, abstractmethod
+from collections import defaultdict
+from functools import total_ordering
+
+import isbnlib
+import six
+from classification import Classification, Subject
+from constants import IdentifierConstants, LinkRelations
+from coverage import CoverageRecord
+from datasource import DataSource
+from licensing import LicensePoolDeliveryMechanism, RightsStatus
+from measurement import Measurement
 from sqlalchemy import (
     Boolean,
     Column,
     Float,
     ForeignKey,
-    func,
     Integer,
     String,
     UniqueConstraint,
+    func,
 )
-from sqlalchemy.orm import (
-    joinedload,
-    relationship,
-)
+from sqlalchemy.orm import joinedload, relationship
+from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
 from sqlalchemy.orm.session import Session
 from sqlalchemy.sql import select
-from sqlalchemy.sql.expression import (
-    and_,
-    or_,
-)
-import urllib
+from sqlalchemy.sql.expression import and_, or_
+
 from ..util.string_helpers import native_string
 from ..util.summary import SummaryEvaluator
+from . import Base, PresentationCalculationPolicy, create, get_one, get_one_or_create
+
+
+@six.add_metaclass(ABCMeta)
+class IdentifierParser(object):
+    """Interface for identifier parsers."""
+
+    @abstractmethod
+    def parse(self, identifier_string):
+        """Parse a string containing an identifier, extract it and determine its type.
+
+        :param identifier_string: String containing an identifier
+        :type identifier_string: str
+
+        :return: 2-tuple containing the identifier's type and identifier itself or None
+            if the string contains an incorrect identifier
+        :rtype: Optional[Tuple[str, str]]
+        """
+        raise NotImplementedError()


 @total_ordering
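# ---------------------------------------------------------------------------
# Implementation sketch (not part of the patch): a hypothetical concrete
# parser built on the IdentifierParser interface above.  The URN prefix and
# regular expression are illustrative only; the real ProQuest parser is not
# part of this diff.  Imports assume this repository is importable as `core`.
import re

from core.model.constants import IdentifierConstants
from core.model.identifier import IdentifierParser


class ExampleProQuestIdentifierParser(IdentifierParser):
    """Parses strings like urn:proquest.com/document-id/12345."""

    _REGEX = re.compile(r"^urn:proquest\.com/document-id/(\d+)$")

    def parse(self, identifier_string):
        match = self._REGEX.match(identifier_string)
        if not match:
            # Not an identifier this parser understands.
            return None
        return IdentifierConstants.PROQUEST_ID, match.group(1)
# ---------------------------------------------------------------------------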
@@ -367,18 +366,80 @@ def find_existing_identifiers(identifier_details):
         return identifiers_by_urn, failures

     @classmethod
-    def parse_urn(cls, _db, identifier_string, must_support_license_pools=False):
-        type, identifier_string = cls.type_and_identifier_for_urn(identifier_string)
+    def _parse_urn(cls, _db, identifier_string, identifier_type, must_support_license_pools=False):
+        """Parse identifier string.
+
+        :param _db: Database session
+        :type _db: sqlalchemy.orm.session.Session
+
+        :param identifier_string: Identifier itself
+        :type identifier_string: str
+
+        :param identifier_type: Identifier's type
+        :type identifier_type: str
+
+        :param must_support_license_pools: Boolean value indicating whether there should be a DataSource that provides
+            licenses for books identified by the given identifier
+        :type must_support_license_pools: bool
+
+        :return: 2-tuple containing Identifier object and a boolean value indicating whether it's new
+        :rtype: Tuple[Identifier, bool]
+        """
         if must_support_license_pools:
             try:
-                ls = DataSource.license_source_for(_db, type)
+                _ = DataSource.license_source_for(_db, identifier_type)
             except NoResultFound:
                 raise Identifier.UnresolvableIdentifierException()
             except MultipleResultsFound:
-                # This is fine.
+                # This is fine.
                 pass
-        return cls.for_foreign_id(_db, type, identifier_string)
+        return cls.for_foreign_id(_db, identifier_type, identifier_string)
+
+    @classmethod
+    def parse_urn(cls, _db, identifier_string, must_support_license_pools=False):
+        """Parse identifier string.
+
+        :param _db: Database session
+        :type _db: sqlalchemy.orm.session.Session
+
+        :param identifier_string: String containing an identifier
+        :type identifier_string: str
+
+        :param must_support_license_pools: Boolean value indicating whether there should be a DataSource that provides
+            licenses for books identified by the given identifier
+        :type must_support_license_pools: bool
+
+        :return: 2-tuple containing Identifier object and a boolean value indicating whether it's new
+        :rtype: Tuple[Identifier, bool]
+        """
+        identifier_type, identifier_string = cls.type_and_identifier_for_urn(identifier_string)
+
+        return cls._parse_urn(_db, identifier_string, identifier_type, must_support_license_pools)
+
+    @classmethod
+    def parse(cls, _db, identifier_string, parser, must_support_license_pools=False):
+        """Parse identifier string.
+
+        :param _db: Database session
+        :type _db: sqlalchemy.orm.session.Session
+
+        :param identifier_string: String containing an identifier
+        :type identifier_string: str
+
+        :param parser: Identifier parser
+        :type parser: IdentifierParser
+
+        :param must_support_license_pools: Boolean value indicating whether there should be a DataSource that provides
+            licenses for books identified by the given identifier
+        :type must_support_license_pools: bool
+
+        :return: 2-tuple containing Identifier object and a boolean value indicating whether it's new
+        :rtype: Tuple[Identifier, bool]
+        """
+        identifier_type, identifier_string = parser.parse(identifier_string)
+
+        return cls._parse_urn(_db, identifier_string, identifier_type, must_support_license_pools)

     def equivalent_to(self, data_source, identifier, strength):
         """Make one Identifier equivalent to another.
@@ -484,11 +545,7 @@ def add_link(self, rel, href, data_source, media_type=None, content=None,
         fetching, mirroring and scaling Representations
         as links are created. It might be good to move that code into here.
         """
-        from resource import (
-            Resource,
-            Hyperlink,
-            Representation,
-        )
+        from resource import Hyperlink, Representation, Resource
         _db = Session.object_session(self)

         # Find or create the Resource.
@@ -612,10 +669,7 @@ def classify(self, data_source, subject_type, subject_identifier,
     @classmethod
     def resources_for_identifier_ids(self, _db, identifier_ids, rel=None,
                                      data_source=None):
-        from resource import (
-            Resource,
-            Hyperlink,
-        )
+        from resource import Hyperlink, Resource
         resources = _db.query(Resource).join(Resource.links).filter(
             Hyperlink.identifier_id.in_(identifier_ids))
         if data_source:
@@ -640,10 +694,7 @@ def classifications_for_identifier_ids(self, _db, identifier_ids):
     def best_cover_for(cls, _db, identifier_ids, rel=None):
         # Find all image resources associated with any of
         # these identifiers.
-        from resource import (
-            Resource,
-            Hyperlink,
-        )
+        from resource import Hyperlink, Resource
         rel = rel or Hyperlink.IMAGE
         images = cls.resources_for_identifier_ids(
             _db, identifier_ids, rel)
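# ---------------------------------------------------------------------------
# Usage sketch (not part of the patch): Identifier.parse() is the
# parser-driven counterpart of Identifier.parse_urn(); both now funnel into
# the shared _parse_urn() helper.  Uses the hypothetical parser sketched
# above; the identifier string is illustrative.
def example_parse_proquest_identifier(_db):
    from core.model.identifier import Identifier

    parser = ExampleProQuestIdentifierParser()
    identifier, is_new = Identifier.parse(
        _db,
        "urn:proquest.com/document-id/12345",
        parser,
        must_support_license_pools=True,
    )
    # identifier.type is IdentifierConstants.PROQUEST_ID; is_new says whether
    # the row was just created.  UnresolvableIdentifierException is raised if
    # no DataSource offers licenses for this identifier type.
    return identifier, is_new
# ---------------------------------------------------------------------------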
diff --git a/opds2_import.py b/opds2_import.py
index 4bb5717dd..dedc5ec47 100644
--- a/opds2_import.py
+++ b/opds2_import.py
@@ -475,6 +475,19 @@ def _extract_medium(publication, default_medium=Edition.BOOK_MEDIUM):

         return medium

+    def _extract_identifier(self, publication):
+        """Extract the publication's identifier from its metadata.
+
+        :param publication: Publication object
+        :type publication: opds2_core.OPDS2Publication
+
+        :return: Identifier object
+        :rtype: Identifier
+        """
+        identifier, _ = Identifier.parse_urn(self._db, publication.metadata.identifier)
+
+        return identifier
+
     def _extract_publication_metadata(self, feed, publication, data_source_name):
         """Extract a Metadata object from webpub-manifest-parser's publication.

@@ -560,7 +573,7 @@ def _extract_publication_metadata(self, feed, publication, data_source_name):

         last_opds_update = publication.metadata.modified

-        identifier, _ = Identifier.parse_urn(self._db, publication.metadata.identifier)
+        identifier = self._extract_identifier(publication)
         identifier_data = IdentifierData(
             type=identifier.type, identifier=identifier.identifier
         )
@@ -747,7 +760,17 @@ def _parse_feed(self, feed, silent=True):

                 if not silent:
                     raise
+        elif isinstance(feed, dict):
+            try:
+                parser_factory = OPDS2DocumentParserFactory()
+                parser = parser_factory.create()
+
+                parsed_feed = parser.parse_json(feed)
+            except BaseError:
+                self._logger.exception("Failed to parse the OPDS 2.0 feed")
+                if not silent:
+                    raise
         elif isinstance(feed, opds2_ast.OPDS2Feed):
             parsed_feed = feed
         else:
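# ---------------------------------------------------------------------------
# Usage sketch (not part of the patch): _parse_feed() now accepts an
# already-deserialized JSON document (a dict) in addition to a raw string and
# an opds2_ast.OPDS2Feed, parsing it with the same OPDS2DocumentParserFactory
# used for strings.  `importer` is assumed to be a configured OPDS2Importer;
# the file name is hypothetical.
import json


def example_parse_feed_from_dict(importer):
    with open("opds2_feed.json") as feed_file:
        feed_document = json.load(feed_file)   # plain dict, not a string

    # With silent=False any parsing error is re-raised instead of swallowed.
    return importer._parse_feed(feed_document, silent=False)
# ---------------------------------------------------------------------------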
diff --git a/opds_import.py b/opds_import.py
index 116dc4b43..8c0af003d 100644
--- a/opds_import.py
+++ b/opds_import.py
@@ -559,7 +559,7 @@ def __init__(self, _db, collection, data_source_name=None,
         """
         self._db = _db
         self.log = logging.getLogger("OPDS Importer")
-        self.collection = collection
+        self._collection_id = collection.id if collection else None
         if self.collection and not data_source_name:
             # Use the Collection data_source for OPDS import.
             data_source = self.collection.data_source
@@ -577,7 +577,7 @@ def __init__(self, _db, collection, data_source_name=None,
         self.identifier_mapping = identifier_mapping
         try:
             self.metadata_client = metadata_client or MetadataWranglerOPDSLookup.from_config(_db, collection=collection)
-        except CannotLoadConfiguration, e:
+        except CannotLoadConfiguration:
             # The Metadata Wrangler isn't configured, but we can import without it.
             self.log.warn("Metadata Wrangler integration couldn't be loaded, importing without it.")
             self.metadata_client = None
@@ -608,6 +608,18 @@ def __init__(self, _db, collection, data_source_name=None,
         self.http_get = http_get or Representation.cautious_http_get
         self.map_from_collection = map_from_collection

+    @property
+    def collection(self):
+        """Returns an associated Collection object
+
+        :return: Associated Collection object
+        :rtype: Optional[Collection]
+        """
+        if self._collection_id:
+            return Collection.by_id(self._db, id=self._collection_id)
+
+        return None
+
     @property
     def data_source(self):
         """Look up or create a DataSource object representing the
@@ -1981,7 +1993,7 @@ def import_one_feed(self, feed):
             )
         return imported_editions, failures

-    def run_once(self, progress_ignore):
+    def _get_feeds(self):
         feeds = []
         queue = [self.feed_url]
         seen_links = set([])
@@ -1989,9 +2001,6 @@ def run_once(self, progress_ignore):
         # First, follow the feed's next links until we reach a page with
         # nothing new. If any link raises an exception, nothing will be imported.

-        total_imported = 0
-        total_failures = 0
-
         while queue:
             new_queue = []

@@ -2008,7 +2017,16 @@ def run_once(self, progress_ignore):
         # Start importing at the end. If something fails, it will be easier to
         # pick up where we left off.
-        for link, feed in reversed(feeds):
+        feeds = reversed(feeds)
+
+        return feeds
+
+    def run_once(self, progress_ignore):
+        feeds = self._get_feeds()
+        total_imported = 0
+        total_failures = 0
+
+        for link, feed in feeds:
             self.log.info("Importing next feed: %s", link)
             imported_editions, failures = self.import_one_feed(feed)
             total_imported += len(imported_editions)
diff --git a/requirements.txt b/requirements.txt
index d1dfa3d32..584a89c31 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -12,6 +12,7 @@ expiringdict==1.2.1
 feedparser==5.2.1
 Flask==1.1.2
 Flask-Babel==1.0.0
+flask-sqlalchemy-session==1.1
 funcsigs==1.0.2
 futures==3.3.0
 # fuzzywuzzy is for author name manipulations
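# ---------------------------------------------------------------------------
# Design sketch (not part of the patch): the importer now stores only
# collection.id and re-fetches the row through the new `collection` property,
# presumably so the importer no longer holds a mapped instance tied to a
# session that may have gone away (e.g. when used with the
# flask-sqlalchemy-session scoped sessions added above).  The class below is
# a hypothetical illustration of the same pattern; Collection.by_id is the
# lookup used in the patch itself.
from core.model import Collection


class ExampleCollectionAwareService(object):
    def __init__(self, _db, collection):
        self._db = _db
        # Keep only the primary key, never the mapped instance itself.
        self._collection_id = collection.id if collection else None

    @property
    def collection(self):
        if not self._collection_id:
            return None
        # Look the row up again in whatever session is current.
        return Collection.by_id(self._db, id=self._collection_id)
# ---------------------------------------------------------------------------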
diff --git a/tests/util/test_string_helpers.py b/tests/util/test_string_helpers.py
index 69b9c5c51..8562f5afc 100644
--- a/tests/util/test_string_helpers.py
+++ b/tests/util/test_string_helpers.py
@@ -1,23 +1,17 @@
 # encoding: utf-8
 # Test the helper objects in util.string.
-from nose.tools import (
-    assert_raises,
-    eq_,
-)
 import base64 as stdlib_base64
-import random
 import re
-from ...util.string_helpers import (
-    UnicodeAwareBase64,
-    base64,
-    random_string
-)
+from nose.tools import assert_raises, eq_
+from parameterized import parameterized
+
+from ...util.string_helpers import UnicodeAwareBase64, base64, is_string, random_string

-class TestUnicodeAwareBase64(object):
-    def test_encoding(self):

+class TestUnicodeAwareBase64(object):
+    def test_encoding(self):
         string = u"םולש"

         # Run the same tests against two different encodings that can
@@ -85,8 +79,7 @@ def test_default_is_base64(self):
         eq_(b"4piD", stdlib_base64.b64encode(snowman_utf8))

-class TestRandomstring(object):
-
+class TestRandomString(object):
     def test_random_string(self):
         m = random_string
         eq_("", m(0))
@@ -109,4 +102,18 @@ def test_random_string(self):

         # Each byte is represented as two digits, so the length of the
         # string is twice the length passed in to the function.
-        eq_(size*2, len(x))
+        eq_(size * 2, len(x))
+
+
+class TestIsString(object):
+    @parameterized.expand(
+        [
+            ("byte_string", "test", True),
+            ("unicode_string", u"test", True),
+            ("not_string", 1, False),
+        ]
+    )
+    def test_is_string(self, _, value, expected_result):
+        result = is_string(value)
+
+        eq_(expected_result, result)
diff --git a/util/__init__.py b/util/__init__.py
index 9ff21b6ab..0712c20d1 100644
--- a/util/__init__.py
+++ b/util/__init__.py
@@ -1,20 +1,20 @@
 # encoding: utf-8
-"Miscellaneous utilities"
-from money import Money
-from nose.tools import set_trace
-from collections import Counter
-import os
+"""Miscellaneous utilities"""
+
 import re
 import string
+from collections import Counter
+
+import flask_sqlalchemy_session
+import sqlalchemy
+from money import Money
 from sqlalchemy import distinct
 from sqlalchemy.sql.functions import func

 # For backwards compatibility, import items that were moved to
 # languages.py
-from .languages import (
-    LanguageCodes,
-    LookupTable,
-)
+from .languages import LanguageCodes, LookupTable
+

 def batch(iterable, size=1):
     """Split up `iterable` into batches of size `size`."""
@@ -533,3 +533,31 @@ def parse(cls, amount):
             amount = amount[1:]

         return Money(amount, currency)
+
+def is_session(value):
+    """Return a boolean value indicating whether the value is a valid SQLAlchemy session.
+
+    :param value: Value
+    :type value: Any
+
+    :return: Boolean value indicating whether the value is a valid SQLAlchemy session or not
+    :rtype: bool
+    """
+    return isinstance(value, (sqlalchemy.orm.session.Session, flask_sqlalchemy_session.flask_scoped_session))
+
+
+def first_or_default(collection, default=None):
+    """Return first element of the specified collection or the default value if the collection is empty.
+
+    :param collection: Collection
+    :type collection: Iterable
+
+    :param default: Default value
+    :type default: Any
+    """
+    element = next(iter(collection), None)
+
+    if element is None:
+        element = default
+
+    return element
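# ---------------------------------------------------------------------------
# Usage sketch (not part of the patch): quick checks showing what the two new
# helpers in util/__init__.py return.  `_db` is assumed to be a live
# SQLAlchemy session (plain or flask-scoped); the import assumes this
# repository is importable as `core`.
def example_util_helpers(_db):
    from core.util import first_or_default, is_session

    assert is_session(_db)
    assert not is_session("definitely not a session")

    assert first_or_default([]) is None          # empty -> default (None)
    assert first_or_default([], default=0) == 0  # explicit default wins
    assert first_or_default([1, 2, 3]) == 1      # otherwise the first element
# ---------------------------------------------------------------------------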
diff --git a/util/problem_detail.py b/util/problem_detail.py
index 59f061181..a326c1fc7 100644
--- a/util/problem_detail.py
+++ b/util/problem_detail.py
@@ -4,9 +4,12 @@
 """
 import json as j
 import logging
+
 from flask_babel import LazyString
 from nose.tools import set_trace

+from ..exceptions import BaseError
+
 JSON_MEDIA_TYPE = "application/api-problem+json"

@@ -20,6 +23,7 @@ def json(type, status, title, detail=None, instance=None, debug_message=None):
         d['debug_message'] = debug_message
     return j.dumps(d)

+
 class ProblemDetail(object):
     """A common type of problem."""

@@ -79,3 +83,27 @@ def with_debug(self, debug_message, detail=None, status_code=None,
             self.uri, status_code or self.status_code, title or self.title,
             detail or self.detail, instance or self.instance, debug_message
         )
+
+
+class ProblemError(BaseError):
+    """Exception class allowing to raise and catch ProblemDetail objects."""
+
+    def __init__(self, problem_detail):
+        """Initialize a new instance of ProblemError class.
+
+        :param problem_detail: ProblemDetail object
+        :type problem_detail: ProblemDetail
+        """
+        if not isinstance(problem_detail, ProblemDetail):
+            raise ValueError('Argument "problem_detail" must be an instance of ProblemDetail class')
+
+        self._problem_detail = problem_detail
+
+    @property
+    def problem_detail(self):
+        """Return the ProblemDetail object associated with this exception.
+
+        :return: ProblemDetail object associated with this exception
+        :rtype: ProblemDetail
+        """
+        return self._problem_detail
diff --git a/util/string_helpers.py b/util/string_helpers.py
index ac05b4e07..fb5272ede 100644
--- a/util/string_helpers.py
+++ b/util/string_helpers.py
@@ -7,6 +7,9 @@
 import os
 import sys

+import six
+
+
 class UnicodeAwareBase64(object):
     """Simulate the interface of the base64 module, but make it
     look as though base64-encoding and -decoding works on Unicode strings.
@@ -84,3 +87,20 @@ class for this version of Python.
         if isinstance(x, bytes):
             x = x.decode("utf8")
         return x
+
+
+def is_string(value):
+    """Return a boolean value indicating whether the value is a string or not.
+
+    This method is compatible with both Python 2.7 and Python 3.x.
+    NOTE:
+    1. We can't use isinstance(string_value, str) because strings in Python 2.7 can have "unicode" type.
+    2. We can't use isinstance(string_value, basestring) because "basestring" type is not available in Python 3.x.
+
+    :param value: Value
+    :type value: Any
+
+    :return: Boolean value indicating whether the value is a string or not
+    :rtype: bool
+    """
+    return isinstance(value, six.string_types)
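# ---------------------------------------------------------------------------
# Usage sketch (not part of the patch): ProblemError lets a ProblemDetail
# travel through ordinary exception handling and be recovered intact by the
# caller.  The problem URI is hypothetical, and the keyword arguments mirror
# the ProblemDetail attributes used in with_debug().  Imports assume this
# repository is importable as `core`.
from core.util.problem_detail import ProblemDetail, ProblemError

HYPOTHETICAL_PROBLEM = ProblemDetail(
    "http://librarysimplified.org/terms/problem/hypothetical-error",
    status_code=400,
    title="Something went wrong",
)


def example_raise_and_catch():
    try:
        raise ProblemError(HYPOTHETICAL_PROBLEM)
    except ProblemError as error:
        # The original ProblemDetail is available on the exception.
        # Note: ProblemError(anything_else) raises ValueError immediately.
        return error.problem_detail
# ---------------------------------------------------------------------------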