diff --git a/djqscsv/__init__.py b/djqscsv/__init__.py
index 63f0ff0..d5439ee 100644
--- a/djqscsv/__init__.py
+++ b/djqscsv/__init__.py
@@ -1,2 +1,2 @@
-from djqscsv import (render_to_csv_response, write_csv,  # NOQA
+from .djqscsv import (render_to_csv_response, write_csv,  # NOQA
                      generate_filename, CSVException)  # NOQA
diff --git a/djqscsv/djqscsv.py b/djqscsv/djqscsv.py
index c879f4b..fdbd51b 100644
--- a/djqscsv/djqscsv.py
+++ b/djqscsv/djqscsv.py
@@ -70,7 +70,7 @@ def write_csv(queryset, file_obj, **kwargs):
             csv_kwargs[key] = val
 
     # add BOM to support CSVs in MS Excel (for Windows only)
-    file_obj.write(_safe_utf8_stringify(u'\ufeff'))
+    file_obj.write(_safe_utf8_stringify(six.u('\ufeff')))
 
     # the CSV must always be built from a values queryset
     # in order to introspect the necessary fields.
@@ -134,7 +134,7 @@ def generate_filename(queryset, append_datestamp=False):
     Takes a queryset and returns a default
     base filename based on the underlying model
     """
-    base_filename = slugify(unicode(queryset.model.__name__)) + '_export.csv'
+    base_filename = slugify(six.text_type(queryset.model.__name__)) + '_export.csv'
 
     if append_datestamp:
         base_filename = _append_datestamp(base_filename)
@@ -154,17 +154,17 @@ def _validate_and_clean_filename(filename):
         else:
             filename = filename[:-4]
 
-    filename = slugify(unicode(filename)) + '.csv'
+    filename = slugify(six.text_type(filename)) + '.csv'
     return filename
 
 
 def _safe_utf8_stringify(value):
     if isinstance(value, str):
         return value
-    elif isinstance(value, unicode):
+    elif isinstance(value, six.text_type):
         return value.encode('utf-8')
     else:
-        return unicode(value).encode('utf-8')
+        return six.text_type(value).encode('utf-8')
 
 
 def _sanitize_unicode_record(field_serializer_map, record):
@@ -175,7 +175,7 @@ def _serialize_value(value):
         if isinstance(value, datetime.datetime):
             return value.isoformat()
         else:
-            return unicode(value)
+            return six.text_type(value)
 
     obj = {}
     for key, val in six.iteritems(record):
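
Note (illustrative only, not part of the patch): the substitutions above lean on `six` to bridge the Python 2/3 text-type split. A minimal sketch of the equivalences the diff assumes; `six.text_type` and `six.u` are real six APIs, while the sample values below are made up for demonstration.

# Sketch of the behavior the patched helpers rely on (sample values are hypothetical).
import six

# six.text_type is `unicode` on Python 2 and `str` on Python 3, so
# slugify(six.text_type(...)) behaves the same under both interpreters.
assert six.text_type(42) == u'42'

# six.u('...') builds a text value on both versions without the u'' literal
# prefix, which Python 3.0-3.2 rejects; write_csv uses it for the UTF-8 BOM.
bom = six.u('\ufeff')
assert isinstance(bom, six.text_type)

# _safe_utf8_stringify (patched above) now funnels non-str values through
# six.text_type before encoding, e.g.:
assert six.text_type(None).encode('utf-8') == b'None'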