diff --git a/csvkit/cleanup.py b/csvkit/cleanup.py
index 63e13311b..149f684c8 100644
--- a/csvkit/cleanup.py
+++ b/csvkit/cleanup.py
@@ -21,7 +21,7 @@ def join_rows(rows, joiner=' '):
     return fixed_row
 
 
-class RowChecker(object):
+class RowChecker:
     """
     Iterate over rows of a CSV producing cleaned rows and storing error rows.
    """
diff --git a/csvkit/cli.py b/csvkit/cli.py
index 62c94884c..471119670 100644
--- a/csvkit/cli.py
+++ b/csvkit/cli.py
@@ -2,29 +2,19 @@
 
 import argparse
 import bz2
-import codecs
 import gzip
 import itertools
+import lzma
 import sys
 import warnings
 from os.path import splitext
 
 import agate
-import six
-
-if six.PY3:
-    import lzma
-elif six.PY2:
-    # Try import backports.lzma if available
-    try:
-        from backports import lzma
-    except ImportError:
-        lzma = None
 
 from csvkit.exceptions import ColumnIdentifierError, RequiredHeaderError
 
 
-class LazyFile(six.Iterator):
+class LazyFile:
     """
     A proxy for a File object that delays opening it until a read
     method is called.
@@ -65,7 +55,7 @@ def __next__(self):
         return next(self.f)
 
 
-class CSVKitUtility(object):
+class CSVKitUtility:
     description = ''
     epilog = ''
     override_flags = ''
@@ -241,32 +231,21 @@ def _open_input_file(self, path):
         """
         Open the input file specified on the command line.
         """
-        if six.PY2:
-            mode = 'Urb'
-            kwargs = {}
-        else:
-            mode = 'rt'  # default
-            kwargs = {'encoding': self.args.encoding}
-
         if not path or path == '-':
             f = sys.stdin
         else:
             extension = splitext(path)[1]
 
             if extension == '.gz':
-                f = LazyFile(gzip.open, path, mode, **kwargs)
+                func = gzip.open
             elif extension == '.bz2':
-                if six.PY2:
-                    f = LazyFile(bz2.BZ2File, path, mode, **kwargs)
-                else:
-                    f = LazyFile(bz2.open, path, mode, **kwargs)
+                func = bz2.open
             elif extension == ".xz":
-                if lzma is not None:
-                    f = LazyFile(lzma.open, path, mode, **kwargs)
-                else:
-                    raise RuntimeError("backports.lzma is needed for .xz support with Python 2")
+                func = lzma.open
             else:
-                f = LazyFile(open, path, mode, **kwargs)
+                func = open
+
+            f = LazyFile(func, path, mode='rt', encoding=self.args.encoding)
 
         return f
 
@@ -286,9 +265,6 @@ def _extract_csv_reader_kwargs(self):
             if value is not None:
                 kwargs[arg] = value
 
-        if six.PY2 and self.args.encoding:
-            kwargs['encoding'] = self.args.encoding
-
         if getattr(self.args, 'no_header_row', None):
             kwargs['header'] = not self.args.no_header_row
 
@@ -309,9 +285,6 @@ def _install_exception_handler(self):
         """
         Installs a replacement for sys.excepthook, which handles pretty-printing uncaught exceptions.
         """
-        if six.PY2:
-            sys.stderr = codecs.getwriter('utf-8')(sys.stderr)
-
         def handler(t, value, traceback):
             if self.args.verbose:
                 sys.__excepthook__(t, value, traceback)
@@ -323,7 +296,7 @@ def handler(t, value, traceback):
                                     'flag or with the PYTHONIOENCODING environment variable. Use the -v flag to see '
                                     'the complete error.\n' % self.args.encoding)
             else:
-                sys.stderr.write('%s: %s\n' % (t.__name__, six.text_type(value)))
+                sys.stderr.write('%s: %s\n' % (t.__name__, str(value)))
 
         sys.excepthook = handler
 
@@ -360,8 +333,7 @@ def get_column_types(self):
     def get_column_offset(self):
         if self.args.zero_based:
             return 0
-        else:
-            return 1
+        return 1
 
     def skip_lines(self):
         if isinstance(self.args.skip_lines, int):
@@ -444,24 +416,24 @@ def match_column_identifier(column_names, c, column_offset=1):
     Note that integer values are *always* treated as positional identifiers. If you happen to have column names
     which are also integers, you must specify them using a positional index.
""" - if isinstance(c, six.string_types) and not c.isdigit() and c in column_names: + if isinstance(c, str) and not c.isdigit() and c in column_names: return column_names.index(c) - else: - try: - c = int(c) - column_offset - # Fail out if neither a column name nor an integer - except ValueError: - raise ColumnIdentifierError("Column '%s' is invalid. It is neither an integer nor a column name. " - "Column names are: %s" % (c, repr(column_names)[1:-1])) - - # Fail out if index is 0-based - if c < 0: - raise ColumnIdentifierError("Column %i is invalid. Columns are 1-based." % (c + column_offset)) - - # Fail out if index is out of range - if c >= len(column_names): - raise ColumnIdentifierError("Column %i is invalid. The last column is '%s' at index %i." % ( - c + column_offset, column_names[-1], len(column_names) - 1 + column_offset)) + + try: + c = int(c) - column_offset + # Fail out if neither a column name nor an integer + except ValueError: + raise ColumnIdentifierError("Column '%s' is invalid. It is neither an integer nor a column name. " + "Column names are: %s" % (c, repr(column_names)[1:-1])) + + # Fail out if index is 0-based + if c < 0: + raise ColumnIdentifierError("Column %i is invalid. Columns are 1-based." % (c + column_offset)) + + # Fail out if index is out of range + if c >= len(column_names): + raise ColumnIdentifierError("Column %i is invalid. The last column is '%s' at index %i." % ( + c + column_offset, column_names[-1], len(column_names) - 1 + column_offset)) return c diff --git a/csvkit/convert/__init__.py b/csvkit/convert/__init__.py index f95a0693d..c8beefc8a 100644 --- a/csvkit/convert/__init__.py +++ b/csvkit/convert/__init__.py @@ -15,7 +15,7 @@ def guess_format(filename): if extension in ('csv', 'dbf', 'fixed', 'xls', 'xlsx'): return extension - elif extension in ['json', 'js']: + if extension in ('json', 'js'): return 'json' return None diff --git a/csvkit/convert/fixed.py b/csvkit/convert/fixed.py index 03cbb7138..5c04d1cfb 100644 --- a/csvkit/convert/fixed.py +++ b/csvkit/convert/fixed.py @@ -2,9 +2,9 @@ from codecs import iterdecode from collections import namedtuple +from io import StringIO import agate -import six def fixed2csv(f, schema, output=None, skip_lines=0, **kwargs): @@ -27,10 +27,10 @@ def fixed2csv(f, schema, output=None, skip_lines=0, **kwargs): :param skip_lines: The number of lines to skip from the top of the file. """ - streaming = True if output else False + streaming = bool(output) if not streaming: - output = six.StringIO() + output = StringIO() try: encoding = kwargs['encoding'] @@ -59,7 +59,7 @@ def fixed2csv(f, schema, output=None, skip_lines=0, **kwargs): return '' -class FixedWidthReader(six.Iterator): +class FixedWidthReader: """ Given a fixed-width file and a schema file, produce an analog to a csv reader that yields a row of strings for each line in the fixed-width file, @@ -95,7 +95,7 @@ def __next__(self): FixedWidthField = namedtuple('FixedWidthField', ['name', 'start', 'length']) -class FixedWidthRowParser(object): +class FixedWidthRowParser: """ Instantiated with a schema, able to return a sequence of trimmed strings representing fields given a fixed-length line. Flexible about where the @@ -135,7 +135,7 @@ def headers(self): return [field.name for field in self.fields] -class SchemaDecoder(object): +class SchemaDecoder: """ Extracts column, start, and length columns from schema rows. 
     Once instantiated, each time the instance is called with a row, a
diff --git a/csvkit/convert/geojs.py b/csvkit/convert/geojs.py
index 1a7b07e36..140f2f7eb 100644
--- a/csvkit/convert/geojs.py
+++ b/csvkit/convert/geojs.py
@@ -1,14 +1,10 @@
 #!/usr/bin/env python
 
-try:
-    import json
-    from collections import OrderedDict
-except ImportError:
-    from ordereddict import OrderedDict
-    import simplejson as json
+import json
+from collections import OrderedDict
+from io import StringIO
 
 import agate
-import six
 
 
 def geojson2csv(f, key=None, **kwargs):
@@ -58,7 +54,7 @@ def geojson2csv(f, key=None, **kwargs):
     header.extend(property_fields)
     header.extend(('geojson', 'type', 'longitude', 'latitude'))
 
-    o = six.StringIO()
+    o = StringIO()
     writer = agate.csv.writer(o)
     writer.writerow(header)
 
diff --git a/csvkit/exceptions.py b/csvkit/exceptions.py
index dcf4ce3b2..f6ac6f431 100644
--- a/csvkit/exceptions.py
+++ b/csvkit/exceptions.py
@@ -30,7 +30,7 @@ class CSVTestException(CustomException):
     """
 
     def __init__(self, line_number, row, msg):
-        super(CSVTestException, self).__init__(msg)
+        super().__init__(msg)
         self.line_number = line_number
         self.row = row
 
@@ -42,7 +42,7 @@ class LengthMismatchError(CSVTestException):
 
     def __init__(self, line_number, row, expected_length):
         msg = 'Expected %i columns, found %i columns' % (expected_length, len(row))
-        super(LengthMismatchError, self).__init__(line_number, row, msg)
+        super().__init__(line_number, row, msg)
 
     @property
     def length(self):
@@ -59,7 +59,7 @@ def __init__(self, index, value, normal_type):
         self.value = value
         self.normal_type = normal_type
         msg = 'Unable to convert "%s" to type %s (at index %i)' % (value, normal_type, index)
-        super(InvalidValueForTypeException, self).__init__(msg)
+        super().__init__(msg)
 
 
 class RequiredHeaderError(CustomException):
diff --git a/csvkit/grep.py b/csvkit/grep.py
index d80b37a0d..44f233016 100644
--- a/csvkit/grep.py
+++ b/csvkit/grep.py
@@ -1,11 +1,10 @@
 #!/usr/bin/env python
 
-import six
 
 from csvkit.exceptions import ColumnIdentifierError
 
 
-class FilteringCSVReader(six.Iterator):
+class FilteringCSVReader:
     r"""
     Given any row iterator, only return rows which pass the filter.
     If 'header' is False, then all rows must pass the filter; by default, the first row will be passed
@@ -34,7 +33,7 @@ class FilteringCSVReader(six.Iterator):
     column_names = None
 
     def __init__(self, reader, patterns, header=True, any_match=False, inverse=False):
-        super(FilteringCSVReader, self).__init__()
+        super().__init__()
 
         self.reader = reader
         self.header = header
@@ -78,8 +77,7 @@ def test_row(self, row):
 
         if self.any_match:
             return self.inverse  # False
-        else:
-            return not self.inverse  # True
+        return not self.inverse  # True
 
 
 def standardize_patterns(column_names, patterns):
@@ -122,7 +120,7 @@ def pattern_as_function(obj):
     return lambda x: obj in x
 
 
-class regex_callable(object):
+class regex_callable:
     def __init__(self, pattern):
         self.pattern = pattern
 
diff --git a/csvkit/utilities/csvgrep.py b/csvkit/utilities/csvgrep.py
index 96eeeb955..939e0ae1f 100644
--- a/csvkit/utilities/csvgrep.py
+++ b/csvkit/utilities/csvgrep.py
@@ -5,7 +5,6 @@
 from argparse import FileType
 
 import agate
-import six
 
 from csvkit.cli import CSVKitUtility
 from csvkit.grep import FilteringCSVReader
@@ -16,13 +15,6 @@ class CSVGrep(CSVKitUtility):
     override_flags = ['L', 'blanks', 'date-format', 'datetime-format']
 
     def add_arguments(self):
-        # I feel that there ought to be a better way to do this across Python 2 and 3.
-        def option_parser(bytestring):
-            if six.PY2:
-                return bytestring.decode(sys.getfilesystemencoding())
-            else:
-                return bytestring
-
         self.argparser.add_argument(
             '-n', '--names', dest='names_only', action='store_true',
             help='Display column names and indices from the input CSV and exit.')
@@ -30,10 +22,10 @@ def option_parser(bytestring):
             '-c', '--columns', dest='columns',
             help='A comma-separated list of column indices, names or ranges to be searched, e.g. "1,id,3-5".')
         self.argparser.add_argument(
-            '-m', '--match', dest="pattern", action='store', type=option_parser,
+            '-m', '--match', dest="pattern", action='store',
             help='A string to search for.')
         self.argparser.add_argument(
-            '-r', '--regex', dest='regex', action='store', type=option_parser,
+            '-r', '--regex', dest='regex', action='store',
             help='A regular expression to match.')
         self.argparser.add_argument(
             '-f', '--file', dest='matchfile', type=FileType('r'), action='store',
diff --git a/csvkit/utilities/csvjson.py b/csvkit/utilities/csvjson.py
index 203495877..8923e031a 100644
--- a/csvkit/utilities/csvjson.py
+++ b/csvkit/utilities/csvjson.py
@@ -1,19 +1,12 @@
 #!/usr/bin/env python
 
-import codecs
 import datetime
 import decimal
+import json
 import sys
-
-try:
-    import json
-    from collections import OrderedDict
-except ImportError:
-    from ordereddict import OrderedDict
-    import simplejson as json
+from collections import OrderedDict
 
 import agate
-import six
 
 from csvkit.cli import CSVKitUtility, match_column_identifier
 
@@ -63,7 +56,7 @@ def add_arguments(self):
             help='Disable type inference (and --locale, --date-format, --datetime-format) when parsing CSV input.')
 
     def __init__(self, args=None, output_file=None):
-        super(CSVJSON, self).__init__(args, output_file)
+        super().__init__(args, output_file)
 
         self.validate_args()
 
@@ -73,11 +66,7 @@ def __init__(self, args=None, output_file=None):
         }
 
         # We need to do this stream dance here, because we aren't writing through agate.
-        if six.PY2:
-            self.stream = codecs.getwriter('utf-8')(self.output_file)
-            self.json_kwargs['encoding'] = 'utf-8'
-        else:
-            self.stream = self.output_file
+        self.stream = self.output_file
 
     def main(self):
         """
@@ -106,7 +95,7 @@ def dump_json(self, data, newline=False):
         def default(obj):
             if isinstance(obj, (datetime.date, datetime.datetime)):
                 return obj.isoformat()
-            elif isinstance(obj, decimal.Decimal):
+            if isinstance(obj, decimal.Decimal):
                 return str(obj)
             raise TypeError('%s is not JSON serializable' % repr(obj))
 
diff --git a/csvkit/utilities/csvsql.py b/csvkit/utilities/csvsql.py
index 54ff8db83..9626d99a7 100644
--- a/csvkit/utilities/csvsql.py
+++ b/csvkit/utilities/csvsql.py
@@ -5,7 +5,6 @@
 
 import agate
 import agatesql  # noqa: F401
-import six
 from pkg_resources import iter_entry_points
 from sqlalchemy import create_engine, dialects
 
@@ -130,13 +129,13 @@ def main(self):
         try:
             engine = create_engine(self.args.connection_string)
         except ImportError as e:
-            six.raise_from(ImportError(
+            raise ImportError(
                 "You don't appear to have the necessary database backend installed for connection string you're "
                 "trying to use. Available backends include:\n\nPostgreSQL:\tpip install psycopg2\nMySQL:\t\tpip "
                 "install mysql-connector-python OR pip install mysqlclient\n\nFor details on connection strings "
                 "and other backends, please see the SQLAlchemy documentation on dialects at:\n\n"
                 "http://www.sqlalchemy.org/docs/dialects/\n\n"
-            ), e)
+            ) from e
 
         self.connection = engine.connect()
 
diff --git a/csvkit/utilities/csvstat.py b/csvkit/utilities/csvstat.py
index 31a0036e1..17ba7d9ef 100644
--- a/csvkit/utilities/csvstat.py
+++ b/csvkit/utilities/csvstat.py
@@ -1,13 +1,11 @@
 #!/usr/bin/env python
 
-import codecs
 import locale
 import warnings
 from collections import Counter, OrderedDict
 from decimal import Decimal
 
 import agate
-import six
 
 from csvkit.cli import CSVKitUtility, parse_column_identifiers
 
@@ -147,9 +145,6 @@ def main(self):
             self.argparser.error(
                 'You may not specify --count and an operation (--mean, --median, etc) at the same time.')
 
-        if six.PY2:
-            self.output_file = codecs.getwriter('utf-8')(self.output_file)
-
         if self.args.count_only:
             count = len(list(agate.csv.reader(self.skip_lines(), **self.reader_kwargs)))
 
@@ -228,13 +223,13 @@ def print_one(self, table, column_id, operation, label=True, **kwargs):
 
         # Formatting
         if op_name == 'freq':
-            stat = ', '.join([(u'"%s": %s' % (six.text_type(row['value']), row['count'])) for row in stat])
-            stat = u'{ %s }' % stat
+            stat = ', '.join([('"%s": %s' % (str(row['value']), row['count'])) for row in stat])
+            stat = '{ %s }' % stat
 
         if label:
-            self.output_file.write(u'%3i. %s: %s\n' % (column_id + 1, column_name, stat))
+            self.output_file.write('%3i. %s: %s\n' % (column_id + 1, column_name, stat))
         else:
-            self.output_file.write(u'%s\n' % stat)
+            self.output_file.write('%s\n' % stat)
 
     def calculate_stats(self, table, column_id, **kwargs):
         """
@@ -281,7 +276,7 @@ def print_stats(self, table, column_ids, stats):
                 if column_stats[op_name] is None:
                     continue
 
-                label = u'{label:{label_column_width}}'.format(**{
+                label = '{label:{label_column_width}}'.format(**{
                     'label_column_width': label_column_width,
                     'label': op_data['label']
                 })
@@ -291,7 +286,7 @@ def print_stats(self, table, column_ids, stats):
                     if i == 0:
                         self.output_file.write('\t{} '.format(label))
                     else:
-                        self.output_file.write(u'\t{label:{label_column_width}} '.format(**{
+                        self.output_file.write('\t{label:{label_column_width}} '.format(**{
                             'label_column_width': label_column_width,
                             'label': ''
                         }))
@@ -302,9 +297,9 @@ def print_stats(self, table, column_ids, stats):
                         if self.is_finite_decimal(v):
                             v = format_decimal(v, self.args.decimal_format, self.args.no_grouping_separator)
                         else:
-                            v = six.text_type(row['value'])
+                            v = str(row['value'])
 
-                        self.output_file.write(u'{} ({}x)\n'.format(v, row['count']))
+                        self.output_file.write('{} ({}x)\n'.format(v, row['count']))
 
                     continue
 
@@ -315,7 +310,7 @@ def print_stats(self, table, column_ids, stats):
                 elif op_name == 'len':
                     v = '%s characters' % v
 
-                self.output_file.write(u'\t{} {}\n'.format(label, v))
+                self.output_file.write('\t{} {}\n'.format(label, v))
 
             self.output_file.write('\n')
 
@@ -343,7 +338,7 @@ def print_csv(self, table, column_ids, stats):
                     continue
 
                 if op_name == 'freq':
-                    value = ', '.join([six.text_type(row['value']) for row in column_stats['freq']])
+                    value = ', '.join([str(row['value']) for row in column_stats['freq']])
                 else:
                     value = column_stats[op_name]
 
diff --git a/csvkit/utilities/in2csv.py b/csvkit/utilities/in2csv.py
index 2fdcd06f5..49f407f33 100644
--- a/csvkit/utilities/in2csv.py
+++ b/csvkit/utilities/in2csv.py
@@ -1,13 +1,13 @@
 #!/usr/bin/env python
 
 import sys
+from io import BytesIO
 from os.path import splitext
 
 import agate
 import agatedbf  # noqa: F401
 import agateexcel  # noqa: F401
 import openpyxl
-import six
 import xlrd
 
 from csvkit import convert
@@ -25,13 +25,6 @@ class In2CSV(CSVKitUtility):
     override_flags = ['f']
 
     def add_arguments(self):
-        # I feel that there ought to be a better way to do this across Python 2 and 3.
-        def option_parser(bytestring):
-            if six.PY2:
-                return bytestring.decode(sys.getfilesystemencoding())
-            else:
-                return bytestring
-
         self.argparser.add_argument(
             metavar='FILE', nargs='?', dest='input_path',
             help='The CSV file to operate on. If omitted, will accept input as piped data via STDIN.')
@@ -48,10 +41,10 @@ def option_parser(bytestring):
             '-n', '--names', dest='names_only', action='store_true',
             help='Display sheet names from the input Excel file.')
         self.argparser.add_argument(
-            '--sheet', dest='sheet', type=option_parser,
+            '--sheet', dest='sheet',
             help='The name of the Excel sheet to operate on.')
         self.argparser.add_argument(
-            '--write-sheets', dest='write_sheets', type=option_parser,
+            '--write-sheets', dest='write_sheets',
             help='The names of the Excel sheets to write to files, or "-" to write all sheets.')
         self.argparser.add_argument(
             '--encoding-xls', dest='encoding_xls',
@@ -66,12 +59,8 @@ def option_parser(bytestring):
 
     def open_excel_input_file(self, path):
         if not path or path == '-':
-            if six.PY2:
-                return six.BytesIO(sys.stdin.read())
-            else:
-                return six.BytesIO(sys.stdin.buffer.read())
-        else:
-            return open(path, 'rb')
+            return BytesIO(sys.stdin.buffer.read())
+        return open(path, 'rb')
 
     def sheet_names(self, path, filetype):
         input_file = self.open_excel_input_file(path)
diff --git a/csvkit/utilities/sql2csv.py b/csvkit/utilities/sql2csv.py
index c4e855d11..d2cf39f41 100644
--- a/csvkit/utilities/sql2csv.py
+++ b/csvkit/utilities/sql2csv.py
@@ -1,7 +1,6 @@
 #!/usr/bin/env python
 
 import agate
-import six
 from sqlalchemy import create_engine
 
 from csvkit.cli import CSVKitUtility
@@ -48,13 +47,13 @@ def main(self):
         try:
             engine = create_engine(self.args.connection_string)
         except ImportError as e:
-            six.raise_from(ImportError(
+            raise ImportError(
                 "You don't appear to have the necessary database backend installed for connection string you're "
                 "trying to use. Available backends include:\n\nPostgreSQL:\tpip install psycopg2\nMySQL:\t\tpip "
                 "install mysql-connector-python OR pip install mysqlclient\n\nFor details on connection strings "
                 "and other backends, please see the SQLAlchemy documentation on dialects at:\n\n"
                 "http://www.sqlalchemy.org/docs/dialects/\n\n"
-            ), e)
+            ) from e
 
         connection = engine.connect()
 
diff --git a/docs/conf.py b/docs/conf.py
index a6a6da20f..39c336b8d 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -33,8 +33,8 @@
 master_doc = 'index'
 
 # General information about the project.
-project = u'csvkit'
-copyright = u'2016, Christopher Groskopf'
+project = 'csvkit'
+copyright = '2016, Christopher Groskopf'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
@@ -78,8 +78,8 @@
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
 man_pages = [
-    # ('scripts/csvcut', 'csvcut', u'csvcut Documentation',
-    #  [u'Christopher Groskopf'], 1),
+    # ('scripts/csvcut', 'csvcut', 'csvcut Documentation',
+    #  ['Christopher Groskopf'], 1),
 ]
 
 for filename in os.listdir('scripts'):
@@ -88,6 +88,6 @@
         os.path.join('scripts', name),
         name,
         '%s Documentation' % name,
-        [u'Christopher Groskopf'],
+        ['Christopher Groskopf'],
         1
     ))
diff --git a/docs/scripts/csvpy.rst b/docs/scripts/csvpy.rst
index 960962d38..2aae57699 100644
--- a/docs/scripts/csvpy.rst
+++ b/docs/scripts/csvpy.rst
@@ -41,14 +41,14 @@ Basic use::
 
     csvpy examples/dummy.csv
     Welcome! "examples/dummy.csv" has been loaded in a reader object named "reader".
     >>> reader.next()
-    [u'a', u'b', u'c']
+    ['a', 'b', 'c']
 
 As a dictionary::
 
     csvpy --dict examples/dummy.csv
     Welcome! "examples/dummy.csv" has been loaded in a DictReader object named "reader".
     >>> reader.next()
-    {u'a': u'1', u'c': u'3', u'b': u'2'}
+    {'a': '1', 'c': '3', 'b': '2'}
 
 As an agate table::
diff --git a/docs/tricks.rst b/docs/tricks.rst
index 365e4496b..0367f14c3 100644
--- a/docs/tricks.rst
+++ b/docs/tricks.rst
@@ -79,14 +79,6 @@ Or if you see ``/usr/local/bin/pip: bad interpreter`` and have Python 3 installe
 
     python3 -m pip install csvkit
 
-If you use Python 2 and have a recent version of pip, you may need to run pip with :code:`--allow-external argparse`.
-
-If you use Python 2 on FreeBSD, you may need to install `py-sqlite3 `_.
-
-.. note ::
-
-    Need more speed? If you use Python 2, :code:`pip install cdecimal` for a boost.
-
 CSV formatting and parsing
 --------------------------
 
@@ -148,7 +140,7 @@ Python standard output encoding errors
 
 If, when running a command like :code:`csvlook dummy.csv | less` you get an error like::
 
-    'ascii' codec can't encode character u'\u0105' in position 2: ordinal not in range(128)
+    'ascii' codec can't encode character '\u0105' in position 2: ordinal not in range(128)
 
 The simplest option is to set the encoding that Python uses for standard streams, using the :code:`PYTHONIOENCODING` environment variable::
 
diff --git a/setup.py b/setup.py
index 339f8100b..95bc2b6ce 100644
--- a/setup.py
+++ b/setup.py
@@ -62,14 +62,12 @@
         'agate-excel>=0.2.2',
         'agate-dbf>=0.2.2',
         'agate-sql>=0.5.3',
-        'six>=1.6.1',
     ],
     extras_require={
         'test': [
             'coverage>=4.4.2',
             'pytest',
             'pytest-cov',
-            'mock>=1.3.0;python_version<"3"',
         ],
         'docs': [
             'sphinx>=1.0.7',
diff --git a/tests/test_convert/test_fixed.py b/tests/test_convert/test_fixed.py
index 02e3a3b47..d8852c42b 100644
--- a/tests/test_convert/test_fixed.py
+++ b/tests/test_convert/test_fixed.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-import six
+from io import StringIO
 
 from csvkit.convert import fixed
 from csvkit.utilities.in2csv import In2CSV
@@ -27,7 +27,7 @@ def test_fixed_skip_lines(self):
         self.assertEqual(f.read(), output)
 
     def test_fixed_no_inference(self):
-        input_file = six.StringIO(' 1 2 3')
+        input_file = StringIO(' 1 2 3')
 
         with stdin_as_string(input_file):
             self.assertLines(['--no-inference', '-f', 'fixed', '--schema',
@@ -41,7 +41,7 @@ def test_fixed_streaming(self):
         with open('examples/testfixed', 'r') as f:
             with open('examples/testfixed_schema.csv', 'r') as schema:
-                output_file = six.StringIO()
+                output_file = StringIO()
                 fixed.fixed2csv(f, schema, output=output_file)
                 output = output_file.getvalue()
                 output_file.close()
@@ -96,7 +96,7 @@ def test_schematic_line_parser(self):
 bar,6,2
 baz,8,5"""
 
-        f = six.StringIO(schema)
+        f = StringIO(schema)
         parser = fixed.FixedWidthRowParser(f)
         f.close()
 
diff --git a/tests/test_grep.py b/tests/test_grep.py
index 18b797788..acdc3adab 100644
--- a/tests/test_grep.py
+++ b/tests/test_grep.py
@@ -12,17 +12,17 @@ class TestGrep(unittest.TestCase):
     def setUp(self):
         self.tab1 = [
             ['id', 'name', 'i_work_here'],
-            [u'1', u'Chicago Reader', u'first'],
-            [u'2', u'Chicago Sun-Times', u'only'],
-            [u'3', u'Chicago Tribune', u'only'],
-            [u'1', u'Chicago Reader', u'second']]
+            ['1', 'Chicago Reader', 'first'],
+            ['2', 'Chicago Sun-Times', 'only'],
+            ['3', 'Chicago Tribune', 'only'],
+            ['1', 'Chicago Reader', 'second']]
 
         self.tab2 = [
             ['id', 'age', 'i_work_here'],
-            [u'1', u'first', u'0'],
-            [u'4', u'only', u'0'],
-            [u'1', u'second', u'0'],
-            [u'2', u'only', u'0', u'0']]  # Note extra value in this column
+            ['1', 'first', '0'],
+            ['4', 'only', '0'],
+            ['1', 'second', '0'],
+            ['2', 'only', '0', '0']]  # Note extra value in this column
 
     def test_pattern(self):
         fcr = FilteringCSVReader(iter(self.tab1), patterns=['1'])
diff --git a/tests/test_utilities/test_csvclean.py b/tests/test_utilities/test_csvclean.py
index 0f0baa598..32790773a 100644
--- a/tests/test_utilities/test_csvclean.py
+++ b/tests/test_utilities/test_csvclean.py
@@ -3,13 +3,8 @@
 
 import os
 import sys
-
-import six
-
-try:
-    from mock import patch
-except ImportError:
-    from unittest.mock import patch
+from io import StringIO
+from unittest.mock import patch
 
 from csvkit.utilities.csvclean import CSVClean, launch_new_instance
 from tests.utils import CSVKitTestCase, EmptyFileTests
@@ -25,7 +20,7 @@ def tearDown(self):
 
     def assertCleaned(self, basename, output_lines, error_lines, additional_args=[]):
         args = ['examples/%s.csv' % basename] + additional_args
-        output_file = six.StringIO()
+        output_file = StringIO()
 
         utility = CSVClean(args, output_file)
         utility.run()
diff --git a/tests/test_utilities/test_csvcut.py b/tests/test_utilities/test_csvcut.py
index db6f47447..6dcf40896 100644
--- a/tests/test_utilities/test_csvcut.py
+++ b/tests/test_utilities/test_csvcut.py
@@ -2,11 +2,7 @@
 # -*- coding: utf-8 -*-
 
 import sys
-
-try:
-    from mock import patch
-except ImportError:
-    from unittest.mock import patch
+from unittest.mock import patch
 
 from csvkit.utilities.csvcut import CSVCut, launch_new_instance
 from tests.utils import ColumnsTests, CSVKitTestCase, EmptyFileTests, NamesTests
@@ -41,7 +37,7 @@ def test_unicode(self):
         self.assertRows(['-c', '1,3', 'examples/test_utf8.csv'], [
             ['foo', 'baz'],
             ['1', '3'],
-            ['4', u'ʤ'],
+            ['4', 'ʤ'],
         ])
 
     def test_with_gzip(self):
diff --git a/tests/test_utilities/test_csvformat.py b/tests/test_utilities/test_csvformat.py
index 78e8cdead..9a0cd4bb4 100644
--- a/tests/test_utilities/test_csvformat.py
+++ b/tests/test_utilities/test_csvformat.py
@@ -2,13 +2,8 @@
 # -*- coding: utf-8 -*-
 
 import sys
-
-import six
-
-try:
-    from mock import patch
-except ImportError:
-    from unittest.mock import patch
+from io import StringIO
+from unittest.mock import patch
 
 from csvkit.utilities.csvformat import CSVFormat, launch_new_instance
 from tests.utils import CSVKitTestCase, EmptyFileTests, stdin_as_string
@@ -52,7 +47,7 @@ def test_tab_delimiter(self):
         ])
 
     def test_quotechar(self):
-        input_file = six.StringIO('a,b,c\n1*2,3,4\n')
+        input_file = StringIO('a,b,c\n1*2,3,4\n')
 
         with stdin_as_string(input_file):
             self.assertLines(['-Q', '*'], [
@@ -63,7 +58,7 @@ def test_doublequote(self):
         input_file.close()
 
     def test_doublequote(self):
-        input_file = six.StringIO('a\n"a ""quoted"" string"')
+        input_file = StringIO('a\n"a ""quoted"" string"')
 
         with stdin_as_string(input_file):
             self.assertLines(['-P', '#', '-B'], [
@@ -74,7 +69,7 @@ def test_doublequote(self):
         input_file.close()
 
     def test_escapechar(self):
-        input_file = six.StringIO('a,b,c\n1"2,3,4\n')
+        input_file = StringIO('a,b,c\n1"2,3,4\n')
 
         with stdin_as_string(input_file):
             self.assertLines(['-P', '#', '-U', '3'], [
diff --git a/tests/test_utilities/test_csvgrep.py b/tests/test_utilities/test_csvgrep.py
index c15904668..7ffda81f9 100644
--- a/tests/test_utilities/test_csvgrep.py
+++ b/tests/test_utilities/test_csvgrep.py
@@ -2,11 +2,7 @@
 # -*- coding: utf-8 -*-
 
 import sys
-
-try:
-    from mock import patch
-except ImportError:
-    from unittest.mock import patch
+from unittest.mock import patch
 
 from csvkit.utilities.csvgrep import CSVGrep, launch_new_instance
 from tests.utils import ColumnsTests, CSVKitTestCase, EmptyFileTests, NamesTests
@@ -42,13 +38,13 @@ def test_any_match(self):
     def test_match_utf8(self):
         self.assertRows(['-c', '3', '-m', 'ʤ', 'examples/test_utf8.csv'], [
             ['foo', 'bar', 'baz'],
-            ['4', '5', u'ʤ'],
+            ['4', '5', 'ʤ'],
         ])
 
     def test_match_utf8_bom(self):
         self.assertRows(['-c', '3', '-m', 'ʤ', 'examples/test_utf8_bom.csv'], [
             ['foo', 'bar', 'baz'],
-            ['4', '5', u'ʤ'],
+            ['4', '5', 'ʤ'],
         ])
 
     def test_no_match(self):
@@ -71,7 +67,7 @@ def test_re_match(self):
     def test_re_match_utf8(self):
         self.assertRows(['-c', '3', '-r', 'ʤ', 'examples/test_utf8.csv'], [
             ['foo', 'bar', 'baz'],
-            ['4', '5', u'ʤ'],
+            ['4', '5', 'ʤ'],
         ])
 
     def test_string_match(self):
diff --git a/tests/test_utilities/test_csvjoin.py b/tests/test_utilities/test_csvjoin.py
index 936f3019c..334e85d7a 100644
--- a/tests/test_utilities/test_csvjoin.py
+++ b/tests/test_utilities/test_csvjoin.py
@@ -1,11 +1,7 @@
 #!/usr/bin/env python
 
 import sys
-
-try:
-    from mock import patch
-except ImportError:
-    from unittest.mock import patch
+from unittest.mock import patch
 
 from csvkit.utilities.csvjoin import CSVJoin, launch_new_instance
 from tests.utils import CSVKitTestCase, EmptyFileTests
diff --git a/tests/test_utilities/test_csvjson.py b/tests/test_utilities/test_csvjson.py
index e5351b825..cf0c6e779 100644
--- a/tests/test_utilities/test_csvjson.py
+++ b/tests/test_utilities/test_csvjson.py
@@ -3,13 +3,8 @@
 
 import json
 import sys
-
-import six
-
-try:
-    from mock import patch
-except ImportError:
-    from unittest.mock import patch
+from io import StringIO
+from unittest.mock import patch
 
 from csvkit.utilities.csvjson import CSVJSON, launch_new_instance
 from tests.utils import CSVKitTestCase, EmptyFileTests
@@ -59,18 +54,18 @@ def test_indentation(self):
         output = self.get_output(['-i', '4', 'examples/dummy.csv'])
         js = json.loads(output)
         self.assertDictEqual(js[0], {'a': True, 'c': 3.0, 'b': 2.0})
-        six.assertRegex(self, output, ' "a": true,')
+        self.assertRegex(output, ' "a": true,')
 
     def test_keying(self):
         js = json.loads(self.get_output(['-k', 'a', 'examples/dummy.csv']))
         self.assertDictEqual(js, {'True': {'a': True, 'c': 3.0, 'b': 2.0}})
 
     def test_duplicate_keys(self):
-        output_file = six.StringIO()
+        output_file = StringIO()
         utility = CSVJSON(['-k', 'a', 'examples/dummy3.csv'], output_file)
-        six.assertRaisesRegex(self, ValueError,
-                              'Value True is not unique in the key column.',
-                              utility.run)
+        self.assertRaisesRegex(ValueError,
+                               'Value True is not unique in the key column.',
+                               utility.run)
         output_file.close()
 
     def test_geojson_with_id(self):
diff --git a/tests/test_utilities/test_csvlook.py b/tests/test_utilities/test_csvlook.py
index 334dfb020..4e95e2fbe 100644
--- a/tests/test_utilities/test_csvlook.py
+++ b/tests/test_utilities/test_csvlook.py
@@ -2,13 +2,8 @@
 # -*- coding: utf-8 -*-
 
 import sys
-
-import six
-
-try:
-    from mock import patch
-except ImportError:
-    from unittest.mock import patch
+from io import StringIO
+from unittest.mock import patch
 
 from csvkit.utilities.csvlook import CSVLook, launch_new_instance
 from tests.utils import CSVKitTestCase, EmptyFileTests, stdin_as_string
@@ -22,26 +17,20 @@ def test_launch_new_instance(self):
             launch_new_instance()
 
     def test_runs(self):
-        if six.PY2:
-            self.get_output(['examples/test_utf8.csv'])
-        else:
-            self.assertLines(['examples/test_utf8.csv'], [
-                '| foo | bar | baz |',
-                '| --- | --- | --- |',
-                '| 1 | 2 | 3 |',
-                '| 4 | 5 | ʤ |',
-            ])
+        self.assertLines(['examples/test_utf8.csv'], [
+            '| foo | bar | baz |',
+            '| --- | --- | --- |',
+            '| 1 | 2 | 3 |',
+            '| 4 | 5 | ʤ |',
+        ])
 
     def test_encoding(self):
-        if six.PY2:
-            self.get_output(['-e', 'latin1', 'examples/test_latin1.csv'])
-        else:
-            self.assertLines(['-e', 'latin1', 'examples/test_latin1.csv'], [
-                '| a | b | c |',
-                '| - | - | - |',
-                '| 1 | 2 | 3 |',
-                '| 4 | 5 | © |',
-            ])
+        self.assertLines(['-e', 'latin1', 'examples/test_latin1.csv'], [
+            '| a | b | c |',
+            '| - | - | - |',
+            '| 1 | 2 | 3 |',
+            '| 4 | 5 | © |',
+        ])
 
     def test_simple(self):
         self.assertLines(['examples/dummy3.csv'], [
@@ -78,7 +67,7 @@ def test_unicode(self):
             '| foo | bar | baz |',
             '| --- | --- | --- |',
             '| 1 | 2 | 3 |',
-            u'| 4 | 5 | ʤ |',
+            '| 4 | 5 | ʤ |',
         ])
 
     def test_unicode_bom(self):
@@ -86,7 +75,7 @@ def test_unicode_bom(self):
             '| foo | bar | baz |',
             '| --- | --- | --- |',
             '| 1 | 2 | 3 |',
-            u'| 4 | 5 | ʤ |',
+            '| 4 | 5 | ʤ |',
         ])
 
     def test_linenumbers(self):
@@ -141,7 +130,7 @@ def test_max_column_width(self):
         ])
 
     def test_stdin(self):
-        input_file = six.StringIO('a,b,c\n1,2,3\n4,5,6\n')
+        input_file = StringIO('a,b,c\n1,2,3\n4,5,6\n')
 
         with stdin_as_string(input_file):
             self.assertLines([], [
diff --git a/tests/test_utilities/test_csvsort.py b/tests/test_utilities/test_csvsort.py
index 5da61ae32..f3f8a7b01 100644
--- a/tests/test_utilities/test_csvsort.py
+++ b/tests/test_utilities/test_csvsort.py
@@ -2,13 +2,8 @@
 # -*- coding: utf-8 -*-
 
 import sys
-
-import six
-
-try:
-    from mock import patch
-except ImportError:
-    from unittest.mock import patch
+from io import StringIO
+from unittest.mock import patch
 
 from csvkit.utilities.csvsort import CSVSort, launch_new_instance
 from tests.utils import ColumnsTests, CSVKitTestCase, EmptyFileTests, NamesTests, stdin_as_string
@@ -22,37 +17,31 @@ def test_launch_new_instance(self):
             launch_new_instance()
 
     def test_runs(self):
-        if six.PY2:
-            self.get_output(['examples/test_utf8.csv'])
-        else:
-            self.assertRows(['examples/test_utf8.csv'], [
-                ['foo', 'bar', 'baz'],
-                ['1', '2', '3'],
-                ['4', '5', 'ʤ'],
-            ])
+        self.assertRows(['examples/test_utf8.csv'], [
+            ['foo', 'bar', 'baz'],
+            ['1', '2', '3'],
+            ['4', '5', 'ʤ'],
+        ])
 
     def test_encoding(self):
-        if six.PY2:
-            self.get_output(['-e', 'latin1', 'examples/test_latin1.csv'])
-        else:
-            self.assertRows(['-e', 'latin1', 'examples/test_latin1.csv'], [
-                ['a', 'b', 'c'],
-                ['1', '2', '3'],
-                ['4', '5', '©'],
-            ])
+        self.assertRows(['-e', 'latin1', 'examples/test_latin1.csv'], [
+            ['a', 'b', 'c'],
+            ['1', '2', '3'],
+            ['4', '5', '©'],
+        ])
 
     def test_sort_string_reverse(self):
         reader = self.get_output_as_reader(['-c', '1', '-r', 'examples/testxls_converted.csv'])
-        test_order = [u'text', u'Unicode! Σ', u'This row has blanks',
-                      u'Chicago Tribune', u'Chicago Sun-Times', u'Chicago Reader']
-        new_order = [six.text_type(r[0]) for r in reader]
+        test_order = ['text', 'Unicode! Σ', 'This row has blanks',
+                      'Chicago Tribune', 'Chicago Sun-Times', 'Chicago Reader']
+        new_order = [str(r[0]) for r in reader]
         self.assertEqual(test_order, new_order)
 
     def test_sort_date(self):
         reader = self.get_output_as_reader(['-c', '2', 'examples/testxls_converted.csv'])
-        test_order = [u'text', u'Chicago Tribune', u'Chicago Sun-Times',
-                      u'Chicago Reader', u'This row has blanks', u'Unicode! Σ']
-        new_order = [six.text_type(r[0]) for r in reader]
+        test_order = ['text', 'Chicago Tribune', 'Chicago Sun-Times',
+                      'Chicago Reader', 'This row has blanks', 'Unicode! Σ']
+        new_order = [str(r[0]) for r in reader]
         self.assertEqual(test_order, new_order)
 
     def test_no_blanks(self):
@@ -81,18 +70,18 @@ def test_no_header_row(self):
 
     def test_no_inference(self):
         reader = self.get_output_as_reader(['--no-inference', '-c', '1', 'examples/test_literal_order.csv'])
-        test_order = [u'a', u'192', u'27', u'3']
-        new_order = [six.text_type(r[0]) for r in reader]
+        test_order = ['a', '192', '27', '3']
+        new_order = [str(r[0]) for r in reader]
         self.assertEqual(test_order, new_order)
 
     def test_sort_t_and_nulls(self):
         reader = self.get_output_as_reader(['-c', '2', 'examples/sort_ints_nulls.csv'])
         test_order = ['b', '1', '2', '']
-        new_order = [six.text_type(r[1]) for r in reader]
+        new_order = [str(r[1]) for r in reader]
         self.assertEqual(test_order, new_order)
 
     def test_stdin(self):
-        input_file = six.StringIO('a,b,c\n4,5,6\n1,2,3\n')
+        input_file = StringIO('a,b,c\n4,5,6\n1,2,3\n')
 
         with stdin_as_string(input_file):
             self.assertLines([], [
diff --git a/tests/test_utilities/test_csvsql.py b/tests/test_utilities/test_csvsql.py
index 06f3083c5..037317fa2 100644
--- a/tests/test_utilities/test_csvsql.py
+++ b/tests/test_utilities/test_csvsql.py
@@ -3,16 +3,12 @@
 
 import os
 import sys
+from io import StringIO
 from textwrap import dedent
+from unittest.mock import patch
 
-import six
 from sqlalchemy.exc import IntegrityError, OperationalError
 
-try:
-    from mock import patch
-except ImportError:
-    from unittest.mock import patch
-
 from csvkit.utilities.csvsql import CSVSQL, launch_new_instance
 from csvkit.utilities.sql2csv import SQL2CSV
 from tests.utils import CSVKitTestCase, EmptyFileTests, stdin_as_string
@@ -117,7 +113,7 @@ def test_linenumbers(self):
         '''))  # noqa: W291
 
     def test_stdin(self):
-        input_file = six.StringIO('a,b,c\n4,2,3\n')
+        input_file = StringIO('a,b,c\n4,2,3\n')
 
         with stdin_as_string(input_file):
             sql = self.get_output(['--tables', 'foo'])
@@ -133,7 +129,7 @@ def test_stdin(self):
         input_file.close()
 
     def test_stdin_and_filename(self):
-        input_file = six.StringIO("a,b,c\n1,2,3\n")
+        input_file = StringIO("a,b,c\n1,2,3\n")
 
         with stdin_as_string(input_file):
             sql = self.get_output(['-', 'examples/dummy.csv'])
@@ -144,7 +140,7 @@ def test_stdin_and_filename(self):
         input_file.close()
 
     def test_query(self):
-        input_file = six.StringIO("a,b,c\n1,2,3\n")
+        input_file = StringIO("a,b,c\n1,2,3\n")
 
         with stdin_as_string(input_file):
             sql = self.get_output(['--query', 'SELECT m.usda_id, avg(i.sepal_length) AS mean_sepal_length FROM iris '
@@ -159,7 +155,7 @@ def test_query(self):
         input_file.close()
 
     def test_query_empty(self):
-        input_file = six.StringIO()
+        input_file = StringIO()
 
         with stdin_as_string(input_file):
             output = self.get_output(['--query', 'SELECT 1'])
@@ -194,14 +190,14 @@ def test_before_after_insert(self):
             'SELECT 1; CREATE TABLE foobar (date DATE)',
             '--after-insert', 'INSERT INTO dummy VALUES (0, 5, 6)'])
 
-        output_file = six.StringIO()
+        output_file = StringIO()
         utility = SQL2CSV(['--db', 'sqlite:///' + self.db_file, '--query', 'SELECT * FROM foobar'], output_file)
         utility.run()
         output = output_file.getvalue()
         output_file.close()
         self.assertEqual(output, 'date\n')
 
-        output_file = six.StringIO()
+        output_file = StringIO()
         utility = SQL2CSV(['--db', 'sqlite:///' + self.db_file, '--query', 'SELECT * FROM dummy'], output_file)
         utility.run()
         output = output_file.getvalue()
diff --git a/tests/test_utilities/test_csvstack.py b/tests/test_utilities/test_csvstack.py
index ae7cdb033..7bf5682ea 100644
--- a/tests/test_utilities/test_csvstack.py
+++ b/tests/test_utilities/test_csvstack.py
@@ -1,11 +1,7 @@
 #!/usr/bin/env python
 
 import sys
-
-try:
-    from mock import patch
-except ImportError:
-    from unittest.mock import patch
+from unittest.mock import patch
 
 from csvkit.utilities.csvstack import CSVStack, launch_new_instance
 from tests.utils import CSVKitTestCase, EmptyFileTests
diff --git a/tests/test_utilities/test_csvstat.py b/tests/test_utilities/test_csvstat.py
index bc749b293..ac8648125 100644
--- a/tests/test_utilities/test_csvstat.py
+++ b/tests/test_utilities/test_csvstat.py
@@ -1,14 +1,9 @@
 #!/usr/bin/env python
 
 import sys
+from unittest.mock import patch
 
 import agate
-import six
-
-try:
-    from mock import patch
-except ImportError:
-    from unittest.mock import patch
 
 from csvkit.utilities.csvstat import CSVStat, launch_new_instance
 from tests.utils import ColumnsTests, CSVKitTestCase, EmptyFileTests, NamesTests
@@ -50,11 +45,11 @@ def test_count_only(self):
 
     def test_unique(self):
         output = self.get_output(['-c', 'county', 'examples/realdata/ks_1033_data.csv'])
-        six.assertRegex(self, output, r'Unique values:\s+73')
+        self.assertRegex(output, r'Unique values:\s+73')
 
     def test_max_length(self):
         output = self.get_output(['-c', 'county', 'examples/realdata/ks_1033_data.csv'])
-        six.assertRegex(self, output, r'Longest value:\s+12')
+        self.assertRegex(output, r'Longest value:\s+12')
 
     def test_freq_list(self):
         output = self.get_output(['examples/realdata/ks_1033_data.csv'])
diff --git a/tests/test_utilities/test_in2csv.py b/tests/test_utilities/test_in2csv.py
index eb000882f..f5f439428 100644
--- a/tests/test_utilities/test_in2csv.py
+++ b/tests/test_utilities/test_in2csv.py
@@ -3,13 +3,8 @@
 
 import os
 import sys
-
-import six
-
-try:
-    from mock import patch
-except ImportError:
-    from unittest.mock import patch
+from io import StringIO
+from unittest.mock import patch
 
 from csvkit.utilities.in2csv import In2CSV, launch_new_instance
 from tests.utils import CSVKitTestCase, EmptyFileTests, stdin_as_string
@@ -127,7 +122,7 @@ def test_names(self):
         self.assertLines(['--names', 'examples/sheets.xlsx'], [
             'not this one',
             'data',
-            u'ʤ',
+            'ʤ',
         ])
 
     def test_csv_no_headers(self):
@@ -139,7 +134,7 @@ def test_csv_no_headers_streaming(self):
                          ['--no-header-row', '--no-inference', '--snifflimit', '0'])
 
     def test_csv_datetime_inference(self):
-        input_file = six.StringIO('a\n2015-01-01T00:00:00Z')
+        input_file = StringIO('a\n2015-01-01T00:00:00Z')
 
         with stdin_as_string(input_file):
             self.assertLines(['-f', 'csv'], [
@@ -168,7 +163,7 @@ def test_xlsx_no_inference(self):
         ])
 
     def test_geojson_no_inference(self):
-        input_file = six.StringIO(
+        input_file = StringIO(
             '{"a": 1, "b": 2, "type": "FeatureCollection", "features": [{"geometry": {}, "properties": '
             '{"a": 1, "b": 2, "c": 3}}]}')
 
         with stdin_as_string(input_file):
@@ -181,7 +176,7 @@ def test_geojson_no_inference(self):
         input_file.close()
 
     def test_json_no_inference(self):
-        input_file = six.StringIO('[{"a": 1, "b": 2, "c": 3}]')
+        input_file = StringIO('[{"a": 1, "b": 2, "c": 3}]')
 
         with stdin_as_string(input_file):
             self.assertLines(['--no-inference', '-f', 'json'], [
@@ -192,7 +187,7 @@ def test_json_no_inference(self):
         input_file.close()
 
     def test_ndjson_no_inference(self):
-        input_file = six.StringIO('{"a": 1, "b": 2, "c": 3}')
+        input_file = StringIO('{"a": 1, "b": 2, "c": 3}')
 
         with stdin_as_string(input_file):
             self.assertLines(['--no-inference', '-f', 'ndjson'], [
diff --git a/tests/test_utilities/test_sql2csv.py b/tests/test_utilities/test_sql2csv.py
index 049c07ac2..5147fb9d7 100644
--- a/tests/test_utilities/test_sql2csv.py
+++ b/tests/test_utilities/test_sql2csv.py
@@ -2,13 +2,8 @@
 
 import os
 import sys
-
-import six
-
-try:
-    from mock import patch
-except ImportError:
-    from unittest.mock import patch
+from io import StringIO
+from unittest.mock import patch
 
 try:
     import psycopg2  # noqa: F401
@@ -78,7 +73,7 @@ def test_file_with_query(self):
         self.assertTrue('54' in csv)
 
     def test_stdin(self):
-        input_file = six.StringIO('select cast(3.1415 * 13.37 as integer) as answer')
+        input_file = StringIO('select cast(3.1415 * 13.37 as integer) as answer')
 
         with stdin_as_string(input_file):
             csv = self.get_output([])
@@ -89,7 +84,7 @@ def test_stdin(self):
         input_file.close()
 
     def test_stdin_with_query(self):
-        input_file = six.StringIO('select cast(3.1415 * 13.37 as integer) as answer')
+        input_file = StringIO('select cast(3.1415 * 13.37 as integer) as answer')
 
         with stdin_as_string(input_file):
             csv = self.get_output(['--query', 'select 6*9 as question'])
@@ -100,7 +95,7 @@ def test_stdin_with_query(self):
         input_file.close()
 
     def test_stdin_with_file(self):
-        input_file = six.StringIO('select cast(3.1415 * 13.37 as integer) as answer')
+        input_file = StringIO('select cast(3.1415 * 13.37 as integer) as answer')
 
         with stdin_as_string(input_file):
             csv = self.get_output(['examples/test.sql'])
@@ -111,7 +106,7 @@ def test_stdin_with_file(self):
         input_file.close()
 
     def test_stdin_with_file_and_query(self):
-        input_file = six.StringIO('select cast(3.1415 * 13.37 as integer) as answer')
+        input_file = StringIO('select cast(3.1415 * 13.37 as integer) as answer')
 
         with stdin_as_string(input_file):
             csv = self.get_output(['examples/test.sql', '--query', 'select 6*9 as question'])
diff --git a/tests/utils.py b/tests/utils.py
index 3ade6cf84..ae64ad7ee 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -23,9 +23,9 @@
 import unittest
 import warnings
 from contextlib import contextmanager
+from io import StringIO
 
 import agate
-import six
 
 from csvkit.exceptions import ColumnIdentifierError, RequiredHeaderError
 
@@ -50,7 +50,7 @@ class CSVKitTestCase(unittest.TestCase):
     warnings.filterwarnings(action='ignore', module='agate')
 
     def get_output(self, args):
-        output_file = six.StringIO()
+        output_file = StringIO()
 
         utility = self.Utility(args, output_file)
         utility.run()
@@ -61,7 +61,7 @@ def get_output_as_io(self, args):
-        return six.StringIO(self.get_output(args))
+        return StringIO(self.get_output(args))
 
     def get_output_as_list(self, args):
         return self.get_output(args).split('\n')
@@ -89,7 +89,7 @@ def assertLines(self, args, rows, newline_at_eof=True):
         self.assertEqual(len(lines), len(rows))
 
 
-class EmptyFileTests(object):
+class EmptyFileTests:
     def test_empty(self):
         with open('examples/empty.csv') as f:
             with stdin_as_string(f):
@@ -97,7 +97,7 @@
             utility.run()
 
 
-class NamesTests(object):
+class NamesTests:
     def test_names(self):
         output = self.get_output_as_io(['-n', 'examples/dummy.csv'])
@@ -108,7 +108,7 @@ def test_invalid_options(self):
         args = ['-n', '--no-header-row', 'examples/dummy.csv']
-        output_file = six.StringIO()
+        output_file = StringIO()
         utility = self.Utility(args, output_file)
 
         with self.assertRaises(RequiredHeaderError):
@@ -117,11 +117,11 @@
 
         output_file.close()
 
 
-class ColumnsTests(object):
+class ColumnsTests:
     def test_invalid_column(self):
         args = getattr(self, 'columns_args', []) + ['-c', '0', 'examples/dummy.csv']
-        output_file = six.StringIO()
+        output_file = StringIO()
         utility = self.Utility(args, output_file)
 
         with self.assertRaises(ColumnIdentifierError):