Skip to content

Commit

Permalink
CLN: replace %s syntax with .format in pandas.io.parsers
Browse files Browse the repository at this point in the history
progress towards pandas-dev#16130
  • Loading branch information
makeajourney committed Feb 22, 2019
1 parent b673188 commit 10b372f
Showing 1 changed file with 37 additions and 28 deletions.
65 changes: 37 additions & 28 deletions pandas/io/parsers.py
Original file line number Diff line number Diff line change
Expand Up @@ -1482,9 +1482,9 @@ def extract(r):
for n in range(len(columns[0])):
if all(compat.to_str(c[n]) in self.unnamed_cols for c in columns):
raise ParserError(
"Passed header=[%s] are too many rows for this "
"Passed header=[{header}] are too many rows for this "
"multi_index of columns"
% ','.join(str(x) for x in self.header)
.format(header=','.join(str(x) for x in self.header))
)

# Clean the column names (if we have an index_col).
Expand Down Expand Up @@ -1517,9 +1517,11 @@ def _maybe_dedup_names(self, names):
counts[col] = cur_count + 1

if is_potential_mi:
col = col[:-1] + ('%s.%d' % (col[-1], cur_count),)
col = col[:-1] + ('{column}.{count}'.format(
column=col[-1], count=cur_count),)
else:
col = '%s.%d' % (col, cur_count)
col = '{column}.{count}'.format(
column=col, count=cur_count)
cur_count = counts[col]

names[i] = col
Expand Down Expand Up @@ -1566,7 +1568,7 @@ def _get_simple_index(self, data, columns):
def ix(col):
    """Pass through *col* if it is already a positional (non-string) index.

    Parameters
    ----------
    col : object
        Candidate index column specifier.

    Returns
    -------
    object
        *col* unchanged, when it is not a string type.

    Raises
    ------
    ValueError
        If *col* is a string label — only positional indices are valid here.
    """
    if not isinstance(col, compat.string_types):
        return col
    # Diff residue removed: the superseded "'Index %s invalid' % col" raise
    # made this line unreachable; keep only the .format form per the commit.
    raise ValueError('Index {col} invalid'.format(col=col))

to_remove = []
index = []
Expand All @@ -1590,8 +1592,8 @@ def _get_name(icol):
return icol

if col_names is None:
raise ValueError(('Must supply column order to use %s as '
'index') % str(icol))
raise ValueError(('Must supply column order to use {icol!s} '
'as index').format(icol=icol))

for i, c in enumerate(col_names):
if i == icol:
Expand Down Expand Up @@ -1706,7 +1708,8 @@ def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,

result[c] = cvals
if verbose and na_count:
print('Filled %d NA values in column %s' % (na_count, str(c)))
print('Filled {count} NA values in column {c!s}'.format(
count=na_count, c=c))
return result

def _infer_types(self, values, na_values, try_num_bool=True):
Expand Down Expand Up @@ -1807,8 +1810,10 @@ def _cast_types(self, values, cast_type, column):
values = astype_nansafe(values, cast_type,
copy=True, skipna=True)
except ValueError:
raise ValueError("Unable to convert column %s to "
"type %s" % (column, cast_type))
raise ValueError(
"Unable to convert column {column} to type "
"{cast_type}".format(
column=column, cast_type=cast_type))
return values

def _do_date_conversions(self, names, data):
Expand Down Expand Up @@ -1871,7 +1876,7 @@ def __init__(self, src, **kwds):

if self.names is None:
if self.prefix:
self.names = ['%s%d' % (self.prefix, i)
self.names = ['{prefix}{i}'.format(prefix=self.prefix, i=i)
for i in range(self._reader.table_width)]
else:
self.names = lrange(self._reader.table_width)
Expand Down Expand Up @@ -2273,10 +2278,11 @@ def __init__(self, f, **kwds):
raise ValueError('Only length-1 decimal markers supported')

if self.thousands is None:
self.nonnum = re.compile('[^-^0-9^%s]+' % self.decimal)
self.nonnum = re.compile(
r'[^-^0-9^{decimal}]+'.format(decimal=self.decimal))
else:
self.nonnum = re.compile('[^-^0-9^%s^%s]+' % (self.thousands,
self.decimal))
self.nonnum = re.compile(r'[^-^0-9^{thousands}^{decimal}]+'.format(
thousands=self.thousands, decimal=self.decimal))

def _set_no_thousands_columns(self):
# Create a set of column ids that are not to be stripped of thousands
Expand Down Expand Up @@ -2515,8 +2521,8 @@ def _infer_columns(self):
except StopIteration:
if self.line_pos < hr:
raise ValueError(
'Passed header=%s but only %d lines in file'
% (hr, self.line_pos + 1))
'Passed header={hr} but only {pos} lines in '
'file'.format(hr=hr, pos=(self.line_pos + 1)))

# We have an empty file, so check
# if columns are provided. That will
Expand Down Expand Up @@ -2557,7 +2563,8 @@ def _infer_columns(self):

while cur_count > 0:
counts[col] = cur_count + 1
col = "%s.%d" % (col, cur_count)
col = u'{column}.{count}'.format(
column=col, count=cur_count)
cur_count = counts[col]

this_columns[i] = col
Expand Down Expand Up @@ -2625,8 +2632,8 @@ def _infer_columns(self):

if not names:
if self.prefix:
columns = [['%s%d' % (self.prefix, i)
for i in range(ncols)]]
columns = [['{prefix}{idx}'.format(
prefix=self.prefix, idx=i) for i in range(ncols)]]
else:
columns = [lrange(ncols)]
columns = self._handle_usecols(columns, columns[0])
Expand Down Expand Up @@ -3053,8 +3060,9 @@ def _rows_to_cols(self, content):
content.append(l)

for row_num, actual_len in bad_lines:
msg = ('Expected %d fields in line %d, saw %d' %
(col_len, row_num + 1, actual_len))
msg = ('Expected {col_len} fields in line {line}, saw '
'{length}'.format(col_len=col_len, line=(row_num + 1),
length=actual_len))
if (self.delimiter and
len(self.delimiter) > 1 and
self.quoting != csv.QUOTE_NONE):
Expand Down Expand Up @@ -3225,8 +3233,9 @@ def _isindex(colspec):
new_name, col, old_names = _try_convert_dates(
converter, colspec, data_dict, orig_names)
if new_name in data_dict:
raise ValueError('New date column already in dict %s' %
new_name)
raise ValueError(
'New date column already in dict {name}'.format(
name=new_name))
new_data[new_name] = col
new_cols.append(new_name)
date_cols.update(old_names)
Expand All @@ -3235,8 +3244,8 @@ def _isindex(colspec):
# dict of new name to column list
for new_name, colspec in compat.iteritems(parse_spec):
if new_name in data_dict:
raise ValueError('Date column %s already in dict' %
new_name)
raise ValueError(
'Date column {name} already in dict'.format(name=new_name))

_, col, old_names = _try_convert_dates(converter, colspec,
data_dict, orig_names)
Expand Down Expand Up @@ -3415,7 +3424,7 @@ def _stringify_na_values(na_values):
# we are like 999 here
if v == int(v):
v = int(v)
result.append("%s.0" % v)
result.append("{value}.0".format(value=v))
result.append(str(v))

result.append(v)
Expand Down Expand Up @@ -3560,8 +3569,8 @@ def get_rows(self, infer_nrows, skiprows=None):

def detect_colspecs(self, infer_nrows=100, skiprows=None):
# Regex escape the delimiters
delimiters = ''.join(r'\%s' % x for x in self.delimiter)
pattern = re.compile('([^%s]+)' % delimiters)
delimiters = ''.join(r'\{}'.format(x) for x in self.delimiter)
pattern = re.compile('([^{}]+)'.format(delimiters))
rows = self.get_rows(infer_nrows, skiprows)
if not rows:
raise EmptyDataError("No rows from which to infer column width")
Expand Down

0 comments on commit 10b372f

Please sign in to comment.