
Commit

Remove trailing whitespace
astrofrog committed Feb 9, 2018
1 parent 6860652 commit 3e3b45f
Showing 4 changed files with 29 additions and 52 deletions.
59 changes: 24 additions & 35 deletions ply/cpp.py
@@ -5,7 +5,7 @@
# Copyright (C) 2007
# All rights reserved
#
# This module implements an ANSI-C style lexical preprocessor for PLY.
# -----------------------------------------------------------------------------
from __future__ import generators

@@ -78,7 +78,7 @@ def t_CPP_COMMENT2(t):
# replace with '\n'
t.type = 'CPP_WS'; t.value = '\n'
return t

def t_error(t):
t.type = t.value[0]
t.value = t.value[0]
@@ -92,8 +92,8 @@ def t_error(t):

# -----------------------------------------------------------------------------
# trigraph()
#
# Given an input string, this function replaces all trigraph sequences.
# The following mapping is used:
#
# ??= #
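(The table continues with the remaining standard ANSI C trigraphs, hidden in the collapsed part of this hunk. As a minimal standalone sketch of the same idea, not the module's actual implementation:)

import re

# Standard ANSI C trigraph map: '??X' -> replacement character.
_TRIGRAPHS = {'=': '#', '/': '\\', "'": '^', '(': '[', ')': ']',
              '!': '|', '<': '{', '>': '}', '-': '~'}
_trigraph_pat = re.compile(r"\?\?[=/'()!<>-]")

def replace_trigraphs(text):
    # Each match is a three-character sequence '??X'; the third
    # character selects the replacement.
    return _trigraph_pat.sub(lambda m: _TRIGRAPHS[m.group()[2]], text)

assert replace_trigraphs("??=define ARR(x) x??(0??)") == "#define ARR(x) x[0]"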
@@ -263,7 +263,7 @@ def lexprobe(self):
# ----------------------------------------------------------------------
# add_path()
#
# Adds a search path to the preprocessor.
# ----------------------------------------------------------------------

def add_path(self,path):
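(For context, typical usage looks roughly like this; a sketch that assumes the lexer is built from this module's own token rules, as the __main__ block at the bottom of the file does, with hypothetical include directories:)

import ply.cpp
import ply.lex as lex

lexer = lex.lex(module=ply.cpp)     # build a lexer from cpp.py's rules
p = ply.cpp.Preprocessor(lexer)
p.add_path(".")                     # hypothetical search paths
p.add_path("/usr/local/include")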
@@ -307,7 +307,7 @@ def group_lines(self,input):

# ----------------------------------------------------------------------
# tokenstrip()
#
# Remove leading/trailing whitespace tokens from a token list
# ----------------------------------------------------------------------
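(A minimal sketch of the behavior described above; the whitespace type names in t_WS here are an assumption for illustration, not necessarily the module's actual set:)

def tokenstrip_sketch(tokens, t_WS=('CPP_WS',)):
    # Advance past leading whitespace tokens...
    i = 0
    while i < len(tokens) and tokens[i].type in t_WS:
        i += 1
    # ...and back past trailing whitespace tokens.
    j = len(tokens)
    while j > i and tokens[j - 1].type in t_WS:
        j -= 1
    return tokens[i:j]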

@@ -333,7 +333,7 @@ def tokenstrip(self,tokens):
# argument. Each argument is represented by a list of tokens.
#
# When collecting arguments, leading and trailing whitespace is removed
# from each argument.
#
# This function properly handles nested parentheses and commas---these do not
# define new arguments.
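(To make the nesting rule concrete, a toy splitter over plain strings rather than token lists: in "a, f(b, c), d" the comma inside f(b, c) sits at nesting depth 1, so it does not start a new argument.)

def split_args_sketch(s):
    # Split a macro argument string at top-level commas only.
    args, depth, cur = [], 0, []
    for ch in s:
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
        if ch == ',' and depth == 0:
            args.append(''.join(cur).strip())
            cur = []
        else:
            cur.append(ch)
    args.append(''.join(cur).strip())
    return args

assert split_args_sketch("a, f(b, c), d") == ['a', 'f(b, c)', 'd']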
@@ -345,7 +345,7 @@ def collect_args(self,tokenlist):
current_arg = []
nesting = 1
tokenlen = len(tokenlist)

# Search for the opening '('.
i = 0
while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
@@ -379,7 +379,7 @@ def collect_args(self,tokenlist):
else:
current_arg.append(t)
i += 1

# Missing end argument
self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
return 0, [],[]
@@ -391,9 +391,9 @@ def collect_args(self,tokenlist):
# This is used to speed up macro expansion later on---we'll know
# right away where to apply patches to the value to form the expansion
# ----------------------------------------------------------------------
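# (As the tuple unpacking in macro_expand_args() below shows, str_patch
# entries are (argnum, i) pairs and patch entries are (ptype, argnum, i)
# triples: the argument number plus the index into macro.value at which
# the patch is applied.)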

def macro_prescan(self,macro):
macro.patch = [] # Standard macro arguments
macro.str_patch = [] # String conversion expansion
macro.var_comma_patch = [] # Variadic macro comma patch
i = 0
@@ -441,7 +441,7 @@ def macro_expand_args(self,macro,args):
rep = [copy.copy(_x) for _x in macro.value]

# Make string expansion patches. These do not alter the length of the replacement sequence

str_expansion = {}
for argnum, i in macro.str_patch:
if argnum not in str_expansion:
@@ -459,7 +459,7 @@
# Make all other patches. The order of these matters. It is assumed that the patch list
# has been sorted in reverse order of patch location since replacements will cause the
# size of the replacement sequence to expand from the patch point.
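# (For example, patches recorded at value positions 2 and 5 are applied
# as [5, 2]: splicing at position 5 first may grow the list but leaves
# position 2 valid, whereas splicing at 2 first would shift position 5.)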

expanded = { }
for ptype, argnum, i in macro.patch:
# Concatenation. Argument is left unexpanded
@@ -496,7 +496,7 @@ def expand_macros(self,tokens,expanded=None):
if t.value in self.macros and t.value not in expanded:
# Yes, we found a macro match
expanded[t.value] = True

m = self.macros[t.value]
if not m.arglist:
# A simple macro
@@ -528,7 +528,7 @@
else:
args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
del args[len(m.arglist):]

# Get macro replacement text
rep = self.macro_expand_args(m,args)
rep = self.expand_macros(rep,expanded)
@@ -547,13 +547,13 @@ def expand_macros(self,tokens,expanded=None):
elif t.value == '__LINE__':
t.type = self.t_INTEGER
t.value = self.t_INTEGER_TYPE(t.lineno)

i += 1
return tokens

# ----------------------------------------------------------------------
# evalexpr()
#
# Evaluate an expression token sequence for the purposes of evaluating
# integral expressions.
# ----------------------------------------------------------------------
@@ -600,7 +600,7 @@ def evalexpr(self,tokens):
tokens[i].value = str(tokens[i].value)
while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
tokens[i].value = tokens[i].value[:-1]

expr = "".join([str(x.value) for x in tokens])
expr = expr.replace("&&"," and ")
expr = expr.replace("||"," or ")
@@ -625,7 +625,7 @@ def parsegen(self,input,source=None):

if not source:
source = ""

self.define("__FILE__ \"%s\"" % source)

self.source = source
@@ -644,15 +644,15 @@ def parsegen(self,input,source=None):
for tok in x:
if tok.type in self.t_WS and '\n' in tok.value:
chunk.append(tok)

dirtokens = self.tokenstrip(x[i+1:])
if dirtokens:
name = dirtokens[0].value
args = self.tokenstrip(dirtokens[1:])
else:
name = ""
args = []

if name == 'define':
if enable:
for tok in self.expand_macros(chunk):
@@ -712,7 +712,7 @@ def parsegen(self,input,source=None):
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")

elif name == 'else':
if ifstack:
if ifstack[-1][0]:
@@ -882,7 +882,7 @@ def undef(self,tokens):
def parse(self,input,source=None,ignore={}):
self.ignore = ignore
self.parser = self.parsegen(input,source)

# ----------------------------------------------------------------------
# token()
#
@@ -912,14 +912,3 @@ def token(self):
tok = p.token()
if not tok: break
print(p.source, tok)

12 changes: 3 additions & 9 deletions ply/ctokens.py
@@ -16,7 +16,7 @@
'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
'LOR', 'LAND', 'LNOT',
'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',

# Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
@@ -29,7 +29,7 @@

# Ternary operator (?)
'TERNARY',

# Delimiters ( ) [ ] { } , . ; :
'LPAREN', 'RPAREN',
'LBRACKET', 'RBRACKET',
@@ -39,7 +39,7 @@
# Ellipsis (...)
'ELLIPSIS',
]

# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
@@ -125,9 +125,3 @@ def t_CPPCOMMENT(t):
r'//.*\n'
t.lexer.lineno += 1
return t
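
(A minimal self-contained sketch of driving ply.lex with token definitions in this style; a toy token set, not the C token list above:)

import ply.lex as lex

tokens = ('PLUS', 'MINUS', 'NUMBER')

t_PLUS = r'\+'
t_MINUS = r'-'
t_ignore = ' \t'

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    # Skip the offending character and keep lexing.
    t.lexer.skip(1)

lexer = lex.lex()
lexer.input("1 + 2 - 3")
for tok in lexer:
    print(tok.type, tok.value)
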
5 changes: 2 additions & 3 deletions ply/lex.py
@@ -184,7 +184,7 @@ def writetab(self, lextab, outputdir=''):
tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))

# Rewrite the lexstatere table, replacing function objects with function names
tabre = {}
for statename, lre in self.lexstatere.items():
titem = []
@@ -535,7 +535,7 @@ def _statetoken(s, names):
for i, part in enumerate(parts[1:], 1):
if part not in names and part != 'ANY':
break

if i > 1:
states = tuple(parts[1:i])
else:
@@ -1096,4 +1096,3 @@ def set_regex(f):

# Alternative spelling of the TOKEN decorator
Token = TOKEN
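(The decorator attaches a regex to a rule function when the pattern is built dynamically; a standard usage pattern from the PLY documentation:)

from ply.lex import TOKEN

digit      = r'([0-9])'
nondigit   = r'([_A-Za-z])'
identifier = r'(' + nondigit + r'(' + digit + r'|' + nondigit + r')*)'

@TOKEN(identifier)
def t_ID(t):
    # t.value holds the matched identifier text.
    return t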

5 changes: 0 additions & 5 deletions ply/ygen.py
@@ -67,8 +67,3 @@ def main():

if __name__ == '__main__':
main()