diff --git a/nsiqcppstyle_checker.py b/nsiqcppstyle_checker.py index 513b428..42d9587 100644 --- a/nsiqcppstyle_checker.py +++ b/nsiqcppstyle_checker.py @@ -377,7 +377,7 @@ def t_CPPCOMMENT(t): def t_error(t): - console.Out.Verbose("Illegal character '%s'" % t.value[0], t.lexer.lineno) + console.Out.Verbose(f"Illegal character '{t.value[0]}'", t.lexer.lineno) t.lexer.skip(1) @@ -403,9 +403,9 @@ def __init__(self, filename, data=None): try: self.data = f.read() except UnicodeDecodeError as ex: - console.Out.Ci("[ERROR] UnicodeDecodeError in CppLexerNavigator: " + str(ex)) + console.Out.Ci(f"[ERROR] UnicodeDecodeError in CppLexerNavigator: {str(ex)}") console.Out.Ci( - "[ERROR] Exception occurred reading file '%s', convert from UTF16LE to UTF8" % (filename), + f"[ERROR] Exception occurred reading file '{filename}', convert from UTF16LE to UTF8", ) raise self.lines = self.data.splitlines() @@ -445,7 +445,7 @@ def ProcessIfdef(self, token): self.ifdefstack.append(True) elif Match(r"^#\s*endif$", token.value) and len(self.ifdefstack) != 0: self.ifdefstack.pop() - return any(not ifdef for ifdef in self.ifdefstack) + return not all(self.ifdefstack) def Backup(self): """ @@ -473,9 +473,7 @@ def GetCurTokenLine(self): Get Current Token, if No current token, return None """ curToken = self.GetCurToken() - if curToken is not None: - return self.lines[curToken.lineno - 1] - return None + return self.lines[curToken.lineno - 1] if curToken is not None else None def _MoveToToken(self, token): self.tokenindex = token.index @@ -488,9 +486,7 @@ def _GetColumn(self, token): if last_cr < 0: last_cr = -1 column = token.lexpos - last_cr - if column == 0: - return 1 - return column + return 1 if column == 0 else column def GetCurToken(self): """ @@ -630,11 +626,10 @@ def GetNextToken(self, skipWhiteSpace=False, skipComment=False, skipDirective=Fa def GetNextMatchingGT(self, keepCur=False): if keepCur: self.PushTokenIndex() - gtStack = [] if self.GetCurToken().type != "LT": msg = "Matching 
next GT token should be examined when cur token is <" raise RuntimeError(msg) - gtStack.append(self.GetCurToken()) + gtStack = [self.GetCurToken()] t = self._GetNextMatchingGTToken(gtStack) if keepCur: self.PopTokenIndex() @@ -666,11 +661,10 @@ def GetNextMatchingToken(self, keepCur=False): """ if keepCur: self.PushTokenIndex() - tokenStack = [] if self.GetCurToken().type not in ["LPAREN", "LBRACE", "LBRACKET"]: msg = "Matching token should be examined when cur token is { [ (" raise RuntimeError(msg) - tokenStack.append(self.GetCurToken()) + tokenStack = [self.GetCurToken()] t = self._GetNextMatchingToken(tokenStack) if keepCur: self.PopTokenIndex() @@ -771,12 +765,10 @@ def _GetPrevMatchingLTToken(self, tokenStack): def GetPrevMatchingToken(self, keepCur=False): if keepCur: self.PushTokenIndex() - tokenStack = [] if self.GetCurToken().type not in ["RPAREN", "RBRACE", "RBRACKET"]: msg = "Matching token should be examined when cur token is } ) ]" raise RuntimeError(msg) - tokenStack.append(self.GetCurToken()) - + tokenStack = [self.GetCurToken()] t = self._GetPrevMatchingToken(tokenStack) if keepCur: self.PopTokenIndex() @@ -815,28 +807,22 @@ def _GetPrevMatchingToken(self, tokenStack): def _SkipContext(self, skipWhiteSpace=False, skipComment=False): context = [] if skipWhiteSpace: - context.append("SPACE") - context.append("LINEFEED") + context.extend(("SPACE", "LINEFEED")) if skipComment: - context.append("COMMENT") - context.append("CPPCOMMENT") + context.extend(("COMMENT", "CPPCOMMENT")) return context def _GetNextToken(self): - if self.tokenindex < self.tokenlistsize - 1: - self.tokenindex = self.tokenindex + 1 - return self.tokenlist[self.tokenindex] - else: + if self.tokenindex >= self.tokenlistsize - 1: return None + self.tokenindex = self.tokenindex + 1 + return self.tokenlist[self.tokenindex] def _GetPrevToken(self): - if self.tokenindex >= 0: - self.tokenindex = self.tokenindex - 1 - if self.tokenindex == -1: - return None - return 
self.tokenlist[self.tokenindex] - else: + if self.tokenindex < 0: return None + self.tokenindex = self.tokenindex - 1 + return None if self.tokenindex == -1 else self.tokenlist[self.tokenindex] def GetPrevTokenInType(self, type, keepCur=True, skipPreprocess=True): if keepCur: @@ -844,11 +830,12 @@ def GetPrevTokenInType(self, type, keepCur=True, skipPreprocess=True): token = None while True: token = self.GetPrevToken() - if token is None: - break - elif token.type == type: - if skipPreprocess and token.pp: - continue + if ( + token is not None + and token.type == type + and (not skipPreprocess or not token.pp) + or token is None + ): break if keepCur: self.PopTokenIndex() @@ -860,11 +847,12 @@ def GetPrevTokenInTypeList(self, typelist, keepCur=True, skipPreprocess=True): token = None while True: token = self.GetPrevToken(False, False, skipPreprocess, False) - if token is None: - break - elif token.type in typelist: - if skipPreprocess and token.pp: - continue + if ( + token is not None + and token.type in typelist + and (not skipPreprocess or not token.pp) + or token is None + ): break if keepCur: self.PopTokenIndex() @@ -884,11 +872,12 @@ def GetNextTokenInType(self, type, keepCur=False, skipPreprocess=True): token = None while True: token = self.GetNextToken() - if token is None: - break - elif token.type == type: - if skipPreprocess and token.pp: - continue + if ( + token is not None + and token.type == type + and (not skipPreprocess or not token.pp) + or token is None + ): break if keepCur: self.PopTokenIndex() @@ -900,11 +889,12 @@ def GetNextTokenInTypeList(self, typelist, keepCur=False, skipPreprocess=True): token = None while True: token = self.GetNextToken() - if token is None: - break - elif token.type in typelist: - if skipPreprocess and token.pp: - continue + if ( + token is not None + and token.type in typelist + and (not skipPreprocess or not token.pp) + or token is None + ): break if keepCur: self.PopTokenIndex() @@ -918,9 +908,7 @@ def 
HasBody(self): if token_id3 is None and token_id2 is not None: return True - if token_id2 is not None and token_id2.lexpos < token_id3.lexpos: - return True - return False + return token_id2 is not None and token_id2.lexpos < token_id3.lexpos class Context: @@ -933,7 +921,9 @@ def __init__(self, type, name, sig=False, starttoken=None, endtoken=None): self.additional = "" def __str__(self): - return ", ".join([self.type, "'" + self.name + "'", str(self.startToken), str(self.endToken)]) + return ", ".join( + [self.type, f"'{self.name}'", str(self.startToken), str(self.endToken)] + ) def IsContextStart(self, token): return token == self.startToken @@ -942,9 +932,10 @@ def IsContextEnd(self, token): return token == self.endToken def InScope(self, token): - if token.lexpos >= self.startToken.lexpos and token.lexpos <= self.endToken.lexpos: - return True - return False + return ( + token.lexpos >= self.startToken.lexpos + and token.lexpos <= self.endToken.lexpos + ) class ContextStack: @@ -955,14 +946,10 @@ def Push(self, context): self.contextstack.append(context) def Pop(self): - if self.Size() == 0: - return None - return self.contextstack.pop() + return None if self.Size() == 0 else self.contextstack.pop() def Peek(self): - if self.Size() == 0: - return None - return self.contextstack[-1] + return None if self.Size() == 0 else self.contextstack[-1] def SigPeek(self): i = len(self.contextstack) @@ -991,10 +978,9 @@ def ContainsIn(self, type): return False def __str__(self): - a = "" - for eachContext in self.contextstack: - a += eachContext.__str__() + " >> " - return a + return "".join( + f"{eachContext.__str__()} >> " for eachContext in self.contextstack + ) def Copy(self): contextStack = ContextStack() @@ -1015,9 +1001,7 @@ def Pop(self): return self.contextstackstack.pop() def Peek(self): - if len(self.contextstackstack) == 0: - return None - return self.contextstackstack[-1] + return None if len(self.contextstackstack) == 0 else self.contextstackstack[-1] 
############################################################################ diff --git a/nsiqcppstyle_exe.py b/nsiqcppstyle_exe.py index 102b2a0..8dee440 100755 --- a/nsiqcppstyle_exe.py +++ b/nsiqcppstyle_exe.py @@ -40,7 +40,7 @@ from nsiqcppstyle_util import * ########################################################################## -title = "nsiqcppstyle: N'SIQ Cpp Style ver " + version + "\n" +title = f"nsiqcppstyle: N'SIQ Cpp Style ver {version}" + "\n" def ShowMessageAndExit(msg, usageOutput=True): @@ -120,7 +120,9 @@ def get_parser(): help="Set a logging level", ) - parser.add_argument("--version", action="version", version="%(prog)s " + version) + parser.add_argument( + "--version", action="version", version=f"%(prog)s {version}" + ) parser.add_argument("--show-url", action="store_true", default=False) parser.add_argument("-r", "--list-rules", action="store_true", default=False, help="Show rule list") parser.add_argument( @@ -240,7 +242,7 @@ def main(): extLangMapCopy = copy.deepcopy(extLangMap) targetName = os.path.basename(targetPath) console.Out.Ci(console.Separator) - console.Out.Ci("= Analyzing %s " % targetName) + console.Out.Ci(f"= Analyzing {targetName} ") if filterPath != "": filefilterPath = filterPath @@ -255,17 +257,14 @@ def main(): if filterScope != filterManager.GetActiveFilter().filterName: console.Out.Error( - "\n{} filter scope is not available. Instead, use {}\n".format( - filterScope, - filterManager.GetActiveFilter().filterName, - ), + f"\n{filterScope} filter scope is not available. Instead, use {filterManager.GetActiveFilter().filterName}\n" ) filter = filterManager.GetActiveFilter() # Load Rule if len(filter.nsiqCppStyleRules) == 0: - ShowMessageAndExit("Error!. Rules must be set in %s" % filefilterPath, False) + ShowMessageAndExit(f"Error!. 
Rules must be set in {filefilterPath}", False) continue ruleManager.LoadRules(filter.nsiqCppStyleRules) @@ -277,7 +276,7 @@ def main(): console.Out.Info(filter.to_string()) console.Out.Ci(console.Separator) - console.Out.Verbose("* run nsiqcppstyle analysis on %s" % targetName) + console.Out.Verbose(f"* run nsiqcppstyle analysis on {targetName}") # if the target is file, analyze it without condition if os.path.isfile(targetPath): @@ -349,7 +348,7 @@ def GetRealTargetPaths(args): targetPaths.append(realPath) # CheckPathPermission(realPath, "Target directory") if not os.path.exists(realPath): - ShowMessageAndExit("Error!: Target directory %s does not exist" % eachTarget) + ShowMessageAndExit(f"Error!: Target directory {eachTarget} does not exist") return targetPaths @@ -381,7 +380,7 @@ def _ProcessFilterLine(self, filter, raw_line): filter = self.GetFilter(filterName) elif line.startswith("="): if len(line[1:].strip()) != 0: - filter.AddLangMap(line[1:].strip(), '"' + line + '" of filefilter.txt') + filter.AddLangMap(line[1:].strip(), f'"{line}" of filefilter.txt') elif line.startswith("~"): if len(line[1:].strip()) != 0: filter.AddCppChecker(line[1:].strip()) @@ -396,7 +395,7 @@ def _ProcessFilterLine(self, filter, raw_line): elif line.startswith("%"): arg = line[1:].strip() if arg != "": - filter.AddVarMap(arg, '"' + arg + '" of filefilter.txt') + filter.AddVarMap(arg, f'"{arg}" of filefilter.txt') return filter @@ -412,8 +411,7 @@ def __init__(self, fileFilterPath, filterStringList, extLangMap, varMap, activeF for line in filterStringList: filter = self._ProcessFilterLine(filter, line) - f = self.GetFilterFile(fileFilterPath) - if f: + if f := self.GetFilterFile(fileFilterPath): for line in f.readlines(): filter = self._ProcessFilterLine(filter, line) f.close() @@ -442,9 +440,7 @@ def GetActiveFilter(self): return self.GetFilter(self.activeFilterName) def GetFilterFile(self, filterfile): - if not os.path.exists(filterfile): - return None - return open(filterfile) 
+ return None if not os.path.exists(filterfile) else open(filterfile) ############################################################################## @@ -480,7 +476,7 @@ def to_string(self): for eachfilter in self.filefilter: filterment = "" filterment = "is included" if eachfilter[0] else "is excluded" - s = s + (f" {count}. {eachfilter[1]} {filterment}\n") + s = f"{s} {count}. {eachfilter[1]} {filterment}\n" count = count + 1 return template % (self.filterName, s, self.GetLangString()) @@ -518,14 +514,14 @@ def GetLangString(self): s = "" for eachKey in self.extLangMap: if eachKey == "C/C++": - s = s + " " + eachKey + "=" + s = f"{s} {eachKey}=" extSet = self.extLangMap.get(eachKey) setLen = len(extSet) count = 0 for eachExt in extSet: count = count + 1 s = s + eachExt - s = s + "," if count < setLen else s + "\n" + s = f"{s}," if count < setLen else s + "\n" return s def CheckFileInclusion(self, fileStr): @@ -535,9 +531,8 @@ def CheckFileInclusion(self, fileStr): if eachfilter[2] is True: if eachfile.startswith(eachfilter[1]): inclusion = eachfilter[0] - else: - if eachfile.find(eachfilter[1]) != -1: - inclusion = eachfilter[0] + elif eachfile.find(eachfilter[1]) != -1: + inclusion = eachfilter[0] return inclusion def GetLangMap(self): @@ -549,10 +544,7 @@ def AddLangMap(self, langMapString, where): extLangPair = eachExt.split(": ") if len(extLangPair) != 2: ShowMessageAndExit( - "Error!: The extension and language pair ({}) is incorrect in {}, please use LANGUAGENAME: EXTENSION style".format( - langMapString, - where, - ), + f"Error!: The extension and language pair ({langMapString}) is incorrect in {where}, please use LANGUAGENAME: EXTENSION style" ) lang, ext = extLangPair self.extLangMap.get(lang).add(ext) @@ -585,10 +577,7 @@ def GetCustomKeyValueMap(keyValuePair, where): customKeyValuePair = eachCustomKeyValue.split(": ") if len(customKeyValuePair) != 2: ShowMessageAndExit( - "Error!: The var key and value pair ({}) is incorrect in {}, please use KEY: VALUE 
style".format( - keyValuePair, - where, - ), + f"Error!: The var key and value pair ({keyValuePair}) is incorrect in {where}, please use KEY: VALUE style" ) key, value = customKeyValuePair varMap[key] = value @@ -612,7 +601,7 @@ def __init__(self, targetDir): fsrc = os.path.join(targetDir, "basefilelist.txt") if os.path.exists(fsrc): with open(fsrc) as f: - for line in f.readlines(): + for line in f: self.baseFileList[line.strip()] = True def IsNewOrChanged(self, filename): diff --git a/nsiqcppstyle_lexer.py b/nsiqcppstyle_lexer.py index b732c62..36ecd11 100644 --- a/nsiqcppstyle_lexer.py +++ b/nsiqcppstyle_lexer.py @@ -163,7 +163,7 @@ def clone(self, object=None): if object: newtab = {} for key, ritem in self.lexstatere.items(): newre = [] for cre, findex in ritem: newfindex = [] for f in findex: @@ -171,7 +171,7 @@ newfindex.append(f) continue newfindex.append((getattr(object, f[0].__name__), f[1])) newre.append((cre, newfindex)) newtab[key] = newre c.lexstatere = newtab c.lexstateerrorf = {} @@ -197,25 +197,24 @@ def _writetab_impl(self, tabfile, tf): initial = self.lexstatere["INITIAL"] initialfuncs = [] for part in initial: - for f in part[1]: - if f and f[0]: - initialfuncs.append(f) - + initialfuncs.extend(f for f in part[1] if f and f[0]) for key, lre in self.lexstatere.items(): - titem = [] - for i in range(len(lre)): - titem.append((self.lexstateretext[key][i], _funcs_to_names(lre[i][1], self.lexstaterenames[key][i]))) + titem = [ ( self.lexstateretext[key][i], _funcs_to_names(lre[i][1], self.lexstaterenames[key][i]), ) for i in range(len(lre)) ] tabre[key] = titem tf.write("_lexstatere = %s\n" % repr(tabre)) tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore)) - taberr = {} - for key, ef in self.lexstateerrorf.items(): - if ef: - taberr[key] = ef.__name__ - else: - taberr[key] = None + taberr = { key: ef.__name__ if ef else None for key, ef in 
self.lexstateerrorf.items() + } tf.write("_lexstateerrorf = %s\n" % repr(taberr)) # ------------------------------------------------------------ @@ -225,7 +223,7 @@ def writetab(self, tabfile, outputdir=""): if isinstance(tabfile, types.ModuleType): return basetabfilename = tabfile.split(".")[-1] - filename = os.path.join(outputdir, basetabfilename) + ".py" + filename = f"{os.path.join(outputdir, basetabfilename)}.py" with open(filename, "w") as tf: self._writetab_impl(tabfile, tf) @@ -235,13 +233,12 @@ def writetab(self, tabfile, outputdir=""): def readtab(self, tabfile, fdict): if isinstance(tabfile, types.ModuleType): lextab = tabfile + elif sys.version_info[0] < 3: + exec(f"import {tabfile} as lextab") else: - if sys.version_info[0] < 3: - exec("import %s as lextab" % tabfile) - else: - env = {} - exec("import %s as lextab" % tabfile, env, env) - lextab = env["lextab"] + env = {} + exec(f"import {tabfile} as lextab", env, env) + lextab = env["lextab"] if getattr(lextab, "_tabversion", "0.0") != __version__: msg = "Inconsistent PLY version" @@ -416,7 +413,10 @@ def token(self): if lexpos == self.lexpos: # Error method didn't change text position at all. This # is an error. - raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:]) + raise LexError( + f"Scanning error. 
Illegal character '{lexdata[lexpos]}'", + lexdata[lexpos:], + ) lexpos = self.lexpos if not newtok: continue @@ -546,7 +546,7 @@ def _form_master_re(relist, reflags, ldict, toknames): return [(lexre, lexindexfunc)], [regex], [lexindexnames] except Exception: - m = int(len(relist) / 2) + m = len(relist) // 2 if m == 0: m = 1 llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames) @@ -594,10 +594,7 @@ def __init__(self, ldict, log=None, reflags=0): self.files = {} self.error = 0 - if log is None: - self.log = PlyLogger(sys.stderr) - else: - self.log = log + self.log = PlyLogger(sys.stderr) if log is None else log # Get all of the basic information def get_all(self): @@ -682,7 +679,7 @@ def get_states(self): self.log.error("State name %s must be a string", repr(name)) self.error = 1 continue - if not (statetype == "inclusive" or statetype == "exclusive"): + if statetype not in ["inclusive", "exclusive"]: self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name) self.error = 1 continue @@ -710,7 +707,7 @@ def get_rules(self): self.funcsym[s] = [] self.strsym[s] = [] - if len(tsymbols) == 0: + if not tsymbols: self.log.error("No rules of the form t_rulename are defined") self.error = 1 return @@ -844,9 +841,7 @@ def validate_rules(self): self.log.error("No rules defined for state '%s'", state) self.error = 1 - # Validate the error function - efunc = self.errorf.get(state, None) - if efunc: + if efunc := self.errorf.get(state, None): f = efunc line = func_code(f).co_firstlineno file = func_code(f).co_filename @@ -884,8 +879,10 @@ def validate_file(self, filename): with open(filename) as f: lines = f.readlines() except UnicodeDecodeError as ex: - console.Out.Ci("[ERROR] UnicodeDecodeError in validate_file: " + str(ex)) - console.Out.Ci("[ERROR] Exception occurred reading file '%s', convert from UTF16LE to UTF8" % (filename)) + console.Out.Ci(f"[ERROR] UnicodeDecodeError in validate_file: {str(ex)}") + console.Out.Ci( + f"[ERROR] 
Exception occurred reading file '{filename}', convert from UTF16LE to UTF8" + ) raise except OSError: return # Couldn't find the file. Don't worry about it @@ -894,17 +891,13 @@ def validate_file(self, filename): sre = re.compile(r"\s*(t_[a-zA-Z_0-9]*)\s*=") counthash = {} - linen = 1 - for l in lines: + for linen, l in enumerate(lines, start=1): m = fre.match(l) if not m: m = sre.match(l) if m: name = m.group(1) - prev = counthash.get(name) - if not prev: - counthash[name] = linen - else: + if prev := counthash.get(name): self.log.error( "%s:%d: Rule %s redefined. Previously defined on line %d", filename, @@ -913,7 +906,8 @@ def validate_file(self, filename): prev, ) self.error = 1 - linen += 1 + else: + counthash[name] = linen # ----------------------------------------------------------------------------- @@ -982,11 +976,7 @@ def lex( debuglog.info("lex: literals = %r", linfo.literals) debuglog.info("lex: states = %r", linfo.stateinfo) - # Build a dictionary of valid token names - lexobj.lextokens = {} - for n in linfo.tokens: - lexobj.lextokens[n] = 1 - + lexobj.lextokens = {n: 1 for n in linfo.tokens} # Get literals specification if isinstance(linfo.literals, (list, tuple)): lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals) @@ -1100,10 +1090,10 @@ def runmain(lexer=None, data=None): _token = lexer.token if lexer else token while True: - tok = _token() - if not tok: + if tok := _token(): + sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno, tok.lexpos)) + else: break - sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno, tok.lexpos)) # ----------------------------------------------------------------------------- @@ -1116,10 +1106,7 @@ def runmain(lexer=None, data=None): def TOKEN(r): def set_doc(f): - if callable(r): - f.__doc__ = r.__doc__ - else: - f.__doc__ = r + f.__doc__ = r.__doc__ if callable(r) else r return f return set_doc diff --git a/nsiqcppstyle_reporter.py b/nsiqcppstyle_reporter.py index 
841bab1..d6cf4ec 100644 --- a/nsiqcppstyle_reporter.py +++ b/nsiqcppstyle_reporter.py @@ -82,9 +82,10 @@ def ReportSummaryToScreen(analyzedFiles, nsiqcppstyle_state, filter): console.Out.Info(" - ", checker, "rule violated :", nsiqcppstyle_state.errorPerChecker[checker]) console.Out.Info("\n================================ Violated File Details ===============================") for eachFile in nsiqcppstyle_state.errorPerFile: - count = 0 - for eachRule in nsiqcppstyle_state.errorPerFile[eachFile]: - count += nsiqcppstyle_state.errorPerFile[eachFile][eachRule] + count = sum( + nsiqcppstyle_state.errorPerFile[eachFile][eachRule] + for eachRule in nsiqcppstyle_state.errorPerFile[eachFile] + ) console.Out.Info(" - ", eachFile, " violated in total : ", count) for eachRule in nsiqcppstyle_state.errorPerFile[eachFile]: console.Out.Info(" * ", eachRule, " : ", nsiqcppstyle_state.errorPerFile[eachFile][eachRule]) @@ -103,10 +104,7 @@ def CloseReport(format): def IsRuleUsed(ruleName, ruleNames): - if ruleNames.count(ruleName) == 0: - return "false" - else: - return "true" + return "false" if ruleNames.count(ruleName) == 0 else "true" def ReportRules(availRuleName, ruleNames): @@ -129,16 +127,14 @@ def ReportRules(availRuleName, ruleNames): def StartDir(dirname): - if _nsiqcppstyle_state.output_format == "xml": - pass + pass # writer.write("\n" % (dirname)) def EndDir(): - if _nsiqcppstyle_state.output_format == "xml": - pass + pass # writer.write("\n") @@ -154,8 +150,6 @@ def StartTarget(targetname): def EndTarget(): """Write Report when each target is ended""" - if _nsiqcppstyle_state.output_format == "xml": - pass # writer.write("\n") def StartFile(dirname, filename): @@ -214,7 +208,7 @@ def ErrorInternal(t, ruleName, message): _nsiqcppstyle_state.IncrementErrorCount(ruleName, t.filename) url = "" if _nsiqcppstyle_state.showUrl: - url = "http://nsiqcppstyle.appspot.com/rule_doc/" + ruleName + url = f"http://nsiqcppstyle.appspot.com/rule_doc/{ruleName}" if 
_nsiqcppstyle_state.output_format == "emacs": sys.stdout.write(f"{t.filename}:{t.lineno}: {message} [{ruleName}] {url}\n") elif _nsiqcppstyle_state.output_format == "vs7": diff --git a/nsiqcppstyle_rulehelper.py b/nsiqcppstyle_rulehelper.py index b972577..ef19665 100644 --- a/nsiqcppstyle_rulehelper.py +++ b/nsiqcppstyle_rulehelper.py @@ -97,6 +97,4 @@ def IsConstructor(value, fullName, context): def IsOperator(value): """Check if the passed value is 'operator'""" - if value is not None and value == "operator": - return True - return False + return value is not None and value == "operator" diff --git a/nsiqcppstyle_rulemanager.py b/nsiqcppstyle_rulemanager.py index 1fb4ade..91b0baa 100644 --- a/nsiqcppstyle_rulemanager.py +++ b/nsiqcppstyle_rulemanager.py @@ -85,11 +85,11 @@ def LoadRules(self, checkingRuleNames): for ruleName in checkingRuleNames: count = self.availRuleNames.count(ruleName) if count == 0: - console.Out.Error("%s does not exist or incompatible." % ruleName) + console.Out.Error(f"{ruleName} does not exist or incompatible.") continue else: console.Out.Info(" - ", ruleName, "is applied.") - ruleModule = __import__("rules." + ruleName) + ruleModule = __import__(f"rules.{ruleName}") self.loadedRule.append(ruleModule) if len(self.loadedRule) == 0: console.Out.Ci(" No Rule is specified. 
Please configure rules in filefilter.txt.") diff --git a/nsiqcppstyle_util.py b/nsiqcppstyle_util.py index 2a584b3..a180218 100644 --- a/nsiqcppstyle_util.py +++ b/nsiqcppstyle_util.py @@ -45,19 +45,14 @@ def ModulePath(): def GetRuntimePath(): "Return the path of this tool" if sys.platform == "win32": - runtimePath = ModulePath() - else: - modename = globals()["__name__"] - module = sys.modules[modename] - runtimePath = os.path.dirname(module.__file__) - return runtimePath + return ModulePath() + modename = globals()["__name__"] + module = sys.modules[modename] + return os.path.dirname(module.__file__) def GetSystemKey(): - if sys.platform == "win32": - return "window" - else: - return "linux" + return "window" if sys.platform == "win32" else "linux" def CmpObjects(a, b): diff --git a/nsiqunittest/nsiqcppstyle_unittest.py b/nsiqunittest/nsiqcppstyle_unittest.py index 76b3f1a..9648bbc 100644 --- a/nsiqunittest/nsiqcppstyle_unittest.py +++ b/nsiqunittest/nsiqcppstyle_unittest.py @@ -52,7 +52,9 @@ def __expectTokenTypes(code, expectedTokenTypes): def __testFunctionSpecifier(self, specifier): expectedTokenTypes = ["VOID", "FUNCTION", "LPAREN", "RPAREN", "IGNORE", "SEMI"] - self.__expectTokenTypes("void FunctionName() " + specifier + ";", expectedTokenTypes) + self.__expectTokenTypes( + f"void FunctionName() {specifier};", expectedTokenTypes + ) def testIgnoreFinalFunctionSpecifier(self): self.__testFunctionSpecifier("final") diff --git a/nsiqunittest/nsiqcppstyle_unittestbase.py b/nsiqunittest/nsiqcppstyle_unittestbase.py index b6ab0fd..e56a39f 100644 --- a/nsiqunittest/nsiqcppstyle_unittestbase.py +++ b/nsiqunittest/nsiqcppstyle_unittestbase.py @@ -64,7 +64,7 @@ def ExpectSuccess(self, msg): global errors result = self._CheckErrorContent(msg) # Error with message - assert not result, "Expected no error but got: " + str(errors) + assert not result, f"Expected no error but got: {str(errors)}" def _CheckErrorContent(self, msg): global errors diff --git 
a/rules/RULE_10_1_A_do_not_use_bufferoverflow_risky_function_for_unix.py b/rules/RULE_10_1_A_do_not_use_bufferoverflow_risky_function_for_unix.py index 752ea8a..a24c88f 100644 --- a/rules/RULE_10_1_A_do_not_use_bufferoverflow_risky_function_for_unix.py +++ b/rules/RULE_10_1_A_do_not_use_bufferoverflow_risky_function_for_unix.py @@ -37,11 +37,6 @@ def RunRule(lexer, contextStack): - # Boost.Format, Folly.Format don't provide printf but if they do, - # that can be handled by adding (or others) to whitelist - whitelist = ["fmt"] - blacklist = ["std", ""] # to catch ::printf - t = lexer.GetCurToken() if t.type == "ID" and t.value in unix_bufferoverflow_functions: t2 = lexer.PeekNextTokenSkipWhiteSpaceAndComment() @@ -50,6 +45,11 @@ def RunRule(lexer, contextStack): # tribool state: safe, unsafe, unknown safe_alternative = False unsafe_alternative = False + # Boost.Format, Folly.Format don't provide printf but if they do, + # that can be handled by adding (or others) to whitelist + whitelist = ["fmt"] + blacklist = ["std", ""] # to catch ::printf + if t3 is None: # C style usage: flat out error unsafe_alternative = True @@ -66,12 +66,16 @@ def RunRule(lexer, contextStack): unsafe_alternative = True # elif unknown namespace => unknown safety if unsafe_alternative: - nsiqcppstyle_reporter.Error(t, __name__, "Do not use bufferoverflow risky function(%s)" % t.value) + nsiqcppstyle_reporter.Error( + t, + __name__, + f"Do not use bufferoverflow risky function({t.value})", + ) elif not safe_alternative: nsiqcppstyle_reporter.Error( t, __name__, - "Caution: Uknown imlementation of a bufferoverflow risky function(%s)" % t.value, + f"Caution: Uknown imlementation of a bufferoverflow risky function({t.value})", ) diff --git a/rules/RULE_10_1_B_do_not_use_bufferoverflow_risky_function_for_windows.py b/rules/RULE_10_1_B_do_not_use_bufferoverflow_risky_function_for_windows.py index cbb2adc..35a3c80 100644 --- a/rules/RULE_10_1_B_do_not_use_bufferoverflow_risky_function_for_windows.py 
+++ b/rules/RULE_10_1_B_do_not_use_bufferoverflow_risky_function_for_windows.py @@ -92,7 +92,11 @@ def RunRule(lexer, contextStack): if t2 is not None and t2.type == "LPAREN": t3 = lexer.PeekPrevTokenSkipWhiteSpaceAndComment() if t3 is None or t3.type != "PERIOD": - nsiqcppstyle_reporter.Error(t, __name__, "Do not use burfferoverflow risky function(%s)" % t.value) + nsiqcppstyle_reporter.Error( t, __name__, f"Do not use burfferoverflow risky function({t.value})", ) ruleManager.AddFunctionScopeRule(RunRule) diff --git a/rules/RULE_3_1_A_do_not_start_filename_with_underbar.py b/rules/RULE_3_1_A_do_not_start_filename_with_underbar.py index e01ccaf..16dc031 100644 --- a/rules/RULE_3_1_A_do_not_start_filename_with_underbar.py +++ b/rules/RULE_3_1_A_do_not_start_filename_with_underbar.py @@ -21,7 +21,7 @@ def RunRule(lexer, filename, dirname): nsiqcppstyle_reporter.Error( nsiqcppstyle_reporter.DummyToken(lexer.filename, "", 0, 0), __name__, - "File name(%s) should not start with underbar." % filename, + f"File name({filename}) should not start with underbar.", ) diff --git a/rules/RULE_3_2_B_do_not_use_same_filename_more_than_once.py b/rules/RULE_3_2_B_do_not_use_same_filename_more_than_once.py index f1bcdc7..9465d82 100644 --- a/rules/RULE_3_2_B_do_not_use_same_filename_more_than_once.py +++ b/rules/RULE_3_2_B_do_not_use_same_filename_more_than_once.py @@ -41,10 +41,7 @@ def RunRule(lexer, filename, dirname): nsiqcppstyle_reporter.Error( DummyToken(lexer.filename, "", 0, 0), __name__, - "Do not use same filename({}) more than once. This filename is used in {}".format( filename, ", ".join(filenameMap[filename]), ), + f'Do not use same filename({filename}) more than once. 
This filename is used in {", ".join(filenameMap[filename])}', ) diff --git a/rules/RULE_3_2_CD_do_not_use_special_characters_in_filename.py b/rules/RULE_3_2_CD_do_not_use_special_characters_in_filename.py index c235d2f..8b86548 100644 --- a/rules/RULE_3_2_CD_do_not_use_special_characters_in_filename.py +++ b/rules/RULE_3_2_CD_do_not_use_special_characters_in_filename.py @@ -23,7 +23,7 @@ def RunRule(lexer, filename, dirname): nsiqcppstyle_reporter.Error( DummyToken(lexer.filename, "", 0, 0), __name__, - "Do not use special characters in file name (%s)." % filename, + f"Do not use special characters in file name ({filename}).", ) diff --git a/rules/RULE_3_2_F_use_representitive_classname_for_cpp_filename.py b/rules/RULE_3_2_F_use_representitive_classname_for_cpp_filename.py index 1c7710c..884ab3a 100644 --- a/rules/RULE_3_2_F_use_representitive_classname_for_cpp_filename.py +++ b/rules/RULE_3_2_F_use_representitive_classname_for_cpp_filename.py @@ -76,7 +76,7 @@ def RunFileEndRule(lexer, filename, dirname): nsiqcppstyle_reporter.Error( DummyToken(lexer.filename, "", 0, 0), __name__, - "The filename does not represent the classnames (%s)" % (classname), + f"The filename does not represent the classnames ({classname})", ) diff --git a/rules/RULE_3_2_H_do_not_use_underbars_for_cpp_filename.py b/rules/RULE_3_2_H_do_not_use_underbars_for_cpp_filename.py index 58d37c8..ac146fd 100644 --- a/rules/RULE_3_2_H_do_not_use_underbars_for_cpp_filename.py +++ b/rules/RULE_3_2_H_do_not_use_underbars_for_cpp_filename.py @@ -30,7 +30,7 @@ def RunRule(lexer, filename, dirname): nsiqcppstyle_reporter.Error( DummyToken(lexer.filename, "", 0, 0), __name__, - "Do not use underbar for cpp file name (%s)." 
% filename, + f"Do not use underbar for cpp file name ({filename}).", ) diff --git a/rules/RULE_3_2_H_do_not_use_uppercase_for_c_filename.py b/rules/RULE_3_2_H_do_not_use_uppercase_for_c_filename.py index 5d59147..541f5ba 100644 --- a/rules/RULE_3_2_H_do_not_use_uppercase_for_c_filename.py +++ b/rules/RULE_3_2_H_do_not_use_uppercase_for_c_filename.py @@ -24,7 +24,7 @@ def RunRule(lexer, filename, dirname): nsiqcppstyle_reporter.Error( DummyToken(lexer.filename, "", 0, 0), __name__, - "Do not use uppercase for c file name (%s)." % filename, + f"Do not use uppercase for c file name ({filename}).", ) diff --git a/rules/RULE_3_3_A_start_function_name_with_is_or_has_when_return_bool.py b/rules/RULE_3_3_A_start_function_name_with_is_or_has_when_return_bool.py index bd4f8c0..f945d60 100644 --- a/rules/RULE_3_3_A_start_function_name_with_is_or_has_when_return_bool.py +++ b/rules/RULE_3_3_A_start_function_name_with_is_or_has_when_return_bool.py @@ -40,7 +40,7 @@ def RunRule(lexer, fullName, decl, contextStack, context): nsiqcppstyle_reporter.Error( t, __name__, - "The function name(%s) should start with has or is when returinning bool" % fullName, + f"The function name({fullName}) should start with has or is when returinning bool", ) break k += 1 diff --git a/rules/RULE_3_3_A_start_function_name_with_lowercase_unix.py b/rules/RULE_3_3_A_start_function_name_with_lowercase_unix.py index 2932fb9..c76d39d 100644 --- a/rules/RULE_3_3_A_start_function_name_with_lowercase_unix.py +++ b/rules/RULE_3_3_A_start_function_name_with_lowercase_unix.py @@ -38,7 +38,11 @@ def RunRule(lexer, fullName, decl, contextStack, context): return if IsOperator(value): return - nsiqcppstyle_reporter.Error(t, __name__, "Do not start function name(%s) with uppercase" % fullName) + nsiqcppstyle_reporter.Error( t, __name__, f"Do not start function name({fullName}) with uppercase", ) ruleManager.AddFunctionNameRule(RunRule) diff --git 
a/rules/RULE_3_3_A_start_function_name_with_upperrcase_windows.py b/rules/RULE_3_3_A_start_function_name_with_upperrcase_windows.py index 74acbcb..5828a04 100644 --- a/rules/RULE_3_3_A_start_function_name_with_upperrcase_windows.py +++ b/rules/RULE_3_3_A_start_function_name_with_upperrcase_windows.py @@ -42,7 +42,11 @@ def RunRule(lexer, fullName, decl, contextStack, context): if value.startswith("~"): value = value[1:] if Search("^[a-z]", value) and not IsOperator(value) and t.value not in keywords: - nsiqcppstyle_reporter.Error(t, __name__, "Do not start function name(%s) with lowercase" % fullName) + nsiqcppstyle_reporter.Error( + t, + __name__, + f"Do not start function name({fullName}) with lowercase", + ) ruleManager.AddFunctionNameRule(RunRule) diff --git a/rules/RULE_3_3_B_start_private_function_name_with_underbar.py b/rules/RULE_3_3_B_start_private_function_name_with_underbar.py index 4bbba32..c32a582 100644 --- a/rules/RULE_3_3_B_start_private_function_name_with_underbar.py +++ b/rules/RULE_3_3_B_start_private_function_name_with_underbar.py @@ -36,7 +36,11 @@ def RunRule(lexer, fullName, decl, contextStack, context): if IsOperator(value): return if upperBlock is not None and upperBlock.additional == "PRIVATE" and not value.startswith("_"): - nsiqcppstyle_reporter.Error(t, __name__, "Start private function name(%s) with underbar" % fullName) + nsiqcppstyle_reporter.Error( + t, + __name__, + f"Start private function name({fullName}) with underbar", + ) def RunTypeScopeRule(lexer, contextStack): diff --git a/rules/RULE_4_1_B_indent_each_enum_item_in_enum_block.py b/rules/RULE_4_1_B_indent_each_enum_item_in_enum_block.py index 2870bdf..6441617 100644 --- a/rules/RULE_4_1_B_indent_each_enum_item_in_enum_block.py +++ b/rules/RULE_4_1_B_indent_each_enum_item_in_enum_block.py @@ -23,21 +23,22 @@ def RunRule(lexer, typeName, typeFullName, decl, contextStack, typeContext): - if not decl and typeName == "ENUM" and typeContext is not None: - column = 
GetIndentation(lexer.GetCurToken()) - lexer._MoveToToken(typeContext.startToken) - t2 = typeContext.endToken - while True: - t = lexer.GetNextTokenSkipWhiteSpaceAndCommentAndPreprocess() - if t is None or t == t2: - break + if decl or typeName != "ENUM" or typeContext is None: + return + column = GetIndentation(lexer.GetCurToken()) + lexer._MoveToToken(typeContext.startToken) + t2 = typeContext.endToken + while True: + t = lexer.GetNextTokenSkipWhiteSpaceAndCommentAndPreprocess() + if t is None or t == t2: + break # if typeContext != t.contextStack.Peek() : continue - if GetRealColumn(t) <= (column + 1): - nsiqcppstyle_reporter.Error( - t, - __name__, - "Enum block should be indented. But the token(%s) seems to be unindented" % t.value, - ) + if GetRealColumn(t) <= (column + 1): + nsiqcppstyle_reporter.Error( + t, + __name__, + f"Enum block should be indented. But the token({t.value}) seems to be unindented", + ) ruleManager.AddTypeNameRule(RunRule) diff --git a/rules/RULE_4_1_B_locate_each_enum_item_in_seperate_line.py b/rules/RULE_4_1_B_locate_each_enum_item_in_seperate_line.py index 132ccb7..61574a5 100644 --- a/rules/RULE_4_1_B_locate_each_enum_item_in_seperate_line.py +++ b/rules/RULE_4_1_B_locate_each_enum_item_in_seperate_line.py @@ -22,25 +22,26 @@ def RunRule(lexer, typeName, typeFullName, decl, contextStack, typeContext): - if not decl and typeContext is not None: + if decl or typeContext is None: + return # column = GetRealColumn(lexer.GetCurToken()) - if typeName == "ENUM": - lexer._MoveToToken(typeContext.startToken) - while True: - nt = lexer.GetNextTokenInTypeList(["COMMA", "RBRACE"], False, True) - if nt is None or nt == typeContext.endToken: - break - if typeContext != nt.contextStack.Peek(): - continue - nt2 = lexer.PeekNextTokenSkipWhiteSpaceAndCommentAndPreprocess() - nt3 = lexer.PeekPrevTokenSkipWhiteSpaceAndCommentAndPreprocess() + if typeName == "ENUM": + lexer._MoveToToken(typeContext.startToken) + while True: + nt = 
lexer.GetNextTokenInTypeList(["COMMA", "RBRACE"], False, True) + if nt is None or nt == typeContext.endToken: + break + if typeContext != nt.contextStack.Peek(): + continue + nt2 = lexer.PeekNextTokenSkipWhiteSpaceAndCommentAndPreprocess() + nt3 = lexer.PeekPrevTokenSkipWhiteSpaceAndCommentAndPreprocess() # print nt, nt2,nt3 - if nt.lineno == nt2.lineno and nt3.lineno == nt.lineno: - nsiqcppstyle_reporter.Error( - nt2, - __name__, - "Each enum item(%s) should be located in the different line" % nt2.value, - ) + if nt.lineno == nt2.lineno and nt3.lineno == nt.lineno: + nsiqcppstyle_reporter.Error( + nt2, + __name__, + f"Each enum item({nt2.value}) should be located in the different line", + ) ruleManager.AddTypeNameRule(RunRule) diff --git a/rules/RULE_4_2_A_A_space_around_operator.py b/rules/RULE_4_2_A_A_space_around_operator.py index 6198d4e..ad442e7 100644 --- a/rules/RULE_4_2_A_A_space_around_operator.py +++ b/rules/RULE_4_2_A_A_space_around_operator.py @@ -79,7 +79,9 @@ def RunRule(lexer, contextStack): if t2.type not in ["SPACE", "LINEFEED", "PREPROCESSORNEXT"] or t3.type not in ["SPACE", "LINEFEED"]: t3 = lexer.GetPrevTokenSkipWhiteSpaceAndComment() if t3 is not None and t3.type != "OPERATOR" and not Match(r"^\w*#include", t.line): - nsiqcppstyle_reporter.Error(t, __name__, "Provide spaces b/w operator '%s'" % t.value) + nsiqcppstyle_reporter.Error( + t, __name__, f"Provide spaces b/w operator '{t.value}'" + ) elif t.type in nextoperator: t2 = lexer.PeekNextToken() if ( @@ -87,7 +89,9 @@ def RunRule(lexer, contextStack): and t2.type not in ["SPACE", "LINEFEED", "PREPROCESSORNEXT"] and not Match(r"^\w*#include", t.line) ): - nsiqcppstyle_reporter.Error(t, __name__, "Provide spaces after operator '%s'" % t.value) + nsiqcppstyle_reporter.Error( + t, __name__, f"Provide spaces after operator '{t.value}'" + ) elif t.type in unaryoperator: t2 = lexer.PeekPrevToken() t3 = lexer.PeekNextToken() @@ -109,7 +113,9 @@ def RunRule(lexer, contextStack): ] and t3.type not 
in ["SEMI", "SPACE", "LINEFEED", "RBRACE", "RPAREN", "RBRACKET"] ): - nsiqcppstyle_reporter.Error(t, __name__, "Provide spaces before operator '%s'" % t.value) + nsiqcppstyle_reporter.Error( + t, __name__, f"Provide spaces before operator '{t.value}'" + ) if ( t2 is not None @@ -126,7 +132,9 @@ def RunRule(lexer, contextStack): ] and t3.type not in ["SEMI", "SPACE", "LINEFEED", "RBRACE", "RPAREN", "RBRACKET"] ): - nsiqcppstyle_reporter.Error(t, __name__, "Provide spaces after operator '%s'" % t.value) + nsiqcppstyle_reporter.Error( + t, __name__, f"Provide spaces after operator '{t.value}'" + ) ruleManager.AddRule(RunRule) diff --git a/rules/RULE_4_2_A_B_space_around_word.py b/rules/RULE_4_2_A_B_space_around_word.py index 59bfbc8..79d6aff 100644 --- a/rules/RULE_4_2_A_B_space_around_word.py +++ b/rules/RULE_4_2_A_B_space_around_word.py @@ -48,7 +48,11 @@ def RunRule(lexer, contextStack): if t2 is not None and t3 is not None: if t2.type not in ["SPACE", "LINEFEED", "PREPROCESSORNEXT"] or t3.type not in ["SPACE", "LINEFEED"]: if not Search("^[ ]*#[ ]*include", t.line): - nsiqcppstyle_reporter.Error(t, __name__, "Put space before/after word '%s'." 
% t.value) + nsiqcppstyle_reporter.Error( + t, + __name__, + f"Put space before/after word '{t.value}'.", + ) ruleManager.AddFunctionScopeRule(RunRule) diff --git a/rules/RULE_4_4_A_do_not_write_over_120_columns_per_line.py b/rules/RULE_4_4_A_do_not_write_over_120_columns_per_line.py index 992fed5..34e4724 100644 --- a/rules/RULE_4_4_A_do_not_write_over_120_columns_per_line.py +++ b/rules/RULE_4_4_A_do_not_write_over_120_columns_per_line.py @@ -25,9 +25,6 @@ def RunRule(lexer, line, lineno): __name__, "Lines should very rarely be longer than 120 characters", ) - else: - # add code to recognise tabs as charachters - pass ruleManager.AddLineRule(RunRule) diff --git a/rules/RULE_4_5_A_brace_for_namespace_should_be_located_in_seperate_line.py b/rules/RULE_4_5_A_brace_for_namespace_should_be_located_in_seperate_line.py index 7cd9c99..95e7dc8 100644 --- a/rules/RULE_4_5_A_brace_for_namespace_should_be_located_in_seperate_line.py +++ b/rules/RULE_4_5_A_brace_for_namespace_should_be_located_in_seperate_line.py @@ -22,25 +22,26 @@ def RunRule(lexer, currentType, fullName, decl, contextStack, typeContext): - if not decl and currentType == "NAMESPACE" and typeContext is not None: - t = lexer.GetNextTokenInType("LBRACE", False, True) - if t is not None: - t2 = typeContext.endToken - if t2 is not None and t.lineno != t2.lineno: - prevToken = lexer.GetPrevTokenSkipWhiteSpaceAndCommentAndPreprocess() - # print contextStack.Peek() - if prevToken is not None and prevToken.lineno == t.lineno: - nsiqcppstyle_reporter.Error( - t, - __name__, - "The brace for type definition should be located in start of line", - ) - if t2.lineno != t.lineno and GetRealColumn(t2) != GetRealColumn(t): - nsiqcppstyle_reporter.Error( - t2, - __name__, - "The brace for type definition should be located in same column", - ) + if decl or currentType != "NAMESPACE" or typeContext is None: + return + t = lexer.GetNextTokenInType("LBRACE", False, True) + if t is not None: + t2 = typeContext.endToken + if t2 is 
not None and t.lineno != t2.lineno: + prevToken = lexer.GetPrevTokenSkipWhiteSpaceAndCommentAndPreprocess() + # print contextStack.Peek() + if prevToken is not None and prevToken.lineno == t.lineno: + nsiqcppstyle_reporter.Error( + t, + __name__, + "The brace for type definition should be located in start of line", + ) + if t2.lineno != t.lineno and GetRealColumn(t2) != GetRealColumn(t): + nsiqcppstyle_reporter.Error( + t2, + __name__, + "The brace for type definition should be located in same column", + ) ruleManager.AddTypeNameRule(RunRule) diff --git a/rules/RULE_4_5_A_braces_for_function_definition_should_be_located_in_seperate_line.py b/rules/RULE_4_5_A_braces_for_function_definition_should_be_located_in_seperate_line.py index e8acd8c..94cae60 100644 --- a/rules/RULE_4_5_A_braces_for_function_definition_should_be_located_in_seperate_line.py +++ b/rules/RULE_4_5_A_braces_for_function_definition_should_be_located_in_seperate_line.py @@ -31,25 +31,26 @@ def RunRule(lexer, fullName, decl, contextStack, typeContext): - if not decl and typeContext is not None: - t = lexer.GetNextTokenInType("LBRACE", False, True) - if t is not None: - t2 = typeContext.endToken - if t2 is not None and t.lineno != t2.lineno: - prevToken = lexer.GetPrevTokenSkipWhiteSpaceAndCommentAndPreprocess() - # print contextStack.Peek() - if prevToken is not None and prevToken.lineno == t.lineno: - nsiqcppstyle_reporter.Error( - t, - __name__, - "The brace for function definition should be located in start of line", - ) - if t2.lineno != t.lineno and GetRealColumn(t2) != GetRealColumn(t): - nsiqcppstyle_reporter.Error( - t2, - __name__, - "The brace for function definition should be located in same column", - ) + if decl or typeContext is None: + return + t = lexer.GetNextTokenInType("LBRACE", False, True) + if t is not None: + t2 = typeContext.endToken + if t2 is not None and t.lineno != t2.lineno: + prevToken = lexer.GetPrevTokenSkipWhiteSpaceAndCommentAndPreprocess() + # print 
contextStack.Peek() + if prevToken is not None and prevToken.lineno == t.lineno: + nsiqcppstyle_reporter.Error( + t, + __name__, + "The brace for function definition should be located in start of line", + ) + if t2.lineno != t.lineno and GetRealColumn(t2) != GetRealColumn(t): + nsiqcppstyle_reporter.Error( + t2, + __name__, + "The brace for function definition should be located in same column", + ) ruleManager.AddFunctionNameRule(RunRule) diff --git a/rules/RULE_4_5_A_braces_for_type_definition_should_be_located_in_seperate_line.py b/rules/RULE_4_5_A_braces_for_type_definition_should_be_located_in_seperate_line.py index 69625b6..c7d6398 100644 --- a/rules/RULE_4_5_A_braces_for_type_definition_should_be_located_in_seperate_line.py +++ b/rules/RULE_4_5_A_braces_for_type_definition_should_be_located_in_seperate_line.py @@ -29,25 +29,26 @@ class K() def RunRule(lexer, currentType, fullName, decl, contextStack, typeContext): - if not decl and currentType != "NAMESPACE" and typeContext is not None: - t = lexer.GetNextTokenInType("LBRACE", False, True) - if t is not None: - t2 = typeContext.endToken - if t2 is not None and t.lineno != t2.lineno: - prevToken = lexer.GetPrevTokenSkipWhiteSpaceAndCommentAndPreprocess() - # print contextStack.Peek() - if prevToken is not None and prevToken.lineno == t.lineno: - nsiqcppstyle_reporter.Error( - t, - __name__, - "The brace for type definition should be located in start of line", - ) - if t2.lineno != t.lineno and GetRealColumn(t2) != GetRealColumn(t): - nsiqcppstyle_reporter.Error( - t2, - __name__, - "The brace for type definition should be located in same column", - ) + if decl or currentType == "NAMESPACE" or typeContext is None: + return + t = lexer.GetNextTokenInType("LBRACE", False, True) + if t is not None: + t2 = typeContext.endToken + if t2 is not None and t.lineno != t2.lineno: + prevToken = lexer.GetPrevTokenSkipWhiteSpaceAndCommentAndPreprocess() + # print contextStack.Peek() + if prevToken is not None and 
prevToken.lineno == t.lineno: + nsiqcppstyle_reporter.Error( + t, + __name__, + "The brace for type definition should be located in start of line", + ) + if t2.lineno != t.lineno and GetRealColumn(t2) != GetRealColumn(t): + nsiqcppstyle_reporter.Error( + t2, + __name__, + "The brace for type definition should be located in same column", + ) ruleManager.AddTypeNameRule(RunRule) diff --git a/rules/RULE_4_5_A_braces_inside_of_function_should_be_located_in_end_of_line.py b/rules/RULE_4_5_A_braces_inside_of_function_should_be_located_in_end_of_line.py index f8bc411..eb2c6fe 100644 --- a/rules/RULE_4_5_A_braces_inside_of_function_should_be_located_in_end_of_line.py +++ b/rules/RULE_4_5_A_braces_inside_of_function_should_be_located_in_end_of_line.py @@ -44,7 +44,7 @@ def RunRule(lexer, contextStack): nsiqcppstyle_reporter.Error( t, __name__, - "Braces inside of function should be located in the next of previous token(%s)" % prevToken.value, + f"Braces inside of function should be located in the next of previous token({prevToken.value})", ) diff --git a/rules/RULE_4_5_A_indent_blocks_inside_of_function.py b/rules/RULE_4_5_A_indent_blocks_inside_of_function.py index ba5fbfc..81251d0 100644 --- a/rules/RULE_4_5_A_indent_blocks_inside_of_function.py +++ b/rules/RULE_4_5_A_indent_blocks_inside_of_function.py @@ -60,7 +60,7 @@ def RunRule(lexer, contextStack): nsiqcppstyle_reporter.Error( nt, __name__, - "Indent in the block. token(%s) seems to be located left column of previsous brace" % nt.value, + f"Indent in the block. 
token({nt.value}) seems to be located left column of previsous brace", ) diff --git a/rules/RULE_5_2_C_provide_doxygen_class_comment_on_class_def.py b/rules/RULE_5_2_C_provide_doxygen_class_comment_on_class_def.py index e0f676d..f8586a8 100644 --- a/rules/RULE_5_2_C_provide_doxygen_class_comment_on_class_def.py +++ b/rules/RULE_5_2_C_provide_doxygen_class_comment_on_class_def.py @@ -43,7 +43,7 @@ def RunRule(lexer, currentType, fullName, decl, contextStack, typeContext): nsiqcppstyle_reporter.Error( t, __name__, - "Doxygen Comment should be provided in front of class def(%s)." % fullName, + f"Doxygen Comment should be provided in front of class def({fullName}).", ) diff --git a/rules/RULE_5_2_C_provide_doxygen_namespace_comment_on_namespace_def.py b/rules/RULE_5_2_C_provide_doxygen_namespace_comment_on_namespace_def.py index 6d501ba..a2cee93 100644 --- a/rules/RULE_5_2_C_provide_doxygen_namespace_comment_on_namespace_def.py +++ b/rules/RULE_5_2_C_provide_doxygen_namespace_comment_on_namespace_def.py @@ -45,7 +45,7 @@ def RunRule(lexer, currentType, fullName, decl, contextStack, typeContext): nsiqcppstyle_reporter.Error( t, __name__, - "Doxygen Comment should be provided in front of namespace def(%s)." % fullName, + f"Doxygen Comment should be provided in front of namespace def({fullName}).", ) diff --git a/rules/RULE_5_2_C_provide_doxygen_struct_comment_on_struct_def.py b/rules/RULE_5_2_C_provide_doxygen_struct_comment_on_struct_def.py index b526294..cd2f112 100644 --- a/rules/RULE_5_2_C_provide_doxygen_struct_comment_on_struct_def.py +++ b/rules/RULE_5_2_C_provide_doxygen_struct_comment_on_struct_def.py @@ -44,7 +44,7 @@ def RunRule(lexer, currentType, fullName, decl, contextStack, context): nsiqcppstyle_reporter.Error( t, __name__, - "Doxygen Comment should be provided in front of struct/union def(%s)." 
% fullName, + f"Doxygen Comment should be provided in front of struct/union def({fullName}).", ) diff --git a/rules/RULE_5_3_A_provide_doxygen_function_comment_on_function_in_header.py b/rules/RULE_5_3_A_provide_doxygen_function_comment_on_function_in_header.py index 59dd7d1..e533590 100644 --- a/rules/RULE_5_3_A_provide_doxygen_function_comment_on_function_in_header.py +++ b/rules/RULE_5_3_A_provide_doxygen_function_comment_on_function_in_header.py @@ -65,7 +65,7 @@ def RunRule(lexer, fullName, decl, contextStack, context): nsiqcppstyle_reporter.Error( t, __name__, - "Doxygen Comment should be provided in front of function (%s) in header." % fullName, + f"Doxygen Comment should be provided in front of function ({fullName}) in header.", ) diff --git a/rules/RULE_5_3_A_provide_doxygen_function_comment_on_function_in_impl.py b/rules/RULE_5_3_A_provide_doxygen_function_comment_on_function_in_impl.py index 948b2c7..177cfb0 100644 --- a/rules/RULE_5_3_A_provide_doxygen_function_comment_on_function_in_impl.py +++ b/rules/RULE_5_3_A_provide_doxygen_function_comment_on_function_in_impl.py @@ -82,7 +82,7 @@ def RunRule(lexer, fullName, decl, contextStack, context): nsiqcppstyle_reporter.Error( t, __name__, - "Doxygen Comment should be provided in front of function (%s) in impl file." 
% fullName, + f"Doxygen Comment should be provided in front of function ({fullName}) in impl file.", ) diff --git a/rules/RULE_6_1_A_do_not_omit_function_parameter_names.py b/rules/RULE_6_1_A_do_not_omit_function_parameter_names.py index 46e12d5..a51200a 100644 --- a/rules/RULE_6_1_A_do_not_omit_function_parameter_names.py +++ b/rules/RULE_6_1_A_do_not_omit_function_parameter_names.py @@ -24,42 +24,43 @@ def RunRule(lexer, fullName, decl, contextStack, context): - if decl: - t2 = lexer.GetCurToken() - lexer.GetNextTokenInType("LPAREN", False, True) - lexer.PushTokenIndex() - rparen = lexer.GetNextMatchingToken() - lexer.PopTokenIndex() - count = 0 - - while True: - t = lexer.GetNextTokenSkipWhiteSpaceAndCommentAndPreprocess() - if rparen is None or t == rparen or t is None: - break - if t.type in ["ID", "BOOL", "CHAR", "INT", "LONG", "DOUBLE", "FLOAT", "SHORT", "VOID"]: - if t.type == "VOID": - nt = lexer.PeekNextTokenSkipWhiteSpaceAndCommentAndPreprocess() - if nt == rparen: - return - count += 1 - elif t.type == "LT": - lexer.GetNextMatchingGT() - elif t.type == "COMMA": - if count == 1: - nsiqcppstyle_reporter.Error( - t2, - __name__, - "function (%s) has non named parameter. use named parameter." 
% fullName, - ) - break - count = 0 - elif rparen.lexpos <= t.lexpos and count == 1: + if not decl: + return + t2 = lexer.GetCurToken() + lexer.GetNextTokenInType("LPAREN", False, True) + lexer.PushTokenIndex() + rparen = lexer.GetNextMatchingToken() + lexer.PopTokenIndex() + count = 0 + + while True: + t = lexer.GetNextTokenSkipWhiteSpaceAndCommentAndPreprocess() + if rparen is None or t == rparen or t is None: + break + if t.type in ["ID", "BOOL", "CHAR", "INT", "LONG", "DOUBLE", "FLOAT", "SHORT", "VOID"]: + if t.type == "VOID": + nt = lexer.PeekNextTokenSkipWhiteSpaceAndCommentAndPreprocess() + if nt == rparen: + return + count += 1 + elif t.type == "LT": + lexer.GetNextMatchingGT() + elif t.type == "COMMA": + if count == 1: nsiqcppstyle_reporter.Error( t2, __name__, - "function (%s) has non named parameter. use named parameter." % fullName, + f"function ({fullName}) has non named parameter. use named parameter.", ) break + count = 0 + elif rparen.lexpos <= t.lexpos and count == 1: + nsiqcppstyle_reporter.Error( + t2, + __name__, + f"function ({fullName}) has non named parameter. use named parameter.", + ) + break ruleManager.AddFunctionNameRule(RunRule) diff --git a/rules/RULE_6_1_E_do_not_use_more_than_5_paramters_in_function.py b/rules/RULE_6_1_E_do_not_use_more_than_5_paramters_in_function.py index fb4a7b5..8afd687 100644 --- a/rules/RULE_6_1_E_do_not_use_more_than_5_paramters_in_function.py +++ b/rules/RULE_6_1_E_do_not_use_more_than_5_paramters_in_function.py @@ -43,7 +43,7 @@ def RunRule(lexer, fullName, decl, contextStack, context): nsiqcppstyle_reporter.Error( t, __name__, - "function (%s) has more than 5 parameters. please use struct instead." % fullName, + f"function ({fullName}) has more than 5 parameters. 
please use struct instead.", ) break diff --git a/rules/RULE_6_1_G_write_less_than_200_lines_for_function.py b/rules/RULE_6_1_G_write_less_than_200_lines_for_function.py index 9aefe60..42aee44 100644 --- a/rules/RULE_6_1_G_write_less_than_200_lines_for_function.py +++ b/rules/RULE_6_1_G_write_less_than_200_lines_for_function.py @@ -27,15 +27,16 @@ def RunRule(lexer, fullName, decl, contextStack, context): if not decl and context is not None: startline = context.startToken.lineno endline = context.endToken.lineno - count = 0 - for eachLine in lexer.lines[startline - 1 : endline - 1]: - if not Match(r"^\s*$", eachLine): - count += 1 + count = sum( + 1 + for eachLine in lexer.lines[startline - 1 : endline - 1] + if not Match(r"^\s*$", eachLine) + ) if count > 200: nsiqcppstyle_reporter.Error( context.startToken, __name__, - "Do not write function over non blank 200 lines(%s)." % fullName, + f"Do not write function over non blank 200 lines({fullName}).", ) diff --git a/rules/RULE_6_2_A_do_not_use_system_dependent_type.py b/rules/RULE_6_2_A_do_not_use_system_dependent_type.py index 603f463..bb6fec5 100644 --- a/rules/RULE_6_2_A_do_not_use_system_dependent_type.py +++ b/rules/RULE_6_2_A_do_not_use_system_dependent_type.py @@ -28,10 +28,7 @@ def RunRule(lexer, contextStack): nsiqcppstyle_reporter.Error( t, __name__, - "Do not use system dependent type({}). Use system independent type like ({})".format( - t.value, - systemDependentType[t.type], - ), + f"Do not use system dependent type({t.value}). 
Use system independent type like ({systemDependentType[t.type]})", ) diff --git a/rules/RULE_6_5_B_do_not_use_lowercase_for_macro_constants.py b/rules/RULE_6_5_B_do_not_use_lowercase_for_macro_constants.py index 0c2d555..55cbf00 100644 --- a/rules/RULE_6_5_B_do_not_use_lowercase_for_macro_constants.py +++ b/rules/RULE_6_5_B_do_not_use_lowercase_for_macro_constants.py @@ -26,7 +26,11 @@ def RunRule(lexer, contextStack): k2 = lexer.GetNextTokenSkipWhiteSpaceAndComment() if d.type == "ID" and k2 is not None and k2.type in ["NUMBER", "STRING", "CHARACTOR"] and d.lineno == k2.lineno: if Search("[a-z]", d.value): - nsiqcppstyle_reporter.Error(d, __name__, "Do not use lower case (%s) for macro value" % d.value) + nsiqcppstyle_reporter.Error( + d, + __name__, + f"Do not use lower case ({d.value}) for macro value", + ) ruleManager.AddPreprocessRule(RunRule) diff --git a/rules/RULE_6_5_B_do_not_use_macro_for_constants.py b/rules/RULE_6_5_B_do_not_use_macro_for_constants.py index 6fd366d..3703542 100644 --- a/rules/RULE_6_5_B_do_not_use_macro_for_constants.py +++ b/rules/RULE_6_5_B_do_not_use_macro_for_constants.py @@ -32,7 +32,9 @@ def RunRule(lexer, contextStack): k2 = lexer.GetNextTokenSkipWhiteSpaceAndComment() if d.type == "ID" and k2 is not None and k2.type in ["NUMBER", "STRING", "CHARACTOR"] and d.lineno == k2.lineno: if not Search("^_", d.value): - nsiqcppstyle_reporter.Error(d, __name__, "Do not use macro(%s) for constant" % d.value) + nsiqcppstyle_reporter.Error( + d, __name__, f"Do not use macro({d.value}) for constant" + ) ruleManager.AddPreprocessRule(RunRule) diff --git a/rules/RULE_9_1_A_do_not_use_hardcorded_include_path.py b/rules/RULE_9_1_A_do_not_use_hardcorded_include_path.py index 7c02243..719622c 100644 --- a/rules/RULE_9_1_A_do_not_use_hardcorded_include_path.py +++ b/rules/RULE_9_1_A_do_not_use_hardcorded_include_path.py @@ -25,7 +25,11 @@ def RunRule(lexer, contextStack): if d is not None and d.type == "STRING": value = d.value if 
value.startswith('"/') or Search(r"^\"[a-zA-Z]:", value): - nsiqcppstyle_reporter.Error(d, __name__, "Do not use absolute path(%s) in the include path" % value) + nsiqcppstyle_reporter.Error( + d, + __name__, + f"Do not use absolute path({value}) in the include path", + ) ruleManager.AddPreprocessRule(RunRule) diff --git a/rules/RULE_9_2_D_use_reentrant_function.py b/rules/RULE_9_2_D_use_reentrant_function.py index 96ffcb1..c0926ac 100644 --- a/rules/RULE_9_2_D_use_reentrant_function.py +++ b/rules/RULE_9_2_D_use_reentrant_function.py @@ -44,7 +44,9 @@ def RunRule(lexer, contextStack): and nsiqcppstyle_state._nsiqcppstyle_state.GetVar("ignore_toupper", "false") == "true" ): return - nsiqcppstyle_reporter.Error(t, __name__, "Do not use not reentrant function(%s)." % t.value) + nsiqcppstyle_reporter.Error( + t, __name__, f"Do not use not reentrant function({t.value})." + ) ruleManager.AddFunctionScopeRule(RunRule)