Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS
or check out with Subversion.
Download ZIP
Browse files

Merged dashersw/GUI-tools as a subtree at tools

  • Loading branch information...
commit 0724c6e1c40bb55b3dd91364add2327f78891a15 2 parents 661a93f + 4bc5778
@dashersw authored
Showing with 9,209 additions and 0 deletions.
  1. 0  tools/README
  2. +10 −0 tools/closure_linter-2.2.7/PKG-INFO
  3. +9 −0 tools/closure_linter-2.2.7/README
  4. +1 −0  tools/closure_linter-2.2.7/closure_linter/__init__.py
  5. +82 −0 tools/closure_linter-2.2.7/closure_linter/checker.py
  6. +248 −0 tools/closure_linter-2.2.7/closure_linter/checkerbase.py
  7. +1 −0  tools/closure_linter-2.2.7/closure_linter/common/__init__.py
  8. +65 −0 tools/closure_linter-2.2.7/closure_linter/common/error.py
  9. +46 −0 tools/closure_linter-2.2.7/closure_linter/common/erroraccumulator.py
  10. +61 −0 tools/closure_linter-2.2.7/closure_linter/common/errorhandler.py
  11. +203 −0 tools/closure_linter-2.2.7/closure_linter/common/errorprinter.py
  12. +105 −0 tools/closure_linter-2.2.7/closure_linter/common/filetestcase.py
  13. +170 −0 tools/closure_linter-2.2.7/closure_linter/common/htmlutil.py
  14. +39 −0 tools/closure_linter-2.2.7/closure_linter/common/lintrunner.py
  15. +60 −0 tools/closure_linter-2.2.7/closure_linter/common/matcher.py
  16. +126 −0 tools/closure_linter-2.2.7/closure_linter/common/position.py
  17. +190 −0 tools/closure_linter-2.2.7/closure_linter/common/simplefileflags.py
  18. +184 −0 tools/closure_linter-2.2.7/closure_linter/common/tokenizer.py
  19. +125 −0 tools/closure_linter-2.2.7/closure_linter/common/tokens.py
  20. +754 −0 tools/closure_linter-2.2.7/closure_linter/ecmalintrules.py
  21. +521 −0 tools/closure_linter-2.2.7/closure_linter/ecmametadatapass.py
  22. +336 −0 tools/closure_linter-2.2.7/closure_linter/error_fixer.py
  23. +42 −0 tools/closure_linter-2.2.7/closure_linter/errorrules.py
  24. +131 −0 tools/closure_linter-2.2.7/closure_linter/errors.py
  25. +47 −0 tools/closure_linter-2.2.7/closure_linter/fixjsstyle.py
  26. +61 −0 tools/closure_linter-2.2.7/closure_linter/fixjsstyle_test.py
  27. +101 −0 tools/closure_linter-2.2.7/closure_linter/full_test.py
  28. +142 −0 tools/closure_linter-2.2.7/closure_linter/gjslint.py
  29. +543 −0 tools/closure_linter-2.2.7/closure_linter/indentation.py
  30. +395 −0 tools/closure_linter-2.2.7/closure_linter/javascriptlintrules.py
  31. +238 −0 tools/closure_linter-2.2.7/closure_linter/javascriptstatetracker.py
  32. +53 −0 tools/closure_linter-2.2.7/closure_linter/javascriptstatetracker_test.py
  33. +367 −0 tools/closure_linter-2.2.7/closure_linter/javascripttokenizer.py
  34. +147 −0 tools/closure_linter-2.2.7/closure_linter/javascripttokens.py
  35. +965 −0 tools/closure_linter-2.2.7/closure_linter/statetracker.py
  36. +285 −0 tools/closure_linter-2.2.7/closure_linter/tokenutil.py
  37. +38 −0 tools/closure_linter-2.2.7/setup.py
  38. +257 −0 tools/goog/build/closurebuilder.py
  39. +186 −0 tools/goog/build/depstree.py
  40. +127 −0 tools/goog/build/depstree_test.py
  41. +197 −0 tools/goog/build/depswriter.py
  42. +69 −0 tools/goog/build/jscompiler.py
  43. +86 −0 tools/goog/build/source.py
  44. +57 −0 tools/goog/build/source_test.py
  45. +78 −0 tools/goog/build/treescan.py
  46. +570 −0 tools/goog/calcdeps.py
  47. +202 −0 tools/goog/compiler/COPYING
  48. +278 −0 tools/goog/compiler/README
  49. BIN  tools/goog/compiler/compiler.jar
  50. +211 −0 tools/goog/scopify.py
View
0  tools/README
No changes.
View
10 tools/closure_linter-2.2.7/PKG-INFO
@@ -0,0 +1,10 @@
+Metadata-Version: 1.0
+Name: closure_linter
+Version: 2.2.7
+Summary: Closure Linter
+Home-page: http://code.google.com/p/closure-linter
+Author: The Closure Linter Authors
+Author-email: opensource@google.com
+License: Apache
+Description: UNKNOWN
+Platform: UNKNOWN
View
9 tools/closure_linter-2.2.7/README
@@ -0,0 +1,9 @@
+This repository contains the Closure Linter - a style checker for JavaScript.
+
+To install the application, run
+ python ./setup.py install
+
+After installing, you get two helper applications installed into /usr/local/bin:
+
+ gjslint.py - runs the linter and checks for errors
+ fixjsstyle.py - tries to fix errors automatically
View
1  tools/closure_linter-2.2.7/closure_linter/__init__.py
@@ -0,0 +1 @@
+#!/usr/bin/env python
View
82 tools/closure_linter-2.2.7/closure_linter/checker.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Core methods for checking JS files for common style guide violations."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+import gflags as flags
+
+from closure_linter import checkerbase
+from closure_linter import ecmametadatapass
+from closure_linter import errors
+from closure_linter import javascriptlintrules
+from closure_linter import javascriptstatetracker
+from closure_linter.common import errorprinter
+from closure_linter.common import lintrunner
+
+flags.DEFINE_list('limited_doc_files', ['dummy.js', 'externs.js'],
+ 'List of files with relaxed documentation checks. Will not '
+ 'report errors for missing documentation, some missing '
+ 'descriptions, or methods whose @return tags don\'t have a '
+ 'matching return statement.')
+
+
+class JavaScriptStyleChecker(checkerbase.CheckerBase):
+ """Checker that applies JavaScriptLintRules."""
+
+ def __init__(self, error_handler):
+ """Initialize a JavaScriptStyleChecker object.
+
+ Args:
+ error_handler: Error handler to pass all errors to.
+ """
+ # NOTE(review): flags.FLAGS.closurized_namespaces is not defined in this
+ # module; presumably registered by one of the imported modules - confirm.
+ checkerbase.CheckerBase.__init__(
+ self,
+ error_handler=error_handler,
+ lint_rules=javascriptlintrules.JavaScriptLintRules(),
+ state_tracker=javascriptstatetracker.JavaScriptStateTracker(
+ closurized_namespaces=flags.FLAGS.closurized_namespaces),
+ metadata_pass=ecmametadatapass.EcmaMetaDataPass(),
+ limited_doc_files=flags.FLAGS.limited_doc_files)
+
+
+class GJsLintRunner(lintrunner.LintRunner):
+ """Wrapper class to run GJsLint."""
+
+ def Run(self, filenames, error_handler=None):
+ """Run GJsLint on the given filenames.
+
+ Args:
+ filenames: The filenames to check
+ error_handler: An optional ErrorHandler object, an ErrorPrinter is used if
+ none is specified.
+
+ Returns:
+ The error handler, which has accumulated every reported error.
+ (NOTE(review): the previous docstring claimed an (error_count,
+ file_count) pair was returned, but the code returns the handler
+ itself; callers should query it for counts.)
+ """
+ if not error_handler:
+ error_handler = errorprinter.ErrorPrinter(errors.NEW_ERRORS)
+
+ checker = JavaScriptStyleChecker(error_handler)
+
+ # Check the list of files.
+ for filename in filenames:
+ checker.Check(filename)
+
+ return error_handler
View
248 tools/closure_linter-2.2.7/closure_linter/checkerbase.py
@@ -0,0 +1,248 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Base classes for writing checkers that operate on tokens."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)',
+ 'jacobr@google.com (Jacob Richman)')
+
+import StringIO
+import traceback
+
+import gflags as flags
+from closure_linter import ecmametadatapass
+from closure_linter import errorrules
+from closure_linter import errors
+from closure_linter import javascripttokenizer
+from closure_linter.common import error
+from closure_linter.common import htmlutil
+
+FLAGS = flags.FLAGS
+flags.DEFINE_boolean('debug_tokens', False,
+ 'Whether to print all tokens for debugging.')
+
+flags.DEFINE_boolean('error_trace', False,
+ 'Whether to show error exceptions.')
+
+class LintRulesBase(object):
+ """Base class for all classes defining the lint rules for a language."""
+
+ def __init__(self):
+ # Checker to report errors to; set later via Initialize().
+ self.__checker = None
+
+ def Initialize(self, checker, limited_doc_checks, is_html):
+ """Initializes to prepare to check a file.
+
+ Args:
+ checker: Class to report errors to.
+ limited_doc_checks: Whether doc checking is relaxed for this file.
+ is_html: Whether the file is an HTML file with extracted contents.
+ """
+ self.__checker = checker
+ self._limited_doc_checks = limited_doc_checks
+ self._is_html = is_html
+
+ def _HandleError(self, code, message, token, position=None,
+ fix_data=None):
+ """Call the HandleError function for the checker we are associated with.
+
+ Args:
+ code: The numeric error code.
+ message: The error message string.
+ token: The token where the error occurred.
+ position: The position of the error within the token, defaults to None.
+ fix_data: Metadata used for fixing the error, defaults to None.
+ """
+ # Errors that errorrules says should not be reported are silently dropped.
+ if errorrules.ShouldReportError(code):
+ self.__checker.HandleError(code, message, token, position, fix_data)
+
+ def CheckToken(self, token, parser_state):
+ """Checks a token, given the current parser_state, for warnings and errors.
+
+ Args:
+ token: The current token under consideration.
+ parser_state: Object that indicates the parser state in the page.
+
+ Raises:
+ TypeError: If not overridden.
+ """
+ raise TypeError('Abstract method CheckToken not implemented')
+
+ def Finalize(self, parser_state, tokenizer_mode):
+ """Perform all checks that need to occur after all lines are processed.
+
+ Args:
+ parser_state: State of the parser after parsing all tokens
+ tokenizer_mode: Mode of the tokenizer after parsing the entire page
+
+ Raises:
+ TypeError: If not overridden.
+ """
+ raise TypeError('Abstract method Finalize not implemented')
+
+
+class CheckerBase(object):
+ """This class handles checking a LintRules object against a file."""
+
+ def __init__(self, error_handler, lint_rules, state_tracker,
+ limited_doc_files=None, metadata_pass=None):
+ """Initialize a checker object.
+
+ Args:
+ error_handler: Object that handles errors.
+ lint_rules: LintRules object defining lint errors given a token
+ and state_tracker object.
+ state_tracker: Object that tracks the current state in the token stream.
+ limited_doc_files: List of filenames that are not required to have
+ documentation comments.
+ metadata_pass: Object that builds metadata about the token stream.
+ """
+ self.__error_handler = error_handler
+ self.__lint_rules = lint_rules
+ self.__state_tracker = state_tracker
+ self.__metadata_pass = metadata_pass
+ self.__limited_doc_files = limited_doc_files
+ self.__tokenizer = javascripttokenizer.JavaScriptTokenizer()
+ self.__has_errors = False
+
+ def HandleError(self, code, message, token, position=None,
+ fix_data=None):
+ """Prints out the given error message including a line number.
+
+ Args:
+ code: The error code.
+ message: The error to print.
+ token: The token where the error occurred, or None if it was a file-wide
+ issue.
+ position: The position of the error, defaults to None.
+ fix_data: Metadata used for fixing the error.
+ """
+ self.__has_errors = True
+ self.__error_handler.HandleError(
+ error.Error(code, message, token, position, fix_data))
+
+ def HasErrors(self):
+ """Returns true if the style checker has found any errors.
+
+ Returns:
+ True if the style checker has found any errors.
+ """
+ return self.__has_errors
+
+ def Check(self, filename, source=None):
+ """Checks the file, printing warnings and errors as they are found.
+
+ Args:
+ filename: The name of the file to check.
+ source: Optional. The contents of the file. Can be either a string or
+ file-like object. If omitted, contents will be read from disk from
+ the given filename.
+ """
+
+ if source is None:
+ try:
+ f = open(filename)
+ except IOError:
+ # An unreadable file is reported as a lint error rather than a crash.
+ self.__error_handler.HandleFile(filename, None)
+ self.HandleError(errors.FILE_NOT_FOUND, 'File not found', None)
+ self.__error_handler.FinishFile()
+ return
+ else:
+ # Python 2: accept both byte and unicode strings as in-memory source.
+ if type(source) in [str, unicode]:
+ f = StringIO.StringIO(source)
+ else:
+ f = source
+
+ try:
+ # HTML files get their <script> contents extracted before linting.
+ if filename.endswith('.html') or filename.endswith('.htm'):
+ self.CheckLines(filename, htmlutil.GetScriptLines(f), True)
+ else:
+ self.CheckLines(filename, f, False)
+ finally:
+ f.close()
+
+ def CheckLines(self, filename, lines_iter, is_html):
+ """Checks a file, given as an iterable of lines, for warnings and errors.
+
+ Args:
+ filename: The name of the file to check.
+ lines_iter: An iterator that yields one line of the file at a time.
+ is_html: Whether the file being checked is an HTML file with extracted
+ contents.
+
+ Returns:
+ A boolean indicating whether the full file could be checked or if checking
+ failed prematurely.
+ """
+ # Relax documentation checks for files matching --limited_doc_files.
+ limited_doc_checks = False
+ if self.__limited_doc_files:
+ for limited_doc_filename in self.__limited_doc_files:
+ if filename.endswith(limited_doc_filename):
+ limited_doc_checks = True
+ break
+
+ state_tracker = self.__state_tracker
+ lint_rules = self.__lint_rules
+ state_tracker.Reset()
+ lint_rules.Initialize(self, limited_doc_checks, is_html)
+
+ token = self.__tokenizer.TokenizeFile(lines_iter)
+
+ # A ParseError from the metadata pass is remembered here and only
+ # reported once the offending token is reached in the loop below.
+ parse_error = None
+ if self.__metadata_pass:
+ try:
+ self.__metadata_pass.Reset()
+ self.__metadata_pass.Process(token)
+ except ecmametadatapass.ParseError, caught_parse_error:
+ if FLAGS.error_trace:
+ traceback.print_exc()
+ parse_error = caught_parse_error
+ except Exception:
+ print 'Internal error in %s' % filename
+ traceback.print_exc()
+ return False
+
+ self.__error_handler.HandleFile(filename, token)
+
+ while token:
+ if FLAGS.debug_tokens:
+ print token
+
+ if parse_error and parse_error.token == token:
+ # Report any parse errors from above once we find the token.
+ message = ('Error parsing file at token "%s". Unable to '
+ 'check the rest of file.' % token.string)
+ self.HandleError(errors.FILE_DOES_NOT_PARSE, message, token)
+ self.__error_handler.FinishFile()
+ return False
+
+ if FLAGS.error_trace:
+ state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken())
+ else:
+ # Intentionally broad except: without --error_trace, any crash while
+ # handling a token is reported as a parse error on that token rather
+ # than aborting the whole run.
+ try:
+ state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken())
+ except:
+ self.HandleError(errors.FILE_DOES_NOT_PARSE,
+ ('Error parsing file at token "%s". Unable to '
+ 'check the rest of file.' % token.string),
+ token)
+ self.__error_handler.FinishFile()
+ return False
+
+ # Check the token for style guide violations.
+ lint_rules.CheckToken(token, state_tracker)
+
+ state_tracker.HandleAfterToken(token)
+
+ # Move to the next token.
+ token = token.next
+
+ lint_rules.Finalize(state_tracker, self.__tokenizer.mode)
+ self.__error_handler.FinishFile()
+ return True
View
1  tools/closure_linter-2.2.7/closure_linter/common/__init__.py
@@ -0,0 +1 @@
+#!/usr/bin/env python
View
65 tools/closure_linter-2.2.7/closure_linter/common/error.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Error object commonly used in linters."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+
+class Error(object):
+ """Object representing a style error."""
+
+ def __init__(self, code, message, token, position, fix_data):
+ """Initialize the error object.
+
+ Args:
+ code: The numeric error code.
+ message: The error message string.
+ token: The tokens.Token where the error occurred.
+ position: The position of the error within the token.
+ fix_data: Data to be used in autofixing. Codes with fix_data are:
+ GOOG_REQUIRES_NOT_ALPHABETIZED - List of string value tokens that are
+ class names in goog.requires calls.
+ """
+ self.code = code
+ self.message = message
+ self.token = token
+ self.position = position
+ # File-wide errors carry no token; anchor them at index 0.
+ if token:
+ self.start_index = token.start_index
+ else:
+ self.start_index = 0
+ self.fix_data = fix_data
+ # Offset the index by the error's position within the token, if any.
+ if self.position:
+ self.start_index += self.position.start
+
+ def Compare(a, b):
+ """Compare two error objects, by source code order.
+
+ NOTE(review): assumes both errors have tokens; a token-less error would
+ raise AttributeError here - confirm callers only sort token-bearing
+ errors.
+
+ Args:
+ a: First error object.
+ b: Second error object.
+
+ Returns:
+ A Negative/0/Positive number when a is before/the same as/after b.
+ """
+ line_diff = a.token.line_number - b.token.line_number
+ if line_diff:
+ return line_diff
+
+ return a.start_index - b.start_index
+ # Legacy pre-decorator idiom, equivalent to decorating with @staticmethod.
+ Compare = staticmethod(Compare)
View
46 tools/closure_linter-2.2.7/closure_linter/common/erroraccumulator.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Linter error handler class that accumulates an array of errors."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+
+from closure_linter.common import errorhandler
+
+
+class ErrorAccumulator(errorhandler.ErrorHandler):
+ """Error handler object that accumulates errors in a list."""
+
+ def __init__(self):
+ # Accumulated (line_number, error_code) pairs, in report order.
+ self._errors = []
+
+ def HandleError(self, error):
+ """Append the error to the list.
+
+ Args:
+ error: The error object
+ """
+ # Only (line number, code) is stored, not the full error object.
+ # NOTE(review): assumes error.token is not None; a file-level error
+ # would raise AttributeError here - confirm with callers.
+ self._errors.append((error.token.line_number, error.code))
+
+ def GetErrors(self):
+ """Returns the accumulated errors.
+
+ Returns:
+ A sequence of (line number, error code) tuples.
+ """
+ return self._errors
View
61 tools/closure_linter-2.2.7/closure_linter/common/errorhandler.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Interface for a linter error handler.
+
+Error handlers aggregate a set of errors from multiple files and can optionally
+perform some action based on the reported errors, for example, logging the error
+or automatically fixing it.
+"""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+
+class ErrorHandler(object):
+ """Error handler interface."""
+
+ def __init__(self):
+ # Runtime guard: this class must be subclassed, never instantiated
+ # directly.
+ if self.__class__ == ErrorHandler:
+ raise NotImplementedError('class ErrorHandler is abstract')
+
+ def HandleFile(self, filename, first_token):
+ """Notifies this ErrorHandler that subsequent errors are in filename.
+
+ Intentionally a no-op; subclasses may override.
+
+ Args:
+ filename: The file being linted.
+ first_token: The first token of the file.
+ """
+
+ def HandleError(self, error):
+ """Append the error to the list.
+
+ Intentionally a no-op; subclasses may override.
+
+ Args:
+ error: The error object
+ """
+
+ def FinishFile(self):
+ """Finishes handling the current file.
+
+ Intentionally a no-op; subclasses may override.
+
+ Should be called after all errors in a file have been handled.
+ """
+
+ def GetErrors(self):
+ """Returns the accumulated errors.
+
+ Intentionally a no-op (returns None); subclasses may override.
+
+ Returns:
+ A sequence of errors.
+ """
View
203 tools/closure_linter-2.2.7/closure_linter/common/errorprinter.py
@@ -0,0 +1,203 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Linter error handler class that prints errors to stdout."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+from closure_linter.common import error
+from closure_linter.common import errorhandler
+
+Error = error.Error
+
+
+# The error message is of the format:
+# Line <number>, E:<code>: message
+DEFAULT_FORMAT = 1
+
+# The error message is of the format:
+# filename:[line number]:message
+UNIX_FORMAT = 2
+
+
+class ErrorPrinter(errorhandler.ErrorHandler):
+ """ErrorHandler that prints errors to stdout."""
+
+ def __init__(self, new_errors=None):
+ """Initializes this error printer.
+
+ Args:
+ new_errors: A sequence of error codes representing recently introduced
+ errors, defaults to None.
+ """
+ # Number of errors
+ self._error_count = 0
+
+ # Number of new errors
+ self._new_error_count = 0
+
+ # Number of files checked
+ self._total_file_count = 0
+
+ # Number of files with errors
+ self._error_file_count = 0
+
+ # Dict of file name to number of errors
+ self._file_table = {}
+
+ # List of errors for each file
+ self._file_errors = None
+
+ # Current file
+ self._filename = None
+
+ self._format = DEFAULT_FORMAT
+
+ if new_errors:
+ self._new_errors = frozenset(new_errors)
+ else:
+ self._new_errors = frozenset(set())
+
+ def SetFormat(self, format):
+ """Sets the print format of errors.
+
+ Args:
+ format: One of {DEFAULT_FORMAT, UNIX_FORMAT}.
+ """
+ # The parameter name shadows the builtin `format`; kept for API
+ # compatibility.
+ self._format = format
+
+ def HandleFile(self, filename, first_token):
+ """Notifies this ErrorPrinter that subsequent errors are in filename.
+
+ Sets the current file name, and sets a flag stating the header for this file
+ has not been printed yet.
+
+ Should be called by a linter before a file is style checked.
+
+ Args:
+ filename: The name of the file about to be checked.
+ first_token: The first token in the file, or None if there was an error
+ opening the file
+ """
+ # Print a blank separator line if the previous file had errors.
+ if self._filename and self._file_table[self._filename]:
+ print
+
+ self._filename = filename
+ self._file_table[filename] = 0
+ self._total_file_count += 1
+ self._file_errors = []
+
+ def HandleError(self, error):
+ """Prints a formatted error message about the specified error.
+
+ The error message is of the format:
+ Error #<code>, line #<number>: message
+
+ NOTE(review): despite the summary above, this method only buffers the
+ error and updates counters; the actual printing happens in FinishFile().
+
+ Args:
+ error: The error object
+ """
+ self._file_errors.append(error)
+ self._file_table[self._filename] += 1
+ self._error_count += 1
+
+ if self._new_errors and error.code in self._new_errors:
+ self._new_error_count += 1
+
+ def _PrintError(self, error):
+ """Prints a formatted error message about the specified error.
+
+ Args:
+ error: The error object
+ """
+ new_error = self._new_errors and error.code in self._new_errors
+ if self._format == DEFAULT_FORMAT:
+ line = ''
+ if error.token:
+ line = 'Line %d, ' % error.token.line_number
+
+ code = 'E:%04d' % error.code
+ if new_error:
+ print '%s%s: (New error) %s' % (line, code, error.message)
+ else:
+ print '%s%s: %s' % (line, code, error.message)
+ else:
+ # UNIX format
+ filename = self._filename
+ line = ''
+ if error.token:
+ line = '%d' % error.token.line_number
+
+ error_code = '%04d' % error.code
+ if new_error:
+ error_code = 'New Error ' + error_code
+ print '%s:%s:(%s) %s' % (filename, line, error_code, error.message)
+
+ def FinishFile(self):
+ """Finishes handling the current file."""
+ if self._file_errors:
+ self._error_file_count += 1
+
+ # The per-file header is only printed in the default (non-UNIX) format.
+ if self._format != UNIX_FORMAT:
+ print '----- FILE : %s -----' % (self._filename)
+
+ # Print buffered errors in source-code order.
+ self._file_errors.sort(Error.Compare)
+
+ for error in self._file_errors:
+ self._PrintError(error)
+
+ def HasErrors(self):
+ """Whether this error printer encountered any errors.
+
+ Returns:
+ The number of errors encountered (truthy if any were found).
+ """
+ return self._error_count
+
+ def HasNewErrors(self):
+ """Whether this error printer encountered any new errors.
+
+ Returns:
+ The number of new errors encountered (truthy if any were found).
+ """
+ return self._new_error_count
+
+ def HasOldErrors(self):
+ """Whether this error printer encountered any old errors.
+
+ Returns:
+ The number of old (non-new) errors encountered (truthy if any).
+ """
+ return self._error_count - self._new_error_count
+
+ def PrintSummary(self):
+ """Print a summary of the number of errors and files."""
+ if self.HasErrors() or self.HasNewErrors():
+ print ('Found %d errors, including %d new errors, in %d files '
+ '(%d files OK).' % (
+ self._error_count,
+ self._new_error_count,
+ self._error_file_count,
+ self._total_file_count - self._error_file_count))
+ else:
+ print '%d files checked, no errors found.' % self._total_file_count
+
+ def PrintFileSummary(self):
+ """Print a detailed summary of the number of errors in each file."""
+ # Python 2: keys() returns a list, so sorting in place is fine.
+ keys = self._file_table.keys()
+ keys.sort()
+ for filename in keys:
+ print '%s: %d' % (filename, self._file_table[filename])
View
105 tools/closure_linter-2.2.7/closure_linter/common/filetestcase.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Test case that runs a checker on a file, matching errors against annotations.
+
+Runs the given checker on the given file, accumulating all errors. The list
+of errors is then matched against those annotated in the file. Based heavily
+on devtools/javascript/gpylint/full_test.py.
+"""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+import re
+
+import unittest as googletest
+from closure_linter.common import erroraccumulator
+
+
+class AnnotatedFileTestCase(googletest.TestCase):
+ """Test case to run a linter against a single file."""
+
+ # Matches an all caps letters + underscores error identifier
+ _MESSAGE = {'msg': '[A-Z][A-Z_]+'}
+ # Matches a //, followed by an optional line number with a +/-, followed by a
+ # list of message IDs. Used to extract expected messages from testdata files.
+ # TODO(robbyw): Generalize to use different commenting patterns.
+ _EXPECTED_RE = re.compile(r'\s*//\s*(?:(?P<line>[+-]?[0-9]+):)?'
+ r'\s*(?P<msgs>%(msg)s(?:,\s*%(msg)s)*)' % _MESSAGE)
+
+ def __init__(self, filename, runner, converter):
+ """Create a single file lint test case.
+
+ Args:
+ filename: Filename to test.
+ runner: Object implementing the LintRunner interface that lints a file.
+ converter: Function taking an error string and returning an error code.
+ """
+
+ googletest.TestCase.__init__(self, 'runTest')
+ self._filename = filename
+ self._messages = []
+ self._runner = runner
+ self._converter = converter
+
+ def shortDescription(self):
+ """Provides a description for the test."""
+ return 'Run linter on %s' % self._filename
+
+ def runTest(self):
+ """Runs the test."""
+ try:
+ filename = self._filename
+ stream = open(filename)
+ except IOError, ex:
+ raise IOError('Could not find testdata resource for %s: %s' %
+ (self._filename, ex))
+
+ expected = self._GetExpectedMessages(stream)
+ got = self._ProcessFileAndGetMessages(filename)
+ self.assertEqual(expected, got)
+
+ def _GetExpectedMessages(self, stream):
+ """Parse a file and get a sorted list of expected messages."""
+ messages = []
+ for i, line in enumerate(stream):
+ match = self._EXPECTED_RE.search(line)
+ if match:
+ line = match.group('line')
+ msg_ids = match.group('msgs')
+ # No explicit line number: the annotation refers to its own line.
+ if line is None:
+ line = i + 1
+ elif line.startswith('+') or line.startswith('-'):
+ # Signed number: relative offset from the annotation's own line.
+ line = i + 1 + int(line)
+ else:
+ line = int(line)
+ for msg_id in msg_ids.split(','):
+ # Ignore a spurious message from the license preamble.
+ if msg_id != 'WITHOUT':
+ messages.append((line, self._converter(msg_id.strip())))
+ # Rewind so the stream could be re-read from the start if needed.
+ stream.seek(0)
+ messages.sort()
+ return messages
+
+ def _ProcessFileAndGetMessages(self, filename):
+ """Trap gpylint's output parse it to get messages added."""
+ errors = erroraccumulator.ErrorAccumulator()
+ self._runner.Run([filename], errors)
+
+ errors = errors.GetErrors()
+ errors.sort()
+ return errors
View
170 tools/closure_linter-2.2.7/closure_linter/common/htmlutil.py
@@ -0,0 +1,170 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for dealing with HTML."""
+
+__author__ = ('robbyw@google.com (Robert Walker)')
+
+import cStringIO
+import formatter
+import htmllib
+import HTMLParser
+import re
+
+
class ScriptExtractor(htmllib.HTMLParser):
  """HTML parser that pulls inline script bodies out of an HTML document.

  Newlines are emitted in place of non-script content so that line numbers
  in the extracted code match the line numbers of the original HTML.
  """

  def __init__(self):
    """Initializes the extractor with a formatter that discards output."""
    htmllib.HTMLParser.__init__(self, formatter.NullFormatter())
    self._in_script = False
    self._text = ''

  def start_script(self, attrs):
    """Internal handler for the start of a script tag.

    Args:
      attrs: The attributes of the script tag, as a list of tuples.
    """
    # Tags with a src attribute reference external code; nothing to extract.
    for attribute in attrs:
      if attribute[0].lower() == 'src':
        return
    self._in_script = True

  def end_script(self):
    """Internal handler for the end of a script tag."""
    self._in_script = False

  def handle_data(self, data):
    """Internal handler for character data.

    Args:
      data: The character data from the HTML file.
    """
    if not self._in_script:
      self._AppendNewlines(data)
      return
    # If the data's last line is whitespace only (indentation used to align
    # the closing </script> tag), strip that trailing whitespace.
    if data.rstrip(' \t') != data.rstrip(' \t\n\r\f'):
      data = data.rstrip(' \t')
    self._text += data

  def handle_comment(self, data):
    """Internal handler for HTML comments.

    Args:
      data: The text of the comment.
    """
    self._AppendNewlines(data)

  def _AppendNewlines(self, data):
    """Appends one newline per line break contained in the given string.

    This keeps line numbers of the extracted script in sync with the
    original HTML, so reported errors point at the right lines.

    Args:
      data: The data to count newlines in.
    """
    # Padding both ends with 'x' makes splitlines() count exactly the
    # number of line breaks, regardless of leading/trailing breaks.
    self._text += '\n' * (len(('x' + data + 'x').splitlines()) - 1)

  def GetScriptLines(self):
    """Return the extracted script lines.

    Returns:
      The extracted script lines as a list of strings.
    """
    return self._text.splitlines()
+
+
def GetScriptLines(f):
  """Extract script tag contents from the given HTML file.

  Args:
    f: The HTML file.

  Returns:
    Lines in the HTML file that are from script tags, with non-script
    content replaced by blank lines so line numbers are preserved.
  """
  # The HTML parser chokes on text like Array.<!string>, so we patch
  # that bug by replacing the < with &lt; - escaping all text inside script
  # tags would be better but it's a bit of a catch 22.
  markup = f.read()
  markup = re.sub(r'<([^\s\w/])',
                  lambda match: '&lt;' + match.group(1),
                  markup)

  extractor = ScriptExtractor()
  extractor.feed(markup)
  extractor.close()
  return extractor.GetScriptLines()
+
+
+def StripTags(str):
+ """Returns the string with HTML tags stripped.
+
+ Args:
+ str: An html string.
+
+ Returns:
+ The html string with all tags stripped. If there was a parse error, returns
+ the text successfully parsed so far.
+ """
+ # Brute force approach to stripping as much HTML as possible. If there is a
+ # parsing error, don't strip text before parse error position, and continue
+ # trying from there.
+ final_text = ''
+ finished = False
+ while not finished:
+ try:
+ strip = _HtmlStripper()
+ strip.feed(str)
+ strip.close()
+ str = strip.get_output()
+ final_text += str
+ finished = True
+ except HTMLParser.HTMLParseError, e:
+ final_text += str[:e.offset]
+ str = str[e.offset + 1:]
+
+ return final_text
+
+
class _HtmlStripper(HTMLParser.HTMLParser):
  """Simple parser that strips tags by collecting only character data.

  Tag callbacks are inherited no-ops, so feeding markup through this class
  and reading get_output() yields the input with all tags removed.
  """

  def __init__(self):
    # reset() is used instead of HTMLParser.__init__, preserved from the
    # original implementation.
    self.reset()
    self.__output = cStringIO.StringIO()

  def handle_data(self, data):
    """Buffers character data encountered outside of tags."""
    self.__output.write(data)

  def get_output(self):
    """Returns all character data collected so far."""
    return self.__output.getvalue()
View
39 tools/closure_linter-2.2.7/closure_linter/common/lintrunner.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Interface for a lint running wrapper."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+
class LintRunner(object):
  """Interface for a lint running wrapper."""

  def __init__(self):
    # Refuse direct instantiation; only concrete subclasses are usable.
    if type(self) is LintRunner:
      raise NotImplementedError('class LintRunner is abstract')

  def Run(self, filenames, error_handler):
    """Run a linter on the given filenames.

    Args:
      filenames: The filenames to check
      error_handler: An ErrorHandler object

    Returns:
      The error handler, which may have been used to collect error info.
    """
View
60 tools/closure_linter-2.2.7/closure_linter/common/matcher.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Regular expression based JavaScript matcher classes."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+from closure_linter.common import position
+from closure_linter.common import tokens
+
+# Shorthand
+Token = tokens.Token
+Position = position.Position
+
+
class Matcher(object):
  """A token matcher.

  Pairs a regular expression with the token type it produces, the tokenizer
  mode to switch to after a match, and whether it may only match at the
  start of a line.

  Modes let more advanced grammars be incorporated and are also necessary
  to tokenize line by line: different pattern sets apply in different
  modes, e.g. looking for documentation while in comment mode.

  Attributes:
    regex: The regular expression representing this matcher.
    type: The type of token indicated by a successful match.
    result_mode: The mode to move to after a successful match.
  """

  def __init__(self, regex, token_type, result_mode=None, line_start=False):
    """Create a new matcher template.

    Args:
      regex: The regular expression to match.
      token_type: The type of token a successful match indicates.
      result_mode: What mode to change to after a successful match.
          Defaults to None, meaning the current mode is kept.
      line_start: Whether this matcher should only match string at the
          start of a line.
    """
    self.line_start = line_start
    self.result_mode = result_mode
    self.type = token_type
    self.regex = regex
View
126 tools/closure_linter-2.2.7/closure_linter/common/position.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Classes to represent positions within strings."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+
class Position(object):
  """Object representing a segment of a string.

  Attributes:
    start: The index in to the string where the segment starts.
    length: The length of the string segment.
  """

  def __init__(self, start, length):
    """Initialize the position object.

    Args:
      start: The start index.
      length: The number of characters to include.
    """
    self.start = start
    self.length = length

  def Get(self, string):
    """Returns the substring of the given string covered by this position.

    Args:
      string: The string to slice.

    Returns:
      The string within the range specified by this object.
    """
    end = self.start + self.length
    return string[self.start:end]

  def Set(self, target, source):
    """Replaces this range within the target string with the source string.

    Args:
      target: The target string.
      source: The source string.

    Returns:
      The resulting string
    """
    prefix = target[:self.start]
    suffix = target[self.start + self.length:]
    return prefix + source + suffix

  def AtEnd(string):
    """Create a zero-length Position at the end of the given string.

    Args:
      string: The string to represent the end of.

    Returns:
      The created Position object.
    """
    return Position(len(string), 0)
  AtEnd = staticmethod(AtEnd)

  def IsAtEnd(self, string):
    """Returns whether this position is at the end of the given string.

    Args:
      string: The string to test for the end of.

    Returns:
      Whether this position is at the end of the given string.
    """
    return self.length == 0 and self.start == len(string)

  def AtBeginning():
    """Create a zero-length Position at the beginning of any string.

    Returns:
      The created Position object.
    """
    return Position(0, 0)
  AtBeginning = staticmethod(AtBeginning)

  def IsAtBeginning(self):
    """Returns whether this position is at the beginning of any string.

    Returns:
      Whether this position is at the beginning of any string.
    """
    return self.length == 0 and self.start == 0

  def All(string):
    """Create a Position spanning the entire string.

    Args:
      string: The string to represent the entirety of.

    Returns:
      The created Position object.
    """
    return Position(0, len(string))
  All = staticmethod(All)

  def Index(index):
    """Returns a length-one Position covering the specified index.

    Args:
      index: The index to select, inclusively.

    Returns:
      The created Position object.
    """
    return Position(index, 1)
  Index = staticmethod(Index)
View
190 tools/closure_linter-2.2.7/closure_linter/common/simplefileflags.py
@@ -0,0 +1,190 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Determines the list of files to be checked from command line arguments."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+import glob
+import os
+import re
+
+import gflags as flags
+
+
+FLAGS = flags.FLAGS
+
+# NOTE(review): the default values ('_demos') and ('deps.js') below are
+# plain strings, not one-element tuples -- ('_demos') == '_demos'.
+# DEFINE_list appears to accept a comma-separated string default, so the
+# behavior is a single-element list, but the parentheses are misleading;
+# confirm against the gflags documentation before changing.
+flags.DEFINE_multistring(
+    'recurse',
+    None,
+    'Recurse in to the subdirectories of the given path',
+    short_name='r')
+flags.DEFINE_list(
+    'exclude_directories',
+    ('_demos'),
+    'Exclude the specified directories (only applicable along with -r or '
+    '--presubmit)',
+    short_name='e')
+flags.DEFINE_list(
+    'exclude_files',
+    ('deps.js'),
+    'Exclude the specified files',
+    short_name='x')
+
+
def MatchesSuffixes(filename, suffixes):
  """Returns whether the given filename matches one of the given suffixes.

  Args:
    filename: Filename to check.
    suffixes: Sequence of suffixes to check.

  Returns:
    Whether the given filename matches one of the given suffixes.
  """
  # Everything from the final '.' onward; with no dot, rfind() returns -1
  # and the slice degenerates to the final character.
  dot_index = filename.rfind('.')
  return filename[dot_index:] in suffixes
+
+
def _GetUserSpecifiedFiles(argv, suffixes):
  """Returns files to be linted, specified directly on the command line.

  Can handle the '*' wildcard in filenames, but no other wildcards.

  Args:
    argv: Sequence of command line arguments. The second and following
        arguments are assumed to be files that should be linted.
    suffixes: Expected suffixes for the file type being checked.

  Returns:
    A sequence of files to be linted.
  """
  candidates = []

  # Expand any '*' wildcards the shell did not.
  for arg in argv[1:]:
    if '*' in arg:
      candidates.extend(glob.glob(arg))
    else:
      candidates.append(arg)

  return [f for f in candidates if MatchesSuffixes(f, suffixes)]
+
+
def _GetRecursiveFiles(suffixes):
  """Returns files to be checked specified by the --recurse flag.

  Args:
    suffixes: Expected suffixes for the file type being checked.

  Returns:
    A list of files to be checked.
  """
  lint_files = []
  # Walk every requested root directory and keep matching files.
  if FLAGS.recurse:
    for root_dir in FLAGS.recurse:
      for dirpath, _, filenames in os.walk(root_dir):
        for filename in filenames:
          if MatchesSuffixes(filename, suffixes):
            lint_files.append(os.path.join(dirpath, filename))
  return lint_files
+
+
def GetAllSpecifiedFiles(argv, suffixes):
  """Returns all files specified by the user on the commandline.

  Args:
    argv: Sequence of command line arguments. The second and following
        arguments are assumed to be files that should be linted.
    suffixes: Expected suffixes for the file type

  Returns:
    A list of all files specified directly or indirectly (via flags) on
    the command line by the user.
  """
  # Files named directly, plus anything found via --recurse.
  files = _GetUserSpecifiedFiles(argv, suffixes)
  if FLAGS.recurse:
    files += _GetRecursiveFiles(suffixes)

  return FilterFiles(files)
+
+
def FilterFiles(files):
  """Filters the list of files to be linted be removing any excluded files.

  Filters out files excluded using --exclude_files and --exclude_directories.

  Args:
    files: Sequence of files that needs filtering.

  Returns:
    A set (not a list) of absolute paths of the files to be linted;
    duplicates are removed by the set conversion.
  """
  num_files = len(files)

  # One regex per excluded directory, matching it as a path component.
  ignore_dirs_regexs = []
  for ignore in FLAGS.exclude_directories:
    ignore_dirs_regexs.append(re.compile(r'(^|[\\/])%s[\\/]' % ignore))

  result_files = []
  for f in files:
    add_file = True
    # Excluded when the basename matches an --exclude_files entry.
    for exclude in FLAGS.exclude_files:
      if f.endswith('/' + exclude) or f == exclude:
        add_file = False
        break
    # Excluded when any path component matches --exclude_directories.
    for ignore in ignore_dirs_regexs:
      if ignore.search(f):
        # Break out of ignore loop so we don't add to
        # filtered files.
        add_file = False
        break
    if add_file:
      # Convert everything to absolute paths so we can easily remove duplicates
      # using a set.
      result_files.append(os.path.abspath(f))

  skipped = num_files - len(result_files)
  if skipped:
    print 'Skipping %d file(s).' % skipped

  return set(result_files)
+
+
def GetFileList(argv, file_type, suffixes):
  """Parse the flags and return the list of files to check.

  Args:
    argv: Sequence of command line arguments.
    file_type: Description of the file type being checked.  Currently
        unused; retained for interface compatibility with callers.
    suffixes: Sequence of acceptable suffixes for the file type.

  Returns:
    The sorted list of files to check.
  """
  return sorted(GetAllSpecifiedFiles(argv, suffixes))
+
+
def IsEmptyArgumentList(argv):
  """Returns True when no files were specified directly or via --recurse.

  Args:
    argv: Sequence of command line arguments.

  Returns:
    True if argv names no files and the --recurse flag is unset.
  """
  return not (len(argv[1:]) or FLAGS.recurse)
View
184 tools/closure_linter-2.2.7/closure_linter/common/tokenizer.py
@@ -0,0 +1,184 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Regular expression based lexer."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+from closure_linter.common import tokens
+
+# Shorthand
+Type = tokens.TokenType
+
+
class Tokenizer(object):
  """General purpose tokenizer.

  Attributes:
    mode: The latest mode of the tokenizer. This allows patterns to distinguish
      if they are mid-comment, mid-parameter list, etc.
    matchers: Dictionary of modes to sequences of matchers that define the
      patterns to check at any given time.
    default_types: Dictionary of modes to types, defining what type to give
      non-matched text when in the given mode. Defaults to Type.NORMAL.
  """

  def __init__(self, starting_mode, matchers, default_types):
    """Initialize the tokenizer.

    Args:
      starting_mode: Mode to start in.
      matchers: Dictionary of modes to sequences of matchers that defines the
        patterns to check at any given time.
      default_types: Dictionary of modes to types, defining what type to give
        non-matched text when in the given mode. Defaults to Type.NORMAL.
    """
    self.__starting_mode = starting_mode
    self.matchers = matchers
    self.default_types = default_types

  def TokenizeFile(self, file):
    """Tokenizes the given file.

    Args:
      file: An iterable that yields one line of the file at a time.

    Returns:
      The first token in the file, head of a doubly linked token list.
    """
    # The current mode.
    self.mode = self.__starting_mode
    # The first token in the stream.
    self.__first_token = None
    # The last token added to the token stream.
    self.__last_token = None
    # The current line number.
    self.__line_number = 0

    for line in file:
      self.__line_number += 1
      self.__TokenizeLine(line)

    return self.__first_token

  def _CreateToken(self, string, token_type, line, line_number, values=None):
    """Creates a new Token object (or subclass).

    Subclasses may override this to produce language-specific token types.

    Args:
      string: The string of input the token represents.
      token_type: The type of token.
      line: The text of the line this token is in.
      line_number: The line number of the token.
      values: A dict of named values within the token. For instance, a
        function declaration may have a value called 'name' which captures the
        name of the function.

    Returns:
      The newly created Token object.
    """
    return tokens.Token(string, token_type, line, line_number, values)

  def __TokenizeLine(self, line):
    """Tokenizes the given line.

    Args:
      line: The contents of the line.
    """
    # Strip only trailing line-break characters; other whitespace is
    # significant to the matchers.
    string = line.rstrip('\n\r\f')
    line_number = self.__line_number
    # Column index for the next token added on this line (see __AddToken).
    self.__start_index = 0

    if not string:
      self.__AddToken(self._CreateToken('', Type.BLANK_LINE, line, line_number))
      return

    normal_token = ''
    index = 0
    while index < len(string):
      # Try the matchers for the current mode in order; the first one that
      # matches at this index wins.
      for matcher in self.matchers[self.mode]:
        if matcher.line_start and index > 0:
          continue

        match = matcher.regex.match(string, index)

        if match:
          # Flush any accumulated unmatched text as a token first, so token
          # order reflects character order.
          if normal_token:
            self.__AddToken(
                self.__CreateNormalToken(self.mode, normal_token, line,
                                         line_number))
            normal_token = ''

          # Add the match.
          self.__AddToken(self._CreateToken(match.group(), matcher.type, line,
                                            line_number, match.groupdict()))

          # Change the mode to the correct one for after this match.
          self.mode = matcher.result_mode or self.mode

          # Shorten the string to be matched.
          index = match.end()

          break

      else:
        # If the for loop finishes naturally (i.e. no matches) we just add the
        # first character to the string of consecutive non match characters.
        # These will constitute a NORMAL token.
        if string:
          normal_token += string[index:index + 1]
          index += 1

    # Flush any trailing unmatched text at the end of the line.
    if normal_token:
      self.__AddToken(
          self.__CreateNormalToken(self.mode, normal_token, line, line_number))

  def __CreateNormalToken(self, mode, string, line, line_number):
    """Creates a normal token.

    Args:
      mode: The current mode.
      string: The string to tokenize.
      line: The line of text.
      line_number: The line number within the file.

    Returns:
      A Token object, of the default type for the current mode.
    """
    # Note: 'type' shadows the builtin here; kept as-is.
    type = Type.NORMAL
    if mode in self.default_types:
      type = self.default_types[mode]
    return self._CreateToken(string, type, line, line_number)

  def __AddToken(self, token):
    """Add the given token to the token stream.

    Args:
      token: The token to add.
    """
    # Store the first token, or point the previous token to this one.
    if not self.__first_token:
      self.__first_token = token
    else:
      self.__last_token.next = token

    # Establish the doubly linked list
    token.previous = self.__last_token
    self.__last_token = token

    # Compute the character indices: start_index is the token's starting
    # column within its line (reset per line in __TokenizeLine).
    token.start_index = self.__start_index
    self.__start_index += token.length
View
125 tools/closure_linter-2.2.7/closure_linter/common/tokens.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Classes to represent tokens and positions within them."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+
class TokenType(object):
  """Enumeration of token types shared by all languages."""
  NORMAL = 'normal'
  WHITESPACE = 'whitespace'
  BLANK_LINE = 'blank line'
+
+
class Token(object):
  """Token class for intelligent text splitting.

  A token is a typed run of characters; tokens are linked to their
  neighbours so a tokenized file forms a doubly linked list.

  Attributes:
    type: The type of token.
    string: The characters the token comprises.
    length: The length of the token.
    line: The text of the line the token is found in.
    line_number: The number of the line the token is found in.
    values: Dictionary of values returned from the tokens regex match.
    previous: The token before this one.
    next: The token after this one.
    start_index: The character index in the line where this token starts.
    attached_object: Object containing more information about this token.
    metadata: Object containing metadata about this token. Must be added by
      a separate metadata pass.
  """

  def __init__(self, string, token_type, line, line_number, values=None):
    """Creates a new Token object.

    Args:
      string: The string of input the token contains.
      token_type: The type of token.
      line: The text of the line this token is in.
      line_number: The line number of the token.
      values: A dict of named values within the token. For instance, a
        function declaration may have a value called 'name' which captures the
        name of the function.
    """
    self.string = string
    self.type = token_type
    self.line = line
    self.line_number = line_number
    self.values = values
    self.length = len(string)

    # Linkage and position can only be computed once the file is fully
    # tokenized.
    self.previous = None
    self.next = None
    self.start_index = None

    # This part is set in statetracker.py
    # TODO(robbyw): Wrap this in to metadata
    self.attached_object = None

    # This part is set in *metadatapass.py
    self.metadata = None

  def IsFirstInLine(self):
    """Tests if this token is the first token in its line.

    Returns:
      Whether the token is the first token in its line.
    """
    predecessor = self.previous
    return not predecessor or predecessor.line_number != self.line_number

  def IsLastInLine(self):
    """Tests if this token is the last token in its line.

    Returns:
      Whether the token is the last token in its line.
    """
    successor = self.next
    return not successor or successor.line_number != self.line_number

  def IsType(self, token_type):
    """Tests if this token is of the given type.

    Args:
      token_type: The type to test for.

    Returns:
      True if the type of this token matches the type passed in.
    """
    return self.type == token_type

  def IsAnyType(self, *token_types):
    """Tests if this token is any of the given types.

    Args:
      token_types: The types to check. Also accepts a single array.

    Returns:
      True if the type of this token is any of the types passed in.
    """
    # A single sequence argument means "match any element of it".
    if isinstance(token_types[0], basestring):
      return self.type in token_types
    return self.type in token_types[0]

  def __repr__(self):
    return '<Token: %s, "%s", %r, %d, %r>' % (self.type, self.string,
                                              self.values, self.line_number,
                                              self.metadata)
View
754 tools/closure_linter-2.2.7/closure_linter/ecmalintrules.py
@@ -0,0 +1,754 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Core methods for checking EcmaScript files for common style guide violations.
+"""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)',
+ 'jacobr@google.com (Jacob Richman)')
+
+import re
+
+from closure_linter import checkerbase
+from closure_linter import ecmametadatapass
+from closure_linter import errors
+from closure_linter import indentation
+from closure_linter import javascripttokens
+from closure_linter import javascripttokenizer
+from closure_linter import statetracker
+from closure_linter import tokenutil
+from closure_linter.common import error
+from closure_linter.common import htmlutil
+from closure_linter.common import lintrunner
+from closure_linter.common import position
+from closure_linter.common import tokens
+import gflags as flags
+
+FLAGS = flags.FLAGS
+flags.DEFINE_boolean('strict', False,
+ 'Whether to validate against the stricter Closure style.')
+flags.DEFINE_list('custom_jsdoc_tags', '', 'Extra jsdoc tags to allow')
+
+# TODO(robbyw): Check for extra parens on return statements
+# TODO(robbyw): Check for 0px in strings
+# TODO(robbyw): Ensure inline jsDoc is in {}
+# TODO(robbyw): Check for valid JS types in parameter docs
+
+# Shorthand
+Context = ecmametadatapass.EcmaContext
+Error = error.Error
+Modes = javascripttokenizer.JavaScriptModes
+Position = position.Position
+Type = javascripttokens.JavaScriptTokenType
+
+class EcmaScriptLintRules(checkerbase.LintRulesBase):
+ """EmcaScript lint style checking rules.
+
+ Can be used to find common style errors in JavaScript, ActionScript and other
+ Ecma like scripting languages. Style checkers for Ecma scripting languages
+ should inherit from this style checker.
+ Please do not add any state to EcmaScriptLintRules or to any subclasses.
+
+ All state should be added to the StateTracker subclass used for a particular
+ language.
+ """
+
+ # Static constants.
+ MAX_LINE_LENGTH = 120
+
+ MISSING_PARAMETER_SPACE = re.compile(r',\S')
+
+ EXTRA_SPACE = re.compile('(\(\s|\s\))')
+
+ ENDS_WITH_SPACE = re.compile('\s$')
+
+ ILLEGAL_TAB = re.compile(r'\t')
+
+ # Regex used to split up complex types to check for invalid use of ? and |.
+ TYPE_SPLIT = re.compile(r'[,<>()]')
+
+ # Regex for form of author lines after the @author tag.
+ AUTHOR_SPEC = re.compile(r'(\s*)[^\s]+@[^(\s]+(\s*)\(.+\)')
+
+ # Acceptable tokens to remove for line too long testing.
+ LONG_LINE_IGNORE = frozenset(['*', '//', '@see'] +
+ ['@%s' % tag for tag in statetracker.DocFlag.HAS_TYPE])
+
def __init__(self):
    """Initialize this lint rule object."""
    # All shared setup lives in the base class; this subclass adds no
    # construction-time state of its own.
    checkerbase.LintRulesBase.__init__(self)
+
def Initialize(self, checker, limited_doc_checks, is_html):
    """Initialize this lint rule object before parsing a new file.

    Args:
      checker: The checker driving this lint pass.
      limited_doc_checks: Whether doc checking is relaxed for this file.
      is_html: Whether the file being parsed is an HTML file.
    """
    checkerbase.LintRulesBase.Initialize(self, checker, limited_doc_checks,
                                         is_html)
    # A fresh IndentationRules instance is created per file so indentation
    # tracking starts clean for each parse.
    self._indentation = indentation.IndentationRules()
+
def HandleMissingParameterDoc(self, token, param_name):
    """Handle errors associated with a parameter missing a @param tag.

    Subclasses are expected to override this; the base implementation
    always raises.

    Args:
      token: The token at which the missing documentation was detected.
      param_name: The name of the undocumented parameter.

    Raises:
      TypeError: Always, since this method is abstract here.
    """
    raise TypeError('Abstract method HandleMissingParameterDoc not implemented')
+
def _CheckLineLength(self, last_token, state):
    """Checks whether the line is too long.

    Args:
      last_token: The last token in the line.
      state: parser_state object that indicates the current state in the page.
    """
    # Start from the last token so that we have the flag object attached to
    # and DOC_FLAG tokens.
    line_number = last_token.line_number
    token = last_token

    # Build a representation of the string where spaces indicate potential
    # line-break locations.
    line = []
    while token and token.line_number == line_number:
      if state.IsTypeToken(token):
        line.insert(0, 'x' * len(token.string))
      elif token.type in (Type.IDENTIFIER, Type.NORMAL):
        # Dots are acceptable places to wrap.
        line.insert(0, token.string.replace('.', ' '))
      else:
        line.insert(0, token.string)
      token = token.previous

    line = ''.join(line)
    line = line.rstrip('\n\r\f')
    try:
      length = len(unicode(line, 'utf-8'))
    except (NameError, LookupError, UnicodeError, TypeError):
      # NameError: `unicode` does not exist on Python 3, where len(line)
      # already counts characters.  The decode errors mean an unknown
      # encoding; the line length may then be wrong, as was originally the
      # case for utf-8 (see bug 1735846).  Accept the default length rather
      # than risk false positives, at the cost of more false negatives.
      # (Was a bare `except:`, which also swallowed unrelated bugs.)
      length = len(line)

    if length > self.MAX_LINE_LENGTH:

      # If the line matches one of the exceptions, then it's ok.
      for long_line_regexp in self.GetLongLineExceptions():
        if long_line_regexp.match(last_token.line):
          return

      # If the line consists of only one "word", or multiple words but all
      # except one are ignoreable, then it's ok.
      parts = set(line.split())

      # We allow two "words" (type and name) when the line contains @param.
      # Renamed from `max` to avoid shadowing the builtin.
      max_parts = 1
      if '@param' in parts:
        max_parts = 2

      # Custom tags like @requires may have url like descriptions, so ignore
      # the tag, similar to how we handle @see.
      custom_tags = set(['@%s' % f for f in FLAGS.custom_jsdoc_tags])
      if len(parts.difference(self.LONG_LINE_IGNORE | custom_tags)) > max_parts:
        self._HandleError(errors.LINE_TOO_LONG,
            'Line too long (%d characters).' % len(line), last_token)
+
def _CheckJsDocType(self, token):
    """Checks the given type for style errors.

    Args:
      token: The DOC_FLAG token for the flag whose type to check.
    """
    flag = token.attached_object
    # Renamed local from `type` to avoid shadowing the builtin.  The
    # original guard `type and type is not None` was redundant: truthiness
    # already excludes None.
    flag_type = flag.type
    if flag_type and not flag_type.isspace():
      pieces = self.TYPE_SPLIT.split(flag_type)
      if len(pieces) == 1 and flag_type.count('|') == 1 and (
          flag_type.endswith('|null') or flag_type.startswith('null|')):
        self._HandleError(errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL,
            'Prefer "?Type" to "Type|null": "%s"' % flag_type, token)

      for p in pieces:
        if p.count('|') and p.count('?'):
          # TODO(robbyw): We should do actual parsing of JsDoc types. As is,
          # this won't report an error for {number|Array.<string>?}, etc.
          self._HandleError(errors.JSDOC_ILLEGAL_QUESTION_WITH_PIPE,
              'JsDoc types cannot contain both "?" and "|": "%s"' % p, token)

      if FLAGS.strict and (flag.type_start_token.type != Type.DOC_START_BRACE or
                           flag.type_end_token.type != Type.DOC_END_BRACE):
        self._HandleError(errors.MISSING_BRACES_AROUND_TYPE,
            'Type must always be surrounded by curly braces.', token)
+
def _CheckForMissingSpaceBeforeToken(self, token):
    """Checks for a missing space at the beginning of a token.

    Reports a MISSING_SPACE error if the token does not begin with a space or
    the previous token doesn't end with a space and the previous token is on
    the same line as the token.

    Args:
      token: The token being checked
    """
    # TODO(user): Check if too many spaces?
    prev = token.previous
    token_starts_flush = len(token.string) == len(token.string.lstrip())
    if not (token_starts_flush and prev):
      return
    if token.line_number != prev.line_number:
      return
    trailing_space_count = len(prev.string) - len(prev.string.rstrip())
    if trailing_space_count == 0:
      self._HandleError(
          errors.MISSING_SPACE,
          'Missing space before "%s"' % token.string,
          token,
          Position.AtBeginning())
+
def _ExpectSpaceBeforeOperator(self, token):
    """Returns whether a space should appear before the given operator token.

    Args:
      token: The operator token.

    Returns:
      Whether there should be a space before the token.
    """
    metadata = token.metadata
    if token.string == ',':
      return False
    if metadata.IsUnaryPostOperator():
      return False

    # Colons should appear in labels, object literals, the case of a switch
    # statement, and ternary operator. Only want a space in the case of the
    # ternary operator.
    no_space_colon_contexts = (Context.LITERAL_ELEMENT,
                               Context.CASE_BLOCK,
                               Context.STATEMENT)
    if token.string == ':' and metadata.context.type in no_space_colon_contexts:
      return False

    if metadata.IsUnaryOperator() and token.IsFirstInLine():
      return False

    return True
+
+ def CheckToken(self, token, state):
+ """Checks a token, given the current parser_state, for warnings and errors.
+
+ Args:
+ token: The current token under consideration
+ state: parser_state object that indicates the current state in the page
+ """
+ # Store some convenience variables
+ first_in_line = token.IsFirstInLine()
+ last_in_line = token.IsLastInLine()
+ last_non_space_token = state.GetLastNonSpaceToken()
+
+ type = token.type
+
+ # Process the line change.
+ if not self._is_html and FLAGS.strict:
+ # TODO(robbyw): Support checking indentation in HTML files.
+ indentation_errors = self._indentation.CheckToken(token, state)
+ for indentation_error in indentation_errors:
+ self._HandleError(*indentation_error)
+
+ if last_in_line:
+ self._CheckLineLength(token, state)
+
+ if type == Type.PARAMETERS:
+ # Find missing spaces in parameter lists.
+ if self.MISSING_PARAMETER_SPACE.search(token.string):
+ self._HandleError(errors.MISSING_SPACE, 'Missing space after ","',
+ token)
+
+ # Find extra spaces at the beginning of parameter lists. Make sure
+ # we aren't at the beginning of a continuing multi-line list.
+ if not first_in_line:
+ space_count = len(token.string) - len(token.string.lstrip())
+ if space_count:
+ self._HandleError(errors.EXTRA_SPACE, 'Extra space after "("',
+ token, Position(0, space_count))
+
+ elif (type == Type.START_BLOCK and
+ token.metadata.context.type == Context.BLOCK):
+ self._CheckForMissingSpaceBeforeToken(token)
+
+ elif type == Type.END_BLOCK:
+ # This check is for object literal end block tokens, but there is no need
+ # to test that condition since a comma at the end of any other kind of
+ # block is undoubtedly a parse error.
+ last_code = token.metadata.last_code
+ if last_code.IsOperator(','):
+ self._HandleError(errors.COMMA_AT_END_OF_LITERAL,
+ 'Illegal comma at end of object literal', last_code,
+ Position.All(last_code.string))
+
+ if state.InFunction() and state.IsFunctionClose():
+ is_immediately_called = (token.next and
+ token.next.type == Type.START_PAREN)
+ if state.InTopLevelFunction():
+ # When the function was top-level and not immediately called, check
+ # that it's terminated by a semi-colon.
+ if state.InAssignedFunction():
+ if not is_immediately_called and (last_in_line or
+ not token.next.type == Type.SEMICOLON):
+ self._HandleError(errors.MISSING_SEMICOLON_AFTER_FUNCTION,
+ 'Missing semicolon after function assigned to a variable',
+ token, Position.AtEnd(token.string))
+ else:
+ if not last_in_line and token.next.type == Type.SEMICOLON:
+ self._HandleError(errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
+ 'Illegal semicolon after function declaration',
+ token.next, Position.All(token.next.string))
+
+ if (state.InInterfaceMethod() and last_code.type != Type.START_BLOCK):
+ self._HandleError(errors.INTERFACE_METHOD_CANNOT_HAVE_CODE,
+ 'Interface methods cannot contain code', last_code)
+
+ elif (state.IsBlockClose() and
+ token.next and token.next.type == Type.SEMICOLON):
+ self._HandleError(errors.REDUNDANT_SEMICOLON,
+ 'No semicolon is required to end a code block',
+ token.next, Position.All(token.next.string))
+
+ elif type == Type.SEMICOLON:
+ if token.previous and token.previous.type == Type.WHITESPACE:
+ self._HandleError(errors.EXTRA_SPACE, 'Extra space before ";"',
+ token.previous, Position.All(token.previous.string))
+
+ if token.next and token.next.line_number == token.line_number:
+ if token.metadata.context.type != Context.FOR_GROUP_BLOCK:
+ # TODO(robbyw): Error about no multi-statement lines.
+ pass
+
+ elif token.next.type not in (
+ Type.WHITESPACE, Type.SEMICOLON, Type.END_PAREN):
+ self._HandleError(errors.MISSING_SPACE,
+ 'Missing space after ";" in for statement',
+ token.next,
+ Position.AtBeginning())
+
+ last_code = token.metadata.last_code
+ if last_code and last_code.type == Type.SEMICOLON:
+ # Allow a single double semi colon in for loops for cases like:
+ # for (;;) { }.
+ # NOTE(user): This is not a perfect check, and will not throw an error
+ # for cases like: for (var i = 0;; i < n; i++) {}, but then your code
+ # probably won't work either.
+ for_token = tokenutil.CustomSearch(last_code,
+ lambda token: token.type == Type.KEYWORD and token.string == 'for',
+ end_func=lambda token: token.type == Type.SEMICOLON,
+ distance=None,
+ reverse=True)
+
+ if not for_token:
+ self._HandleError(errors.REDUNDANT_SEMICOLON, 'Redundant semicolon',
+ token, Position.All(token.string))
+
+ elif type == Type.START_PAREN:
+ if token.previous and token.previous.type == Type.KEYWORD:
+ self._HandleError(errors.MISSING_SPACE, 'Missing space before "("',
+ token, Position.AtBeginning())
+ elif token.previous and token.previous.type == Type.WHITESPACE:
+ before_space = token.previous.previous
+ if (before_space and before_space.line_number == token.line_number and
+ before_space.type == Type.IDENTIFIER):
+ self._HandleError(errors.EXTRA_SPACE, 'Extra space before "("',
+ token.previous, Position.All(token.previous.string))
+
+ elif type == Type.START_BRACKET:
+ if (not first_in_line and token.previous.type == Type.WHITESPACE and
+ last_non_space_token and
+ last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):
+ self._HandleError(errors.EXTRA_SPACE, 'Extra space before "["',
+ token.previous, Position.All(token.previous.string))
+ # If the [ token is the first token in a line we shouldn't complain
+ # about a missing space before [. This is because some Ecma script
+ # languages allow syntax like:
+ # [Annotation]
+ # class MyClass {...}
+ # So we don't want to blindly warn about missing spaces before [.
+ # In the the future, when rules for computing exactly how many spaces
+ # lines should be indented are added, then we can return errors for
+ # [ tokens that are improperly indented.
+ # For example:
+ # var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =
+ # [a,b,c];
+ # should trigger a proper indentation warning message as [ is not indented
+ # by four spaces.
+ elif (not first_in_line and token.previous and
+ not token.previous.type in (
+ [Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +
+ Type.EXPRESSION_ENDER_TYPES)):
+ self._HandleError(errors.MISSING_SPACE, 'Missing space before "["',
+ token, Position.AtBeginning())
+
+ elif type in (Type.END_PAREN, Type.END_BRACKET):
+ # Ensure there is no space before closing parentheses, except when
+ # it's in a for statement with an omitted section, or when it's at the
+ # beginning of a line.
+ if (token.previous and token.previous.type == Type.WHITESPACE and
+ not token.previous.IsFirstInLine() and
+ not (last_non_space_token and last_non_space_token.line_number ==
+ token.line_number and
+ last_non_space_token.type == Type.SEMICOLON)):
+ self._HandleError(errors.EXTRA_SPACE, 'Extra space before "%s"' %
+ token.string, token.previous, Position.All(token.previous.string))
+
+ if token.type == Type.END_BRACKET:
+ last_code = token.metadata.last_code
+ if last_code.IsOperator(','):
+ self._HandleError(errors.COMMA_AT_END_OF_LITERAL,
+ 'Illegal comma at end of array literal', last_code,
+ Position.All(last_code.string))
+
+ elif type == Type.WHITESPACE:
+ if self.ILLEGAL_TAB.search(token.string):
+ if token.IsFirstInLine():
+ self._HandleError(errors.ILLEGAL_TAB,
+ 'Illegal tab in whitespace before "%s"' % token.next.string,
+ token, Position.All(token.string))
+ else:
+ self._HandleError(errors.ILLEGAL_TAB,
+ 'Illegal tab in whitespace after "%s"' % token.previous.string,
+ token, Position.All(token.string))
+
+ # Check whitespace length if it's not the first token of the line and
+ # if it's not immediately before a comment.
+ if last_in_line:
+ # Check for extra whitespace at the end of a line.
+ self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',
+ token, Position.All(token.string))
+ elif not first_in_line and not token.next.IsComment():
+ if token.length > 1:
+ self._HandleError(errors.EXTRA_SPACE, 'Extra space after "%s"' %
+ token.previous.string, token,
+ Position(1, len(token.string) - 1))
+
+ elif type == Type.OPERATOR:
+ last_code = token.metadata.last_code
+
+ if not self._ExpectSpaceBeforeOperator(token):
+ if (token.previous and token.previous.type == Type.WHITESPACE and
+ last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER)):
+ self._HandleError(errors.EXTRA_SPACE,
+ 'Extra space before "%s"' % token.string, token.previous,
+ Position.All(token.previous.string))
+
+ elif (token.previous and
+ not token.previous.IsComment() and
+ token.previous.type in Type.EXPRESSION_ENDER_TYPES):
+ self._HandleError(errors.MISSING_SPACE,
+ 'Missing space before "%s"' % token.string, token,
+ Position.AtBeginning())
+
+ # Check that binary operators are not used to start lines.
+ if ((not last_code or last_code.line_number != token.line_number) and
+ not token.metadata.IsUnaryOperator()):
+ self._HandleError(errors.LINE_STARTS_WITH_OPERATOR,
+ 'Binary operator should go on previous line "%s"' % token.string,
+ token)
+
+ elif type == Type.DOC_FLAG:
+ flag = token.attached_object
+
+ if flag.flag_type == 'bug':
+ # TODO(robbyw): Check for exactly 1 space on the left.
+ string = token.next.string.lstrip()
+ string = string.split(' ', 1)[0]
+
+ if not string.isdigit():
+ self._HandleError(errors.NO_BUG_NUMBER_AFTER_BUG_TAG,
+ '@bug should be followed by a bug number', token)
+
+ elif flag.flag_type == 'suppress':
+ if flag.type is None:
+ # A syntactically invalid suppress tag will get tokenized as a normal
+ # flag, indicating an error.
+ self._HandleError(errors.INCORRECT_SUPPRESS_SYNTAX,
+ 'Invalid suppress syntax: should be @suppress {errortype}. '
+ 'Spaces matter.', token)
+ elif flag.type not in state.GetDocFlag().SUPPRESS_TYPES:
+ self._HandleError(errors.INVALID_SUPPRESS_TYPE,
+ 'Invalid suppression type: %s' % flag.type,
+ token)
+
+ elif FLAGS.strict and flag.flag_type == 'author':
+ # TODO(user): In non strict mode check the author tag for as much as
+ # it exists, though the full form checked below isn't required.
+ string = token.next.string
+ result = self.AUTHOR_SPEC.match(string)
+ if not result:
+ self._HandleError(errors.INVALID_AUTHOR_TAG_DESCRIPTION,
+ 'Author tag line should be of the form: '
+ '@author foo@somewhere.com (Your Name)',
+ token.next)
+ else:
+ # Check spacing between email address and name. Do this before
+ # checking earlier spacing so positions are easier to calculate for
+ # autofixing.
+ num_spaces = len(result.group(2))
+ if num_spaces < 1:
+ self._HandleError(errors.MISSING_SPACE,
+ 'Missing space after email address',
+ token.next, Position(result.start(2), 0))
+ elif num_spaces > 1:
+ self._HandleError(errors.EXTRA_SPACE,
+ 'Extra space after email address',
+ token.next,
+ Position(result.start(2) + 1, num_spaces - 1))
+
+ # Check for extra spaces before email address. Can't be too few, if
+ # not at least one we wouldn't match @author tag.
+ num_spaces = len(result.group(1))
+ if num_spaces > 1:
+ self._HandleError(errors.EXTRA_SPACE,
+ 'Extra space before email address',
+ token.next, Position(1, num_spaces - 1))
+
+ elif (flag.flag_type in state.GetDocFlag().HAS_DESCRIPTION and
+ not self._limited_doc_checks):
+ if flag.flag_type == 'param':
+ if flag.name is None:
+ self._HandleError(errors.MISSING_JSDOC_PARAM_NAME,
+ 'Missing name in @param tag', token)
+
+ if not flag.description or flag.description is None:
+ flag_name = token.type
+ if 'name' in token.values:
+ flag_name = '@' + token.values['name']
+ self._HandleError(errors.MISSING_JSDOC_TAG_DESCRIPTION,
+ 'Missing description in %s tag' % flag_name, token)
+ else:
+ self._CheckForMissingSpaceBeforeToken(flag.description_start_token)
+
+ # We want punctuation to be inside of any tags ending a description,
+ # so strip tags before checking description. See bug 1127192. Note
+ # that depending on how lines break, the real description end token
+ # may consist only of stripped html and the effective end token can
+ # be different.
+ end_token = flag.description_end_token
+ end_string = htmlutil.StripTags(end_token.string).strip()
+ while (end_string == '' and not
+ end_token.type in Type.FLAG_ENDING_TYPES):
+ end_token = end_token.previous
+ if end_token.type in Type.FLAG_DESCRIPTION_TYPES:
+ end_string = htmlutil.StripTags(end_token.string).rstrip()
+
+ if not (end_string.endswith('.') or end_string.endswith('?') or
+ end_string.endswith('!')):
+ # Find the position for the missing punctuation, inside of any html
+ # tags.
+ desc_str = end_token.string.rstrip()
+ while desc_str.endswith('>'):
+ start_tag_index = desc_str.rfind('<')
+ if start_tag_index < 0:
+ break
+ desc_str = desc_str[:start_tag_index].rstrip()
+ end_position = Position(len(desc_str), 0)
+
+ self._HandleError(
+ errors.JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER,
+ ('%s descriptions must end with valid punctuation such as a '
+ 'period.' % token.string),
+ end_token, end_position)
+
+ if flag.flag_type in state.GetDocFlag().HAS_TYPE:
+ if flag.type_start_token is not None:
+ self._CheckForMissingSpaceBeforeToken(
+ token.attached_object.type_start_token)
+
+ if flag.type and flag.type != '' and not flag.type.isspace():
+ self._CheckJsDocType(token)
+
+ if type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
+ if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and
+ token.values['name'] not in FLAGS.custom_jsdoc_tags):
+ self._HandleError(errors.INVALID_JSDOC_TAG,
+ 'Invalid JsDoc tag: %s' % token.values['name'], token)
+
+ if (FLAGS.strict and token.values['name'] == 'inheritDoc' and
+ type == Type.DOC_INLINE_FLAG):
+ self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,
+ 'Unnecessary braces around @inheritDoc',
+ token)
+
+ elif type == Type.SIMPLE_LVALUE:
+ identifier = token.values['identifier']
+
+ if ((not state.InFunction() or state.InConstructor()) and
+ not state.InParentheses() and not state.InObjectLiteralDescendant()):
+ jsdoc = state.GetDocComment()
+ if not state.HasDocComment(identifier):
+ # Only test for documentation on identifiers with .s in them to
+ # avoid checking things like simple variables. We don't require
+ # documenting assignments to .prototype itself (bug 1880803).
+ if (not state.InConstructor() and
+ identifier.find('.')