==== File: closure_linter-2.3.13/PKG-INFO ====

Metadata-Version: 1.0
Name: closure_linter
Version: 2.3.13
Summary: Closure Linter
Home-page: http://code.google.com/p/closure-linter
Author: The Closure Linter Authors
Author-email: opensource@google.com
License: Apache
Description: UNKNOWN
Platform: UNKNOWN

==== File: closure_linter-2.3.13/setup.py ====

#!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

try:
  from setuptools import setup
except ImportError:
  from distutils.core import setup

setup(name='closure_linter',
      version='2.3.13',
      description='Closure Linter',
      license='Apache',
      author='The Closure Linter Authors',
      author_email='opensource@google.com',
      url='http://code.google.com/p/closure-linter',
      install_requires=['python-gflags'],
      package_dir={'closure_linter': 'closure_linter'},
      packages=['closure_linter', 'closure_linter.common'],
      entry_points={
          'console_scripts': [
              'gjslint = closure_linter.gjslint:main',
              'fixjsstyle = closure_linter.fixjsstyle:main'
          ]
      })
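The console_scripts entries above are installed as the gjslint and fixjsstyle
commands. A minimal sketch of reaching the same entry point from Python
(--strict is a flag this package defines; the file path is a placeholder):

  import sys
  from closure_linter import gjslint

  sys.argv = ['gjslint', '--strict', 'path/to/file.js']  # placeholder args
  gjslint.main()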
==== File: closure_linter-2.3.13/closure_linter/full_test.py ====

#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Full regression-type (Medium) tests for gjslint.

Tests every error that can be thrown by gjslint.  Based heavily on
devtools/javascript/gpylint/full_test.py
"""

__author__ = ('robbyw@google.com (Robert Walker)',
              'ajp@google.com (Andy Perelson)')

import os
import sys
import unittest

import gflags as flags
import unittest as googletest

from closure_linter import error_check
from closure_linter import errors
from closure_linter import runner
from closure_linter.common import filetestcase

_RESOURCE_PREFIX = 'closure_linter/testdata'

flags.FLAGS.strict = True
flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
                                 'limited_doc_checks.js')
flags.FLAGS.jslint_error = error_check.Rule.ALL

# List of files under testdata to test.
# We need to list files explicitly since pyglib can't list directories.
# TODO(user): Figure out how to list the directory.
_TEST_FILES = [
    'all_js_wrapped.js',
    'blank_lines.js',
    'ends_with_block.js',
    'externs.js',
    'externs_jsdoc.js',
    'goog_scope.js',
    'html_parse_error.html',
    'indentation.js',
    'interface.js',
    'jsdoc.js',
    'limited_doc_checks.js',
    'minimal.js',
    'other.js',
    'provide_blank.js',
    'provide_extra.js',
    'provide_missing.js',
    'require_all_caps.js',
    'require_blank.js',
    'require_extra.js',
    'require_function.js',
    'require_function_missing.js',
    'require_function_through_both.js',
    'require_function_through_namespace.js',
    'require_interface.js',
    'require_interface_base.js',
    'require_lower_case.js',
    'require_missing.js',
    'require_numeric.js',
    'require_provide_blank.js',
    'require_provide_missing.js',
    'require_provide_ok.js',
    'semicolon_missing.js',
    'simple.html',
    'spaces.js',
    'tokenizer.js',
    'unparseable.js',
    'unused_local_variables.js',
    'unused_private_members.js',
    'utf8.html',
]


class GJsLintTestSuite(unittest.TestSuite):
  """Test suite to run a GJsLintTest for each of several files.

  If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
  testdata to test. Otherwise, _TEST_FILES is used.
  """

  def __init__(self, tests=()):
    unittest.TestSuite.__init__(self, tests)

    argv = sys.argv and sys.argv[1:] or []
    if argv:
      test_files = argv
    else:
      test_files = _TEST_FILES
    for test_file in test_files:
      resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
      self.addTest(
          filetestcase.AnnotatedFileTestCase(
              resource_path, runner.Run, errors.ByName))

if __name__ == '__main__':
  # Don't let main parse args; it happens in the TestSuite.
  googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')
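Each testdata file is annotated with the errors it is expected to raise, so a
single file can be exercised outside the suite using the same pieces shown
above (a sketch; indentation.js is one of the _TEST_FILES entries):

  import unittest

  from closure_linter import errors
  from closure_linter import runner
  from closure_linter.common import filetestcase

  suite = unittest.TestSuite()
  suite.addTest(filetestcase.AnnotatedFileTestCase(
      'closure_linter/testdata/indentation.js', runner.Run, errors.ByName))
  unittest.TextTestRunner().run(suite)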
"""Unit tests for the runner module.""" __author__ = ('nnaze@google.com (Nathan Naze)') import StringIO import mox import unittest as googletest from closure_linter import errors from closure_linter import runner from closure_linter.common import error from closure_linter.common import errorhandler from closure_linter.common import tokens class LimitedDocTest(googletest.TestCase): def testIsLimitedDocCheck(self): self.assertTrue(runner._IsLimitedDocCheck('foo_test.js', ['_test.js'])) self.assertFalse(runner._IsLimitedDocCheck('foo_bar.js', ['_test.js'])) self.assertTrue(runner._IsLimitedDocCheck( 'foo_moo.js', ['moo.js', 'quack.js'])) self.assertFalse(runner._IsLimitedDocCheck( 'foo_moo.js', ['woof.js', 'quack.js'])) class RunnerTest(googletest.TestCase): def setUp(self): self.mox = mox.Mox() def testRunOnMissingFile(self): mock_error_handler = self.mox.CreateMock(errorhandler.ErrorHandler) def ValidateError(err): return (isinstance(err, error.Error) and err.code is errors.FILE_NOT_FOUND and err.token is None) mock_error_handler.HandleFile('does_not_exist.js', None) mock_error_handler.HandleError(mox.Func(ValidateError)) mock_error_handler.FinishFile() self.mox.ReplayAll() runner.Run('does_not_exist.js', mock_error_handler) self.mox.VerifyAll() def testBadTokenization(self): mock_error_handler = self.mox.CreateMock(errorhandler.ErrorHandler) def ValidateError(err): return (isinstance(err, error.Error) and err.code is errors.FILE_IN_BLOCK and err.token.string == '}') mock_error_handler.HandleFile('foo.js', mox.IsA(tokens.Token)) mock_error_handler.HandleError(mox.Func(ValidateError)) mock_error_handler.HandleError(mox.IsA(error.Error)) mock_error_handler.FinishFile() self.mox.ReplayAll() source = StringIO.StringIO(_BAD_TOKENIZATION_SCRIPT) runner.Run('foo.js', mock_error_handler, source) self.mox.VerifyAll() _BAD_TOKENIZATION_SCRIPT = """ function foo () { var a = 3; var b = 2; return b + a; /* Comment not closed } """ if __name__ == '__main__': googletest.main() closure_linter-2.3.13/closure_linter/javascriptstatetracker_test.py0000640014730400116100000001653512247733554025447 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2012 The Closure Linter Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for the javascriptstatetracker module.""" # Allow non-Google copyright # pylint: disable=g-bad-file-header __author__ = ('nnaze@google.com (Nathan Naze)') import unittest as googletest from closure_linter import javascripttokens from closure_linter import testutil from closure_linter import tokenutil _FUNCTION_SCRIPT = """\ var a = 3; function foo(aaa, bbb, ccc) { var b = 4; } /** * JSDoc comment. */ var bar = function(ddd, eee, fff) { }; /** * Verify that nested functions get their proper parameters recorded. */ var baz = function(ggg, hhh, iii) { var qux = function(jjj, kkk, lll) { }; // make sure that entering a new block does not change baz' parameters. 
==== File: closure_linter-2.3.13/closure_linter/javascriptstatetracker_test.py ====

#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit tests for the javascriptstatetracker module."""

# Allow non-Google copyright
# pylint: disable=g-bad-file-header

__author__ = ('nnaze@google.com (Nathan Naze)')

import unittest as googletest

from closure_linter import javascripttokens
from closure_linter import testutil
from closure_linter import tokenutil

_FUNCTION_SCRIPT = """\
var a = 3;

function foo(aaa, bbb, ccc) {
  var b = 4;
}


/**
 * JSDoc comment.
 */
var bar = function(ddd, eee, fff) {

};


/**
 * Verify that nested functions get their proper parameters recorded.
 */
var baz = function(ggg, hhh, iii) {
  var qux = function(jjj, kkk, lll) {
  };
  // make sure that entering a new block does not change baz' parameters.
  {};
};
"""


class FunctionTest(googletest.TestCase):

  def testFunctionParse(self):
    functions, _ = testutil.ParseFunctionsAndComments(_FUNCTION_SCRIPT)
    self.assertEquals(4, len(functions))

    # First function
    function = functions[0]
    self.assertEquals(['aaa', 'bbb', 'ccc'], function.parameters)

    start_token = function.start_token
    end_token = function.end_token

    self.assertEquals(
        javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
        function.start_token.type)

    self.assertEquals('function', start_token.string)
    self.assertEquals(3, start_token.line_number)
    self.assertEquals(0, start_token.start_index)

    self.assertEquals('}', end_token.string)
    self.assertEquals(5, end_token.line_number)
    self.assertEquals(0, end_token.start_index)

    self.assertEquals('foo', function.name)

    self.assertIsNone(function.doc)

    # Second function
    function = functions[1]
    self.assertEquals(['ddd', 'eee', 'fff'], function.parameters)

    start_token = function.start_token
    end_token = function.end_token

    self.assertEquals(
        javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
        function.start_token.type)

    self.assertEquals('function', start_token.string)
    self.assertEquals(11, start_token.line_number)
    self.assertEquals(10, start_token.start_index)

    self.assertEquals('}', end_token.string)
    self.assertEquals(13, end_token.line_number)
    self.assertEquals(0, end_token.start_index)

    self.assertEquals('bar', function.name)

    self.assertIsNotNone(function.doc)

    # Check function JSDoc
    doc = function.doc
    doc_tokens = tokenutil.GetTokenRange(doc.start_token, doc.end_token)

    comment_type = javascripttokens.JavaScriptTokenType.COMMENT
    comment_tokens = filter(lambda t: t.type is comment_type, doc_tokens)

    self.assertEquals('JSDoc comment.',
                      tokenutil.TokensToString(comment_tokens).strip())

    # Third function
    function = functions[2]
    self.assertEquals(['ggg', 'hhh', 'iii'], function.parameters)

    start_token = function.start_token
    end_token = function.end_token

    self.assertEquals(
        javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
        function.start_token.type)

    self.assertEquals('function', start_token.string)
    self.assertEquals(19, start_token.line_number)
    self.assertEquals(10, start_token.start_index)

    self.assertEquals('}', end_token.string)
    self.assertEquals(24, end_token.line_number)
    self.assertEquals(0, end_token.start_index)

    self.assertEquals('baz', function.name)
    self.assertIsNotNone(function.doc)

    # Fourth function (inside third function)
    function = functions[3]
    self.assertEquals(['jjj', 'kkk', 'lll'], function.parameters)

    start_token = function.start_token
    end_token = function.end_token

    self.assertEquals(
        javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
        function.start_token.type)

    self.assertEquals('function', start_token.string)
    self.assertEquals(20, start_token.line_number)
    self.assertEquals(12, start_token.start_index)

    self.assertEquals('}', end_token.string)
    self.assertEquals(21, end_token.line_number)
    self.assertEquals(2, end_token.start_index)

    self.assertEquals('qux', function.name)
    self.assertIsNone(function.doc)


class CommentTest(googletest.TestCase):

  def testGetDescription(self):
    comment = self._ParseComment("""
        /**
         * Comment targeting goog.foo.
         *
         * This is the second line.
         * @param {number} foo The count of foo.
         */
        target;""")

    self.assertEqual(
        'Comment targeting goog.foo.\n\nThis is the second line.',
        comment.description)

  def testCommentGetTarget(self):
    self.assertCommentTarget('goog.foo', """
        /**
         * Comment targeting goog.foo.
         */
        goog.foo = 6;
        """)

    self.assertCommentTarget('bar', """
        /**
         * Comment targeting bar.
         */
        var bar = "Karate!";
        """)

    self.assertCommentTarget('doThing', """
        /**
         * Comment targeting doThing.
         */
        function doThing() {};
        """)

    self.assertCommentTarget('this.targetProperty', """
        goog.bar.Baz = function() {
          /**
           * Comment targeting targetProperty.
           */
          this.targetProperty = 3;
        };
        """)

    self.assertCommentTarget('goog.bar.prop', """
        /**
         * Comment targeting goog.bar.prop.
         */
        goog.bar.prop;
        """)

    self.assertCommentTarget('goog.aaa.bbb', """
        /**
         * Comment targeting goog.aaa.bbb.
         */
        (goog.aaa.bbb)
        """)

    self.assertCommentTarget('theTarget', """
        /**
         * Comment targeting symbol preceded by newlines, whitespace,
         * and parens -- things we ignore.
         */
        (theTarget)
        """)

    self.assertCommentTarget(None, """
        /**
         * @fileoverview File overview.
         */
        (notATarget)
        """)

    self.assertCommentTarget(None, """
        /**
         * Comment that doesn't find a target.
         */
        """)

    self.assertCommentTarget('theTarget.is.split.across.lines', """
        /**
         * Comment that addresses a symbol split across lines.
         */
        (theTarget.is.split
             .across.lines)
        """)

    self.assertCommentTarget('theTarget.is.split.across.lines', """
        /**
         * Comment that addresses a symbol split across lines.
         */
        (theTarget.is.split.
             across.lines)
        """)

  def _ParseComment(self, script):
    """Parse a script that contains one comment and return it."""
    _, comments = testutil.ParseFunctionsAndComments(script)
    self.assertEquals(1, len(comments))
    return comments[0]

  def assertCommentTarget(self, target, script):
    comment = self._ParseComment(script)
    self.assertEquals(target, comment.GetTargetIdentifier())


if __name__ == '__main__':
  googletest.main()

==== File: closure_linter-2.3.13/closure_linter/errorrules_test.py ====

#!/usr/bin/env python
#
# Copyright 2013 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Medium tests for the gjslint errorrules.

Currently it's just verifying that warnings can't be disabled.
"""

import gflags as flags
import unittest as googletest

from closure_linter import errors
from closure_linter import runner
from closure_linter.common import erroraccumulator

flags.FLAGS.strict = True
flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')


class ErrorRulesTest(googletest.TestCase):
  """Test case for gjslint errorrules."""

  def testNoMaxLineLengthFlagExists(self):
    """Tests that the --max_line_length flag does not exist."""
    self.assertTrue('max_line_length' not in flags.FLAGS.FlagDict())

  def testGetMaxLineLength(self):
    """Tests that warnings are reported for lines longer than 80."""
    # One line > 100 and one line > 80 and < 100, so this should produce two
    # line-too-long errors.
    original = [
        'goog.require(\'dummy.aa\');',
        '',
        'function a() {',
        '  dummy.aa.i = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13'
        ' + 14 + 15 + 16 + 17 + 18 + 19 + 20;',
        '  dummy.aa.j = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13'
        ' + 14 + 15 + 16 + 17 + 18;',
        '}',
        ''
    ]

    # Expect line too long.
    expected = [errors.LINE_TOO_LONG, errors.LINE_TOO_LONG]

    self._AssertErrors(original, expected)

  def testNoDisableFlagExists(self):
    """Tests that the --disable flag does not exist."""
    self.assertTrue('disable' not in flags.FLAGS.FlagDict())

  def testWarningsNotDisabled(self):
    """Tests that warnings are reported when nothing is disabled."""
    original = [
        'goog.require(\'dummy.aa\');',
        'goog.require(\'dummy.Cc\');',
        'goog.require(\'dummy.Dd\');',
        '',
        'function a() {',
        '  dummy.aa.i = 1;',
        '  dummy.Cc.i = 1;',
        '  dummy.Dd.i = 1;',
        '}',
    ]

    expected = [errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
                errors.FILE_MISSING_NEWLINE]

    self._AssertErrors(original, expected)

  def _AssertErrors(self, original, expected_errors, include_header=True):
    """Asserts that linting the original source produces the expected errors."""
    if include_header:
      original = self._GetHeader() + original

    # Trap gjslint's output and parse it to get the messages added.
    error_accumulator = erroraccumulator.ErrorAccumulator()
    runner.Run('testing.js', error_accumulator, source=original)
    error_nums = [e.code for e in error_accumulator.GetErrors()]

    error_nums.sort()
    expected_errors.sort()
    self.assertListEqual(error_nums, expected_errors)

  def _GetHeader(self):
    """Returns a fake header for a JavaScript file."""
    return [
        '// Copyright 2011 Google Inc. All Rights Reserved.',
        '',
        '/**',
        ' * @fileoverview Fake file overview.',
        ' * @author fake@google.com (Fake Person)',
        ' */',
        ''
    ]


if __name__ == '__main__':
  googletest.main()
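The _AssertErrors pattern above is also the simplest way to drive the linter
programmatically. A minimal sketch (the file name is arbitrary; the source
lines are a placeholder):

  from closure_linter import runner
  from closure_linter.common import erroraccumulator

  accumulator = erroraccumulator.ErrorAccumulator()
  runner.Run('example.js', accumulator, source=['var x = 1;', ''])
  print [e.code for e in accumulator.GetErrors()]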
==== File: closure_linter-2.3.13/closure_linter/requireprovidesorter.py ====

#!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Contains logic for sorting goog.provide and goog.require statements.

Closurized JavaScript files use goog.provide and goog.require statements at
the top of the file to manage dependencies. These statements should be sorted
alphabetically; however, it is common for them to be accompanied by inline
comments or suppression annotations. In order to sort these statements without
disrupting their comments and annotations, the association between statements
and comments/annotations must be maintained while sorting.

  RequireProvideSorter: Handles checking/fixing of provide/require statements.
"""

from closure_linter import javascripttokens
from closure_linter import tokenutil

# Shorthand
Type = javascripttokens.JavaScriptTokenType
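
# For illustration, the kind of reordering described above (a sketch; object
# names are arbitrary). Input:
#
#   goog.require('object.b');
#   /** @suppress {extraRequire} */
#   goog.require('object.a');  // Some comment.
#
# Output -- the comment lines travel with their statement:
#
#   /** @suppress {extraRequire} */
#   goog.require('object.a');  // Some comment.
#   goog.require('object.b');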


class RequireProvideSorter(object):
  """Checks for and fixes alphabetization of provide and require statements.

  When alphabetizing, comments on the same line or comments directly above a
  goog.provide or goog.require statement are associated with that statement and
  stay with the statement as it gets sorted.
  """

  def CheckProvides(self, token):
    """Checks alphabetization of goog.provide statements.

    Iterates over tokens in given token stream, identifies goog.provide tokens,
    and checks that they occur in alphabetical order by the object being
    provided.

    Args:
      token: A token in the token stream before any goog.provide tokens.

    Returns:
      The first provide token in the token stream.

      None is returned if all goog.provide statements are already sorted.
    """
    provide_tokens = self._GetRequireOrProvideTokens(token, 'goog.provide')
    provide_strings = self._GetRequireOrProvideTokenStrings(provide_tokens)
    sorted_provide_strings = sorted(provide_strings)
    if provide_strings != sorted_provide_strings:
      return provide_tokens[0]
    return None

  def CheckRequires(self, token):
    """Checks alphabetization of goog.require statements.

    Iterates over tokens in given token stream, identifies goog.require tokens,
    and checks that they occur in alphabetical order by the dependency being
    required.

    Args:
      token: A token in the token stream before any goog.require tokens.

    Returns:
      The first require token in the token stream.

      None is returned if all goog.require statements are already sorted.
    """
    require_tokens = self._GetRequireOrProvideTokens(token, 'goog.require')
    require_strings = self._GetRequireOrProvideTokenStrings(require_tokens)
    sorted_require_strings = sorted(require_strings)
    if require_strings != sorted_require_strings:
      return require_tokens[0]
    return None

  def FixProvides(self, token):
    """Sorts goog.provide statements in the given token stream alphabetically.

    Args:
      token: The first token in the token stream.
    """
    self._FixProvidesOrRequires(
        self._GetRequireOrProvideTokens(token, 'goog.provide'))

  def FixRequires(self, token):
    """Sorts goog.require statements in the given token stream alphabetically.

    Args:
      token: The first token in the token stream.
    """
    self._FixProvidesOrRequires(
        self._GetRequireOrProvideTokens(token, 'goog.require'))

  def _FixProvidesOrRequires(self, tokens):
    """Sorts goog.provide or goog.require statements.

    Args:
      tokens: A list of goog.provide or goog.require tokens in the order they
              appear in the token stream. i.e. the first token in this list must
              be the first goog.provide or goog.require token.
    """
    strings = self._GetRequireOrProvideTokenStrings(tokens)
    sorted_strings = sorted(strings)

    # Make a separate pass to remove any blank lines between goog.require/
    # goog.provide tokens.
    first_token = tokens[0]
    last_token = tokens[-1]
    i = last_token
    while i != first_token:
      if i.type is Type.BLANK_LINE:
        tokenutil.DeleteToken(i)
      i = i.previous

    # A map from required/provided object name to tokens that make up the line
    # it was on, including any comments immediately before it or after it on
    # the same line.
    tokens_map = self._GetTokensMap(tokens)

    # Iterate over the map removing all tokens.
    for name in tokens_map:
      tokens_to_delete = tokens_map[name]
      for i in tokens_to_delete:
        tokenutil.DeleteToken(i)

    # Save token to rest of file. Sorted token will be inserted before this.
    rest_of_file = tokens_map[strings[-1]][-1].next

    # Re-add all tokens in the map in alphabetical order.
    insert_after = tokens[0].previous
    for string in sorted_strings:
      for i in tokens_map[string]:
        if rest_of_file:
          tokenutil.InsertTokenBefore(i, rest_of_file)
        else:
          tokenutil.InsertTokenAfter(i, insert_after)
          insert_after = i

  def _GetRequireOrProvideTokens(self, token, token_string):
    """Gets all goog.provide or goog.require tokens in the given token stream.

    Args:
      token: The first token in the token stream.
      token_string: One of 'goog.provide' or 'goog.require' to indicate which
                    tokens to find.

    Returns:
      A list of goog.provide or goog.require tokens in the order they appear in
      the token stream.
""" tokens = [] while token: if token.type == Type.IDENTIFIER: if token.string == token_string: tokens.append(token) elif token.string not in [ 'goog.provide', 'goog.require', 'goog.setTestOnly']: # These 3 identifiers are at the top of the file. So if any other # identifier is encountered, return. break token = token.next return tokens def _GetRequireOrProvideTokenStrings(self, tokens): """Gets a list of strings corresponding to the given list of tokens. The string will be the next string in the token stream after each token in tokens. This is used to find the object being provided/required by a given goog.provide or goog.require token. Args: tokens: A list of goog.provide or goog.require tokens. Returns: A list of object names that are being provided or required by the given list of tokens. For example: ['object.a', 'object.c', 'object.b'] """ token_strings = [] for token in tokens: name = tokenutil.GetStringAfterToken(token) token_strings.append(name) return token_strings def _GetTokensMap(self, tokens): """Gets a map from object name to tokens associated with that object. Starting from the goog.provide/goog.require token, searches backwards in the token stream for any lines that start with a comment. These lines are associated with the goog.provide/goog.require token. Also associates any tokens on the same line as the goog.provide/goog.require token with that token. Args: tokens: A list of goog.provide or goog.require tokens. Returns: A dictionary that maps object names to the tokens associated with the goog.provide or goog.require of that object name. For example: { 'object.a': [JavaScriptToken, JavaScriptToken, ...], 'object.b': [...] } The list of tokens includes any comment lines above the goog.provide or goog.require statement and everything after the statement on the same line. For example, all of the following would be associated with 'object.a': /** @suppress {extraRequire} */ goog.require('object.a'); // Some comment. """ tokens_map = {} for token in tokens: object_name = tokenutil.GetStringAfterToken(token) # If the previous line starts with a comment, presume that the comment # relates to the goog.require or goog.provide and keep them together when # sorting. first_token = token previous_first_token = tokenutil.GetFirstTokenInPreviousLine(first_token) while (previous_first_token and previous_first_token.IsAnyType(Type.COMMENT_TYPES)): first_token = previous_first_token previous_first_token = tokenutil.GetFirstTokenInPreviousLine( first_token) # Find the last token on the line. last_token = tokenutil.GetLastTokenInSameLine(token) all_tokens = self._GetTokenList(first_token, last_token) tokens_map[object_name] = all_tokens return tokens_map def _GetTokenList(self, first_token, last_token): """Gets a list of all tokens from first_token to last_token, inclusive. Args: first_token: The first token to get. last_token: The last token to get. Returns: A list of all tokens between first_token and last_token, including both first_token and last_token. Raises: Exception: If the token stream ends before last_token is reached. """ token_list = [] token = first_token while token != last_token: if not token: raise Exception('ran out of tokens') token_list.append(token) token = token.next token_list.append(last_token) return token_list def GetFixedRequireString(self, token): """Get fixed/sorted order of goog.require statements. Args: token: The first token in the token stream. Returns: A string for correct sorted order of goog.require. 
""" return self._GetFixedRequireOrProvideString( self._GetRequireOrProvideTokens(token, 'goog.require')) def GetFixedProvideString(self, token): """Get fixed/sorted order of goog.provide statements. Args: token: The first token in the token stream. Returns: A string for correct sorted order of goog.provide. """ return self._GetFixedRequireOrProvideString( self._GetRequireOrProvideTokens(token, 'goog.provide')) def _GetFixedRequireOrProvideString(self, tokens): """Sorts goog.provide or goog.require statements. Args: tokens: A list of goog.provide or goog.require tokens in the order they appear in the token stream. i.e. the first token in this list must be the first goog.provide or goog.require token. Returns: A string for sorted goog.require or goog.provide statements """ # A map from required/provided object name to tokens that make up the line # it was on, including any comments immediately before it or after it on the # same line. tokens_map = self._GetTokensMap(tokens) sorted_strings = sorted(tokens_map.keys()) new_order = '' for string in sorted_strings: for i in tokens_map[string]: new_order += i.string if i.IsLastInLine(): new_order += '\n' return new_order closure_linter-2.3.13/closure_linter/scopeutil.py0000640014730400116100000001267312247733554021633 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2012 The Closure Linter Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools to match goog.scope alias statements.""" # Allow non-Google copyright # pylint: disable=g-bad-file-header __author__ = ('nnaze@google.com (Nathan Naze)') import itertools from closure_linter import ecmametadatapass from closure_linter import tokenutil from closure_linter.javascripttokens import JavaScriptTokenType def IsGoogScopeBlock(context): """Whether the given context is a goog.scope block. This function only checks that the block is a function block inside a goog.scope() call. TODO(nnaze): Implement goog.scope checks that verify the call is in the root context and contains only a single function literal. Args: context: An EcmaContext of type block. Returns: Whether the context is a goog.scope block. """ if context.type != ecmametadatapass.EcmaContext.BLOCK: return False if not _IsFunctionLiteralBlock(context): return False # Check that this function is contained by a group # of form "goog.scope(...)". parent = context.parent if parent and parent.type is ecmametadatapass.EcmaContext.GROUP: last_code_token = parent.start_token.metadata.last_code if (last_code_token and last_code_token.type is JavaScriptTokenType.IDENTIFIER and last_code_token.string == 'goog.scope'): return True return False def _IsFunctionLiteralBlock(block_context): """Check if a context is a function literal block (without parameters). Example function literal block: 'function() {}' Args: block_context: An EcmaContext of type block. Returns: Whether this context is a function literal block. 
""" previous_code_tokens_iter = itertools.ifilter( lambda token: token not in JavaScriptTokenType.NON_CODE_TYPES, reversed(block_context.start_token)) # Ignore the current token next(previous_code_tokens_iter, None) # Grab the previous three tokens and put them in correct order. previous_code_tokens = list(itertools.islice(previous_code_tokens_iter, 3)) previous_code_tokens.reverse() # There aren't three previous tokens. if len(previous_code_tokens) is not 3: return False # Check that the previous three code tokens are "function ()" previous_code_token_types = [token.type for token in previous_code_tokens] if (previous_code_token_types == [ JavaScriptTokenType.FUNCTION_DECLARATION, JavaScriptTokenType.START_PARAMETERS, JavaScriptTokenType.END_PARAMETERS]): return True return False def IsInClosurizedNamespace(symbol, closurized_namespaces): """Match a goog.scope alias. Args: symbol: An identifier like 'goog.events.Event'. closurized_namespaces: Iterable of valid Closurized namespaces (strings). Returns: True if symbol is an identifier in a Closurized namespace, otherwise False. """ for ns in closurized_namespaces: if symbol.startswith(ns + '.'): return True return False def MatchAlias(context): """Match an alias statement (some identifier assigned to a variable). Example alias: var MyClass = proj.longNamespace.MyClass. Args: context: An EcmaContext of type EcmaContext.STATEMENT. Returns: If a valid alias, returns a tuple of alias and symbol, otherwise None. """ if context.type != ecmametadatapass.EcmaContext.STATEMENT: return # Get the tokens in this statement. if context.start_token and context.end_token: statement_tokens = tokenutil.GetTokenRange(context.start_token, context.end_token) else: return # And now just those tokens that are actually code. is_non_code_type = lambda t: t.type not in JavaScriptTokenType.NON_CODE_TYPES code_tokens = filter(is_non_code_type, statement_tokens) # This section identifies statements of the alias form "var alias = symbol". # Pop off the semicolon if present. if code_tokens and code_tokens[-1].IsType(JavaScriptTokenType.SEMICOLON): code_tokens.pop() if not (len(code_tokens) == 4 and code_tokens[0].IsKeyword('var') and (code_tokens[0].metadata.context.type == ecmametadatapass.EcmaContext.VAR)): return # Verify the only code tokens in this statement are part of the var # declaration. var_context = code_tokens[0].metadata.context for token in code_tokens: if token.metadata.context is not var_context: return # Verify that this is of the form "var lvalue = identifier;". if not(code_tokens[0].IsKeyword('var') and code_tokens[1].IsType(JavaScriptTokenType.SIMPLE_LVALUE) and code_tokens[2].IsOperator('=') and code_tokens[3].IsType(JavaScriptTokenType.IDENTIFIER)): return alias, symbol = code_tokens[1], code_tokens[3] # Mark both tokens as an alias definition to avoid counting them as usages. alias.metadata.is_alias_definition = True symbol.metadata.is_alias_definition = True return alias.string, symbol.string closure_linter-2.3.13/closure_linter/ecmalintrules.py0000750014730400116100000010453312247733554022472 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2008 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 

==== File: closure_linter-2.3.13/closure_linter/ecmalintrules.py ====

#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Core methods for checking EcmaScript files for common style guide
violations.
"""

__author__ = ('robbyw@google.com (Robert Walker)',
              'ajp@google.com (Andy Perelson)',
              'jacobr@google.com (Jacob Richman)')

import re

import gflags as flags

from closure_linter import checkerbase
from closure_linter import ecmametadatapass
from closure_linter import error_check
from closure_linter import errorrules
from closure_linter import errors
from closure_linter import indentation
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import statetracker
from closure_linter import tokenutil
from closure_linter.common import error
from closure_linter.common import position

FLAGS = flags.FLAGS
flags.DEFINE_list('custom_jsdoc_tags', '', 'Extra jsdoc tags to allow')

# TODO(robbyw): Check for extra parens on return statements
# TODO(robbyw): Check for 0px in strings
# TODO(robbyw): Ensure inline jsDoc is in {}
# TODO(robbyw): Check for valid JS types in parameter docs

# Shorthand
Context = ecmametadatapass.EcmaContext
Error = error.Error
Modes = javascripttokenizer.JavaScriptModes
Position = position.Position
Rule = error_check.Rule
Type = javascripttokens.JavaScriptTokenType


class EcmaScriptLintRules(checkerbase.LintRulesBase):
  """EcmaScript lint style checking rules.

  Can be used to find common style errors in JavaScript, ActionScript and
  other Ecma-like scripting languages. Style checkers for Ecma scripting
  languages should inherit from this style checker.
  Please do not add any state to EcmaScriptLintRules or to any subclasses.

  All state should be added to the StateTracker subclass used for a particular
  language.
  """

  # Initialized in the constructor so that the flags have been parsed first.
  max_line_length = -1

  # Static constants.
  MISSING_PARAMETER_SPACE = re.compile(r',\S')

  EXTRA_SPACE = re.compile(r'(\(\s|\s\))')

  ENDS_WITH_SPACE = re.compile(r'\s$')

  ILLEGAL_TAB = re.compile(r'\t')

  # Regex used to split up complex types to check for invalid use of ? and |.
  TYPE_SPLIT = re.compile(r'[,<>()]')

  # Regex for form of author lines after the @author tag.
  AUTHOR_SPEC = re.compile(r'(\s*)[^\s]+@[^(\s]+(\s*)\(.+\)')

  # Acceptable tokens to remove for line too long testing.
  LONG_LINE_IGNORE = frozenset(
      ['*', '//', '@see'] +
      ['@%s' % tag for tag in statetracker.DocFlag.HAS_TYPE])

  JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED = frozenset([
      '@param', '@return', '@returns'])

  def __init__(self):
    """Initialize this lint rule object."""
    checkerbase.LintRulesBase.__init__(self)
    if EcmaScriptLintRules.max_line_length == -1:
      EcmaScriptLintRules.max_line_length = errorrules.GetMaxLineLength()

  def Initialize(self, checker, limited_doc_checks, is_html):
    """Initialize this lint rule object before parsing a new file."""
    checkerbase.LintRulesBase.Initialize(self, checker, limited_doc_checks,
                                         is_html)
    self._indentation = indentation.IndentationRules()

  def HandleMissingParameterDoc(self, token, param_name):
    """Handle errors associated with a parameter missing a @param tag."""
    raise TypeError('Abstract method HandleMissingParameterDoc not implemented')

  def _CheckLineLength(self, last_token, state):
    """Checks whether the line is too long.

    Args:
      last_token: The last token in the line.
      state: parser_state object that indicates the current state in the page
    """
    # Start from the last token so that we have the flag object attached to
    # any DOC_FLAG tokens.
    line_number = last_token.line_number
    token = last_token

    # Build a representation of the string where spaces indicate potential
    # line-break locations.
    line = []
    while token and token.line_number == line_number:
      if state.IsTypeToken(token):
        line.insert(0, 'x' * len(token.string))
      elif token.type in (Type.IDENTIFIER, Type.NORMAL):
        # Dots are acceptable places to wrap.
        line.insert(0, token.string.replace('.', ' '))
      else:
        line.insert(0, token.string)
      token = token.previous

    line = ''.join(line)
    line = line.rstrip('\n\r\f')
    try:
      length = len(unicode(line, 'utf-8'))
    except LookupError:
      # Unknown encoding. The line length may be wrong, as was originally the
      # case for utf-8 (see bug 1735846). For now just accept the default
      # length, but as we find problems we can either add test for other
      # possible encodings or return without an error to protect against
      # false positives at the cost of more false negatives.
      length = len(line)

    if length > EcmaScriptLintRules.max_line_length:
      # If the line matches one of the exceptions, then it's ok.
      for long_line_regexp in self.GetLongLineExceptions():
        if long_line_regexp.match(last_token.line):
          return

      # If the line consists of only one "word", or multiple words but all
      # except one are ignorable, then it's ok.
      parts = set(line.split())

      # We allow two "words" (type and name) when the line contains @param
      max_parts = 1
      if '@param' in parts:
        max_parts = 2

      # Custom tags like @requires may have url like descriptions, so ignore
      # the tag, similar to how we handle @see.
      custom_tags = set(['@%s' % f for f in FLAGS.custom_jsdoc_tags])
      if (len(parts.difference(self.LONG_LINE_IGNORE | custom_tags))
          > max_parts):
        self._HandleError(
            errors.LINE_TOO_LONG,
            'Line too long (%d characters).' % length, last_token)

  def _CheckJsDocType(self, token):
    """Checks the given type for style errors.

    Args:
      token: The DOC_FLAG token for the flag whose type to check.
""" flag = token.attached_object flag_type = flag.type if flag_type and flag_type is not None and not flag_type.isspace(): pieces = self.TYPE_SPLIT.split(flag_type) if len(pieces) == 1 and flag_type.count('|') == 1 and ( flag_type.endswith('|null') or flag_type.startswith('null|')): self._HandleError( errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL, 'Prefer "?Type" to "Type|null": "%s"' % flag_type, token) # TODO(user): We should do actual parsing of JsDoc types to report an # error for wrong usage of '?' and '|' e.g. {?number|string|null} etc. if error_check.ShouldCheck(Rule.BRACES_AROUND_TYPE) and ( flag.type_start_token.type != Type.DOC_START_BRACE or flag.type_end_token.type != Type.DOC_END_BRACE): self._HandleError( errors.MISSING_BRACES_AROUND_TYPE, 'Type must always be surrounded by curly braces.', token) def _CheckForMissingSpaceBeforeToken(self, token): """Checks for a missing space at the beginning of a token. Reports a MISSING_SPACE error if the token does not begin with a space or the previous token doesn't end with a space and the previous token is on the same line as the token. Args: token: The token being checked """ # TODO(user): Check if too many spaces? if (len(token.string) == len(token.string.lstrip()) and token.previous and token.line_number == token.previous.line_number and len(token.previous.string) - len(token.previous.string.rstrip()) == 0): self._HandleError( errors.MISSING_SPACE, 'Missing space before "%s"' % token.string, token, position=Position.AtBeginning()) def _CheckOperator(self, token): """Checks an operator for spacing and line style. Args: token: The operator token. """ last_code = token.metadata.last_code if not self._ExpectSpaceBeforeOperator(token): if (token.previous and token.previous.type == Type.WHITESPACE and last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER)): self._HandleError( errors.EXTRA_SPACE, 'Extra space before "%s"' % token.string, token.previous, position=Position.All(token.previous.string)) elif (token.previous and not token.previous.IsComment() and token.previous.type in Type.EXPRESSION_ENDER_TYPES): self._HandleError(errors.MISSING_SPACE, 'Missing space before "%s"' % token.string, token, position=Position.AtBeginning()) # Check that binary operators are not used to start lines. if ((not last_code or last_code.line_number != token.line_number) and not token.metadata.IsUnaryOperator()): self._HandleError( errors.LINE_STARTS_WITH_OPERATOR, 'Binary operator should go on previous line "%s"' % token.string, token) def _ExpectSpaceBeforeOperator(self, token): """Returns whether a space should appear before the given operator token. Args: token: The operator token. Returns: Whether there should be a space before the token. """ if token.string == ',' or token.metadata.IsUnaryPostOperator(): return False # Colons should appear in labels, object literals, the case of a switch # statement, and ternary operator. Only want a space in the case of the # ternary operator. if (token.string == ':' and token.metadata.context.type in (Context.LITERAL_ELEMENT, Context.CASE_BLOCK, Context.STATEMENT)): return False if token.metadata.IsUnaryOperator() and token.IsFirstInLine(): return False return True def CheckToken(self, token, state): """Checks a token, given the current parser_state, for warnings and errors. 

  def CheckToken(self, token, state):
    """Checks a token, given the current parser_state, for warnings and errors.

    Args:
      token: The current token under consideration
      state: parser_state object that indicates the current state in the page
    """
    # Store some convenience variables
    first_in_line = token.IsFirstInLine()
    last_in_line = token.IsLastInLine()
    last_non_space_token = state.GetLastNonSpaceToken()

    token_type = token.type

    # Process the line change.
    if not self._is_html and error_check.ShouldCheck(Rule.INDENTATION):
      # TODO(robbyw): Support checking indentation in HTML files.
      indentation_errors = self._indentation.CheckToken(token, state)
      for indentation_error in indentation_errors:
        self._HandleError(*indentation_error)

    if last_in_line:
      self._CheckLineLength(token, state)

    if token_type == Type.PARAMETERS:
      # Find missing spaces in parameter lists.
      if self.MISSING_PARAMETER_SPACE.search(token.string):
        fix_data = ', '.join([s.strip() for s in token.string.split(',')])
        self._HandleError(errors.MISSING_SPACE, 'Missing space after ","',
                          token, position=None, fix_data=fix_data.strip())

      # Find extra spaces at the beginning of parameter lists.  Make sure
      # we aren't at the beginning of a continuing multi-line list.
      if not first_in_line:
        space_count = len(token.string) - len(token.string.lstrip())
        if space_count:
          self._HandleError(errors.EXTRA_SPACE, 'Extra space after "("',
                            token, position=Position(0, space_count))

    elif (token_type == Type.START_BLOCK and
          token.metadata.context.type == Context.BLOCK):
      self._CheckForMissingSpaceBeforeToken(token)

    elif token_type == Type.END_BLOCK:
      # This check is for object literal end block tokens, but there is no need
      # to test that condition since a comma at the end of any other kind of
      # block is undoubtedly a parse error.
      last_code = token.metadata.last_code
      if last_code.IsOperator(','):
        self._HandleError(
            errors.COMMA_AT_END_OF_LITERAL,
            'Illegal comma at end of object literal', last_code,
            position=Position.All(last_code.string))

      if state.InFunction() and state.IsFunctionClose():
        is_immediately_called = (token.next and
                                 token.next.type == Type.START_PAREN)
        if state.InTopLevelFunction():
          # A semicolon should not be included at the end of a function
          # declaration.
          if not state.InAssignedFunction():
            if not last_in_line and token.next.type == Type.SEMICOLON:
              self._HandleError(
                  errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
                  'Illegal semicolon after function declaration',
                  token.next, position=Position.All(token.next.string))
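        # For illustration, the semicolon rules enforced here (a sketch):
        #
        #   function f() {};          // ILLEGAL_SEMICOLON_AFTER_FUNCTION
        #   var f = function() {}     // MISSING_SEMICOLON_AFTER_FUNCTION
        #   var g = function() {}()   // immediately called: exempt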
        # A semicolon should be included at the end of a function expression
        # that is not immediately called.
        if state.InAssignedFunction():
          if not is_immediately_called and (
              last_in_line or token.next.type != Type.SEMICOLON):
            self._HandleError(
                errors.MISSING_SEMICOLON_AFTER_FUNCTION,
                'Missing semicolon after function assigned to a variable',
                token, position=Position.AtEnd(token.string))

        if state.InInterfaceMethod() and last_code.type != Type.START_BLOCK:
          self._HandleError(errors.INTERFACE_METHOD_CANNOT_HAVE_CODE,
                            'Interface methods cannot contain code', last_code)

      elif (state.IsBlockClose() and
            token.next and token.next.type == Type.SEMICOLON):
        if (last_code.metadata.context.parent.type != Context.OBJECT_LITERAL
            and last_code.metadata.context.type != Context.OBJECT_LITERAL):
          self._HandleError(
              errors.REDUNDANT_SEMICOLON,
              'No semicolon is required to end a code block',
              token.next, position=Position.All(token.next.string))

    elif token_type == Type.SEMICOLON:
      if token.previous and token.previous.type == Type.WHITESPACE:
        self._HandleError(
            errors.EXTRA_SPACE, 'Extra space before ";"',
            token.previous, position=Position.All(token.previous.string))

      if token.next and token.next.line_number == token.line_number:
        if token.metadata.context.type != Context.FOR_GROUP_BLOCK:
          # TODO(robbyw): Error about no multi-statement lines.
          pass
        elif token.next.type not in (
            Type.WHITESPACE, Type.SEMICOLON, Type.END_PAREN):
          self._HandleError(
              errors.MISSING_SPACE,
              'Missing space after ";" in for statement',
              token.next,
              position=Position.AtBeginning())

      last_code = token.metadata.last_code
      if last_code and last_code.type == Type.SEMICOLON:
        # Allow a single double semi colon in for loops for cases like:
        # for (;;) { }.
        # NOTE(user): This is not a perfect check, and will not throw an error
        # for cases like: for (var i = 0;; i < n; i++) {}, but then your code
        # probably won't work either.
        for_token = tokenutil.CustomSearch(
            last_code,
            lambda token: token.type == Type.KEYWORD and
            token.string == 'for',
            end_func=lambda token: token.type == Type.SEMICOLON,
            distance=None,
            reverse=True)

        if not for_token:
          self._HandleError(errors.REDUNDANT_SEMICOLON, 'Redundant semicolon',
                            token, position=Position.All(token.string))

    elif token_type == Type.START_PAREN:
      if token.previous and token.previous.type == Type.KEYWORD:
        self._HandleError(errors.MISSING_SPACE, 'Missing space before "("',
                          token, position=Position.AtBeginning())
      elif token.previous and token.previous.type == Type.WHITESPACE:
        before_space = token.previous.previous
        if (before_space and before_space.line_number == token.line_number and
            before_space.type == Type.IDENTIFIER):
          self._HandleError(
              errors.EXTRA_SPACE, 'Extra space before "("',
              token.previous, position=Position.All(token.previous.string))

    elif token_type == Type.START_BRACKET:
      self._HandleStartBracket(token, last_non_space_token)
    elif token_type in (Type.END_PAREN, Type.END_BRACKET):
      # Ensure there is no space before closing parentheses, except when
      # it's in a for statement with an omitted section, or when it's at the
      # beginning of a line.
      if (token.previous and token.previous.type == Type.WHITESPACE and
          not token.previous.IsFirstInLine() and
          not (last_non_space_token and last_non_space_token.line_number ==
               token.line_number and
               last_non_space_token.type == Type.SEMICOLON)):
        self._HandleError(
            errors.EXTRA_SPACE, 'Extra space before "%s"' % token.string,
            token.previous, position=Position.All(token.previous.string))

      if token.type == Type.END_BRACKET:
        last_code = token.metadata.last_code
        if last_code.IsOperator(','):
          self._HandleError(
              errors.COMMA_AT_END_OF_LITERAL,
              'Illegal comma at end of array literal', last_code,
              position=Position.All(last_code.string))

    elif token_type == Type.WHITESPACE:
      if self.ILLEGAL_TAB.search(token.string):
        if token.IsFirstInLine():
          if token.next:
            self._HandleError(
                errors.ILLEGAL_TAB,
                'Illegal tab in whitespace before "%s"' % token.next.string,
                token, position=Position.All(token.string))
          else:
            self._HandleError(
                errors.ILLEGAL_TAB,
                'Illegal tab in whitespace',
                token, position=Position.All(token.string))
        else:
          self._HandleError(
              errors.ILLEGAL_TAB,
              'Illegal tab in whitespace after "%s"' % token.previous.string,
              token, position=Position.All(token.string))

      # Check whitespace length if it's not the first token of the line and
      # if it's not immediately before a comment.
      if last_in_line:
        # Check for extra whitespace at the end of a line.
        self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',
                          token, position=Position.All(token.string))
      elif not first_in_line and not token.next.IsComment():
        if token.length > 1:
          self._HandleError(
              errors.EXTRA_SPACE, 'Extra space after "%s"' %
              token.previous.string, token,
              position=Position(1, len(token.string) - 1))

    elif token_type == Type.OPERATOR:
      self._CheckOperator(token)
    elif token_type == Type.DOC_FLAG:
      flag = token.attached_object

      if flag.flag_type == 'bug':
        # TODO(robbyw): Check for exactly 1 space on the left.
        string = token.next.string.lstrip()
        string = string.split(' ', 1)[0]

        if not string.isdigit():
          self._HandleError(errors.NO_BUG_NUMBER_AFTER_BUG_TAG,
                            '@bug should be followed by a bug number', token)

      elif flag.flag_type == 'suppress':
        if flag.type is None:
          # A syntactically invalid suppress tag will get tokenized as a normal
          # flag, indicating an error.
          self._HandleError(
              errors.INCORRECT_SUPPRESS_SYNTAX,
              'Invalid suppress syntax: should be @suppress {errortype}. '
              'Spaces matter.', token)
        else:
          for suppress_type in re.split(r'\||,', flag.type):
            if suppress_type not in state.GetDocFlag().SUPPRESS_TYPES:
              self._HandleError(
                  errors.INVALID_SUPPRESS_TYPE,
                  'Invalid suppression type: %s' % suppress_type, token)

      elif (error_check.ShouldCheck(Rule.WELL_FORMED_AUTHOR) and
            flag.flag_type == 'author'):
        # TODO(user): In non strict mode check the author tag for as much as
        # it exists, though the full form checked below isn't required.
        string = token.next.string
        result = self.AUTHOR_SPEC.match(string)
        if not result:
          self._HandleError(errors.INVALID_AUTHOR_TAG_DESCRIPTION,
                            'Author tag line should be of the form: '
                            '@author foo@somewhere.com (Your Name)',
                            token.next)
        else:
          # Check spacing between email address and name. Do this before
          # checking earlier spacing so positions are easier to calculate for
          # autofixing.
          num_spaces = len(result.group(2))
          if num_spaces < 1:
            self._HandleError(errors.MISSING_SPACE,
                              'Missing space after email address',
                              token.next,
                              position=Position(result.start(2), 0))
          elif num_spaces > 1:
            self._HandleError(
                errors.EXTRA_SPACE, 'Extra space after email address',
                token.next,
                position=Position(result.start(2) + 1, num_spaces - 1))

          # Check for extra spaces before email address. Can't be too few,
          # since without at least one we wouldn't have matched the @author
          # tag at all.
          num_spaces = len(result.group(1))
          if num_spaces > 1:
            self._HandleError(errors.EXTRA_SPACE,
                              'Extra space before email address',
                              token.next, position=Position(1, num_spaces - 1))

      elif (flag.flag_type in state.GetDocFlag().HAS_DESCRIPTION and
            not self._limited_doc_checks):
        if flag.flag_type == 'param':
          if flag.name is None:
            self._HandleError(errors.MISSING_JSDOC_PARAM_NAME,
                              'Missing name in @param tag', token)

        if not flag.description:
          flag_name = token.type
          if 'name' in token.values:
            flag_name = '@' + token.values['name']

          if flag_name not in self.JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED:
            self._HandleError(
                errors.MISSING_JSDOC_TAG_DESCRIPTION,
                'Missing description in %s tag' % flag_name, token)
        else:
          self._CheckForMissingSpaceBeforeToken(flag.description_start_token)

      if flag.flag_type in state.GetDocFlag().HAS_TYPE:
        if flag.type_start_token is not None:
          self._CheckForMissingSpaceBeforeToken(
              token.attached_object.type_start_token)

        if flag.type and not flag.type.isspace():
          self._CheckJsDocType(token)

    if token_type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
      if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and
          token.values['name'] not in FLAGS.custom_jsdoc_tags):
        self._HandleError(
            errors.INVALID_JSDOC_TAG,
            'Invalid JsDoc tag: %s' % token.values['name'], token)

      if (error_check.ShouldCheck(Rule.NO_BRACES_AROUND_INHERIT_DOC) and
          token.values['name'] == 'inheritDoc' and
          token_type == Type.DOC_INLINE_FLAG):
        self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,
                          'Unnecessary braces around @inheritDoc', token)

    elif token_type == Type.SIMPLE_LVALUE:
      identifier = token.values['identifier']

      if ((not state.InFunction() or state.InConstructor()) and
          state.InTopLevel() and not state.InObjectLiteralDescendant()):
        jsdoc = state.GetDocComment()
        if not state.HasDocComment(identifier):
          # Only test for documentation on identifiers with .s in them to
          # avoid checking things like simple variables. We don't require
          # documenting assignments to .prototype itself (bug 1880803).
          if (not state.InConstructor() and
              identifier.find('.') != -1 and
              not identifier.endswith('.prototype') and
              not self._limited_doc_checks):
            comment = state.GetLastComment()
            if not (comment and comment.lower().count('jsdoc inherited')):
              self._HandleError(
                  errors.MISSING_MEMBER_DOCUMENTATION,
                  "No docs found for member '%s'" % identifier, token)
        elif jsdoc and (not state.InConstructor() or
                        identifier.startswith('this.')):
          # We are at the top level and the function/member is documented.
          if identifier.endswith('_') and not identifier.endswith('__'):
            # Can have a private class which inherits documentation from a
            # public superclass.
            #
            # @inheritDoc is deprecated in favor of using @override, and the
            # two are treated alike here.
            if (jsdoc.HasFlag('override') and not jsdoc.HasFlag('constructor')
                and ('accessControls' not in jsdoc.suppressions)):
              self._HandleError(
                  errors.INVALID_OVERRIDE_PRIVATE,
                  '%s should not override a private member.'
                  % identifier,
                  jsdoc.GetFlag('override').flag_token)
            if (jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor')
                and ('accessControls' not in jsdoc.suppressions)):
              self._HandleError(
                  errors.INVALID_INHERIT_DOC_PRIVATE,
                  '%s should not inherit from a private member.' % identifier,
                  jsdoc.GetFlag('inheritDoc').flag_token)
            if (not jsdoc.HasFlag('private') and
                ('underscore' not in jsdoc.suppressions) and not
                ((jsdoc.HasFlag('inheritDoc') or jsdoc.HasFlag('override')) and
                 ('accessControls' in jsdoc.suppressions))):
              self._HandleError(
                  errors.MISSING_PRIVATE,
                  'Member "%s" must have @private JsDoc.' % identifier, token)
            if jsdoc.HasFlag('private') and 'underscore' in jsdoc.suppressions:
              self._HandleError(
                  errors.UNNECESSARY_SUPPRESS,
                  '@suppress {underscore} is not necessary with @private',
                  jsdoc.suppressions['underscore'])
          elif (jsdoc.HasFlag('private') and
                not self.InExplicitlyTypedLanguage()):
            # It is convention to hide public fields in some ECMA
            # implementations from documentation using the @private tag.
            self._HandleError(
                errors.EXTRA_PRIVATE,
                'Member "%s" must not have @private JsDoc' % identifier,
                token)

          # These flags are only legal on localizable message definitions;
          # such variables always begin with the prefix MSG_.
          for f in ('desc', 'hidden', 'meaning'):
            if (jsdoc.HasFlag(f)
                and not identifier.startswith('MSG_')
                and identifier.find('.MSG_') == -1):
              self._HandleError(
                  errors.INVALID_USE_OF_DESC_TAG,
                  'Member "%s" should not have @%s JsDoc' % (identifier, f),
                  token)

      # Check for illegally assigning live objects as prototype property
      # values.
      index = identifier.find('.prototype.')
      # Ignore anything with additional .s after the prototype.
      if index != -1 and identifier.find('.', index + 11) == -1:
        equal_operator = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
        next_code = tokenutil.SearchExcept(equal_operator, Type.NON_CODE_TYPES)
        if next_code and (
            next_code.type in (Type.START_BRACKET, Type.START_BLOCK) or
            next_code.IsOperator('new')):
          self._HandleError(
              errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE,
              'Member %s cannot have a non-primitive value' % identifier,
              token)

    elif token_type == Type.END_PARAMETERS:
      # Find extra space at the end of parameter lists.  We check the token
      # prior to the current one when it is a closing paren.
      if (token.previous and token.previous.type == Type.PARAMETERS and
          self.ENDS_WITH_SPACE.search(token.previous.string)):
        self._HandleError(errors.EXTRA_SPACE, 'Extra space before ")"',
                          token.previous)

      jsdoc = state.GetDocComment()
      if state.GetFunction().is_interface:
        if token.previous and token.previous.type == Type.PARAMETERS:
          self._HandleError(
              errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS,
              'Interface constructor cannot have parameters',
              token.previous)
      elif (state.InTopLevel() and jsdoc and not jsdoc.HasFlag('see')
            and not jsdoc.InheritsDocumentation()
            and not state.InObjectLiteralDescendant() and not
            jsdoc.IsInvalidated()):
        distance, edit = jsdoc.CompareParameters(state.GetParams())
        if distance:
          params_iter = iter(state.GetParams())
          docs_iter = iter(jsdoc.ordered_params)

          for op in edit:
            if op == 'I':
              # Insertion.
              # Parsing doc comments is the same for all languages
              # but some languages care about parameters that don't have
              # doc comments and some languages don't care.
              # Languages that don't allow variables to be typed such as
              # JavaScript care but languages such as ActionScript or Java
              # that allow variables to be typed don't care.
if not self._limited_doc_checks: self.HandleMissingParameterDoc(token, params_iter.next()) elif op == 'D': # Deletion self._HandleError(errors.EXTRA_PARAMETER_DOCUMENTATION, 'Found docs for non-existing parameter: "%s"' % docs_iter.next(), token) elif op == 'S': # Substitution if not self._limited_doc_checks: self._HandleError( errors.WRONG_PARAMETER_DOCUMENTATION, 'Parameter mismatch: got "%s", expected "%s"' % (params_iter.next(), docs_iter.next()), token) else: # Equality - just advance the iterators params_iter.next() docs_iter.next() elif token_type == Type.STRING_TEXT: # If this is the first token after the start of the string, but it's at # the end of a line, we know we have a multi-line string. if token.previous.type in ( Type.SINGLE_QUOTE_STRING_START, Type.DOUBLE_QUOTE_STRING_START) and last_in_line: self._HandleError(errors.MULTI_LINE_STRING, 'Multi-line strings are not allowed', token) # This check is orthogonal to the ones above, and repeats some types, so # it is a plain if and not an elif. if token.type in Type.COMMENT_TYPES: if self.ILLEGAL_TAB.search(token.string): self._HandleError(errors.ILLEGAL_TAB, 'Illegal tab in comment "%s"' % token.string, token) trimmed = token.string.rstrip() if last_in_line and token.string != trimmed: # Check for extra whitespace at the end of a line. self._HandleError( errors.EXTRA_SPACE, 'Extra space at end of line', token, position=Position(len(trimmed), len(token.string) - len(trimmed))) # This check is also orthogonal since it is based on metadata. if token.metadata.is_implied_semicolon: self._HandleError(errors.MISSING_SEMICOLON, 'Missing semicolon at end of line', token) def _HandleStartBracket(self, token, last_non_space_token): """Handles a token that is an open bracket. Args: token: The token to handle. last_non_space_token: The last token that was not a space. """ if (not token.IsFirstInLine() and token.previous.type == Type.WHITESPACE and last_non_space_token and last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES): self._HandleError( errors.EXTRA_SPACE, 'Extra space before "["', token.previous, position=Position.All(token.previous.string)) # If the [ token is the first token in a line we shouldn't complain # about a missing space before [. This is because some EcmaScript # languages allow syntax like: # [Annotation] # class MyClass {...} # So we don't want to blindly warn about missing spaces before [. # In the future, when rules for computing exactly how many spaces # lines should be indented are added, then we can return errors for # [ tokens that are improperly indented. # For example: # var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName = # [a,b,c]; # should trigger a proper indentation warning message as [ is not indented # by four spaces. elif (not token.IsFirstInLine() and token.previous and token.previous.type not in ( [Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] + Type.EXPRESSION_ENDER_TYPES)): self._HandleError(errors.MISSING_SPACE, 'Missing space before "["', token, position=Position.AtBeginning()) def Finalize(self, state): """Perform all checks that need to occur after all lines are processed. Args: state: State of the parser after parsing all tokens Raises: TypeError: If not overridden. """ last_non_space_token = state.GetLastNonSpaceToken() # Check last line for ending with newline.
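# A rough sketch of the condition checked below (illustrative only): the
# error fires when the last line has content but retains no trailing
# newline, e.g.
#
#   last = state.GetLastLine()
#   ends_ok = (not last or last.isspace() or
#              last.rstrip('\n\r\f') != last)  # a trailing newline remained
#
# so a file whose final line is 'return x;' with nothing after it is
# flagged as FILE_MISSING_NEWLINE.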
if state.GetLastLine() and not ( state.GetLastLine().isspace() or state.GetLastLine().rstrip('\n\r\f') != state.GetLastLine()): self._HandleError( errors.FILE_MISSING_NEWLINE, 'File does not end with new line. (%s)' % state.GetLastLine(), last_non_space_token) try: self._indentation.Finalize() except Exception, e: self._HandleError( errors.FILE_DOES_NOT_PARSE, str(e), last_non_space_token) def GetLongLineExceptions(self): """Gets a list of regexps for lines which can be longer than the limit. Returns: A list of regexps, used as matches (rather than searches). """ return [] def InExplicitlyTypedLanguage(self): """Returns whether this ecma implementation is explicitly typed.""" return False closure_linter-2.3.13/closure_linter/error_fixer.py0000750014730400116100000004753112247733554022155 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2007 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Main class responsible for automatically fixing simple style violations.""" # Allow non-Google copyright # pylint: disable=g-bad-file-header __author__ = 'robbyw@google.com (Robert Walker)' import re import gflags as flags from closure_linter import errors from closure_linter import javascriptstatetracker from closure_linter import javascripttokens from closure_linter import requireprovidesorter from closure_linter import tokenutil from closure_linter.common import errorhandler # Shorthand Token = javascripttokens.JavaScriptToken Type = javascripttokens.JavaScriptTokenType END_OF_FLAG_TYPE = re.compile(r'(}?\s*)$') # Regex to represent the common mistake of inverting author name and email as # @author User Name (user@company) INVERTED_AUTHOR_SPEC = re.compile(r'(?P<leading_whitespace>\s*)' r'(?P<name>[^(]+)' r'(?P<whitespace_after_name>\s+)' r'\(' r'(?P<email>[^\s]+@[^)\s]+)' r'\)' r'(?P<trailing_characters>.*)') FLAGS = flags.FLAGS flags.DEFINE_boolean('disable_indentation_fixing', False, 'Whether to disable automatic fixing of indentation.') class ErrorFixer(errorhandler.ErrorHandler): """Object that fixes simple style errors.""" def __init__(self, external_file=None): """Initialize the error fixer. Args: external_file: If included, all output will be directed to this file instead of overwriting the files the errors are found in. """ errorhandler.ErrorHandler.__init__(self) self._file_name = None self._file_token = None self._external_file = external_file def HandleFile(self, filename, first_token): """Notifies this ErrorFixer that subsequent errors are in filename. Args: filename: The name of the file about to be checked. first_token: The first token in the file. """ self._file_name = filename self._file_is_html = filename.endswith('.html') or filename.endswith('.htm') self._file_token = first_token self._file_fix_count = 0 self._file_changed_lines = set() def _AddFix(self, tokens): """Adds the fix to the internal count. Args: tokens: The token or sequence of tokens changed to fix an error.
""" self._file_fix_count += 1 if hasattr(tokens, 'line_number'): self._file_changed_lines.add(tokens.line_number) else: for token in tokens: self._file_changed_lines.add(token.line_number) def HandleError(self, error): """Attempts to fix the error. Args: error: The error object """ code = error.code token = error.token if code == errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL: iterator = token.attached_object.type_start_token if iterator.type == Type.DOC_START_BRACE or iterator.string.isspace(): iterator = iterator.next leading_space = len(iterator.string) - len(iterator.string.lstrip()) iterator.string = '%s?%s' % (' ' * leading_space, iterator.string.lstrip()) # Cover the no outer brace case where the end token is part of the type. while iterator and iterator != token.attached_object.type_end_token.next: iterator.string = iterator.string.replace( 'null|', '').replace('|null', '') iterator = iterator.next # Create a new flag object with updated type info. token.attached_object = javascriptstatetracker.JsDocFlag(token) self._AddFix(token) elif code == errors.JSDOC_MISSING_OPTIONAL_TYPE: iterator = token.attached_object.type_end_token if iterator.type == Type.DOC_END_BRACE or iterator.string.isspace(): iterator = iterator.previous ending_space = len(iterator.string) - len(iterator.string.rstrip()) iterator.string = '%s=%s' % (iterator.string.rstrip(), ' ' * ending_space) # Create a new flag object with updated type info. token.attached_object = javascriptstatetracker.JsDocFlag(token) self._AddFix(token) elif code == errors.JSDOC_MISSING_VAR_ARGS_TYPE: iterator = token.attached_object.type_start_token if iterator.type == Type.DOC_START_BRACE or iterator.string.isspace(): iterator = iterator.next starting_space = len(iterator.string) - len(iterator.string.lstrip()) iterator.string = '%s...%s' % (' ' * starting_space, iterator.string.lstrip()) # Create a new flag object with updated type info. 
token.attached_object = javascriptstatetracker.JsDocFlag(token) self._AddFix(token) elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION, errors.MISSING_SEMICOLON): semicolon_token = Token(';', Type.SEMICOLON, token.line, token.line_number) tokenutil.InsertTokenAfter(semicolon_token, token) token.metadata.is_implied_semicolon = False semicolon_token.metadata.is_implied_semicolon = False self._AddFix(token) elif code in (errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION, errors.REDUNDANT_SEMICOLON, errors.COMMA_AT_END_OF_LITERAL): self._DeleteToken(token) self._AddFix(token) elif code == errors.INVALID_JSDOC_TAG: if token.string == '@returns': token.string = '@return' self._AddFix(token) elif code == errors.FILE_MISSING_NEWLINE: # This error is fixed implicitly by the way we restore the file self._AddFix(token) elif code == errors.MISSING_SPACE: if error.fix_data: token.string = error.fix_data self._AddFix(token) elif error.position: if error.position.IsAtBeginning(): tokenutil.InsertSpaceTokenAfter(token.previous) elif error.position.IsAtEnd(token.string): tokenutil.InsertSpaceTokenAfter(token) else: token.string = error.position.Set(token.string, ' ') self._AddFix(token) elif code == errors.EXTRA_SPACE: if error.position: token.string = error.position.Set(token.string, '') self._AddFix(token) elif code == errors.MISSING_LINE: if error.position.IsAtBeginning(): tokenutil.InsertBlankLineAfter(token.previous) else: tokenutil.InsertBlankLineAfter(token) self._AddFix(token) elif code == errors.EXTRA_LINE: self._DeleteToken(token) self._AddFix(token) elif code == errors.WRONG_BLANK_LINE_COUNT: if not token.previous: # TODO(user): Add an insertBefore method to tokenutil. return num_lines = error.fix_data should_delete = False if num_lines < 0: num_lines *= -1 should_delete = True for unused_i in xrange(1, num_lines + 1): if should_delete: # TODO(user): DeleteToken should update line numbers. self._DeleteToken(token.previous) else: tokenutil.InsertBlankLineAfter(token.previous) self._AddFix(token) elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING: end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END) if end_quote: single_quote_start = Token( "'", Type.SINGLE_QUOTE_STRING_START, token.line, token.line_number) single_quote_end = Token( "'", Type.SINGLE_QUOTE_STRING_END, end_quote.line, token.line_number) tokenutil.InsertTokenAfter(single_quote_start, token) tokenutil.InsertTokenAfter(single_quote_end, end_quote) self._DeleteToken(token) self._DeleteToken(end_quote) self._AddFix([token, end_quote]) elif code == errors.MISSING_BRACES_AROUND_TYPE: fixed_tokens = [] start_token = token.attached_object.type_start_token if start_token.type != Type.DOC_START_BRACE: leading_space = ( len(start_token.string) - len(start_token.string.lstrip())) if leading_space: start_token = tokenutil.SplitToken(start_token, leading_space) # Fix case where start and end token were the same. if token.attached_object.type_end_token == start_token.previous: token.attached_object.type_end_token = start_token new_token = Token('{', Type.DOC_START_BRACE, start_token.line, start_token.line_number) tokenutil.InsertTokenAfter(new_token, start_token.previous) token.attached_object.type_start_token = new_token fixed_tokens.append(new_token) end_token = token.attached_object.type_end_token if end_token.type != Type.DOC_END_BRACE: # If the start token was a brace, the end token will be a # FLAG_ENDING_TYPE token; if there wasn't a starting brace then # the end token is the last token of the actual type.
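# Illustrative example of this fix: a flag written without braces, such as
#
#   /** @type number */
#
# is rewritten to
#
#   /** @type {number} */
#
# The code below works out where the closing '}' belongs when some or all
# of the brace pair is missing.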
last_type = end_token if not fixed_tokens: last_type = end_token.previous while last_type.string.isspace(): last_type = last_type.previous # If there was no starting brace then a lone end brace wouldn't have # been type end token. Now that we've added any missing start brace, # see if the last effective type token was an end brace. if last_type.type != Type.DOC_END_BRACE: trailing_space = (len(last_type.string) - len(last_type.string.rstrip())) if trailing_space: tokenutil.SplitToken(last_type, len(last_type.string) - trailing_space) new_token = Token('}', Type.DOC_END_BRACE, last_type.line, last_type.line_number) tokenutil.InsertTokenAfter(new_token, last_type) token.attached_object.type_end_token = new_token fixed_tokens.append(new_token) self._AddFix(fixed_tokens) elif code == errors.GOOG_REQUIRES_NOT_ALPHABETIZED: require_start_token = error.fix_data sorter = requireprovidesorter.RequireProvideSorter() sorter.FixRequires(require_start_token) self._AddFix(require_start_token) elif code == errors.GOOG_PROVIDES_NOT_ALPHABETIZED: provide_start_token = error.fix_data sorter = requireprovidesorter.RequireProvideSorter() sorter.FixProvides(provide_start_token) self._AddFix(provide_start_token) elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC: if token.previous.string == '{' and token.next.string == '}': self._DeleteToken(token.previous) self._DeleteToken(token.next) self._AddFix([token]) elif code == errors.INVALID_AUTHOR_TAG_DESCRIPTION: match = INVERTED_AUTHOR_SPEC.match(token.string) if match: token.string = '%s%s%s(%s)%s' % (match.group('leading_whitespace'), match.group('email'), match.group('whitespace_after_name'), match.group('name'), match.group('trailing_characters')) self._AddFix(token) elif (code == errors.WRONG_INDENTATION and not FLAGS.disable_indentation_fixing): token = tokenutil.GetFirstTokenInSameLine(token) actual = error.position.start expected = error.position.length # Cases where first token is param but with leading spaces. if (len(token.string.lstrip()) == len(token.string) - actual and token.string.lstrip()): token.string = token.string.lstrip() actual = 0 if token.type in (Type.WHITESPACE, Type.PARAMETERS) and actual != 0: token.string = token.string.lstrip() + (' ' * expected) self._AddFix([token]) else: # We need to add indentation. new_token = Token(' ' * expected, Type.WHITESPACE, token.line, token.line_number) # Note that we'll never need to add indentation at the first line, # since it will always not be indented. Therefore it's safe to assume # token.previous exists. tokenutil.InsertTokenAfter(new_token, token.previous) self._AddFix([token]) elif code in [errors.MALFORMED_END_OF_SCOPE_COMMENT, errors.MISSING_END_OF_SCOPE_COMMENT]: # Only fix cases where }); is found with no trailing content on the line # other than a comment. Value of 'token' is set to } for this error. 
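# Example of the rewrite performed below (illustrative): a goog.scope
# closer such as
#
#   });   // end of scope, stale note
#
# has the trailing comment stripped and replaced so the line reads
#
#   }); // goog.scope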
if (token.type == Type.END_BLOCK and token.next.type == Type.END_PAREN and token.next.next.type == Type.SEMICOLON): current_token = token.next.next.next removed_tokens = [] while current_token and current_token.line_number == token.line_number: if current_token.IsAnyType(Type.WHITESPACE, Type.START_SINGLE_LINE_COMMENT, Type.COMMENT): removed_tokens.append(current_token) current_token = current_token.next else: return if removed_tokens: self._DeleteTokens(removed_tokens[0], len(removed_tokens)) whitespace_token = Token(' ', Type.WHITESPACE, token.line, token.line_number) start_comment_token = Token('//', Type.START_SINGLE_LINE_COMMENT, token.line, token.line_number) comment_token = Token(' goog.scope', Type.COMMENT, token.line, token.line_number) insertion_tokens = [whitespace_token, start_comment_token, comment_token] tokenutil.InsertTokensAfter(insertion_tokens, token.next.next) self._AddFix(removed_tokens + insertion_tokens) elif code in [errors.EXTRA_GOOG_PROVIDE, errors.EXTRA_GOOG_REQUIRE]: tokens_in_line = tokenutil.GetAllTokensInSameLine(token) self._DeleteTokens(tokens_in_line[0], len(tokens_in_line)) self._AddFix(tokens_in_line) elif code in [errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_REQUIRE]: is_provide = code == errors.MISSING_GOOG_PROVIDE is_require = code == errors.MISSING_GOOG_REQUIRE missing_namespaces = error.fix_data[0] need_blank_line = error.fix_data[1] if need_blank_line is None: # TODO(user): This happens when there are no existing # goog.provide or goog.require statements to position new statements # relative to. Consider handling this case with a heuristic. return insert_location = token.previous # If inserting a missing require with no existing requires, insert a # blank line first. if need_blank_line and is_require: tokenutil.InsertBlankLineAfter(insert_location) insert_location = insert_location.next for missing_namespace in missing_namespaces: new_tokens = self._GetNewRequireOrProvideTokens( is_provide, missing_namespace, insert_location.line_number + 1) tokenutil.InsertLineAfter(insert_location, new_tokens) insert_location = new_tokens[-1] self._AddFix(new_tokens) # If inserting a missing provide with no existing provides, insert a # blank line after. if need_blank_line and is_provide: tokenutil.InsertBlankLineAfter(insert_location) def _GetNewRequireOrProvideTokens(self, is_provide, namespace, line_number): """Returns a list of tokens to create a goog.require/provide statement. Args: is_provide: True if getting tokens for a provide, False for require. namespace: The required or provided namespaces to get tokens for. line_number: The line number the new require or provide statement will be on. Returns: Tokens to create a new goog.require or goog.provide statement. """ string = 'goog.require' if is_provide: string = 'goog.provide' line_text = string + '(\'' + namespace + '\');\n' return [ Token(string, Type.IDENTIFIER, line_text, line_number), Token('(', Type.START_PAREN, line_text, line_number), Token('\'', Type.SINGLE_QUOTE_STRING_START, line_text, line_number), Token(namespace, Type.STRING_TEXT, line_text, line_number), Token('\'', Type.SINGLE_QUOTE_STRING_END, line_text, line_number), Token(')', Type.END_PAREN, line_text, line_number), Token(';', Type.SEMICOLON, line_text, line_number) ] def _DeleteToken(self, token): """Deletes the specified token from the linked list of tokens. Updates instance variables pointing to tokens such as _file_token if they reference the deleted token. Args: token: The token to delete. 
""" if token == self._file_token: self._file_token = token.next tokenutil.DeleteToken(token) def _DeleteTokens(self, token, token_count): """Deletes the given number of tokens starting with the given token. Updates instance variables pointing to tokens such as _file_token if they reference the deleted token. Args: token: The first token to delete. token_count: The total number of tokens to delete. """ if token == self._file_token: for unused_i in xrange(token_count): self._file_token = self._file_token.next tokenutil.DeleteTokens(token, token_count) def FinishFile(self): """Called when the current file has finished style checking. Used to go back and fix any errors in the file. It currently supports both js and html files. For js files it does a simple dump of all tokens, but in order to support html file, we need to merge the original file with the new token set back together. This works because the tokenized html file is the original html file with all non js lines kept but blanked out with one blank line token per line of html. """ if self._file_fix_count: # Get the original file content for html. if self._file_is_html: f = open(self._file_name, 'r') original_lines = f.readlines() f.close() f = self._external_file if not f: print 'Fixed %d errors in %s' % (self._file_fix_count, self._file_name) f = open(self._file_name, 'w') token = self._file_token # If something got inserted before first token (e.g. due to sorting) # then move to start. Bug 8398202. while token.previous: token = token.previous char_count = 0 line = '' while token: line += token.string char_count += len(token.string) if token.IsLastInLine(): # We distinguish if a blank line in html was from stripped original # file or newly added error fix by looking at the "org_line_number" # field on the token. It is only set in the tokenizer, so for all # error fixes, the value should be None. if (line or not self._file_is_html or token.orig_line_number is None): f.write(line) f.write('\n') else: f.write(original_lines[token.orig_line_number - 1]) line = '' if char_count > 80 and token.line_number in self._file_changed_lines: print 'WARNING: Line %d of %s is now longer than 80 characters.' % ( token.line_number, self._file_name) char_count = 0 token = token.next if not self._external_file: # Close the file if we created it f.close() closure_linter-2.3.13/closure_linter/statetracker_test.py0000750014730400116100000000647012247733554023357 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2012 The Closure Linter Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Unit tests for the statetracker module.""" # Allow non-Google copyright # pylint: disable=g-bad-file-header __author__ = ('nnaze@google.com (Nathan Naze)') import unittest as googletest from closure_linter import javascripttokens from closure_linter import statetracker from closure_linter import testutil class _FakeDocFlag(object): def __repr__(self): return '@%s %s' % (self.flag_type, self.name) class IdentifierTest(googletest.TestCase): def testJustIdentifier(self): a = javascripttokens.JavaScriptToken( 'abc', javascripttokens.JavaScriptTokenType.IDENTIFIER, 'abc', 1) st = statetracker.StateTracker() st.HandleToken(a, None) class DocCommentTest(googletest.TestCase): @staticmethod def _MakeDocFlagFake(flag_type, name=None): flag = _FakeDocFlag() flag.flag_type = flag_type flag.name = name return flag def testDocFlags(self): comment = statetracker.DocComment(None) a = self._MakeDocFlagFake('param', 'foo') comment.AddFlag(a) b = self._MakeDocFlagFake('param', '') comment.AddFlag(b) c = self._MakeDocFlagFake('param', 'bar') comment.AddFlag(c) self.assertEquals( ['foo', 'bar'], comment.ordered_params) self.assertEquals( [a, b, c], comment.GetDocFlags()) def testInvalidate(self): comment = statetracker.DocComment(None) self.assertFalse(comment.invalidated) self.assertFalse(comment.IsInvalidated()) comment.Invalidate() self.assertTrue(comment.invalidated) self.assertTrue(comment.IsInvalidated()) def testSuppressionOnly(self): comment = statetracker.DocComment(None) self.assertFalse(comment.SuppressionOnly()) comment.AddFlag(self._MakeDocFlagFake('suppress')) self.assertTrue(comment.SuppressionOnly()) comment.AddFlag(self._MakeDocFlagFake('foo')) self.assertFalse(comment.SuppressionOnly()) def testRepr(self): comment = statetracker.DocComment(None) comment.AddFlag(self._MakeDocFlagFake('param', 'foo')) comment.AddFlag(self._MakeDocFlagFake('param', 'bar')) self.assertEquals( '', repr(comment)) def testDocFlagParam(self): comment = self._ParseComment(""" /** * @param {string} [name] Name of customer. */""") flag = comment.GetFlag('param') self.assertEquals('string', flag.type) self.assertEquals('[name]', flag.name) def _ParseComment(self, script): """Parse a script that contains one comment and return it.""" _, comments = testutil.ParseFunctionsAndComments(script) self.assertEquals(1, len(comments)) return comments[0] if __name__ == '__main__': googletest.main() closure_linter-2.3.13/closure_linter/javascripttokenizer.py0000750014730400116100000003657112247733554023732 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2007 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Regular expression based JavaScript parsing classes.""" __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)') import copy import re from closure_linter import javascripttokens from closure_linter.common import matcher from closure_linter.common import tokenizer # Shorthand Type = javascripttokens.JavaScriptTokenType Matcher = matcher.Matcher class JavaScriptModes(object): """Enumeration of the different matcher modes used for JavaScript.""" TEXT_MODE = 'text' SINGLE_QUOTE_STRING_MODE = 'single_quote_string' DOUBLE_QUOTE_STRING_MODE = 'double_quote_string' BLOCK_COMMENT_MODE = 'block_comment' DOC_COMMENT_MODE = 'doc_comment' DOC_COMMENT_LEX_SPACES_MODE = 'doc_comment_spaces' LINE_COMMENT_MODE = 'line_comment' PARAMETER_MODE = 'parameter' FUNCTION_MODE = 'function' class JavaScriptTokenizer(tokenizer.Tokenizer): """JavaScript tokenizer. Convert JavaScript code in to an array of tokens. """ # Useful patterns for JavaScript parsing. IDENTIFIER_CHAR = r'A-Za-z0-9_$.' # Number patterns based on: # http://www.mozilla.org/js/language/js20-2000-07/formal/lexer-grammar.html MANTISSA = r""" (\d+(?!\.)) | # Matches '10' (\d+\.(?!\d)) | # Matches '10.' (\d*\.\d+) # Matches '.5' or '10.5' """ DECIMAL_LITERAL = r'(%s)([eE][-+]?\d+)?' % MANTISSA HEX_LITERAL = r'0[xX][0-9a-fA-F]+' NUMBER = re.compile(r""" ((%s)|(%s)) """ % (HEX_LITERAL, DECIMAL_LITERAL), re.VERBOSE) # Strings come in three parts - first we match the start of the string, then # the contents, then the end. The contents consist of any character except a # backslash or end of string, or a backslash followed by any character, or a # backslash followed by end of line to support correct parsing of multi-line # strings. SINGLE_QUOTE = re.compile(r"'") SINGLE_QUOTE_TEXT = re.compile(r"([^'\\]|\\(.|$))+") DOUBLE_QUOTE = re.compile(r'"') DOUBLE_QUOTE_TEXT = re.compile(r'([^"\\]|\\(.|$))+') START_SINGLE_LINE_COMMENT = re.compile(r'//') END_OF_LINE_SINGLE_LINE_COMMENT = re.compile(r'//$') START_DOC_COMMENT = re.compile(r'/\*\*') START_BLOCK_COMMENT = re.compile(r'/\*') END_BLOCK_COMMENT = re.compile(r'\*/') BLOCK_COMMENT_TEXT = re.compile(r'([^*]|\*(?!/))+') # Comment text is anything that we are not going to parse into another special # token like (inline) flags or end comments. Complicated regex to match # most normal characters, and '*', '{', '}', and '@' when we are sure that # it is safe. Expression [^*{\s]@ must come first, or the other options will # match everything before @, and we won't match @'s that aren't part of flags # like in email addresses in the @author tag. DOC_COMMENT_TEXT = re.compile(r'([^*{}\s]@|[^*{}@]|\*(?!/))+') DOC_COMMENT_NO_SPACES_TEXT = re.compile(r'([^*{}\s]@|[^*{}@\s]|\*(?!/))+') # Match the prefix ' * ' that starts every line of jsdoc. Want to include # spaces after the '*', but nothing else that occurs after a '*', and don't # want to match the '*' in '*/'. 
DOC_PREFIX = re.compile(r'\s*\*(\s+|(?!/))') START_BLOCK = re.compile('{') END_BLOCK = re.compile('}') REGEX_CHARACTER_CLASS = r""" \[ # Opening bracket ([^\]\\]|\\.)* # Anything but a ] or \, # or a backslash followed by anything \] # Closing bracket """ # We ensure the regex is followed by one of the above tokens to avoid # incorrectly parsing something like x / y / z as x REGEX(/ y /) z POST_REGEX_LIST = [ ';', ',', r'\.', r'\)', r'\]', '$', r'\/\/', r'\/\*', ':', '}'] REGEX = re.compile(r""" / # opening slash (?!\*) # not the start of a comment (\\.|[^\[\/\\]|(%s))* # a backslash followed by anything, # or anything but a / or [ or \, # or a character class / # closing slash [gimsx]* # optional modifiers (?=\s*(%s)) """ % (REGEX_CHARACTER_CLASS, '|'.join(POST_REGEX_LIST)), re.VERBOSE) ANYTHING = re.compile(r'.*') PARAMETERS = re.compile(r'[^\)]+') CLOSING_PAREN_WITH_SPACE = re.compile(r'\)\s*') FUNCTION_DECLARATION = re.compile(r'\bfunction\b') OPENING_PAREN = re.compile(r'\(') CLOSING_PAREN = re.compile(r'\)') OPENING_BRACKET = re.compile(r'\[') CLOSING_BRACKET = re.compile(r'\]') # We omit these JS keywords from the list: # function - covered by FUNCTION_DECLARATION. # delete, in, instanceof, new, typeof - included as operators. # this - included in identifiers. # null, undefined - not included, should go in some "special constant" list. KEYWORD_LIST = ['break', 'case', 'catch', 'continue', 'default', 'do', 'else', 'finally', 'for', 'if', 'return', 'switch', 'throw', 'try', 'var', 'while', 'with'] # Match a keyword string followed by a non-identifier character in order to # not match something like doSomething as do + Something. KEYWORD = re.compile('(%s)((?=[^%s])|$)' % ( '|'.join(KEYWORD_LIST), IDENTIFIER_CHAR)) # List of regular expressions to match as operators. Some notes: for our # purposes, the comma behaves similarly enough to a normal operator that we # include it here. r'\bin\b' actually matches 'in' surrounded by boundary # characters - this may not match some very esoteric uses of the in operator. # Operators that are subsets of larger operators must come later in this list # for proper matching, e.g., '>>' must come AFTER '>>>'. OPERATOR_LIST = [',', r'\+\+', '===', '!==', '>>>=', '>>>', '==', '>=', '<=', '!=', '<<=', '>>=', '<<', '>>', '>', '<', r'\+=', r'\+', '--', '\^=', '-=', '-', '/=', '/', r'\*=', r'\*', '%=', '%', '&&', r'\|\|', '&=', '&', r'\|=', r'\|', '=', '!', ':', '\?', r'\^', r'\bdelete\b', r'\bin\b', r'\binstanceof\b', r'\bnew\b', r'\btypeof\b', r'\bvoid\b'] OPERATOR = re.compile('|'.join(OPERATOR_LIST)) WHITESPACE = re.compile(r'\s+') SEMICOLON = re.compile(r';') # Technically JavaScript identifiers can't contain '.', but we treat a set of # nested identifiers as a single identifier. NESTED_IDENTIFIER = r'[a-zA-Z_$][%s.]*' % IDENTIFIER_CHAR IDENTIFIER = re.compile(NESTED_IDENTIFIER) SIMPLE_LVALUE = re.compile(r""" (?P<identifier>%s) # a valid identifier (?=\s* # optional whitespace \= # look ahead to equal sign (?!=)) # not followed by equal """ % NESTED_IDENTIFIER, re.VERBOSE) # A doc flag is a @ sign followed by non-space characters that appears at the # beginning of the line, after whitespace, or after a '{'. The look-behind # check is necessary to not match someone@google.com as a flag. DOC_FLAG = re.compile(r'(^|(?<=\s))@(?P<name>[a-zA-Z]+)') # To properly parse parameter names, we need to tokenize whitespace into a # token.
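# For example (illustrative), in '@param {string} name The name.', lexing the
# whitespace between '{string}' and 'name' as its own token lets the checker
# recover the parameter name exactly; DOC_COMMENT_LEX_SPACES_MODE exists
# solely for this.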
DOC_FLAG_LEX_SPACES = re.compile(r'(^|(?<=\s))@(?P<name>%s)\b' % '|'.join(['param'])) DOC_INLINE_FLAG = re.compile(r'(?<={)@(?P<name>[a-zA-Z]+)') # Star followed by non-slash, i.e. a star that does not end a comment. # This is used for TYPE_GROUP below. SAFE_STAR = r'(\*(?!/))' COMMON_DOC_MATCHERS = [ # Find the end of the comment. Matcher(END_BLOCK_COMMENT, Type.END_DOC_COMMENT, JavaScriptModes.TEXT_MODE), # Tokenize documented flags like @private. Matcher(DOC_INLINE_FLAG, Type.DOC_INLINE_FLAG), Matcher(DOC_FLAG_LEX_SPACES, Type.DOC_FLAG, JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE), # Encountering a doc flag should leave lex spaces mode. Matcher(DOC_FLAG, Type.DOC_FLAG, JavaScriptModes.DOC_COMMENT_MODE), # Tokenize braces so we can find types. Matcher(START_BLOCK, Type.DOC_START_BRACE), Matcher(END_BLOCK, Type.DOC_END_BRACE), Matcher(DOC_PREFIX, Type.DOC_PREFIX, None, True)] # The token matcher groups work as follows: each group is a list of Matcher # objects. The matchers will be tried in this order, and the first to match # will be returned. Hence the order is important because the matchers that # come first overrule the matchers that come later. JAVASCRIPT_MATCHERS = { # Matchers for basic text mode. JavaScriptModes.TEXT_MODE: [ # Check a big group - strings, starting comments, and regexes - all # of which could be intertwined. 'string with /regex/', # /regex with 'string'/, /* comment with /regex/ and string */ (and so # on) Matcher(START_DOC_COMMENT, Type.START_DOC_COMMENT, JavaScriptModes.DOC_COMMENT_MODE), Matcher(START_BLOCK_COMMENT, Type.START_BLOCK_COMMENT, JavaScriptModes.BLOCK_COMMENT_MODE), Matcher(END_OF_LINE_SINGLE_LINE_COMMENT, Type.START_SINGLE_LINE_COMMENT), Matcher(START_SINGLE_LINE_COMMENT, Type.START_SINGLE_LINE_COMMENT, JavaScriptModes.LINE_COMMENT_MODE), Matcher(SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_START, JavaScriptModes.SINGLE_QUOTE_STRING_MODE), Matcher(DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_START, JavaScriptModes.DOUBLE_QUOTE_STRING_MODE), Matcher(REGEX, Type.REGEX), # Next we check for start blocks appearing outside any of the items # above. Matcher(START_BLOCK, Type.START_BLOCK), Matcher(END_BLOCK, Type.END_BLOCK), # Then we search for function declarations. Matcher(FUNCTION_DECLARATION, Type.FUNCTION_DECLARATION, JavaScriptModes.FUNCTION_MODE), # Next, we convert non-function related parens to tokens. Matcher(OPENING_PAREN, Type.START_PAREN), Matcher(CLOSING_PAREN, Type.END_PAREN), # Next, we convert brackets to tokens. Matcher(OPENING_BRACKET, Type.START_BRACKET), Matcher(CLOSING_BRACKET, Type.END_BRACKET), # Find numbers. This has to happen before operators because scientific # notation numbers can have + and - in them. Matcher(NUMBER, Type.NUMBER), # Find operators and simple assignments. Matcher(SIMPLE_LVALUE, Type.SIMPLE_LVALUE), Matcher(OPERATOR, Type.OPERATOR), # Find keywords and whitespace. Matcher(KEYWORD, Type.KEYWORD), Matcher(WHITESPACE, Type.WHITESPACE), # Find identifiers. Matcher(IDENTIFIER, Type.IDENTIFIER), # Finally, we convert semicolons to tokens. Matcher(SEMICOLON, Type.SEMICOLON)], # Matchers for single quote strings. JavaScriptModes.SINGLE_QUOTE_STRING_MODE: [ Matcher(SINGLE_QUOTE_TEXT, Type.STRING_TEXT), Matcher(SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_END, JavaScriptModes.TEXT_MODE)], # Matchers for double quote strings. JavaScriptModes.DOUBLE_QUOTE_STRING_MODE: [ Matcher(DOUBLE_QUOTE_TEXT, Type.STRING_TEXT), Matcher(DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_END, JavaScriptModes.TEXT_MODE)], # Matchers for block comments.
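# (A sketch of how each mode's matcher list below is consulted, mirroring
# the driver loop in common/tokenizer.py; illustrative only:
#
#   for m in matchers[mode]:
#     match = m.regex.match(line, index)
#     if match:
#       emit(match.group(), m.type)
#       mode = m.result_mode or mode
#       break
# )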
JavaScriptModes.BLOCK_COMMENT_MODE: [ # First we check for exiting a block comment. Matcher(END_BLOCK_COMMENT, Type.END_BLOCK_COMMENT, JavaScriptModes.TEXT_MODE), # Match non-comment-ending text. Matcher(BLOCK_COMMENT_TEXT, Type.COMMENT)], # Matchers for doc comments. JavaScriptModes.DOC_COMMENT_MODE: COMMON_DOC_MATCHERS + [ Matcher(DOC_COMMENT_TEXT, Type.COMMENT)], JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: COMMON_DOC_MATCHERS + [ Matcher(WHITESPACE, Type.COMMENT), Matcher(DOC_COMMENT_NO_SPACES_TEXT, Type.COMMENT)], # Matchers for single line comments. JavaScriptModes.LINE_COMMENT_MODE: [ # We greedily match until the end of the line in line comment mode. Matcher(ANYTHING, Type.COMMENT, JavaScriptModes.TEXT_MODE)], # Matchers for code after the function keyword. JavaScriptModes.FUNCTION_MODE: [ # Must match open paren before anything else and move into parameter # mode, otherwise everything inside the parameter list is parsed # incorrectly. Matcher(OPENING_PAREN, Type.START_PARAMETERS, JavaScriptModes.PARAMETER_MODE), Matcher(WHITESPACE, Type.WHITESPACE), Matcher(IDENTIFIER, Type.FUNCTION_NAME)], # Matchers for function parameters. JavaScriptModes.PARAMETER_MODE: [ # When in function parameter mode, a closing paren is treated specially. # Everything else is treated as lines of parameters. Matcher(CLOSING_PAREN_WITH_SPACE, Type.END_PARAMETERS, JavaScriptModes.TEXT_MODE), Matcher(PARAMETERS, Type.PARAMETERS, JavaScriptModes.PARAMETER_MODE)]} # When text is not matched, it is given this default type based on mode. # If unspecified in this map, the default type is Type.NORMAL. JAVASCRIPT_DEFAULT_TYPES = { JavaScriptModes.DOC_COMMENT_MODE: Type.COMMENT, JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: Type.COMMENT } def __init__(self, parse_js_doc = True): """Create a tokenizer object. Args: parse_js_doc: Whether to do detailed parsing of javascript doc comments, or simply treat them as normal comments. Defaults to parsing JsDoc. """ matchers = self.JAVASCRIPT_MATCHERS if not parse_js_doc: # Make a copy so the original doesn't get modified. matchers = copy.deepcopy(matchers) matchers[JavaScriptModes.DOC_COMMENT_MODE] = matchers[ JavaScriptModes.BLOCK_COMMENT_MODE] tokenizer.Tokenizer.__init__(self, JavaScriptModes.TEXT_MODE, matchers, self.JAVASCRIPT_DEFAULT_TYPES) def _CreateToken(self, string, token_type, line, line_number, values=None): """Creates a new JavaScriptToken object. Args: string: The string of input the token contains. token_type: The type of token. line: The text of the line this token is in. line_number: The line number of the token. values: A dict of named values within the token. For instance, a function declaration may have a value called 'name' which captures the name of the function. """ return javascripttokens.JavaScriptToken(string, token_type, line, line_number, values, line_number) closure_linter-2.3.13/closure_linter/runner.py0000640014730400116100000001367112247733554021134 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2012 The Closure Linter Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. """Main lint function. Tokenizes file, runs passes, and feeds to checker.""" # Allow non-Google copyright # pylint: disable=g-bad-file-header __author__ = 'nnaze@google.com (Nathan Naze)' import traceback import gflags as flags from closure_linter import checker from closure_linter import ecmametadatapass from closure_linter import errors from closure_linter import javascriptstatetracker from closure_linter import javascripttokenizer from closure_linter.common import error from closure_linter.common import htmlutil from closure_linter.common import tokens flags.DEFINE_list('limited_doc_files', ['dummy.js', 'externs.js'], 'List of files with relaxed documentation checks. Will not ' 'report errors for missing documentation, some missing ' 'descriptions, or methods whose @return tags don\'t have a ' 'matching return statement.') flags.DEFINE_boolean('error_trace', False, 'Whether to show error exceptions.') def _GetLastNonWhiteSpaceToken(start_token): """Get the last non-whitespace token in a token stream.""" ret_token = None whitespace_tokens = frozenset([ tokens.TokenType.WHITESPACE, tokens.TokenType.BLANK_LINE]) for t in start_token: if t.type not in whitespace_tokens: ret_token = t return ret_token def _IsHtml(filename): return filename.endswith('.html') or filename.endswith('.htm') def _Tokenize(fileobj): """Tokenize a file. Args: fileobj: file-like object (or iterable lines) with the source. Returns: The first token in the token stream and the ending mode of the tokenizer. """ tokenizer = javascripttokenizer.JavaScriptTokenizer() start_token = tokenizer.TokenizeFile(fileobj) return start_token, tokenizer.mode def _IsLimitedDocCheck(filename, limited_doc_files): """Whether this is a limited-doc file. Args: filename: The filename. limited_doc_files: Iterable of strings. Suffixes of filenames that should get the limited doc check. Returns: Whether the file should get the limited doc check. """ for limited_doc_filename in limited_doc_files: if filename.endswith(limited_doc_filename): return True return False def Run(filename, error_handler, source=None): """Tokenize, run passes, and check the given file. Args: filename: The path of the file to check. error_handler: The error handler to report errors to. source: A file-like object with the file source. If omitted, the file will be read from the filename path. """ if not source: try: source = open(filename) except IOError: error_handler.HandleFile(filename, None) error_handler.HandleError( error.Error(errors.FILE_NOT_FOUND, 'File not found')) error_handler.FinishFile() return if _IsHtml(filename): source_file = htmlutil.GetScriptLines(source) else: source_file = source token, tokenizer_mode = _Tokenize(source_file) error_handler.HandleFile(filename, token) # If we did not end in the basic mode, this is a failed parse. if tokenizer_mode is not javascripttokenizer.JavaScriptModes.TEXT_MODE: error_handler.HandleError( error.Error(errors.FILE_IN_BLOCK, 'File ended in mode "%s".'
% tokenizer_mode, _GetLastNonWhiteSpaceToken(token))) # Run the ECMA pass error_token = None ecma_pass = ecmametadatapass.EcmaMetaDataPass() error_token = RunMetaDataPass(token, ecma_pass, error_handler, filename) is_limited_doc_check = ( _IsLimitedDocCheck(filename, flags.FLAGS.limited_doc_files)) _RunChecker(token, error_handler, is_limited_doc_check, is_html=_IsHtml(filename), stop_token=error_token) error_handler.FinishFile() def RunMetaDataPass(start_token, metadata_pass, error_handler, filename=''): """Run a metadata pass over a token stream. Args: start_token: The first token in a token stream. metadata_pass: Metadata pass to run. error_handler: The error handler to report errors to. filename: Filename of the source. Returns: The token where the error occurred (if any). """ try: metadata_pass.Process(start_token) except ecmametadatapass.ParseError, parse_err: if flags.FLAGS.error_trace: traceback.print_exc() error_token = parse_err.token error_msg = str(parse_err) error_handler.HandleError( error.Error(errors.FILE_DOES_NOT_PARSE, ('Error parsing file at token "%s". Unable to ' 'check the rest of file.' '\nError "%s"' % (error_token, error_msg)), error_token)) return error_token except Exception: # pylint: disable=broad-except traceback.print_exc() error_handler.HandleError( error.Error( errors.FILE_DOES_NOT_PARSE, 'Internal error in %s' % filename)) def _RunChecker(start_token, error_handler, limited_doc_checks, is_html, stop_token=None): state_tracker = javascriptstatetracker.JavaScriptStateTracker() style_checker = checker.JavaScriptStyleChecker( state_tracker=state_tracker, error_handler=error_handler) style_checker.Check(start_token, is_html=is_html, limited_doc_checks=limited_doc_checks, stop_token=stop_token) closure_linter-2.3.13/closure_linter/checkerbase.py0000750014730400116100000001516312247733554022062 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2008 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Base classes for writing checkers that operate on tokens.""" # Allow non-Google copyright # pylint: disable=g-bad-file-header __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)', 'jacobr@google.com (Jacob Richman)') from closure_linter import errorrules from closure_linter.common import error class LintRulesBase(object): """Base class for all classes defining the lint rules for a language.""" def __init__(self): self.__checker = None def Initialize(self, checker, limited_doc_checks, is_html): """Initializes to prepare to check a file. Args: checker: Class to report errors to. limited_doc_checks: Whether doc checking is relaxed for this file. is_html: Whether the file is an HTML file with extracted contents. 
""" self.__checker = checker self._limited_doc_checks = limited_doc_checks self._is_html = is_html def _HandleError(self, code, message, token, position=None, fix_data=None): """Call the HandleError function for the checker we are associated with.""" if errorrules.ShouldReportError(code): self.__checker.HandleError(code, message, token, position, fix_data) def _SetLimitedDocChecks(self, limited_doc_checks): """Sets whether doc checking is relaxed for this file. Args: limited_doc_checks: Whether doc checking is relaxed for this file. """ self._limited_doc_checks = limited_doc_checks def CheckToken(self, token, parser_state): """Checks a token, given the current parser_state, for warnings and errors. Args: token: The current token under consideration. parser_state: Object that indicates the parser state in the page. Raises: TypeError: If not overridden. """ raise TypeError('Abstract method CheckToken not implemented') def Finalize(self, parser_state): """Perform all checks that need to occur after all lines are processed. Args: parser_state: State of the parser after parsing all tokens Raises: TypeError: If not overridden. """ raise TypeError('Abstract method Finalize not implemented') class CheckerBase(object): """This class handles checking a LintRules object against a file.""" def __init__(self, error_handler, lint_rules, state_tracker): """Initialize a checker object. Args: error_handler: Object that handles errors. lint_rules: LintRules object defining lint errors given a token and state_tracker object. state_tracker: Object that tracks the current state in the token stream. """ self._error_handler = error_handler self._lint_rules = lint_rules self._state_tracker = state_tracker self._has_errors = False def HandleError(self, code, message, token, position=None, fix_data=None): """Prints out the given error message including a line number. Args: code: The error code. message: The error to print. token: The token where the error occurred, or None if it was a file-wide issue. position: The position of the error, defaults to None. fix_data: Metadata used for fixing the error. """ self._has_errors = True self._error_handler.HandleError( error.Error(code, message, token, position, fix_data)) def HasErrors(self): """Returns true if the style checker has found any errors. Returns: True if the style checker has found any errors. """ return self._has_errors def Check(self, start_token, limited_doc_checks=False, is_html=False, stop_token=None): """Checks a token stream, reporting errors to the error reporter. Args: start_token: First token in token stream. limited_doc_checks: Whether doc checking is relaxed for this file. is_html: Whether the file being checked is an HTML file with extracted contents. stop_token: If given, check should stop at this token. """ self._lint_rules.Initialize(self, limited_doc_checks, is_html) self._ExecutePass(start_token, self._LintPass, stop_token=stop_token) self._lint_rules.Finalize(self._state_tracker) def _LintPass(self, token): """Checks an individual token for lint warnings/errors. Used to encapsulate the logic needed to check an individual token so that it can be passed to _ExecutePass. Args: token: The token to check. """ self._lint_rules.CheckToken(token, self._state_tracker) def _ExecutePass(self, token, pass_function, stop_token=None): """Calls the given function for every token in the given token stream. 
As each token is passed to the given function, state is kept up to date and, depending on the error_trace flag, errors are either caught and reported, or allowed to bubble up so developers can see the full stack trace. If a parse error is specified, the pass will proceed as normal until the token causing the parse error is reached. Args: token: The first token in the token stream. pass_function: The function to call for each token in the token stream. stop_token: The last token to check (if given). Raises: Exception: If any error occurred while calling the given function. """ self._state_tracker.Reset() while token: # When we are looking at a token and decided to delete the whole line, we # will delete all of them in the "HandleToken()" below. So the current # token and subsequent ones may already be deleted here. The way we # delete a token does not wipe out the previous and next pointers of the # deleted token. So we need to check the token itself to make sure it is # not deleted. if not token.is_deleted: # End the pass at the stop token if stop_token and token is stop_token: return self._state_tracker.HandleToken( token, self._state_tracker.GetLastNonSpaceToken()) pass_function(token) self._state_tracker.HandleAfterToken(token) token = token.next closure_linter-2.3.13/closure_linter/tokenutil_test.py0000640014730400116100000001677612247733554022711 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2012 The Closure Linter Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for the scopeutil module.""" # Allow non-Google copyright # pylint: disable=g-bad-file-header __author__ = ('nnaze@google.com (Nathan Naze)') import unittest as googletest from closure_linter import ecmametadatapass from closure_linter import javascripttokens from closure_linter import testutil from closure_linter import tokenutil class FakeToken(object): pass class TokenUtilTest(googletest.TestCase): def testGetTokenRange(self): a = FakeToken() b = FakeToken() c = FakeToken() d = FakeToken() e = FakeToken() a.next = b b.next = c c.next = d self.assertEquals([a, b, c, d], tokenutil.GetTokenRange(a, d)) # This is an error as e does not come after a in the token chain. self.assertRaises(Exception, lambda: tokenutil.GetTokenRange(a, e)) def testTokensToString(self): a = FakeToken() b = FakeToken() c = FakeToken() d = FakeToken() e = FakeToken() a.string = 'aaa' b.string = 'bbb' c.string = 'ccc' d.string = 'ddd' e.string = 'eee' a.line_number = 5 b.line_number = 6 c.line_number = 6 d.line_number = 10 e.line_number = 11 self.assertEquals( 'aaa\nbbbccc\n\n\n\nddd\neee', tokenutil.TokensToString([a, b, c, d, e])) self.assertEquals( 'ddd\neee\naaa\nbbbccc', tokenutil.TokensToString([d, e, a, b, c]), 'Neighboring tokens not in line_number order should have a newline ' 'between them.') def testGetPreviousCodeToken(self): tokens = testutil.TokenizeSource(""" start1. 
// comment /* another comment */ end1 """) def _GetTokenStartingWith(token_starts_with): for t in tokens: if t.string.startswith(token_starts_with): return t self.assertEquals( None, tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('start1'))) self.assertEquals( 'start1.', tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('end1')).string) def testGetNextCodeToken(self): tokens = testutil.TokenizeSource(""" start1. // comment /* another comment */ end1 """) def _GetTokenStartingWith(token_starts_with): for t in tokens: if t.string.startswith(token_starts_with): return t self.assertEquals( 'end1', tokenutil.GetNextCodeToken(_GetTokenStartingWith('start1')).string) self.assertEquals( None, tokenutil.GetNextCodeToken(_GetTokenStartingWith('end1'))) def testGetIdentifierStart(self): tokens = testutil.TokenizeSource(""" start1 . // comment prototype. /* another comment */ end1 ['edge'][case].prototype. end2 = function() {} """) def _GetTokenStartingWith(token_starts_with): for t in tokens: if t.string.startswith(token_starts_with): return t self.assertEquals( 'start1', tokenutil.GetIdentifierStart(_GetTokenStartingWith('end1')).string) self.assertEquals( 'start1', tokenutil.GetIdentifierStart(_GetTokenStartingWith('start1')).string) self.assertEquals( None, tokenutil.GetIdentifierStart(_GetTokenStartingWith('end2'))) def testInsertTokenBefore(self): self.AssertInsertTokenAfterBefore(False) def testInsertTokenAfter(self): self.AssertInsertTokenAfterBefore(True) def AssertInsertTokenAfterBefore(self, after): new_token = javascripttokens.JavaScriptToken( 'a', javascripttokens.JavaScriptTokenType.IDENTIFIER, 1, 1) existing_token1 = javascripttokens.JavaScriptToken( 'var', javascripttokens.JavaScriptTokenType.KEYWORD, 1, 1) existing_token1.start_index = 0 existing_token1.metadata = ecmametadatapass.EcmaMetaData() existing_token2 = javascripttokens.JavaScriptToken( ' ', javascripttokens.JavaScriptTokenType.WHITESPACE, 1, 1) existing_token2.start_index = 3 existing_token2.metadata = ecmametadatapass.EcmaMetaData() existing_token2.metadata.last_code = existing_token1 existing_token1.next = existing_token2 existing_token2.previous = existing_token1 if after: tokenutil.InsertTokenAfter(new_token, existing_token1) else: tokenutil.InsertTokenBefore(new_token, existing_token2) self.assertEquals(existing_token1, new_token.previous) self.assertEquals(existing_token2, new_token.next) self.assertEquals(new_token, existing_token1.next) self.assertEquals(new_token, existing_token2.previous) self.assertEquals(existing_token1, new_token.metadata.last_code) self.assertEquals(new_token, existing_token2.metadata.last_code) self.assertEquals(0, existing_token1.start_index) self.assertEquals(3, new_token.start_index) self.assertEquals(4, existing_token2.start_index) def testGetIdentifierForToken(self): tokens = testutil.TokenizeSource(""" start1.abc.def.prototype. onContinuedLine (start2.abc.def .hij.klm .nop) start3.abc.def .hij = function() {}; // An absurd multi-liner. start4.abc.def. hij. klm = function() {}; start5 . aaa . bbb . ccc shouldntBePartOfThePreviousSymbol start6.abc.def ghi.shouldntBePartOfThePreviousSymbol var start7 = 42; function start8() { } start9.abc. // why is there a comment here? def /* another comment */ shouldntBePart start10.abc // why is there a comment here? .def /* another comment */ shouldntBePart start11.abc. 
middle1.shouldNotBeIdentifier """) def _GetTokenStartingWith(token_starts_with): for t in tokens: if t.string.startswith(token_starts_with): return t self.assertEquals( 'start1.abc.def.prototype.onContinuedLine', tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start1'))) self.assertEquals( 'start2.abc.def.hij.klm.nop', tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start2'))) self.assertEquals( 'start3.abc.def.hij', tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start3'))) self.assertEquals( 'start4.abc.def.hij.klm', tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start4'))) self.assertEquals( 'start5.aaa.bbb.ccc', tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start5'))) self.assertEquals( 'start6.abc.def', tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start6'))) self.assertEquals( 'start7', tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start7'))) self.assertEquals( 'start8', tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start8'))) self.assertEquals( 'start9.abc.def', tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start9'))) self.assertEquals( 'start10.abc.def', tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start10'))) self.assertIsNone( tokenutil.GetIdentifierForToken(_GetTokenStartingWith('middle1'))) if __name__ == '__main__': googletest.main() closure_linter-2.3.13/closure_linter/common/0000750014730400116100000000000012247733574020532 5ustar ajpeng00000000000000closure_linter-2.3.13/closure_linter/common/simplefileflags.py0000750014730400116100000001176312247733553024262 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2008 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Determines the list of files to be checked from command line arguments.""" __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)') import glob import os import re import gflags as flags FLAGS = flags.FLAGS flags.DEFINE_multistring( 'recurse', None, 'Recurse in to the subdirectories of the given path', short_name='r') flags.DEFINE_list( 'exclude_directories', ('_demos'), 'Exclude the specified directories (only applicable along with -r or ' '--presubmit)', short_name='e') flags.DEFINE_list( 'exclude_files', ('deps.js'), 'Exclude the specified files', short_name='x') def MatchesSuffixes(filename, suffixes): """Returns whether the given filename matches one of the given suffixes. Args: filename: Filename to check. suffixes: Sequence of suffixes to check. Returns: Whether the given filename matches one of the given suffixes. """ suffix = filename[filename.rfind('.'):] return suffix in suffixes def _GetUserSpecifiedFiles(argv, suffixes): """Returns files to be linted, specified directly on the command line. Can handle the '*' wildcard in filenames, but no other wildcards. Args: argv: Sequence of command line arguments. The second and following arguments are assumed to be files that should be linted. suffixes: Expected suffixes for the file type being checked. 
Returns: A sequence of files to be linted. """ files = argv[1:] or [] all_files = [] lint_files = [] # Perform any necessary globs. for f in files: if f.find('*') != -1: for result in glob.glob(f): all_files.append(result) else: all_files.append(f) for f in all_files: if MatchesSuffixes(f, suffixes): lint_files.append(f) return lint_files def _GetRecursiveFiles(suffixes): """Returns files to be checked specified by the --recurse flag. Args: suffixes: Expected suffixes for the file type being checked. Returns: A list of files to be checked. """ lint_files = [] # Perform any requested recursion. if FLAGS.recurse: for start in FLAGS.recurse: for root, subdirs, files in os.walk(start): for f in files: if MatchesSuffixes(f, suffixes): lint_files.append(os.path.join(root, f)) return lint_files def GetAllSpecifiedFiles(argv, suffixes): """Returns all files specified by the user on the commandline. Args: argv: Sequence of command line arguments. The second and following arguments are assumed to be files that should be linted. suffixes: Expected suffixes for the file type. Returns: A list of all files specified directly or indirectly (via flags) on the command line by the user. """ files = _GetUserSpecifiedFiles(argv, suffixes) if FLAGS.recurse: files += _GetRecursiveFiles(suffixes) return FilterFiles(files) def FilterFiles(files): """Filters the list of files to be linted by removing any excluded files. Filters out files excluded using --exclude_files and --exclude_directories. Args: files: Sequence of files that needs filtering. Returns: Filtered list of files to be linted. """ num_files = len(files) ignore_dirs_regexs = [] for ignore in FLAGS.exclude_directories: ignore_dirs_regexs.append(re.compile(r'(^|[\\/])%s[\\/]' % ignore)) result_files = [] for f in files: add_file = True for exclude in FLAGS.exclude_files: if f.endswith('/' + exclude) or f == exclude: add_file = False break for ignore in ignore_dirs_regexs: if ignore.search(f): # Break out of ignore loop so we don't add to # filtered files. add_file = False break if add_file: # Convert everything to absolute paths so we can easily remove duplicates # using a set. result_files.append(os.path.abspath(f)) skipped = num_files - len(result_files) if skipped: print 'Skipping %d file(s).' % skipped return set(result_files) def GetFileList(argv, file_type, suffixes): """Parse the flags and return the list of files to check. Args: argv: Sequence of command line arguments. file_type: The type of the files being checked (currently unused here). suffixes: Sequence of acceptable suffixes for the file type. Returns: The list of files to check. """ return sorted(GetAllSpecifiedFiles(argv, suffixes)) def IsEmptyArgumentList(argv): return not (len(argv[1:]) or FLAGS.recurse) closure_linter-2.3.13/closure_linter/common/tokenizer.py0000750014730400116100000001330312247733553023116 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2007 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
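# Added commentary, not part of the original source: a minimal sketch of how the Tokenizer defined below is typically driven, assuming the sibling matcher and tokens modules from this package. # # import re # from closure_linter.common import matcher # from closure_linter.common import tokenizer # # number = matcher.Matcher(re.compile(r'[0-9]+'), 'number') # lexer = tokenizer.Tokenizer('code', {'code': [number]}, {}) # first_token = lexer.TokenizeFile(['var x = 42;']) # # Unmatched text falls through to the mode's default type (Type.NORMAL here), and the # resulting tokens form a doubly linked list, so they can be iterated directly: # print [(t.type, t.string) for t in first_token]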
"""Regular expression based lexer.""" __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)') from closure_linter.common import tokens # Shorthand Type = tokens.TokenType class Tokenizer(object): """General purpose tokenizer. Attributes: mode: The latest mode of the tokenizer. This allows patterns to distinguish if they are mid-comment, mid-parameter list, etc. matchers: Dictionary of modes to sequences of matchers that define the patterns to check at any given time. default_types: Dictionary of modes to types, defining what type to give non-matched text when in the given mode. Defaults to Type.NORMAL. """ def __init__(self, starting_mode, matchers, default_types): """Initialize the tokenizer. Args: starting_mode: Mode to start in. matchers: Dictionary of modes to sequences of matchers that defines the patterns to check at any given time. default_types: Dictionary of modes to types, defining what type to give non-matched text when in the given mode. Defaults to Type.NORMAL. """ self.__starting_mode = starting_mode self.matchers = matchers self.default_types = default_types def TokenizeFile(self, file): """Tokenizes the given file. Args: file: An iterable that yields one line of the file at a time. Returns: The first token in the file """ # The current mode. self.mode = self.__starting_mode # The first token in the stream. self.__first_token = None # The last token added to the token stream. self.__last_token = None # The current line number. self.__line_number = 0 for line in file: self.__line_number += 1 self.__TokenizeLine(line) return self.__first_token def _CreateToken(self, string, token_type, line, line_number, values=None): """Creates a new Token object (or subclass). Args: string: The string of input the token represents. token_type: The type of token. line: The text of the line this token is in. line_number: The line number of the token. values: A dict of named values within the token. For instance, a function declaration may have a value called 'name' which captures the name of the function. Returns: The newly created Token object. """ return tokens.Token(string, token_type, line, line_number, values, line_number) def __TokenizeLine(self, line): """Tokenizes the given line. Args: line: The contents of the line. """ string = line.rstrip('\n\r\f') line_number = self.__line_number self.__start_index = 0 if not string: self.__AddToken(self._CreateToken('', Type.BLANK_LINE, line, line_number)) return normal_token = '' index = 0 while index < len(string): for matcher in self.matchers[self.mode]: if matcher.line_start and index > 0: continue match = matcher.regex.match(string, index) if match: if normal_token: self.__AddToken( self.__CreateNormalToken(self.mode, normal_token, line, line_number)) normal_token = '' # Add the match. self.__AddToken(self._CreateToken(match.group(), matcher.type, line, line_number, match.groupdict())) # Change the mode to the correct one for after this match. self.mode = matcher.result_mode or self.mode # Shorten the string to be matched. index = match.end() break else: # If the for loop finishes naturally (i.e. no matches) we just add the # first character to the string of consecutive non match characters. # These will constitute a NORMAL token. if string: normal_token += string[index:index + 1] index += 1 if normal_token: self.__AddToken( self.__CreateNormalToken(self.mode, normal_token, line, line_number)) def __CreateNormalToken(self, mode, string, line, line_number): """Creates a normal token. Args: mode: The current mode. 
string: The string to tokenize. line: The line of text. line_number: The line number within the file. Returns: A Token object, of the default type for the current mode. """ type = Type.NORMAL if mode in self.default_types: type = self.default_types[mode] return self._CreateToken(string, type, line, line_number) def __AddToken(self, token): """Add the given token to the token stream. Args: token: The token to add. """ # Store the first token, or point the previous token to this one. if not self.__first_token: self.__first_token = token else: self.__last_token.next = token # Establish the doubly linked list token.previous = self.__last_token self.__last_token = token # Compute the character indices token.start_index = self.__start_index self.__start_index += token.length closure_linter-2.3.13/closure_linter/common/error.py0000750014730400116100000000404512247733553022240 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2007 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Error object commonly used in linters.""" __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)') class Error(object): """Object representing a style error.""" def __init__(self, code, message, token=None, position=None, fix_data=None): """Initialize the error object. Args: code: The numeric error code. message: The error message string. token: The tokens.Token where the error occurred. position: The position of the error within the token. fix_data: Data to be used in autofixing. Codes with fix_data are: GOOG_REQUIRES_NOT_ALPHABETIZED - List of string value tokens that are class names in goog.requires calls. """ self.code = code self.message = message self.token = token self.position = position if token: self.start_index = token.start_index else: self.start_index = 0 self.fix_data = fix_data if self.position: self.start_index += self.position.start def Compare(a, b): """Compare two error objects, by source code order. Args: a: First error object. b: Second error object. Returns: A Negative/0/Positive number when a is before/the same as/after b. """ line_diff = a.token.line_number - b.token.line_number if line_diff: return line_diff return a.start_index - b.start_index Compare = staticmethod(Compare) closure_linter-2.3.13/closure_linter/common/erroraccumulator.py0000750014730400116100000000243212247733553024476 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2008 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """Linter error handler class that accumulates an array of errors.""" __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)') from closure_linter.common import errorhandler class ErrorAccumulator(errorhandler.ErrorHandler): """Error handler object that accumulates errors in a list.""" def __init__(self): self._errors = [] def HandleError(self, error): """Append the error to the list. Args: error: The error object """ self._errors.append(error) def GetErrors(self): """Returns the accumulated errors. Returns: A sequence of errors. """ return self._errors closure_linter-2.3.13/closure_linter/common/htmlutil.py0000750014730400116100000001117412247733553022752 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2007 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for dealing with HTML.""" __author__ = ('robbyw@google.com (Robert Walker)') import cStringIO import formatter import htmllib import HTMLParser import re class ScriptExtractor(htmllib.HTMLParser): """Subclass of HTMLParser that extracts script contents from an HTML file. Also inserts appropriate blank lines so that line numbers in the extracted code match the line numbers in the original HTML. """ def __init__(self): """Initialize a ScriptExtractor.""" htmllib.HTMLParser.__init__(self, formatter.NullFormatter()) self._in_script = False self._text = '' def start_script(self, attrs): """Internal handler for the start of a script tag. Args: attrs: The attributes of the script tag, as a list of tuples. """ for attribute in attrs: if attribute[0].lower() == 'src': # Skip script tags with a src specified. return self._in_script = True def end_script(self): """Internal handler for the end of a script tag.""" self._in_script = False def handle_data(self, data): """Internal handler for character data. Args: data: The character data from the HTML file. """ if self._in_script: # If the last line contains whitespace only, i.e. is just there to # properly align a tag, strip the whitespace. if data.rstrip(' \t') != data.rstrip(' \t\n\r\f'): data = data.rstrip(' \t') self._text += data else: self._AppendNewlines(data) def handle_comment(self, data): """Internal handler for HTML comments. Args: data: The text of the comment. """ self._AppendNewlines(data) def _AppendNewlines(self, data): """Count the number of newlines in the given string and append them. This ensures line numbers are correct for reported errors. Args: data: The data to count newlines in. """ # We append 'x' to both sides of the string to ensure that splitlines # gives us an accurate count. for i in xrange(len(('x' + data + 'x').splitlines()) - 1): self._text += '\n' def GetScriptLines(self): """Return the extracted script lines. Returns: The extracted script lines as a list of strings. """ return self._text.splitlines() def GetScriptLines(f): """Extract script tag contents from the given HTML file. 
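Non-script content is replaced by blank lines (see ScriptExtractor above), so the returned script lines keep their original line numbers; markup-only leading lines come back as empty strings.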
Args: f: The HTML file. Returns: Lines in the HTML file that are from script tags. """ extractor = ScriptExtractor() # The HTML parser chokes on text like Array.<!string>, so we patch # that bug by replacing the < with &lt; - escaping all text inside script # tags would be better but it's a bit of a catch 22. contents = f.read() contents = re.sub(r'<([^\s\w/])', lambda x: '&lt;%s' % x.group(1), contents) extractor.feed(contents) extractor.close() return extractor.GetScriptLines() def StripTags(str): """Returns the string with HTML tags stripped. Args: str: An html string. Returns: The html string with all tags stripped. If there was a parse error, returns the text successfully parsed so far. """ # Brute force approach to stripping as much HTML as possible. If there is a # parsing error, don't strip text before parse error position, and continue # trying from there. final_text = '' finished = False while not finished: try: strip = _HtmlStripper() strip.feed(str) strip.close() str = strip.get_output() final_text += str finished = True except HTMLParser.HTMLParseError, e: final_text += str[:e.offset] str = str[e.offset + 1:] return final_text class _HtmlStripper(HTMLParser.HTMLParser): """Simple class to strip tags from HTML. Does so by doing nothing when encountering tags, and appending character data to a buffer when that is encountered. """ def __init__(self): self.reset() self.__output = cStringIO.StringIO() def handle_data(self, d): self.__output.write(d) def get_output(self): return self.__output.getvalue() closure_linter-2.3.13/closure_linter/common/tokens.py0000750014730400116100000001120512247733553022406 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2008 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Classes to represent tokens and positions within them.""" __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)') class TokenType(object): """Token types common to all languages.""" NORMAL = 'normal' WHITESPACE = 'whitespace' BLANK_LINE = 'blank line' class Token(object): """Token class for intelligent text splitting. The token class represents a string of characters and an identifying type. Attributes: type: The type of token. string: The characters the token comprises. length: The length of the token. line: The text of the line the token is found in. line_number: The number of the line the token is found in. values: Dictionary of values returned from the tokens regex match. previous: The token before this one. next: The token after this one. start_index: The character index in the line where this token starts. attached_object: Object containing more information about this token. metadata: Object containing metadata about this token. Must be added by a separate metadata pass. """ def __init__(self, string, token_type, line, line_number, values=None, orig_line_number=None): """Creates a new Token object. Args: string: The string of input the token contains. token_type: The type of token.
line: The text of the line this token is in. line_number: The line number of the token. values: A dict of named values within the token. For instance, a function declaration may have a value called 'name' which captures the name of the function. orig_line_number: The line number of the original file this token comes from. This should be only set during the tokenization process. For newly created error fix tokens after that, it should be None. """ self.type = token_type self.string = string self.length = len(string) self.line = line self.line_number = line_number self.orig_line_number = orig_line_number self.values = values self.is_deleted = False # These parts can only be computed when the file is fully tokenized self.previous = None self.next = None self.start_index = None # This part is set in statetracker.py # TODO(robbyw): Wrap this in to metadata self.attached_object = None # This part is set in *metadatapass.py self.metadata = None def IsFirstInLine(self): """Tests if this token is the first token in its line. Returns: Whether the token is the first token in its line. """ return not self.previous or self.previous.line_number != self.line_number def IsLastInLine(self): """Tests if this token is the last token in its line. Returns: Whether the token is the last token in its line. """ return not self.next or self.next.line_number != self.line_number def IsType(self, token_type): """Tests if this token is of the given type. Args: token_type: The type to test for. Returns: True if the type of this token matches the type passed in. """ return self.type == token_type def IsAnyType(self, *token_types): """Tests if this token is any of the given types. Args: token_types: The types to check. Also accepts a single array. Returns: True if the type of this token is any of the types passed in. """ if not isinstance(token_types[0], basestring): return self.type in token_types[0] else: return self.type in token_types def __repr__(self): return '<Token: %s, "%s", %r, %d, %r>' % (self.type, self.string, self.values, self.line_number, self.metadata) def __iter__(self): """Returns a token iterator.""" node = self while node: yield node node = node.next def __reversed__(self): """Returns a reverse-direction token iterator.""" node = self while node: yield node node = node.previous closure_linter-2.3.13/closure_linter/common/lintrunner.py0000750014730400116100000000235212247733553023306 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2008 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Interface for a lint running wrapper.""" __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)') class LintRunner(object): """Interface for a lint running wrapper.""" def __init__(self): if self.__class__ == LintRunner: raise NotImplementedError('class LintRunner is abstract') def Run(self, filenames, error_handler): """Run a linter on the given filenames.
Args: filenames: The filenames to check error_handler: An ErrorHandler object Returns: The error handler, which may have been used to collect error info. """ closure_linter-2.3.13/closure_linter/common/position.py0000750014730400116100000000637412247733553022762 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2008 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Classes to represent positions within strings.""" __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)') class Position(object): """Object representing a segment of a string. Attributes: start: The index in to the string where the segment starts. length: The length of the string segment. """ def __init__(self, start, length): """Initialize the position object. Args: start: The start index. length: The number of characters to include. """ self.start = start self.length = length def Get(self, string): """Returns this range of the given string. Args: string: The string to slice. Returns: The string within the range specified by this object. """ return string[self.start:self.start + self.length] def Set(self, target, source): """Sets this range within the target string to the source string. Args: target: The target string. source: The source string. Returns: The resulting string """ return target[:self.start] + source + target[self.start + self.length:] def AtEnd(string): """Create a Position representing the end of the given string. Args: string: The string to represent the end of. Returns: The created Position object. """ return Position(len(string), 0) AtEnd = staticmethod(AtEnd) def IsAtEnd(self, string): """Returns whether this position is at the end of the given string. Args: string: The string to test for the end of. Returns: Whether this position is at the end of the given string. """ return self.start == len(string) and self.length == 0 def AtBeginning(): """Create a Position representing the beginning of any string. Returns: The created Position object. """ return Position(0, 0) AtBeginning = staticmethod(AtBeginning) def IsAtBeginning(self): """Returns whether this position is at the beginning of any string. Returns: Whether this position is at the beginning of any string. """ return self.start == 0 and self.length == 0 def All(string): """Create a Position representing the entire string. Args: string: The string to represent the entirety of. Returns: The created Position object. """ return Position(0, len(string)) All = staticmethod(All) def Index(index): """Returns a Position object for the specified index. Args: index: The index to select, inclusively. Returns: The created Position object. """ return Position(index, 1) Index = staticmethod(Index) closure_linter-2.3.13/closure_linter/common/matcher.py0000750014730400116100000000415612247733553022535 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2007 The Closure Linter Authors. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Regular expression based JavaScript matcher classes.""" __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)') from closure_linter.common import position from closure_linter.common import tokens # Shorthand Token = tokens.Token Position = position.Position class Matcher(object): """A token matcher. Specifies a pattern to match, the type of token it represents, what mode the token changes to, and what mode the token applies to. Modes allow more advanced grammars to be incorporated, and are also necessary to tokenize line by line. We can have different patterns apply to different modes - i.e. looking for documentation while in comment mode. Attributes: regex: The regular expression representing this matcher. type: The type of token indicated by a successful match. result_mode: The mode to move to after a successful match. """ def __init__(self, regex, token_type, result_mode=None, line_start=False): """Create a new matcher template. Args: regex: The regular expression to match. token_type: The type of token a successful match indicates. result_mode: What mode to change to after a successful match. Defaults to None, which means to not change the current mode. line_start: Whether this matcher should only match string at the start of a line. """ self.regex = regex self.type = token_type self.result_mode = result_mode self.line_start = line_start closure_linter-2.3.13/closure_linter/common/__init__.py0000750014730400116100000000125012247733553022641 0ustar ajpeng00000000000000#!/usr/bin/env python # Copyright 2008 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Package indicator for gjslint.common.""" closure_linter-2.3.13/closure_linter/common/erroroutput.py0000640014730400116100000000301512247733553023513 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2012 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Utility functions to format errors.""" __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)', 'nnaze@google.com (Nathan Naze)') def GetUnixErrorOutput(filename, error, new_error=False): """Get a output line for an error in UNIX format.""" line = '' if error.token: line = '%d' % error.token.line_number error_code = '%04d' % error.code if new_error: error_code = 'New Error ' + error_code return '%s:%s:(%s) %s' % (filename, line, error_code, error.message) def GetErrorOutput(error, new_error=False): """Get a output line for an error in regular format.""" line = '' if error.token: line = 'Line %d, ' % error.token.line_number code = 'E:%04d' % error.code error_message = error.message if new_error: error_message = 'New Error ' + error_message return '%s%s: %s' % (line, code, error.message) closure_linter-2.3.13/closure_linter/common/tokens_test.py0000640014730400116100000000602112247733553023443 0ustar ajpeng00000000000000#!/usr/bin/env python # Copyright 2011 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __author__ = 'nnaze@google.com (Nathan Naze)' import unittest as googletest from closure_linter.common import tokens def _CreateDummyToken(): return tokens.Token('foo', None, 1, 1) def _CreateDummyTokens(count): dummy_tokens = [] for _ in xrange(count): dummy_tokens.append(_CreateDummyToken()) return dummy_tokens def _SetTokensAsNeighbors(neighbor_tokens): for i in xrange(len(neighbor_tokens)): prev_index = i - 1 next_index = i + 1 if prev_index >= 0: neighbor_tokens[i].previous = neighbor_tokens[prev_index] if next_index < len(neighbor_tokens): neighbor_tokens[i].next = neighbor_tokens[next_index] class TokensTest(googletest.TestCase): def testIsFirstInLine(self): # First token in file (has no previous). self.assertTrue(_CreateDummyToken().IsFirstInLine()) a, b = _CreateDummyTokens(2) _SetTokensAsNeighbors([a, b]) # Tokens on same line a.line_number = 30 b.line_number = 30 self.assertFalse(b.IsFirstInLine()) # Tokens on different lines b.line_number = 31 self.assertTrue(b.IsFirstInLine()) def testIsLastInLine(self): # Last token in file (has no next). 
self.assertTrue(_CreateDummyToken().IsLastInLine()) a, b = _CreateDummyTokens(2) _SetTokensAsNeighbors([a, b]) # Tokens on same line a.line_number = 30 b.line_number = 30 self.assertFalse(a.IsLastInLine()) b.line_number = 31 self.assertTrue(a.IsLastInLine()) def testIsType(self): a = tokens.Token('foo', 'fakeType1', 1, 1) self.assertTrue(a.IsType('fakeType1')) self.assertFalse(a.IsType('fakeType2')) def testIsAnyType(self): a = tokens.Token('foo', 'fakeType1', 1, 1) self.assertTrue(a.IsAnyType(['fakeType1', 'fakeType2'])) self.assertFalse(a.IsAnyType(['fakeType3', 'fakeType4'])) def testRepr(self): a = tokens.Token('foo', 'fakeType1', 1, 1) self.assertEquals('<Token: fakeType1, "foo", None, 1, None>', str(a)) def testIter(self): dummy_tokens = _CreateDummyTokens(5) _SetTokensAsNeighbors(dummy_tokens) a, b, c, d, e = dummy_tokens i = iter(a) self.assertListEqual([a, b, c, d, e], list(i)) def testReverseIter(self): dummy_tokens = _CreateDummyTokens(5) _SetTokensAsNeighbors(dummy_tokens) a, b, c, d, e = dummy_tokens ri = reversed(e) self.assertListEqual([e, d, c, b, a], list(ri)) if __name__ == '__main__': googletest.main() closure_linter-2.3.13/closure_linter/common/errorhandler.py0000750014730400116100000000335012247733553023574 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2008 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Interface for a linter error handler. Error handlers aggregate a set of errors from multiple files and can optionally perform some action based on the reported errors, for example, logging the error or automatically fixing it. """ __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)') class ErrorHandler(object): """Error handler interface.""" def __init__(self): if self.__class__ == ErrorHandler: raise NotImplementedError('class ErrorHandler is abstract') def HandleFile(self, filename, first_token): """Notifies this ErrorHandler that subsequent errors are in filename. Args: filename: The file being linted. first_token: The first token of the file. """ def HandleError(self, error): """Append the error to the list. Args: error: The error object """ def FinishFile(self): """Finishes handling the current file. Should be called after all errors in a file have been handled. """ def GetErrors(self): """Returns the accumulated errors. Returns: A sequence of errors. """ closure_linter-2.3.13/closure_linter/common/filetestcase.py0000750014730400116100000000746512247733553023563 0ustar ajpeng00000000000000#!/usr/bin/env python # Copyright 2007 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. """Test case that runs a checker on a file, matching errors against annotations. Runs the given checker on the given file, accumulating all errors. The list of errors is then matched against those annotated in the file. Based heavily on devtools/javascript/gpylint/full_test.py. """ __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)') import re import unittest as googletest from closure_linter.common import erroraccumulator class AnnotatedFileTestCase(googletest.TestCase): """Test case to run a linter against a single file.""" # Matches an error identifier consisting of all-caps letters and underscores. _MESSAGE = {'msg': '[A-Z][A-Z_]+'} # Matches a //, followed by an optional line number with a +/-, followed by a # list of message IDs. Used to extract expected messages from testdata files. # TODO(robbyw): Generalize to use different commenting patterns. _EXPECTED_RE = re.compile(r'\s*//\s*(?:(?P<line>[+-]?[0-9]+):)?' r'\s*(?P<msgs>%(msg)s(?:,\s*%(msg)s)*)' % _MESSAGE) def __init__(self, filename, lint_callable, converter): """Create a single file lint test case. Args: filename: Filename to test. lint_callable: Callable that lints a file. This is usually runner.Run(). converter: Function taking an error string and returning an error code. """ googletest.TestCase.__init__(self, 'runTest') self._filename = filename self._messages = [] self._lint_callable = lint_callable self._converter = converter def shortDescription(self): """Provides a description for the test.""" return 'Run linter on %s' % self._filename def runTest(self): """Runs the test.""" try: filename = self._filename stream = open(filename) except IOError as ex: raise IOError('Could not find testdata resource for %s: %s' % (self._filename, ex)) expected = self._GetExpectedMessages(stream) got = self._ProcessFileAndGetMessages(filename) self.assertEqual(expected, got) def _GetExpectedMessages(self, stream): """Parse a file and get a sorted list of expected messages.""" messages = [] for i, line in enumerate(stream): match = self._EXPECTED_RE.search(line) if match: line = match.group('line') msg_ids = match.group('msgs') if line is None: line = i + 1 elif line.startswith('+') or line.startswith('-'): line = i + 1 + int(line) else: line = int(line) for msg_id in msg_ids.split(','): # Ignore a spurious message from the license preamble. if msg_id != 'WITHOUT': messages.append((line, self._converter(msg_id.strip()))) stream.seek(0) messages.sort() return messages def _ProcessFileAndGetMessages(self, filename): """Trap gjslint's output and parse it to get the messages added.""" error_accumulator = erroraccumulator.ErrorAccumulator() self._lint_callable(filename, error_accumulator) errors = error_accumulator.GetErrors() # Convert to expected tuple format. error_msgs = [(error.token.line_number, error.code) for error in errors] error_msgs.sort() return error_msgs closure_linter-2.3.13/closure_linter/javascriptstatetracker.py0000750014730400116100000001206312247733554024402 0ustar ajpeng00000000000000#!/usr/bin/env python # Copyright 2008 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Parser for JavaScript files.""" from closure_linter import javascripttokens from closure_linter import statetracker from closure_linter import tokenutil # Shorthand Type = javascripttokens.JavaScriptTokenType class JsDocFlag(statetracker.DocFlag): """Javascript doc flag object. Attribute: flag_type: param, return, define, type, etc. flag_token: The flag token. type_start_token: The first token specifying the flag JS type, including braces. type_end_token: The last token specifying the flag JS type, including braces. type: The JavaScript type spec. name_token: The token specifying the flag name. name: The flag name description_start_token: The first token in the description. description_end_token: The end token in the description. description: The description. """ # Please keep these lists alphabetized. # Some projects use the following extensions to JsDoc. # TODO(robbyw): determine which of these, if any, should be illegal. EXTENDED_DOC = frozenset([ 'class', 'code', 'desc', 'final', 'hidden', 'inheritDoc', 'link', 'meaning', 'provideGoog', 'throws']) LEGAL_DOC = EXTENDED_DOC | statetracker.DocFlag.LEGAL_DOC def __init__(self, flag_token): """Creates the JsDocFlag object and attaches it to the given start token. Args: flag_token: The starting token of the flag. """ statetracker.DocFlag.__init__(self, flag_token) class JavaScriptStateTracker(statetracker.StateTracker): """JavaScript state tracker. Inherits from the core EcmaScript StateTracker adding extra state tracking functionality needed for JavaScript. """ def __init__(self): """Initializes a JavaScript token stream state tracker.""" statetracker.StateTracker.__init__(self, JsDocFlag) def Reset(self): self._scope_depth = 0 self._block_stack = [] super(JavaScriptStateTracker, self).Reset() def InTopLevel(self): """Compute whether we are at the top level in the class. This function call is language specific. In some languages like JavaScript, a function is top level if it is not inside any parenthesis. In languages such as ActionScript, a function is top level if it is directly within a class. Returns: Whether we are at the top level in the class. """ return self._scope_depth == self.ParenthesesDepth() def InFunction(self): """Returns true if the current token is within a function. This js-specific override ignores goog.scope functions. Returns: True if the current token is within a function. """ return self._scope_depth != self.FunctionDepth() def InNonScopeBlock(self): """Compute whether we are nested within a non-goog.scope block. Returns: True if the token is not enclosed in a block that does not originate from a goog.scope statement. False otherwise. """ return self._scope_depth != self.BlockDepth() def GetBlockType(self, token): """Determine the block type given a START_BLOCK token. Code blocks come after parameters, keywords like else, and closing parens. Args: token: The current token. Can be assumed to be type START_BLOCK Returns: Code block type for current token. 
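For example, the block in `if (x) { ... }` follows a closing paren and is classified as code, while the block in `x = { ... }` follows an operator and is treated as an object literal.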
""" last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, reverse=True) if last_code.type in (Type.END_PARAMETERS, Type.END_PAREN, Type.KEYWORD) and not last_code.IsKeyword('return'): return self.CODE else: return self.OBJECT_LITERAL def GetCurrentBlockStart(self): """Gets the start token of current block. Returns: Starting token of current block. None if not in block. """ if self._block_stack: return self._block_stack[-1] else: return None def HandleToken(self, token, last_non_space_token): """Handles the given token and updates state. Args: token: The token to handle. last_non_space_token: The last non space token encountered """ if token.type == Type.START_BLOCK: self._block_stack.append(token) if token.type == Type.IDENTIFIER and token.string == 'goog.scope': self._scope_depth += 1 if token.type == Type.END_BLOCK: start_token = self._block_stack.pop() if tokenutil.GoogScopeOrNoneFromStartBlock(start_token): self._scope_depth -= 1 super(JavaScriptStateTracker, self).HandleToken(token, last_non_space_token) closure_linter-2.3.13/closure_linter/__init__.py0000750014730400116100000000124112247733554021352 0ustar ajpeng00000000000000#!/usr/bin/env python # Copyright 2008 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Package indicator for gjslint.""" closure_linter-2.3.13/closure_linter/scopeutil_test.py0000640014730400116100000001425512247733554022670 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2012 The Closure Linter Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for the scopeutil module.""" # Allow non-Google copyright # pylint: disable=g-bad-file-header __author__ = ('nnaze@google.com (Nathan Naze)') import unittest as googletest from closure_linter import ecmametadatapass from closure_linter import scopeutil from closure_linter import testutil def _FindContexts(start_token): """Depth first search of all contexts referenced by a token stream. Includes contexts' parents, which might not be directly referenced by any token in the stream. Args: start_token: First token in the token stream. Yields: All contexts referenced by this token stream. """ seen_contexts = set() # For each token, yield the context if we haven't seen it before. for token in start_token: token_context = token.metadata.context contexts = [token_context] # Also grab all the context's ancestors. 
parent = token_context.parent while parent: contexts.append(parent) parent = parent.parent # Yield each of these contexts if we've not seen them. for context in contexts: if context not in seen_contexts: yield context seen_contexts.add(context) def _FindFirstContextOfType(token, context_type): """Returns the first context of the given type in the token stream.""" for context in _FindContexts(token): if context.type == context_type: return context class StatementTest(googletest.TestCase): def assertAlias(self, expected_match, script): start_token = testutil.TokenizeSourceAndRunEcmaPass(script) statement = _FindFirstContextOfType( start_token, ecmametadatapass.EcmaContext.STATEMENT) match = scopeutil.MatchAlias(statement) self.assertEquals(expected_match, match) def testSimpleAliases(self): self.assertAlias( ('foo', 'goog.foo'), 'var foo = goog.foo;') self.assertAlias( ('foo', 'goog.foo'), 'var foo = goog.foo') # No semicolon def testAliasWithComment(self): self.assertAlias( ('Component', 'goog.ui.Component'), 'var Component = /* comment */ goog.ui.Component;') def testMultilineComment(self): self.assertAlias( ('Component', 'goog.ui.Component'), 'var Component = \n goog.ui.Component;') def testNonSymbolAliasVarStatements(self): self.assertAlias(None, 'var foo = 3;') self.assertAlias(None, 'var foo = function() {};') self.assertAlias(None, 'for(var foo = bar;;){}') self.assertAlias(None, 'var foo = bar ? baz : qux;') class ScopeBlockTest(googletest.TestCase): @staticmethod def _GetBlocks(source): start_token = testutil.TokenizeSourceAndRunEcmaPass(source) for context in _FindContexts(start_token): if context.type is ecmametadatapass.EcmaContext.BLOCK: yield context def assertNoBlocks(self, script): blocks = list(self._GetBlocks(script)) self.assertEquals([], blocks) def testNotBlocks(self): # Ensure these are not considered blocks. self.assertNoBlocks('goog.scope(if{});') self.assertNoBlocks('goog.scope(for{});') self.assertNoBlocks('goog.scope(switch{});') self.assertNoBlocks('goog.scope(function foo{});') def testNonScopeBlocks(self): blocks = list(self._GetBlocks('goog.scope(try{});')) self.assertEquals(1, len(blocks)) self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop())) blocks = list(self._GetBlocks('goog.scope(function(a,b){});')) self.assertEquals(1, len(blocks)) self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop())) blocks = list(self._GetBlocks('goog.scope(try{} catch(){});')) # Two blocks: try and catch.
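# Neither block comes from a function literal passed to goog.scope(), so # IsGoogScopeBlock should reject both.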
self.assertEquals(2, len(blocks)) self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop())) self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop())) blocks = list(self._GetBlocks('goog.scope(try{} catch(){} finally {});')) self.assertEquals(3, len(blocks)) self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop())) self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop())) self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop())) class AliasTest(googletest.TestCase): def setUp(self): self.start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT) def testMatchAliasStatement(self): matches = set() for context in _FindContexts(self.start_token): match = scopeutil.MatchAlias(context) if match: matches.add(match) self.assertEquals( set([('bar', 'baz'), ('foo', 'this.foo_'), ('Component', 'goog.ui.Component'), ('MyClass', 'myproject.foo.MyClass'), ('NonClosurizedClass', 'aaa.bbb.NonClosurizedClass')]), matches) def testMatchAliasStatement_withClosurizedNamespaces(self): closurized_namespaces = frozenset(['goog', 'myproject']) matches = set() for context in _FindContexts(self.start_token): match = scopeutil.MatchAlias(context) if match: unused_alias, symbol = match if scopeutil.IsInClosurizedNamespace(symbol, closurized_namespaces): matches.add(match) self.assertEquals( set([('MyClass', 'myproject.foo.MyClass'), ('Component', 'goog.ui.Component')]), matches) _TEST_SCRIPT = """ goog.scope(function() { var Component = goog.ui.Component; // scope alias var MyClass = myproject.foo.MyClass; // scope alias // Scope alias of non-Closurized namespace. var NonClosurizedClass = aaa.bbb.NonClosurizedClass; var foo = this.foo_; // non-scope object property alias var bar = baz; // variable alias var component = new Component(); }); """ if __name__ == '__main__': googletest.main() closure_linter-2.3.13/closure_linter/error_fixer_test.py0000640014730400116100000000326712247733554023208 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2012 The Closure Linter Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
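# Added commentary, not part of the original source: the tests below walk the # doubly linked token stream produced by testutil.TokenizeSourceAndRunEcmaPass. # For the `var x = 3;` fixture the stream begins roughly as KEYWORD('var') -> # WHITESPACE(' ') -> IDENTIFIER('x') -> ..., so start_token.next.next.next is # the fourth token, which is what the _DeleteTokens assertion pins down.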
"""Unit tests for the error_fixer module.""" # Allow non-Google copyright # pylint: disable=g-bad-file-header import unittest as googletest from closure_linter import error_fixer from closure_linter import testutil class ErrorFixerTest(googletest.TestCase): """Unit tests for error_fixer.""" def setUp(self): self.error_fixer = error_fixer.ErrorFixer() def testDeleteToken(self): start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT) second_token = start_token.next self.error_fixer.HandleFile('test_file', start_token) self.error_fixer._DeleteToken(start_token) self.assertEqual(second_token, self.error_fixer._file_token) def testDeleteTokens(self): start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT) fourth_token = start_token.next.next.next self.error_fixer.HandleFile('test_file', start_token) self.error_fixer._DeleteTokens(start_token, 3) self.assertEqual(fourth_token, self.error_fixer._file_token) _TEST_SCRIPT = """\ var x = 3; """ if __name__ == '__main__': googletest.main() closure_linter-2.3.13/closure_linter/strict_test.py0000750014730400116100000000365412247733554022174 0ustar ajpeng00000000000000#!/usr/bin/env python # Copyright 2013 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for gjslint --strict. Tests errors that can be thrown by gjslint when in strict mode. """ import unittest import gflags as flags import unittest as googletest from closure_linter import errors from closure_linter import runner from closure_linter.common import erroraccumulator flags.FLAGS.strict = True class StrictTest(unittest.TestCase): """Tests scenarios where strict generates warnings.""" def testUnclosedString(self): """Tests warnings are reported when nothing is disabled. b/11450054. """ original = [ 'bug = function() {', ' (\'foo\'\');', '};', '', ] expected = [errors.FILE_DOES_NOT_PARSE, errors.MULTI_LINE_STRING, errors.FILE_IN_BLOCK] self._AssertErrors(original, expected) def _AssertErrors(self, original, expected_errors): """Asserts that the error fixer corrects original to expected.""" # Trap gjslint's output parse it to get messages added. error_accumulator = erroraccumulator.ErrorAccumulator() runner.Run('testing.js', error_accumulator, source=original) error_nums = [e.code for e in error_accumulator.GetErrors()] error_nums.sort() expected_errors.sort() self.assertListEqual(error_nums, expected_errors) if __name__ == '__main__': googletest.main() closure_linter-2.3.13/closure_linter/aliaspass_test.py0000750014730400116100000001160612247733554022640 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2012 The Closure Linter Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for the aliaspass module.""" # Allow non-Google copyright # pylint: disable=g-bad-file-header __author__ = ('nnaze@google.com (Nathan Naze)') import unittest as googletest from closure_linter import aliaspass from closure_linter import errors from closure_linter import testutil from closure_linter.common import erroraccumulator def _GetTokenByLineAndString(start_token, string, line_number): for token in start_token: if token.line_number == line_number and token.string == string: return token class AliasPassTest(googletest.TestCase): def testInvalidGoogScopeCall(self): start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCOPE_SCRIPT) error_accumulator = erroraccumulator.ErrorAccumulator() alias_pass = aliaspass.AliasPass( error_handler=error_accumulator) alias_pass.Process(start_token) alias_errors = error_accumulator.GetErrors() self.assertEquals(1, len(alias_errors)) alias_error = alias_errors[0] self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, alias_error.code) self.assertEquals('goog.scope', alias_error.token.string) def testAliasedIdentifiers(self): start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_ALIAS_SCRIPT) alias_pass = aliaspass.AliasPass(set(['goog', 'myproject'])) alias_pass.Process(start_token) alias_token = _GetTokenByLineAndString(start_token, 'Event', 4) self.assertTrue(alias_token.metadata.is_alias_definition) my_class_token = _GetTokenByLineAndString(start_token, 'myClass', 8) self.assertIsNone(my_class_token.metadata.aliased_symbol) component_token = _GetTokenByLineAndString(start_token, 'Component', 16) self.assertEquals('goog.ui.Component', component_token.metadata.aliased_symbol) event_token = _GetTokenByLineAndString(start_token, 'Event.Something', 16) self.assertEquals('goog.events.Event.Something', event_token.metadata.aliased_symbol) non_closurized_token = _GetTokenByLineAndString( start_token, 'NonClosurizedClass', 17) self.assertIsNone(non_closurized_token.metadata.aliased_symbol) long_start_token = _GetTokenByLineAndString(start_token, 'Event.', 20) self.assertEquals('goog.events.Event.MultilineIdentifier.someMethod', long_start_token.metadata.aliased_symbol) def testMultipleGoogScopeCalls(self): start_token = testutil.TokenizeSourceAndRunEcmaPass( _TEST_MULTIPLE_SCOPE_SCRIPT) error_accumulator = erroraccumulator.ErrorAccumulator() alias_pass = aliaspass.AliasPass( set(['goog', 'myproject']), error_handler=error_accumulator) alias_pass.Process(start_token) alias_errors = error_accumulator.GetErrors() self.assertEquals(3, len(alias_errors)) error = alias_errors[0] self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, error.code) self.assertEquals(7, error.token.line_number) error = alias_errors[1] self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code) self.assertEquals(7, error.token.line_number) error = alias_errors[2] self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code) self.assertEquals(11, error.token.line_number) _TEST_ALIAS_SCRIPT = """ goog.scope(function() { var events = goog.events; // scope alias var Event = events.Event; // nested scope alias // This should not be registered as an aliased identifier because // 
it appears before the alias. var myClass = new MyClass(); var Component = goog.ui.Component; // scope alias var MyClass = myproject.foo.MyClass; // scope alias // Scope alias of non-Closurized namespace. var NonClosurizedClass = aaa.bbb.NonClosurizedClass; var component = new Component(Event.Something); var nonClosurized = NonClosurizedClass(); // A created namespace with a really long identifier. Event. MultilineIdentifier. someMethod = function() {}; }); """ _TEST_SCOPE_SCRIPT = """ function foo () { // This goog.scope call is invalid. goog.scope(function() { }); } """ _TEST_MULTIPLE_SCOPE_SCRIPT = """ goog.scope(function() { // do nothing }); function foo() { var test = goog.scope; // We should not see goog.scope mentioned. } // This goog.scope invalid. There can be only one. goog.scope(function() { }); """ if __name__ == '__main__': googletest.main() closure_linter-2.3.13/closure_linter/statetracker.py0000640014730400116100000011157212247733554022316 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2007 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Light weight EcmaScript state tracker that reads tokens and tracks state.""" __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)') import re from closure_linter import javascripttokenizer from closure_linter import javascripttokens from closure_linter import tokenutil # Shorthand Type = javascripttokens.JavaScriptTokenType class DocFlag(object): """Generic doc flag object. Attribute: flag_type: param, return, define, type, etc. flag_token: The flag token. type_start_token: The first token specifying the flag type, including braces. type_end_token: The last token specifying the flag type, including braces. type: The type spec. name_token: The token specifying the flag name. name: The flag name description_start_token: The first token in the description. description_end_token: The end token in the description. description: The description. """ # Please keep these lists alphabetized. # The list of standard jsdoc tags is from STANDARD_DOC = frozenset([ 'author', 'bug', 'classTemplate', 'consistentIdGenerator', 'const', 'constructor', 'define', 'deprecated', 'dict', 'enum', 'export', 'expose', 'extends', 'externs', 'fileoverview', 'idGenerator', 'implements', 'implicitCast', 'interface', 'lends', 'license', 'ngInject', # This annotation is specific to AngularJS. 'noalias', 'nocompile', 'nosideeffects', 'override', 'owner', 'param', 'preserve', 'private', 'protected', 'public', 'return', 'see', 'stableIdGenerator', 'struct', 'supported', 'template', 'this', 'type', 'typedef', 'wizaction', # This annotation is specific to Wiz. 'wizmodule', # This annotation is specific to Wiz. ]) ANNOTATION = frozenset(['preserveTry', 'suppress']) LEGAL_DOC = STANDARD_DOC | ANNOTATION # Includes all Closure Compiler @suppress types. # Not all of these annotations are interpreted by Closure Linter. 
# # Specific cases: # - accessControls is supported by the compiler at the expression # and method level to suppress warnings about private/protected # access (method level applies to all references in the method). # The linter mimics the compiler behavior. SUPPRESS_TYPES = frozenset([ 'accessControls', 'ambiguousFunctionDecl', 'checkRegExp', 'checkStructDictInheritance', 'checkTypes', 'checkVars', 'const', 'constantProperty', 'deprecated', 'duplicate', 'es5Strict', 'externsValidation', 'extraProvide', 'extraRequire', 'fileoverviewTags', 'globalThis', 'internetExplorerChecks', 'invalidCasts', 'missingProperties', 'missingProvide', 'missingRequire', 'missingReturn', 'nonStandardJsDocs', 'strictModuleDepCheck', 'tweakValidation', 'typeInvalidation', 'undefinedNames', 'undefinedVars', 'underscore', 'unknownDefines', 'unusedPrivateMembers', 'uselessCode', 'visibility', 'with']) HAS_DESCRIPTION = frozenset([ 'define', 'deprecated', 'desc', 'fileoverview', 'license', 'param', 'preserve', 'return', 'supported']) HAS_TYPE = frozenset([ 'define', 'enum', 'extends', 'implements', 'param', 'return', 'type', 'suppress', 'const']) TYPE_ONLY = frozenset(['enum', 'extends', 'implements', 'suppress', 'type', 'const']) HAS_NAME = frozenset(['param']) EMPTY_COMMENT_LINE = re.compile(r'^\s*\*?\s*$') EMPTY_STRING = re.compile(r'^\s*$') def __init__(self, flag_token): """Creates the DocFlag object and attaches it to the given start token. Args: flag_token: The starting token of the flag. """ self.flag_token = flag_token self.flag_type = flag_token.string.strip().lstrip('@') # Extract type, if applicable. self.type = None self.type_start_token = None self.type_end_token = None if self.flag_type in self.HAS_TYPE: brace = tokenutil.SearchUntil(flag_token, [Type.DOC_START_BRACE], Type.FLAG_ENDING_TYPES) if brace: end_token, contents = _GetMatchingEndBraceAndContents(brace) self.type = contents self.type_start_token = brace self.type_end_token = end_token elif (self.flag_type in self.TYPE_ONLY and flag_token.next.type not in Type.FLAG_ENDING_TYPES and flag_token.line_number == flag_token.next.line_number): # b/10407058. If the flag is expected to be followed by a type then # search for the type in the same line only. If there is no token after # the flag in the same line, conclude that no type is specified. self.type_start_token = flag_token.next self.type_end_token, self.type = _GetEndTokenAndContents( self.type_start_token) if self.type is not None: self.type = self.type.strip() # Extract name, if applicable. self.name_token = None self.name = None if self.flag_type in self.HAS_NAME: # Handle the bad case: the name could be immediately after the flag token. self.name_token = _GetNextPartialIdentifierToken(flag_token) # Handle the good case: if the found token is after the type start, look # for an identifier (substring to cover cases like [cnt] b/4197272) after # the type end, since types contain identifiers. if (self.type and self.name_token and tokenutil.Compare(self.name_token, self.type_start_token) > 0): self.name_token = _GetNextPartialIdentifierToken(self.type_end_token) if self.name_token: self.name = self.name_token.string # Extract description, if applicable.
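# For example (illustrative): for the flag text # '@param {string} name The user name.' # flag_type is 'param', type is 'string', name is 'name', and the # description extracted below is 'The user name.'.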
self.description_start_token = None self.description_end_token = None self.description = None if self.flag_type in self.HAS_DESCRIPTION: search_start_token = flag_token if self.name_token and self.type_end_token: if tokenutil.Compare(self.type_end_token, self.name_token) > 0: search_start_token = self.type_end_token else: search_start_token = self.name_token elif self.name_token: search_start_token = self.name_token elif self.type: search_start_token = self.type_end_token interesting_token = tokenutil.Search(search_start_token, Type.FLAG_DESCRIPTION_TYPES | Type.FLAG_ENDING_TYPES) if interesting_token.type in Type.FLAG_DESCRIPTION_TYPES: self.description_start_token = interesting_token self.description_end_token, self.description = ( _GetEndTokenAndContents(interesting_token)) class DocComment(object): """JavaScript doc comment object. Attributes: ordered_params: Ordered list of parameters documented. start_token: The token that starts the doc comment. end_token: The token that ends the doc comment. suppressions: Map of suppression type to the token that added it. """ def __init__(self, start_token): """Create the doc comment object. Args: start_token: The first token in the doc comment. """ self.__flags = [] self.start_token = start_token self.end_token = None self.suppressions = {} self.invalidated = False @property def ordered_params(self): """Gives the list of parameter names as a list of strings.""" params = [] for flag in self.__flags: if flag.flag_type == 'param' and flag.name: params.append(flag.name) return params def Invalidate(self): """Indicate that the JSDoc is well-formed but we had problems parsing it. This is a short-circuiting mechanism so that we don't emit false positives about well-formed doc comments just because we don't support hot new syntaxes. """ self.invalidated = True def IsInvalidated(self): """Test whether Invalidate() has been called.""" return self.invalidated def AddSuppression(self, token): """Add a new error suppression flag. Args: token: The suppression flag token. """ #TODO(user): Error if no braces brace = tokenutil.SearchUntil(token, [Type.DOC_START_BRACE], [Type.DOC_FLAG]) if brace: end_token, contents = _GetMatchingEndBraceAndContents(brace) for suppression in contents.split('|'): self.suppressions[suppression] = token def SuppressionOnly(self): """Returns whether this comment contains only suppression flags.""" if not self.__flags: return False for flag in self.__flags: if flag.flag_type != 'suppress': return False return True def AddFlag(self, flag): """Add a new document flag. Args: flag: DocFlag object. """ self.__flags.append(flag) def InheritsDocumentation(self): """Test if the jsdoc implies documentation inheritance. Returns: True if documentation may be pulled off the superclass. """ return self.HasFlag('inheritDoc') or self.HasFlag('override') def HasFlag(self, flag_type): """Test if the given flag has been set. Args: flag_type: The type of the flag to check. Returns: True if the flag is set. """ for flag in self.__flags: if flag.flag_type == flag_type: return True return False def GetFlag(self, flag_type): """Gets the last flag of the given type. Args: flag_type: The type of the flag to get. Returns: The last instance of the given flag type in this doc comment. 
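Returns None if this comment contains no flag of the given type.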
""" for flag in reversed(self.__flags): if flag.flag_type == flag_type: return flag def GetDocFlags(self): """Return the doc flags for this comment.""" return list(self.__flags) def _YieldDescriptionTokens(self): for token in self.start_token: if (token is self.end_token or token.type is javascripttokens.JavaScriptTokenType.DOC_FLAG or token.type not in javascripttokens.JavaScriptTokenType.COMMENT_TYPES): return if token.type not in [ javascripttokens.JavaScriptTokenType.START_DOC_COMMENT, javascripttokens.JavaScriptTokenType.END_DOC_COMMENT, javascripttokens.JavaScriptTokenType.DOC_PREFIX]: yield token @property def description(self): return tokenutil.TokensToString( self._YieldDescriptionTokens()) def GetTargetIdentifier(self): """Returns the identifier (as a string) that this is a comment for. Note that this uses method uses GetIdentifierForToken to get the full identifier, even if broken up by whitespace, newlines, or comments, and thus could be longer than GetTargetToken().string. Returns: The identifier for the token this comment is for. """ token = self.GetTargetToken() if token: return tokenutil.GetIdentifierForToken(token) def GetTargetToken(self): """Get this comment's target token. Returns: The token that is the target of this comment, or None if there isn't one. """ # File overviews describe the file, not a token. if self.HasFlag('fileoverview'): return skip_types = frozenset([ Type.WHITESPACE, Type.BLANK_LINE, Type.START_PAREN]) target_types = frozenset([ Type.FUNCTION_NAME, Type.IDENTIFIER, Type.SIMPLE_LVALUE]) token = self.end_token.next while token: if token.type in target_types: return token # Handles the case of a comment on "var foo = ...' if token.IsKeyword('var'): next_code_token = tokenutil.CustomSearch( token, lambda t: t.type not in Type.NON_CODE_TYPES) if (next_code_token and next_code_token.IsType(Type.SIMPLE_LVALUE)): return next_code_token return # Handles the case of a comment on "function foo () {}" if token.type is Type.FUNCTION_DECLARATION: next_code_token = tokenutil.CustomSearch( token, lambda t: t.type not in Type.NON_CODE_TYPES) if next_code_token.IsType(Type.FUNCTION_NAME): return next_code_token return # Skip types will end the search. if token.type not in skip_types: return token = token.next def CompareParameters(self, params): """Computes the edit distance and list from the function params to the docs. Uses the Levenshtein edit distance algorithm, with code modified from http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#Python Args: params: The parameter list for the function declaration. Returns: The edit distance, the edit list. """ source_len, target_len = len(self.ordered_params), len(params) edit_lists = [[]] distance = [[]] for i in range(target_len+1): edit_lists[0].append(['I'] * i) distance[0].append(i) for j in range(1, source_len+1): edit_lists.append([['D'] * j]) distance.append([j]) for i in range(source_len): for j in range(target_len): cost = 1 if self.ordered_params[i] == params[j]: cost = 0 deletion = distance[i][j+1] + 1 insertion = distance[i+1][j] + 1 substitution = distance[i][j] + cost edit_list = None best = None if deletion <= insertion and deletion <= substitution: # Deletion is best. best = deletion edit_list = list(edit_lists[i][j+1]) edit_list.append('D') elif insertion <= substitution: # Insertion is best. best = insertion edit_list = list(edit_lists[i+1][j]) edit_list.append('I') edit_lists[i+1].append(edit_list) else: # Substitution is best. 
best = substitution edit_list = list(edit_lists[i][j]) if cost: edit_list.append('S') else: edit_list.append('=') edit_lists[i+1].append(edit_list) distance[i+1].append(best) return distance[source_len][target_len], edit_lists[source_len][target_len] def __repr__(self): """Returns a string representation of this object. Returns: A string representation of this object. """ return '<DocComment: %s, %s>' % ( str(self.ordered_params), str(self.__flags)) # # Helper methods used by DocFlag and DocComment to parse out flag information. # def _GetMatchingEndBraceAndContents(start_brace): """Returns the matching end brace and contents between the two braces. If any FLAG_ENDING_TYPE token is encountered before a matching end brace, then that token is used as the matching ending token. Contents will have all comment prefixes stripped out of them, and all comment prefixes in between the start and end tokens will be split out into separate DOC_PREFIX tokens. Args: start_brace: The DOC_START_BRACE token immediately before desired contents. Returns: The matching ending token (DOC_END_BRACE or FLAG_ENDING_TYPE) and a string of the contents between the matching tokens, minus any comment prefixes. """ open_count = 1 close_count = 0 contents = [] # We don't consider the start brace part of the type string. token = start_brace.next while open_count != close_count: if token.type == Type.DOC_START_BRACE: open_count += 1 elif token.type == Type.DOC_END_BRACE: close_count += 1 if token.type != Type.DOC_PREFIX: contents.append(token.string) if token.type in Type.FLAG_ENDING_TYPES: break token = token.next # Don't include the end token (end brace, end doc comment, etc.) in type. token = token.previous contents = contents[:-1] return token, ''.join(contents) def _GetNextPartialIdentifierToken(start_token): """Returns the first token having identifier as substring after a token. Searches each token after the start to see if it contains an identifier. If found, token is returned. If no identifier is found returns None. Search is abandoned when a FLAG_ENDING_TYPE token is found. Args: start_token: The token to start searching after. Returns: The token found containing identifier, None otherwise. """ token = start_token.next while token and token.type not in Type.FLAG_ENDING_TYPES: match = javascripttokenizer.JavaScriptTokenizer.IDENTIFIER.search( token.string) if match is not None and token.type == Type.COMMENT: return token token = token.next return None def _GetEndTokenAndContents(start_token): """Returns last content token and all contents before FLAG_ENDING_TYPE token. Comment prefixes are split into DOC_PREFIX tokens and stripped from the returned contents. Args: start_token: The token immediately before the first content token. Returns: The last content token and a string of all contents including start and end tokens, with comment prefixes stripped. """ iterator = start_token last_line = iterator.line_number last_token = None contents = '' doc_depth = 0 while not iterator.type in Type.FLAG_ENDING_TYPES or doc_depth > 0: if (iterator.IsFirstInLine() and DocFlag.EMPTY_COMMENT_LINE.match(iterator.line)): # If we have a blank comment line, consider that an implicit # ending of the description. This handles a case like: # # * @return {boolean} True # * # * Note: This is a sentence. # # The note is not part of the @return description, but there was # no definitive ending token. Rather there was a line containing # only a doc comment prefix or whitespace.
break # b/2983692 # don't prematurely match against a @flag if inside a doc flag # need to think about what is the correct behavior for unterminated # inline doc flags if (iterator.type == Type.DOC_START_BRACE and iterator.next.type == Type.DOC_INLINE_FLAG): doc_depth += 1 elif (iterator.type == Type.DOC_END_BRACE and doc_depth > 0): doc_depth -= 1 if iterator.type in Type.FLAG_DESCRIPTION_TYPES: contents += iterator.string last_token = iterator iterator = iterator.next if iterator.line_number != last_line: contents += '\n' last_line = iterator.line_number end_token = last_token if DocFlag.EMPTY_STRING.match(contents): contents = None else: # Strip trailing newline. contents = contents[:-1] return end_token, contents class Function(object): """Data about a JavaScript function. Attributes: block_depth: Block depth the function began at. doc: The DocComment associated with the function. has_return: If the function has a return value. has_throw: If the function contains a throw statement. has_this: If the function references the 'this' object. is_assigned: If the function is part of an assignment. is_constructor: If the function is a constructor. is_interface: If the function is annotated with @interface. name: The name of the function, whether given in the function keyword or as the lvalue the function is assigned to. start_token: First token of the function (the 'function' keyword token). end_token: Last token of the function (the closing '}' token). parameters: List of parameter names. """ def __init__(self, block_depth, is_assigned, doc, name): self.block_depth = block_depth self.is_assigned = is_assigned self.is_constructor = doc and doc.HasFlag('constructor') self.is_interface = doc and doc.HasFlag('interface') self.has_return = False self.has_throw = False self.has_this = False self.name = name self.doc = doc self.start_token = None self.end_token = None self.parameters = None class StateTracker(object): """EcmaScript state tracker. Tracks block depth, function names, etc. within an EcmaScript token stream. """ OBJECT_LITERAL = 'o' CODE = 'c' def __init__(self, doc_flag=DocFlag): """Initializes a JavaScript token stream state tracker. Args: doc_flag: An optional custom DocFlag used for validating documentation flags. """ self._doc_flag = doc_flag self.Reset() def Reset(self): """Resets the state tracker to prepare for processing a new page.""" self._block_depth = 0 self._is_block_close = False self._paren_depth = 0 self._function_stack = [] self._functions_by_name = {} self._last_comment = None self._doc_comment = None self._cumulative_params = None self._block_types = [] self._last_non_space_token = None self._last_line = None self._first_token = None self._documented_identifiers = set() self._variables_in_scope = [] def InFunction(self): """Returns true if the current token is within a function. Returns: True if the current token is within a function. """ return bool(self._function_stack) def InConstructor(self): """Returns true if the current token is within a constructor. Returns: True if the current token is within a constructor. """ return self.InFunction() and self._function_stack[-1].is_constructor def InInterfaceMethod(self): """Returns true if the current token is within an interface method. Returns: True if the current token is within an interface method.
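This is true either when the enclosing function is itself documented with @interface, or when it is defined on the prototype of a function that is known to be an interface.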
""" if self.InFunction(): if self._function_stack[-1].is_interface: return True else: name = self._function_stack[-1].name prototype_index = name.find('.prototype.') if prototype_index != -1: class_function_name = name[0:prototype_index] if (class_function_name in self._functions_by_name and self._functions_by_name[class_function_name].is_interface): return True return False def InTopLevelFunction(self): """Returns true if the current token is within a top level function. Returns: True if the current token is within a top level function. """ return len(self._function_stack) == 1 and self.InTopLevel() def InAssignedFunction(self): """Returns true if the current token is within a function variable. Returns: True if if the current token is within a function variable """ return self.InFunction() and self._function_stack[-1].is_assigned def IsFunctionOpen(self): """Returns true if the current token is a function block open. Returns: True if the current token is a function block open. """ return (self._function_stack and self._function_stack[-1].block_depth == self._block_depth - 1) def IsFunctionClose(self): """Returns true if the current token is a function block close. Returns: True if the current token is a function block close. """ return (self._function_stack and self._function_stack[-1].block_depth == self._block_depth) def InBlock(self): """Returns true if the current token is within a block. Returns: True if the current token is within a block. """ return bool(self._block_depth) def IsBlockClose(self): """Returns true if the current token is a block close. Returns: True if the current token is a block close. """ return self._is_block_close def InObjectLiteral(self): """Returns true if the current token is within an object literal. Returns: True if the current token is within an object literal. """ return self._block_depth and self._block_types[-1] == self.OBJECT_LITERAL def InObjectLiteralDescendant(self): """Returns true if the current token has an object literal ancestor. Returns: True if the current token has an object literal ancestor. """ return self.OBJECT_LITERAL in self._block_types def InParentheses(self): """Returns true if the current token is within parentheses. Returns: True if the current token is within parentheses. """ return bool(self._paren_depth) def ParenthesesDepth(self): """Returns the number of parens surrounding the token. Returns: The number of parenthesis surrounding the token. """ return self._paren_depth def BlockDepth(self): """Returns the number of blocks in which the token is nested. Returns: The number of blocks in which the token is nested. """ return self._block_depth def FunctionDepth(self): """Returns the number of functions in which the token is nested. Returns: The number of functions in which the token is nested. """ return len(self._function_stack) def InTopLevel(self): """Whether we are at the top level in the class. This function call is language specific. In some languages like JavaScript, a function is top level if it is not inside any parenthesis. In languages such as ActionScript, a function is top level if it is directly within a class. """ raise TypeError('Abstract method InTopLevel not implemented') def GetBlockType(self, token): """Determine the block type given a START_BLOCK token. Code blocks come after parameters, keywords like else, and closing parens. Args: token: The current token. Can be assumed to be type START_BLOCK. Returns: Code block type for current token. 
""" raise TypeError('Abstract method GetBlockType not implemented') def GetParams(self): """Returns the accumulated input params as an array. In some EcmasSript languages, input params are specified like (param:Type, param2:Type2, ...) in other they are specified just as (param, param2) We handle both formats for specifying parameters here and leave it to the compilers for each language to detect compile errors. This allows more code to be reused between lint checkers for various EcmaScript languages. Returns: The accumulated input params as an array. """ params = [] if self._cumulative_params: params = re.compile(r'\s+').sub('', self._cumulative_params).split(',') # Strip out the type from parameters of the form name:Type. params = map(lambda param: param.split(':')[0], params) return params def GetLastComment(self): """Return the last plain comment that could be used as documentation. Returns: The last plain comment that could be used as documentation. """ return self._last_comment def GetDocComment(self): """Return the most recent applicable documentation comment. Returns: The last applicable documentation comment. """ return self._doc_comment def HasDocComment(self, identifier): """Returns whether the identifier has been documented yet. Args: identifier: The identifier. Returns: Whether the identifier has been documented yet. """ return identifier in self._documented_identifiers def InDocComment(self): """Returns whether the current token is in a doc comment. Returns: Whether the current token is in a doc comment. """ return self._doc_comment and self._doc_comment.end_token is None def GetDocFlag(self): """Returns the current documentation flags. Returns: The current documentation flags. """ return self._doc_flag def IsTypeToken(self, t): if self.InDocComment() and t.type not in (Type.START_DOC_COMMENT, Type.DOC_FLAG, Type.DOC_INLINE_FLAG, Type.DOC_PREFIX): f = tokenutil.SearchUntil(t, [Type.DOC_FLAG], [Type.START_DOC_COMMENT], None, True) if (f and f.attached_object.type_start_token is not None and f.attached_object.type_end_token is not None): return (tokenutil.Compare(t, f.attached_object.type_start_token) > 0 and tokenutil.Compare(t, f.attached_object.type_end_token) < 0) return False def GetFunction(self): """Return the function the current code block is a part of. Returns: The current Function object. """ if self._function_stack: return self._function_stack[-1] def GetBlockDepth(self): """Return the block depth. Returns: The current block depth. """ return self._block_depth def GetLastNonSpaceToken(self): """Return the last non whitespace token.""" return self._last_non_space_token def GetLastLine(self): """Return the last line.""" return self._last_line def GetFirstToken(self): """Return the very first token in the file.""" return self._first_token def IsVariableInScope(self, token_string): """Checks if string is variable in current scope. For given string it checks whether the string is a defined variable (including function param) in current state. E.g. if variables defined (variables in current scope) is docs then docs, docs.length etc will be considered as variable in current scope. This will help in avoding extra goog.require for variables. Args: token_string: String to check if its is a variable in current scope. Returns: true if given string is a variable in current scope. 
""" for variable in self._variables_in_scope: if (token_string == variable or token_string.startswith(variable + '.')): return True return False def HandleToken(self, token, last_non_space_token): """Handles the given token and updates state. Args: token: The token to handle. last_non_space_token: """ self._is_block_close = False if not self._first_token: self._first_token = token # Track block depth. type = token.type if type == Type.START_BLOCK: self._block_depth += 1 # Subclasses need to handle block start very differently because # whether a block is a CODE or OBJECT_LITERAL block varies significantly # by language. self._block_types.append(self.GetBlockType(token)) # When entering a function body, record its parameters. if self.InFunction(): function = self._function_stack[-1] if self._block_depth == function.block_depth + 1: function.parameters = self.GetParams() # Track block depth. elif type == Type.END_BLOCK: self._is_block_close = not self.InObjectLiteral() self._block_depth -= 1 self._block_types.pop() # Track parentheses depth. elif type == Type.START_PAREN: self._paren_depth += 1 # Track parentheses depth. elif type == Type.END_PAREN: self._paren_depth -= 1 elif type == Type.COMMENT: self._last_comment = token.string elif type == Type.START_DOC_COMMENT: self._last_comment = None self._doc_comment = DocComment(token) elif type == Type.END_DOC_COMMENT: self._doc_comment.end_token = token elif type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG): flag = self._doc_flag(token) token.attached_object = flag self._doc_comment.AddFlag(flag) if flag.flag_type == 'suppress': self._doc_comment.AddSuppression(token) elif type == Type.FUNCTION_DECLARATION: last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, None, True) doc = None # Only functions outside of parens are eligible for documentation. if not self._paren_depth: doc = self._doc_comment name = '' is_assigned = last_code and (last_code.IsOperator('=') or last_code.IsOperator('||') or last_code.IsOperator('&&') or (last_code.IsOperator(':') and not self.InObjectLiteral())) if is_assigned: # TODO(robbyw): This breaks for x[2] = ... # Must use loop to find full function name in the case of line-wrapped # declarations (bug 1220601) like: # my.function.foo. # bar = function() ... identifier = tokenutil.Search(last_code, Type.SIMPLE_LVALUE, None, True) while identifier and identifier.type in ( Type.IDENTIFIER, Type.SIMPLE_LVALUE): name = identifier.string + name # Traverse behind us, skipping whitespace and comments. while True: identifier = identifier.previous if not identifier or not identifier.type in Type.NON_CODE_TYPES: break else: next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES) while next_token and next_token.IsType(Type.FUNCTION_NAME): name += next_token.string next_token = tokenutil.Search(next_token, Type.FUNCTION_NAME, 2) function = Function(self._block_depth, is_assigned, doc, name) function.start_token = token self._function_stack.append(function) self._functions_by_name[name] = function # Add a delimiter in stack for scope variables to define start of # function. This helps in popping variables of this function when # function declaration ends. 
self._variables_in_scope.append('') elif type == Type.START_PARAMETERS: self._cumulative_params = '' elif type == Type.PARAMETERS: self._cumulative_params += token.string self._variables_in_scope.extend(self.GetParams()) elif type == Type.KEYWORD and token.string == 'return': next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES) if not next_token.IsType(Type.SEMICOLON): function = self.GetFunction() if function: function.has_return = True elif type == Type.KEYWORD and token.string == 'throw': function = self.GetFunction() if function: function.has_throw = True elif type == Type.KEYWORD and token.string == 'var': function = self.GetFunction() next_token = tokenutil.Search(token, [Type.IDENTIFIER, Type.SIMPLE_LVALUE]) if next_token: if next_token.type == Type.SIMPLE_LVALUE: self._variables_in_scope.append(next_token.values['identifier']) else: self._variables_in_scope.append(next_token.string) elif type == Type.SIMPLE_LVALUE: identifier = token.values['identifier'] jsdoc = self.GetDocComment() if jsdoc: self._documented_identifiers.add(identifier) self._HandleIdentifier(identifier, True) elif type == Type.IDENTIFIER: self._HandleIdentifier(token.string, False) # Detect documented non-assignments. next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES) if next_token and next_token.IsType(Type.SEMICOLON): if (self._last_non_space_token and self._last_non_space_token.IsType(Type.END_DOC_COMMENT)): self._documented_identifiers.add(token.string) def _HandleIdentifier(self, identifier, is_assignment): """Process the given identifier. Currently checks if it references 'this' and annotates the function accordingly. Args: identifier: The identifier to process. is_assignment: Whether the identifier is being written to. """ if identifier == 'this' or identifier.startswith('this.'): function = self.GetFunction() if function: function.has_this = True def HandleAfterToken(self, token): """Handle updating state after a token has been checked. This function should be used for destructive state changes such as deleting a tracked object. Args: token: The token to handle. """ type = token.type if type == Type.SEMICOLON or type == Type.END_PAREN or ( type == Type.END_BRACKET and self._last_non_space_token.type not in ( Type.SINGLE_QUOTE_STRING_END, Type.DOUBLE_QUOTE_STRING_END)): # We end on any numeric array index, but keep going for string based # array indices so that we pick up manually exported identifiers. self._doc_comment = None self._last_comment = None elif type == Type.END_BLOCK: self._doc_comment = None self._last_comment = None if self.InFunction() and self.IsFunctionClose(): # TODO(robbyw): Detect the function's name for better errors. function = self._function_stack.pop() function.end_token = token # Pop all variables up to the delimiter ('') that were defined in the # function being closed, so they go out of scope. while self._variables_in_scope and self._variables_in_scope[-1]: self._variables_in_scope.pop() # Pop the delimiter. if self._variables_in_scope: self._variables_in_scope.pop() elif type == Type.END_PARAMETERS and self._doc_comment: self._doc_comment = None self._last_comment = None if not token.IsAnyType(Type.WHITESPACE, Type.BLANK_LINE): self._last_non_space_token = token self._last_line = token.line closure_linter-2.3.13/closure_linter/error_check.py0000750014730400116100000000727012247733554022111 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2011 The Closure Linter Authors. All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Specific JSLint errors checker.""" import gflags as flags FLAGS = flags.FLAGS class Rule(object): """Different rules to check.""" # Documentations for specific rules goes in flag definition. BLANK_LINES_AT_TOP_LEVEL = 'blank_lines_at_top_level' INDENTATION = 'indentation' WELL_FORMED_AUTHOR = 'well_formed_author' NO_BRACES_AROUND_INHERIT_DOC = 'no_braces_around_inherit_doc' BRACES_AROUND_TYPE = 'braces_around_type' OPTIONAL_TYPE_MARKER = 'optional_type_marker' VARIABLE_ARG_MARKER = 'variable_arg_marker' UNUSED_PRIVATE_MEMBERS = 'unused_private_members' UNUSED_LOCAL_VARIABLES = 'unused_local_variables' # Rule to raise all known errors. ALL = 'all' # All rules that are to be checked when using the strict flag. E.g. the rules # that are specific to the stricter Closure style. CLOSURE_RULES = frozenset([BLANK_LINES_AT_TOP_LEVEL, INDENTATION, WELL_FORMED_AUTHOR, NO_BRACES_AROUND_INHERIT_DOC, BRACES_AROUND_TYPE, OPTIONAL_TYPE_MARKER, VARIABLE_ARG_MARKER]) flags.DEFINE_boolean('strict', False, 'Whether to validate against the stricter Closure style. ' 'This includes ' + (', '.join(Rule.CLOSURE_RULES)) + '.') flags.DEFINE_multistring('jslint_error', [], 'List of specific lint errors to check. Here is a list' ' of accepted values:\n' ' - ' + Rule.ALL + ': enables all following errors.\n' ' - ' + Rule.BLANK_LINES_AT_TOP_LEVEL + ': validates' 'number of blank lines between blocks at top level.\n' ' - ' + Rule.INDENTATION + ': checks correct ' 'indentation of code.\n' ' - ' + Rule.WELL_FORMED_AUTHOR + ': validates the ' '@author JsDoc tags.\n' ' - ' + Rule.NO_BRACES_AROUND_INHERIT_DOC + ': ' 'forbids braces around @inheritdoc JsDoc tags.\n' ' - ' + Rule.BRACES_AROUND_TYPE + ': enforces braces ' 'around types in JsDoc tags.\n' ' - ' + Rule.OPTIONAL_TYPE_MARKER + ': checks correct ' 'use of optional marker = in param types.\n' ' - ' + Rule.UNUSED_PRIVATE_MEMBERS + ': checks for ' 'unused private variables.\n') def ShouldCheck(rule): """Returns whether the optional rule should be checked. Computes different flags (strict, jslint_error, jslint_noerror) to find out if this specific rule should be checked. Args: rule: Name of the rule (see Rule). Returns: True if the rule should be checked according to the flags, otherwise False. """ if rule in FLAGS.jslint_error or Rule.ALL in FLAGS.jslint_error: return True # Checks strict rules. return FLAGS.strict and rule in Rule.CLOSURE_RULES closure_linter-2.3.13/closure_linter/fixjsstyle.py0000750014730400116100000000313012247733554022016 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2007 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Automatically fix simple style guide violations.""" __author__ = 'robbyw@google.com (Robert Walker)' import sys import gflags as flags from closure_linter import error_fixer from closure_linter import runner from closure_linter.common import simplefileflags as fileflags FLAGS = flags.FLAGS flags.DEFINE_list('additional_extensions', None, 'List of additional file ' 'extensions (not js) that should be treated as ' 'JavaScript files.') def main(argv=None): """Main function. Args: argv: Sequence of command line arguments. """ if argv is None: argv = flags.FLAGS(sys.argv) suffixes = ['.js'] if FLAGS.additional_extensions: suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions] files = fileflags.GetFileList(argv, 'JavaScript', suffixes) fixer = error_fixer.ErrorFixer() # Check the list of files. for filename in files: runner.Run(filename, fixer) if __name__ == '__main__': main() closure_linter-2.3.13/closure_linter/errorrules.py0000750014730400116100000000434412247733554022026 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2010 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Linter error rules class for Closure Linter.""" __author__ = 'robbyw@google.com (Robert Walker)' import gflags as flags from closure_linter import errors FLAGS = flags.FLAGS flags.DEFINE_boolean('jsdoc', True, 'Whether to report errors for missing JsDoc.') flags.DEFINE_list('disable', None, 'Disable specific error. Usage Ex.: gjslint --disable 1,' '0011 foo.js.') flags.DEFINE_integer('max_line_length', 80, 'Maximum line length allowed ' 'without warning.', lower_bound=1) disabled_error_nums = None def GetMaxLineLength(): """Returns allowed maximum length of line. Returns: Length of line allowed without any warning. """ return FLAGS.max_line_length def ShouldReportError(error): """Whether the given error should be reported. Returns: True for all errors except missing documentation errors and disabled errors. For missing documentation, it returns the value of the jsdoc flag. 
""" global disabled_error_nums if disabled_error_nums is None: disabled_error_nums = [] if FLAGS.disable: for error_str in FLAGS.disable: error_num = 0 try: error_num = int(error_str) except ValueError: pass disabled_error_nums.append(error_num) return ((FLAGS.jsdoc or error not in ( errors.MISSING_PARAMETER_DOCUMENTATION, errors.MISSING_RETURN_DOCUMENTATION, errors.MISSING_MEMBER_DOCUMENTATION, errors.MISSING_PRIVATE, errors.MISSING_JSDOC_TAG_THIS)) and (not FLAGS.disable or error not in disabled_error_nums)) closure_linter-2.3.13/closure_linter/fixjsstyle_test.py0000750014730400116100000002376212247733554023072 0ustar ajpeng00000000000000#!/usr/bin/env python # Copyright 2008 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Medium tests for the gpylint auto-fixer.""" __author__ = 'robbyw@google.com (Robby Walker)' import StringIO import gflags as flags import unittest as googletest from closure_linter import error_fixer from closure_linter import runner _RESOURCE_PREFIX = 'closure_linter/testdata' flags.FLAGS.strict = True flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js') flags.FLAGS.closurized_namespaces = ('goog', 'dummy') class FixJsStyleTest(googletest.TestCase): """Test case to for gjslint auto-fixing.""" def testFixJsStyle(self): test_cases = [ ['fixjsstyle.in.js', 'fixjsstyle.out.js'], ['indentation.js', 'fixjsstyle.indentation.out.js'], ['fixjsstyle.html.in.html', 'fixjsstyle.html.out.html']] for [running_input_file, running_output_file] in test_cases: input_filename = None golden_filename = None current_filename = None try: input_filename = '%s/%s' % (_RESOURCE_PREFIX, running_input_file) current_filename = input_filename golden_filename = '%s/%s' % (_RESOURCE_PREFIX, running_output_file) current_filename = golden_filename except IOError as ex: raise IOError('Could not find testdata resource for %s: %s' % (current_filename, ex)) if running_input_file == 'fixjsstyle.in.js': with open(input_filename) as f: for line in f: # Go to last line. pass self.assertTrue(line == line.rstrip(), '%s file should not end ' 'with a new line.' % (input_filename)) # Autofix the file, sending output to a fake file. actual = StringIO.StringIO() runner.Run(input_filename, error_fixer.ErrorFixer(actual)) # Now compare the files. actual.seek(0) expected = open(golden_filename, 'r') self.assertEqual(actual.readlines(), expected.readlines()) def testUnsortedRequires(self): """Tests handling of unsorted goog.require statements without header. Bug 8398202. 
""" original = [ 'goog.require(\'dummy.aa\');', 'goog.require(\'dummy.Cc\');', 'goog.require(\'dummy.Dd\');', '', 'function a() {', ' dummy.aa.i = 1;', ' dummy.Cc.i = 1;', ' dummy.Dd.i = 1;', '}', ] expected = [ 'goog.require(\'dummy.Cc\');', 'goog.require(\'dummy.Dd\');', 'goog.require(\'dummy.aa\');', '', 'function a() {', ' dummy.aa.i = 1;', ' dummy.Cc.i = 1;', ' dummy.Dd.i = 1;', '}', ] self._AssertFixes(original, expected, include_header=False) def testMissingExtraAndUnsortedRequires(self): """Tests handling of missing extra and unsorted goog.require statements.""" original = [ 'goog.require(\'dummy.aa\');', 'goog.require(\'dummy.Cc\');', 'goog.require(\'dummy.Dd\');', '', 'var x = new dummy.Bb();', 'dummy.Cc.someMethod();', 'dummy.aa.someMethod();', ] expected = [ 'goog.require(\'dummy.Bb\');', 'goog.require(\'dummy.Cc\');', 'goog.require(\'dummy.aa\');', '', 'var x = new dummy.Bb();', 'dummy.Cc.someMethod();', 'dummy.aa.someMethod();', ] self._AssertFixes(original, expected) def testUnsortedProvides(self): """Tests handling of unsorted goog.provide statements without header. Bug 8398202. """ original = [ 'goog.provide(\'dummy.aa\');', 'goog.provide(\'dummy.Cc\');', 'goog.provide(\'dummy.Dd\');', '', 'dummy.aa = function() {};' 'dummy.Cc = function() {};' 'dummy.Dd = function() {};' ] expected = [ 'goog.provide(\'dummy.Cc\');', 'goog.provide(\'dummy.Dd\');', 'goog.provide(\'dummy.aa\');', '', 'dummy.aa = function() {};' 'dummy.Cc = function() {};' 'dummy.Dd = function() {};' ] self._AssertFixes(original, expected, include_header=False) def testMissingExtraAndUnsortedProvides(self): """Tests handling of missing extra and unsorted goog.provide statements.""" original = [ 'goog.provide(\'dummy.aa\');', 'goog.provide(\'dummy.Cc\');', 'goog.provide(\'dummy.Dd\');', '', 'dummy.Cc = function() {};', 'dummy.Bb = function() {};', 'dummy.aa.someMethod = function();', ] expected = [ 'goog.provide(\'dummy.Bb\');', 'goog.provide(\'dummy.Cc\');', 'goog.provide(\'dummy.aa\');', '', 'dummy.Cc = function() {};', 'dummy.Bb = function() {};', 'dummy.aa.someMethod = function();', ] self._AssertFixes(original, expected) def testNoRequires(self): """Tests positioning of missing requires without existing requires.""" original = [ 'goog.provide(\'dummy.Something\');', '', 'dummy.Something = function() {};', '', 'var x = new dummy.Bb();', ] expected = [ 'goog.provide(\'dummy.Something\');', '', 'goog.require(\'dummy.Bb\');', '', 'dummy.Something = function() {};', '', 'var x = new dummy.Bb();', ] self._AssertFixes(original, expected) def testNoProvides(self): """Tests positioning of missing provides without existing provides.""" original = [ 'goog.require(\'dummy.Bb\');', '', 'dummy.Something = function() {};', '', 'var x = new dummy.Bb();', ] expected = [ 'goog.provide(\'dummy.Something\');', '', 'goog.require(\'dummy.Bb\');', '', 'dummy.Something = function() {};', '', 'var x = new dummy.Bb();', ] self._AssertFixes(original, expected) def testOutputOkayWhenFirstTokenIsDeleted(self): """Tests that autofix output is is correct when first token is deleted. 
Regression test for bug 4581567 """ original = ['"use strict";'] expected = ["'use strict';"] self._AssertFixes(original, expected, include_header=False) def testGoogScopeIndentation(self): """Tests Handling a typical end-of-scope indentation fix.""" original = [ 'goog.scope(function() {', ' // TODO(brain): Take over the world.', '}); // goog.scope', ] expected = [ 'goog.scope(function() {', '// TODO(brain): Take over the world.', '}); // goog.scope', ] self._AssertFixes(original, expected) def testMissingEndOfScopeComment(self): """Tests Handling a missing comment at end of goog.scope.""" original = [ 'goog.scope(function() {', '});', ] expected = [ 'goog.scope(function() {', '}); // goog.scope', ] self._AssertFixes(original, expected) def testMissingEndOfScopeCommentWithOtherComment(self): """Tests handling an irrelevant comment at end of goog.scope.""" original = [ 'goog.scope(function() {', "}); // I don't belong here!", ] expected = [ 'goog.scope(function() {', '}); // goog.scope', ] self._AssertFixes(original, expected) def testMalformedEndOfScopeComment(self): """Tests Handling a malformed comment at end of goog.scope.""" original = [ 'goog.scope(function() {', '}); // goog.scope FTW', ] expected = [ 'goog.scope(function() {', '}); // goog.scope', ] self._AssertFixes(original, expected) def testEndsWithIdentifier(self): """Tests Handling case where script ends with identifier. Bug 7643404.""" original = [ 'goog.provide(\'xyz\');', '', 'abc' ] expected = [ 'goog.provide(\'xyz\');', '', 'abc;' ] self._AssertFixes(original, expected) def testFileStartsWithSemicolon(self): """Tests handling files starting with semicolon. b/10062516 """ original = [ ';goog.provide(\'xyz\');', '', 'abc;' ] expected = [ 'goog.provide(\'xyz\');', '', 'abc;' ] self._AssertFixes(original, expected, include_header=False) def testCodeStartsWithSemicolon(self): """Tests handling code in starting with semicolon after comments. b/10062516 """ original = [ ';goog.provide(\'xyz\');', '', 'abc;' ] expected = [ 'goog.provide(\'xyz\');', '', 'abc;' ] self._AssertFixes(original, expected) def _AssertFixes(self, original, expected, include_header=True): """Asserts that the error fixer corrects original to expected.""" if include_header: original = self._GetHeader() + original expected = self._GetHeader() + expected actual = StringIO.StringIO() runner.Run('testing.js', error_fixer.ErrorFixer(actual), original) actual.seek(0) expected = [x + '\n' for x in expected] self.assertListEqual(actual.readlines(), expected) def _GetHeader(self): """Returns a fake header for a JavaScript file.""" return [ '// Copyright 2011 Google Inc. All Rights Reserved.', '', '/**', ' * @fileoverview Fake file overview.', ' * @author fake@google.com (Fake Person)', ' */', '' ] if __name__ == '__main__': googletest.main() closure_linter-2.3.13/closure_linter/not_strict_test.py0000750014730400116100000000441612247733554023051 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2011 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """Tests for gjslint --nostrict. Tests errors that can be thrown by gjslint when not in strict mode. """ import os import sys import unittest import gflags as flags import unittest as googletest from closure_linter import errors from closure_linter import runner from closure_linter.common import filetestcase _RESOURCE_PREFIX = 'closure_linter/testdata' flags.FLAGS.strict = False flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires') flags.FLAGS.closurized_namespaces = ('goog', 'dummy') flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js', 'limited_doc_checks.js') # List of files under testdata to test. # We need to list files explicitly since pyglib can't list directories. _TEST_FILES = [ 'not_strict.js' ] class GJsLintTestSuite(unittest.TestSuite): """Test suite to run a GJsLintTest for each of several files. If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in testdata to test. Otherwise, _TEST_FILES is used. """ def __init__(self, tests=()): unittest.TestSuite.__init__(self, tests) argv = sys.argv and sys.argv[1:] or [] if argv: test_files = argv else: test_files = _TEST_FILES for test_file in test_files: resource_path = os.path.join(_RESOURCE_PREFIX, test_file) self.addTest(filetestcase.AnnotatedFileTestCase(resource_path, runner.Run, errors.ByName)) if __name__ == '__main__': # Don't let main parse args; it happens in the TestSuite. googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite') closure_linter-2.3.13/closure_linter/closurizednamespacesinfo_test.py0000750014730400116100000005550212247733554025762 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2010 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Unit tests for ClosurizedNamespacesInfo.""" import unittest as googletest from closure_linter import aliaspass from closure_linter import closurizednamespacesinfo from closure_linter import ecmametadatapass from closure_linter import javascriptstatetracker from closure_linter import javascripttokens from closure_linter import testutil from closure_linter import tokenutil # pylint: disable=g-bad-name TokenType = javascripttokens.JavaScriptTokenType class ClosurizedNamespacesInfoTest(googletest.TestCase): """Tests for ClosurizedNamespacesInfo.""" _test_cases = { 'goog.global.anything': None, 'package.CONSTANT': 'package', 'package.methodName': 'package', 'package.subpackage.methodName': 'package.subpackage', 'package.subpackage.methodName.apply': 'package.subpackage', 'package.ClassName.something': 'package.ClassName', 'package.ClassName.Enum.VALUE.methodName': 'package.ClassName', 'package.ClassName.CONSTANT': 'package.ClassName', 'package.namespace.CONSTANT.methodName': 'package.namespace', 'package.ClassName.inherits': 'package.ClassName', 'package.ClassName.apply': 'package.ClassName', 'package.ClassName.methodName.apply': 'package.ClassName', 'package.ClassName.methodName.call': 'package.ClassName', 'package.ClassName.prototype.methodName': 'package.ClassName', 'package.ClassName.privateMethod_': 'package.ClassName', 'package.className.privateProperty_': 'package.className', 'package.className.privateProperty_.methodName': 'package.className', 'package.ClassName.PrivateEnum_': 'package.ClassName', 'package.ClassName.prototype.methodName.apply': 'package.ClassName', 'package.ClassName.property.subProperty': 'package.ClassName', 'package.className.prototype.something.somethingElse': 'package.className' } def testGetClosurizedNamespace(self): """Tests that the correct namespace is returned for various identifiers.""" namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo( closurized_namespaces=['package'], ignored_extra_namespaces=[]) for identifier, expected_namespace in self._test_cases.items(): actual_namespace = namespaces_info.GetClosurizedNamespace(identifier) self.assertEqual( expected_namespace, actual_namespace, 'expected namespace "' + str(expected_namespace) + '" for identifier "' + str(identifier) + '" but was "' + str(actual_namespace) + '"') def testIgnoredExtraNamespaces(self): """Tests that ignored_extra_namespaces are ignored.""" token = self._GetRequireTokens('package.Something') namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo( closurized_namespaces=['package'], ignored_extra_namespaces=['package.Something']) self.assertFalse(namespaces_info.IsExtraRequire(token), 'Should be valid since it is in ignored namespaces.') namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo( ['package'], []) self.assertTrue(namespaces_info.IsExtraRequire(token), 'Should be invalid since it is not in ignored namespaces.') def testIsExtraProvide_created(self): """Tests that provides for created namespaces are not extra.""" input_lines = [ 'goog.provide(\'package.Foo\');', 'package.Foo = function() {};' ] token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( input_lines, ['package']) self.assertFalse(namespaces_info.IsExtraProvide(token), 'Should not be extra since it is created.') def testIsExtraProvide_createdIdentifier(self): """Tests that provides for created identifiers are not extra.""" input_lines = [ 'goog.provide(\'package.Foo.methodName\');', 'package.Foo.methodName = function() {};' ] token, namespaces_info = 
self._GetStartTokenAndNamespacesInfoForScript( input_lines, ['package']) self.assertFalse(namespaces_info.IsExtraProvide(token), 'Should not be extra since it is created.') def testIsExtraProvide_notCreated(self): """Tests that provides for non-created namespaces are extra.""" input_lines = ['goog.provide(\'package.Foo\');'] token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( input_lines, ['package']) self.assertTrue(namespaces_info.IsExtraProvide(token), 'Should be extra since it is not created.') def testIsExtraProvide_duplicate(self): """Tests that providing a namespace twice makes the second one extra.""" input_lines = [ 'goog.provide(\'package.Foo\');', 'goog.provide(\'package.Foo\');', 'package.Foo = function() {};' ] token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( input_lines, ['package']) # Advance to the second goog.provide token. token = tokenutil.Search(token.next, TokenType.IDENTIFIER) self.assertTrue(namespaces_info.IsExtraProvide(token), 'Should be extra since it is already provided.') def testIsExtraProvide_notClosurized(self): """Tests that provides of non-closurized namespaces are not extra.""" input_lines = ['goog.provide(\'notclosurized.Foo\');'] token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( input_lines, ['package']) self.assertFalse(namespaces_info.IsExtraProvide(token), 'Should not be extra since it is not closurized.') def testIsExtraRequire_used(self): """Tests that requires for used namespaces are not extra.""" input_lines = [ 'goog.require(\'package.Foo\');', 'var x = package.Foo.methodName();' ] token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( input_lines, ['package']) self.assertFalse(namespaces_info.IsExtraRequire(token), 'Should not be extra since it is used.') def testIsExtraRequire_usedIdentifier(self): """Tests that requires for used methods on classes are extra.""" input_lines = [ 'goog.require(\'package.Foo.methodName\');', 'var x = package.Foo.methodName();' ] token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( input_lines, ['package']) self.assertTrue(namespaces_info.IsExtraRequire(token), 'Should require the package, not the method specifically.') def testIsExtraRequire_notUsed(self): """Tests that requires for unused namespaces are extra.""" input_lines = ['goog.require(\'package.Foo\');'] token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( input_lines, ['package']) self.assertTrue(namespaces_info.IsExtraRequire(token), 'Should be extra since it is not used.') def testIsExtraRequire_notClosurized(self): """Tests that requires of non-closurized namespaces are not extra.""" input_lines = ['goog.require(\'notclosurized.Foo\');'] token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( input_lines, ['package']) self.assertFalse(namespaces_info.IsExtraRequire(token), 'Should not be extra since it is not closurized.') def testIsExtraRequire_objectOnClass(self): """Tests that requiring an object on a class is extra.""" input_lines = [ 'goog.require(\'package.Foo.Enum\');', 'var x = package.Foo.Enum.VALUE1;', ] token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( input_lines, ['package']) self.assertTrue(namespaces_info.IsExtraRequire(token), 'The whole class, not the object, should be required.'); def testIsExtraRequire_constantOnClass(self): """Tests that requiring a constant on a class is extra.""" input_lines = [ 'goog.require(\'package.Foo.CONSTANT\');', 'var x = package.Foo.CONSTANT', ] 
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( input_lines, ['package']) self.assertTrue(namespaces_info.IsExtraRequire(token), 'The class, not the constant, should be required.'); def testIsExtraRequire_constantNotOnClass(self): """Tests that requiring a constant not on a class is OK.""" input_lines = [ 'goog.require(\'package.subpackage.CONSTANT\');', 'var x = package.subpackage.CONSTANT', ] token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( input_lines, ['package']) self.assertFalse(namespaces_info.IsExtraRequire(token), 'Constants can be required except on classes.'); def testIsExtraRequire_methodNotOnClass(self): """Tests that requiring a method not on a class is OK.""" input_lines = [ 'goog.require(\'package.subpackage.method\');', 'var x = package.subpackage.method()', ] token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( input_lines, ['package']) self.assertFalse(namespaces_info.IsExtraRequire(token), 'Methods can be required except on classes.'); def testIsExtraRequire_defaults(self): """Tests that there are no warnings about extra requires for test utils""" input_lines = ['goog.require(\'goog.testing.jsunit\');'] token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( input_lines, ['goog']) self.assertFalse(namespaces_info.IsExtraRequire(token), 'Should not be extra since it is for testing.') def testGetMissingProvides_provided(self): """Tests that provided functions don't cause a missing provide.""" input_lines = [ 'goog.provide(\'package.Foo\');', 'package.Foo = function() {};' ] namespaces_info = self._GetNamespacesInfoForScript( input_lines, ['package']) self.assertEquals(0, len(namespaces_info.GetMissingProvides())) def testGetMissingProvides_providedIdentifier(self): """Tests that provided identifiers don't cause a missing provide.""" input_lines = [ 'goog.provide(\'package.Foo.methodName\');', 'package.Foo.methodName = function() {};' ] namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) self.assertEquals(0, len(namespaces_info.GetMissingProvides())) def testGetMissingProvides_providedParentIdentifier(self): """Tests that provided identifiers on a class don't cause a missing provide on objects attached to that class.""" input_lines = [ 'goog.provide(\'package.foo.ClassName\');', 'package.foo.ClassName.methodName = function() {};', 'package.foo.ClassName.ObjectName = 1;', ] namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) self.assertEquals(0, len(namespaces_info.GetMissingProvides())) def testGetMissingProvides_unprovided(self): """Tests that unprovided functions cause a missing provide.""" input_lines = ['package.Foo = function() {};'] namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) missing_provides = namespaces_info.GetMissingProvides() self.assertEquals(1, len(missing_provides)) missing_provide = missing_provides.popitem() self.assertEquals('package.Foo', missing_provide[0]) self.assertEquals(1, missing_provide[1]) def testGetMissingProvides_privatefunction(self): """Tests that unprovided private functions don't cause a missing provide.""" input_lines = ['package.Foo_ = function() {};'] namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) self.assertEquals(0, len(namespaces_info.GetMissingProvides())) def testGetMissingProvides_required(self): """Tests that required namespaces don't cause a missing provide.""" input_lines = [ 'goog.require(\'package.Foo\');', 'package.Foo.methodName = 
function() {};' ] namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) self.assertEquals(0, len(namespaces_info.GetMissingProvides())) def testGetMissingRequires_required(self): """Tests that required namespaces don't cause a missing require.""" input_lines = [ 'goog.require(\'package.Foo\');', 'package.Foo();' ] namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) self.assertEquals(0, len(namespaces_info.GetMissingRequires())) def testGetMissingRequires_requiredIdentifier(self): """Tests that required namespaces satisfy identifiers on that namespace.""" input_lines = [ 'goog.require(\'package.Foo\');', 'package.Foo.methodName();' ] namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) self.assertEquals(0, len(namespaces_info.GetMissingRequires())) def testGetMissingRequires_requiredParentClass(self): """Tests that requiring a parent class of an object is sufficient to prevent a missing require on that object.""" input_lines = [ 'goog.require(\'package.Foo\');', 'package.Foo.methodName();', 'package.Foo.methodName(package.Foo.ObjectName);' ] namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) self.assertEquals(0, len(namespaces_info.GetMissingRequires())) def testGetMissingRequires_unrequired(self): """Tests that unrequired namespaces cause a missing require.""" input_lines = ['package.Foo();'] namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) missing_requires = namespaces_info.GetMissingRequires() self.assertEquals(1, len(missing_requires)) missing_req = missing_requires.popitem() self.assertEquals('package.Foo', missing_req[0]) self.assertEquals(1, missing_req[1]) def testGetMissingRequires_provided(self): """Tests that provided namespaces satisfy identifiers on that namespace.""" input_lines = [ 'goog.provide(\'package.Foo\');', 'package.Foo.methodName();' ] namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) self.assertEquals(0, len(namespaces_info.GetMissingRequires())) def testGetMissingRequires_created(self): """Tests that created namespaces do not satisfy usage of an identifier.""" input_lines = [ 'package.Foo = function();', 'package.Foo.methodName();', 'package.Foo.anotherMethodName1();', 'package.Foo.anotherMethodName2();' ] namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) missing_requires = namespaces_info.GetMissingRequires() self.assertEquals(1, len(missing_requires)) missing_require = missing_requires.popitem() self.assertEquals('package.Foo', missing_require[0]) # Make sure line number of first occurrence is reported self.assertEquals(2, missing_require[1]) def testGetMissingRequires_createdIdentifier(self): """Tests that created identifiers satisfy usage of the identifier.""" input_lines = [ 'package.Foo.methodName = function();', 'package.Foo.methodName();' ] namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) self.assertEquals(0, len(namespaces_info.GetMissingRequires())) def testGetMissingRequires_objectOnClass(self): """Tests that we should require a class, not the object on the class.""" input_lines = [ 'goog.require(\'package.Foo.Enum\');', 'var x = package.Foo.Enum.VALUE1;', ] namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) self.assertEquals(1, len(namespaces_info.GetMissingRequires()), 'The whole class, not the object, should be required.') def testGetMissingRequires_variableWithSameName(self): """Tests that we should not goog.require 
variables and parameters. b/5362203 Variables in scope are not missing namespaces. """ input_lines = [ 'goog.provide(\'Foo\');', 'Foo.A = function();', 'Foo.A.prototype.method = function(ab) {', ' if (ab) {', ' var docs;', ' var lvalue = new Obj();', ' // Variable in scope hence not goog.require here.', ' docs.foo.abc = 1;', ' lvalue.next();', ' }', ' // Since js is function scope this should also not goog.require.', ' docs.foo.func();', ' // Its not a variable in scope hence goog.require.', ' dummy.xyz.reset();', ' return this.method2();', '};', 'Foo.A.prototype.method1 = function(docs, abcd, xyz) {', ' // Parameter hence not goog.require.', ' docs.nodes.length = 2;', ' lvalue.abc.reset();', '};' ] namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['Foo', 'docs', 'lvalue', 'dummy']) missing_requires = namespaces_info.GetMissingRequires() self.assertEquals(2, len(missing_requires)) self.assertItemsEqual( {'dummy.xyz': 14, 'lvalue.abc': 20}, missing_requires) def testIsFirstProvide(self): """Tests operation of the isFirstProvide method.""" input_lines = [ 'goog.provide(\'package.Foo\');', 'package.Foo.methodName();' ] token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( input_lines, ['package']) self.assertTrue(namespaces_info.IsFirstProvide(token)) def testGetWholeIdentifierString(self): """Tests getting the whole string of a multi-line identifier.""" input_lines = [ 'package.Foo.', ' veryLong.', ' identifier;' ] token = testutil.TokenizeSource(input_lines) self.assertEquals('package.Foo.veryLong.identifier', tokenutil.GetIdentifierForToken(token)) self.assertEquals(None, tokenutil.GetIdentifierForToken(token.next)) def testScopified(self): """Tests that a goog.scope call is noticed.""" input_lines = [ 'goog.scope(function() {', '});' ] namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) self.assertTrue(namespaces_info._scopified_file) def testScope_unusedAlias(self): """Tests that an unused alias symbol doesn't result in a require.""" input_lines = [ 'goog.scope(function() {', 'var Event = goog.events.Event;', '});' ] namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) missing_requires = namespaces_info.GetMissingRequires() self.assertEquals({}, missing_requires) def testScope_usedAlias(self): """Tests that aliased symbols result in correct requires.""" input_lines = [ 'goog.scope(function() {', 'var Event = goog.events.Event;', 'var dom = goog.dom;', 'Event(dom.classes.get);', '});' ] namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) missing_requires = namespaces_info.GetMissingRequires() self.assertEquals({'goog.dom.classes': 4, 'goog.events.Event': 4}, missing_requires) def testScope_provides(self): """Tests that aliased symbols result in correct provides.""" input_lines = [ 'goog.scope(function() {', 'goog.bar = {};', 'var bar = goog.bar;', 'bar.Foo = {};', '});' ] namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) missing_provides = namespaces_info.GetMissingProvides() self.assertEquals({'goog.bar.Foo': 4}, missing_provides) def testSetTestOnlyNamespaces(self): """Tests that a namespace in setTestOnly makes it a valid provide.""" namespaces_info = self._GetNamespacesInfoForScript([ 'goog.setTestOnly(\'goog.foo.barTest\');' ], ['goog']) token = self._GetProvideTokens('goog.foo.barTest') self.assertFalse(namespaces_info.IsExtraProvide(token)) token = self._GetProvideTokens('goog.foo.bazTest') self.assertTrue(namespaces_info.IsExtraProvide(token)) def 
testSetTestOnlyComment(self): """Ensure a comment in setTestOnly does not cause a created namespace.""" namespaces_info = self._GetNamespacesInfoForScript([ 'goog.setTestOnly(\'this is a comment\');' ], ['goog']) self.assertEquals( [], namespaces_info._created_namespaces, 'A comment in setTestOnly should not modify created namespaces.') def _GetNamespacesInfoForScript(self, script, closurized_namespaces=None): _, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( script, closurized_namespaces) return namespaces_info def _GetStartTokenAndNamespacesInfoForScript( self, script, closurized_namespaces): token = testutil.TokenizeSource(script) return token, self._GetInitializedNamespacesInfo( token, closurized_namespaces, []) def _GetInitializedNamespacesInfo(self, token, closurized_namespaces, ignored_extra_namespaces): """Returns a namespaces info initialized with the given token stream.""" namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo( closurized_namespaces=closurized_namespaces, ignored_extra_namespaces=ignored_extra_namespaces) state_tracker = javascriptstatetracker.JavaScriptStateTracker() ecma_pass = ecmametadatapass.EcmaMetaDataPass() ecma_pass.Process(token) alias_pass = aliaspass.AliasPass(closurized_namespaces) alias_pass.Process(token) while token: state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken()) namespaces_info.ProcessToken(token, state_tracker) state_tracker.HandleAfterToken(token) token = token.next return namespaces_info def _GetProvideTokens(self, namespace): """Returns a list of tokens for a goog.provide of the given namespace.""" line_text = 'goog.provide(\'' + namespace + '\');\n' return testutil.TokenizeSource([line_text]) def _GetRequireTokens(self, namespace): """Returns a list of tokens for a goog.require of the given namespace.""" line_text = 'goog.require(\'' + namespace + '\');\n' return testutil.TokenizeSource([line_text]) if __name__ == '__main__': googletest.main() closure_linter-2.3.13/closure_linter/errorrecord.py0000640014730400116100000000376712247733554022150 0ustar ajpeng00000000000000#!/usr/bin/env python # Copyright 2012 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A simple, pickle-serializable class to represent a lint error.""" __author__ = 'nnaze@google.com (Nathan Naze)' import gflags as flags from closure_linter import errors from closure_linter.common import erroroutput FLAGS = flags.FLAGS class ErrorRecord(object): """Record-keeping struct that can be serialized back from a process. Attributes: path: Path to the file. error_string: Error string for the user. new_error: Whether this is a "new error" (see errors.NEW_ERRORS). """ def __init__(self, path, error_string, new_error): self.path = path self.error_string = error_string self.new_error = new_error def MakeErrorRecord(path, error): """Make an error record with correctly formatted error string. 
Errors cannot be serialized (pickled) across processes because of their pointers to the complex token/context graph. We use an intermediary serializable class to pass back just the relevant information. Args: path: Path of file the error was found in. error: An error.Error instance. Returns: ErrorRecord instance. """ new_error = error.code in errors.NEW_ERRORS if FLAGS.unix_mode: error_string = erroroutput.GetUnixErrorOutput( path, error, new_error=new_error) else: error_string = erroroutput.GetErrorOutput(error, new_error=new_error) return ErrorRecord(path, error_string, new_error) closure_linter-2.3.13/closure_linter/gjslint.py0000750014730400116100000002034312247733554021271 0ustar ajpeng00000000000000#!/usr/bin/env python # Copyright 2007 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Checks JavaScript files for common style guide violations. gjslint.py is designed to be used as a PRESUBMIT script to check for javascript style guide violations. As of now, it checks for the following violations: * Missing and extra spaces * Lines longer than 80 characters * Missing newline at end of file * Missing semicolon after function declaration * Valid JsDoc including parameter matching Someday it will validate to the best of its ability against the entirety of the JavaScript style guide. This file is a front end that parses arguments and flags. The core of the code is in tokenizer.py and checker.py. """ __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)', 'nnaze@google.com (Nathan Naze)',) import errno import itertools import platform import sys import time import gflags as flags from closure_linter import errorrecord from closure_linter import runner from closure_linter.common import erroraccumulator from closure_linter.common import simplefileflags as fileflags # Attempt import of multiprocessing (should be available in Python 2.6 and up). try: # pylint: disable=g-import-not-at-top import multiprocessing except ImportError: multiprocessing = None FLAGS = flags.FLAGS flags.DEFINE_boolean('unix_mode', False, 'Whether to emit warnings in standard unix format.') flags.DEFINE_boolean('beep', True, 'Whether to beep when errors are found.') flags.DEFINE_boolean('time', False, 'Whether to emit timing statistics.') flags.DEFINE_boolean('check_html', False, 'Whether to check javascript in html files.') flags.DEFINE_boolean('summary', False, 'Whether to show an error count summary.') flags.DEFINE_list('additional_extensions', None, 'List of additional file ' 'extensions (not js) that should be treated as ' 'JavaScript files.') flags.DEFINE_boolean('multiprocess', platform.system() == 'Linux' and bool(multiprocessing), 'Whether to attempt parallelized linting using the ' 'multiprocessing module. Enabled by default on Linux ' 'if the multiprocessing module is present (Python 2.6+). ' 'Otherwise disabled by default. 
' 'Disabling may make debugging easier.') GJSLINT_ONLY_FLAGS = ['--unix_mode', '--beep', '--nobeep', '--time', '--check_html', '--summary'] def _MultiprocessCheckPaths(paths): """Run _CheckPath over multiple processes. Tokenization, passes, and checks are expensive operations. Running in a single process, they can only run on one CPU/core. Instead, shard out linting over all CPUs with multiprocessing to parallelize. Args: paths: paths to check. Yields: errorrecord.ErrorRecords for any found errors. """ pool = multiprocessing.Pool() path_results = pool.imap(_CheckPath, paths) for results in path_results: for result in results: yield result # Force destruct before returning, as this can sometimes raise spurious # "interrupted system call" (EINTR), which we can ignore. try: pool.close() pool.join() del pool except OSError as err: if err.errno != errno.EINTR: raise err def _CheckPaths(paths): """Run _CheckPath on all paths in one thread. Args: paths: paths to check. Yields: errorrecord.ErrorRecords for any found errors. """ for path in paths: results = _CheckPath(path) for record in results: yield record def _CheckPath(path): """Check a path and return any errors. Args: path: path to check. Returns: A list of errorrecord.ErrorRecords for any found errors. """ error_handler = erroraccumulator.ErrorAccumulator() runner.Run(path, error_handler) make_error_record = lambda err: errorrecord.MakeErrorRecord(path, err) return map(make_error_record, error_handler.GetErrors()) def _GetFilePaths(argv): suffixes = ['.js'] if FLAGS.additional_extensions: suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions] if FLAGS.check_html: suffixes += ['.html', '.htm'] return fileflags.GetFileList(argv, 'JavaScript', suffixes) # Error printing functions def _PrintFileSummary(paths, records): """Print a detailed summary of the number of errors in each file.""" paths = list(paths) paths.sort() for path in paths: path_errors = [e for e in records if e.path == path] print '%s: %d' % (path, len(path_errors)) def _PrintFileSeparator(path): print '----- FILE : %s -----' % path def _PrintSummary(paths, error_records): """Print a summary of the number of errors and files.""" error_count = len(error_records) all_paths = set(paths) all_paths_count = len(all_paths) if error_count == 0: print '%d files checked, no errors found.' % all_paths_count new_error_count = len([e for e in error_records if e.new_error]) error_paths = set([e.path for e in error_records]) error_paths_count = len(error_paths) no_error_paths_count = all_paths_count - error_paths_count if error_count or new_error_count: print ('Found %d errors, including %d new errors, in %d files ' '(%d files OK).' % ( error_count, new_error_count, error_paths_count, no_error_paths_count)) def _PrintErrorRecords(error_records): """Print error record strings in the expected format.""" current_path = None for record in error_records: if current_path != record.path: current_path = record.path if not FLAGS.unix_mode: _PrintFileSeparator(current_path) print record.error_string def _FormatTime(t): """Formats a duration as a human-readable string. Args: t: A duration in seconds. Returns: A formatted duration string. """ if t < 1: return '%dms' % round(t * 1000) else: return '%.2fs' % t def main(argv=None): """Main function. Args: argv: Sequence of command line arguments. 
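Example (an illustrative invocation; the 'myproject' namespace and the file path are placeholders, the flags are defined in this file and in checker.py): gjslint --closurized_namespaces=goog,myproject --summary path/to/file.js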
""" if argv is None: argv = flags.FLAGS(sys.argv) if FLAGS.time: start_time = time.time() suffixes = ['.js'] if FLAGS.additional_extensions: suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions] if FLAGS.check_html: suffixes += ['.html', '.htm'] paths = fileflags.GetFileList(argv, 'JavaScript', suffixes) if FLAGS.multiprocess: records_iter = _MultiprocessCheckPaths(paths) else: records_iter = _CheckPaths(paths) records_iter, records_iter_copy = itertools.tee(records_iter, 2) _PrintErrorRecords(records_iter_copy) error_records = list(records_iter) _PrintSummary(paths, error_records) exit_code = 0 # If there are any errors if error_records: exit_code += 1 # If there are any new errors if [r for r in error_records if r.new_error]: exit_code += 2 if exit_code: if FLAGS.summary: _PrintFileSummary(paths, error_records) if FLAGS.beep: # Make a beep noise. sys.stdout.write(chr(7)) # Write out instructions for using fixjsstyle script to fix some of the # reported errors. fix_args = [] for flag in sys.argv[1:]: for f in GJSLINT_ONLY_FLAGS: if flag.startswith(f): break else: fix_args.append(flag) print """ Some of the errors reported by GJsLint may be auto-fixable using the script fixjsstyle. Please double check any changes it makes and report any bugs. The script can be run by executing: fixjsstyle %s """ % ' '.join(fix_args) if FLAGS.time: print 'Done in %s.' % _FormatTime(time.time() - start_time) sys.exit(exit_code) if __name__ == '__main__': main() closure_linter-2.3.13/closure_linter/checker.py0000750014730400116100000000746412247733554021234 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2007 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Core methods for checking JS files for common style guide violations.""" __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)') import gflags as flags from closure_linter import aliaspass from closure_linter import checkerbase from closure_linter import closurizednamespacesinfo from closure_linter import javascriptlintrules flags.DEFINE_list('closurized_namespaces', '', 'Namespace prefixes, used for testing of' 'goog.provide/require') flags.DEFINE_list('ignored_extra_namespaces', '', 'Fully qualified namespaces that should be not be reported ' 'as extra by the linter.') class JavaScriptStyleChecker(checkerbase.CheckerBase): """Checker that applies JavaScriptLintRules.""" def __init__(self, state_tracker, error_handler): """Initialize an JavaScriptStyleChecker object. Args: state_tracker: State tracker. error_handler: Error handler to pass all errors to. 
""" self._namespaces_info = None self._alias_pass = None if flags.FLAGS.closurized_namespaces: self._namespaces_info = ( closurizednamespacesinfo.ClosurizedNamespacesInfo( flags.FLAGS.closurized_namespaces, flags.FLAGS.ignored_extra_namespaces)) self._alias_pass = aliaspass.AliasPass( flags.FLAGS.closurized_namespaces, error_handler) checkerbase.CheckerBase.__init__( self, error_handler=error_handler, lint_rules=javascriptlintrules.JavaScriptLintRules( self._namespaces_info), state_tracker=state_tracker) def Check(self, start_token, limited_doc_checks=False, is_html=False, stop_token=None): """Checks a token stream for lint warnings/errors. Adds a separate pass for computing dependency information based on goog.require and goog.provide statements prior to the main linting pass. Args: start_token: The first token in the token stream. limited_doc_checks: Whether to perform limited checks. is_html: Whether this token stream is HTML. stop_token: If given, checks should stop at this token. """ self._lint_rules.Initialize(self, limited_doc_checks, is_html) if self._alias_pass: self._alias_pass.Process(start_token) # To maximize the amount of errors that get reported before a parse error # is displayed, don't run the dependency pass if a parse error exists. if self._namespaces_info: self._namespaces_info.Reset() self._ExecutePass(start_token, self._DependencyPass, stop_token) self._ExecutePass(start_token, self._LintPass, stop_token) # If we have a stop_token, we didn't end up reading the whole file and, # thus, don't call Finalize to do end-of-file checks. if not stop_token: self._lint_rules.Finalize(self._state_tracker) def _DependencyPass(self, token): """Processes an individual token for dependency information. Used to encapsulate the logic needed to process an individual token so that it can be passed to _ExecutePass. Args: token: The token to process. """ self._namespaces_info.ProcessToken(token, self._state_tracker) closure_linter-2.3.13/closure_linter/requireprovidesorter_test.py0000640014730400116100000001167012247733554025163 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2012 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Unit tests for RequireProvideSorter.""" import unittest as googletest from closure_linter import javascripttokens from closure_linter import requireprovidesorter from closure_linter import testutil # pylint: disable=g-bad-name TokenType = javascripttokens.JavaScriptTokenType class RequireProvideSorterTest(googletest.TestCase): """Tests for RequireProvideSorter.""" def testGetFixedProvideString(self): """Tests that fixed string constains proper comments also.""" input_lines = [ 'goog.provide(\'package.xyz\');', '/** @suppress {extraprovide} **/', 'goog.provide(\'package.abcd\');' ] expected_lines = [ '/** @suppress {extraprovide} **/', 'goog.provide(\'package.abcd\');', 'goog.provide(\'package.xyz\');' ] token = testutil.TokenizeSourceAndRunEcmaPass(input_lines) sorter = requireprovidesorter.RequireProvideSorter() fixed_provide_string = sorter.GetFixedProvideString(token) self.assertEquals(expected_lines, fixed_provide_string.splitlines()) def testGetFixedRequireString(self): """Tests that fixed string constains proper comments also.""" input_lines = [ 'goog.require(\'package.xyz\');', '/** This is needed for scope. **/', 'goog.require(\'package.abcd\');' ] expected_lines = [ '/** This is needed for scope. **/', 'goog.require(\'package.abcd\');', 'goog.require(\'package.xyz\');' ] token = testutil.TokenizeSourceAndRunEcmaPass(input_lines) sorter = requireprovidesorter.RequireProvideSorter() fixed_require_string = sorter.GetFixedRequireString(token) self.assertEquals(expected_lines, fixed_require_string.splitlines()) def testFixRequires_removeBlankLines(self): """Tests that blank lines are omitted in sorted goog.require statements.""" input_lines = [ 'goog.provide(\'package.subpackage.Whatever\');', '', 'goog.require(\'package.subpackage.ClassB\');', '', 'goog.require(\'package.subpackage.ClassA\');' ] expected_lines = [ 'goog.provide(\'package.subpackage.Whatever\');', '', 'goog.require(\'package.subpackage.ClassA\');', 'goog.require(\'package.subpackage.ClassB\');' ] token = testutil.TokenizeSourceAndRunEcmaPass(input_lines) sorter = requireprovidesorter.RequireProvideSorter() sorter.FixRequires(token) self.assertEquals(expected_lines, self._GetLines(token)) def fixRequiresTest_withTestOnly(self, position): """Regression-tests sorting even with a goog.setTestOnly statement. Args: position: The position in the list where to insert the goog.setTestOnly statement. Will be used to test all possible combinations for this test. """ input_lines = [ 'goog.provide(\'package.subpackage.Whatever\');', '', 'goog.require(\'package.subpackage.ClassB\');', 'goog.require(\'package.subpackage.ClassA\');' ] expected_lines = [ 'goog.provide(\'package.subpackage.Whatever\');', '', 'goog.require(\'package.subpackage.ClassA\');', 'goog.require(\'package.subpackage.ClassB\');' ] input_lines.insert(position, 'goog.setTestOnly();') expected_lines.insert(position, 'goog.setTestOnly();') token = testutil.TokenizeSourceAndRunEcmaPass(input_lines) sorter = requireprovidesorter.RequireProvideSorter() sorter.FixRequires(token) self.assertEquals(expected_lines, self._GetLines(token)) def testFixRequires_withTestOnly(self): """Regression-tests sorting even after a goog.setTestOnly statement.""" # goog.setTestOnly at first line. self.fixRequiresTest_withTestOnly(position=0) # goog.setTestOnly after goog.provide. self.fixRequiresTest_withTestOnly(position=1) # goog.setTestOnly before goog.require. self.fixRequiresTest_withTestOnly(position=2) # goog.setTestOnly after goog.require. 
self.fixRequiresTest_withTestOnly(position=4) def _GetLines(self, token): """Returns an array of lines based on the specified token stream.""" lines = [] line = '' while token: line += token.string if token.IsLastInLine(): lines.append(line) line = '' token = token.next return lines if __name__ == '__main__': googletest.main() closure_linter-2.3.13/closure_linter/closurizednamespacesinfo.py0000750014730400116100000004630712247733554024722 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2008 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Logic for computing dependency information for closurized JavaScript files. Closurized JavaScript files express dependencies using goog.require and goog.provide statements. In order for the linter to detect when a statement is missing or unnecessary, all identifiers in the JavaScript file must first be processed to determine if they constitute the creation or usage of a dependency. """ import re from closure_linter import javascripttokens from closure_linter import tokenutil # pylint: disable=g-bad-name TokenType = javascripttokens.JavaScriptTokenType DEFAULT_EXTRA_NAMESPACES = [ 'goog.testing.asserts', 'goog.testing.jsunit', ] class ClosurizedNamespacesInfo(object): """Dependency information for closurized JavaScript files. Processes token streams for dependency creation or usage and provides logic for determining if a given require or provide statement is unnecessary or if there are missing require or provide statements. """ def __init__(self, closurized_namespaces, ignored_extra_namespaces): """Initializes an instance of the ClosurizedNamespacesInfo class. Args: closurized_namespaces: A list of namespace prefixes that should be processed for dependency information. Non-matching namespaces are ignored. ignored_extra_namespaces: A list of namespaces that should not be reported as extra regardless of whether they are actually used. """ self._closurized_namespaces = closurized_namespaces self._ignored_extra_namespaces = (ignored_extra_namespaces + DEFAULT_EXTRA_NAMESPACES) self.Reset() def Reset(self): """Resets the internal state to prepare for processing a new file.""" # A list of goog.provide tokens in the order they appeared in the file. self._provide_tokens = [] # A list of goog.require tokens in the order they appeared in the file. self._require_tokens = [] # Namespaces that are already goog.provided. self._provided_namespaces = [] # Namespaces that are already goog.required. self._required_namespaces = [] # Note that created_namespaces and used_namespaces contain both namespaces # and identifiers because there are many existing cases where a method or # constant is provided directly instead of its namespace. Ideally, these # two lists would only have to contain namespaces. # A list of tuples where the first element is the namespace of an identifier # created in the file, the second is the identifier itself and the third is # the line number where it's created. 
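# Illustrative entry (hypothetical input): an assignment to # 'package.Foo.bar' on line 3 is recorded as # ['package.Foo', 'package.Foo.bar', 3].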
self._created_namespaces = [] # A list of tuples where the first element is the namespace of an identifier # used in the file, the second is the identifier itself and the third is the # line number where it's used. self._used_namespaces = [] # A list of seemingly-unnecessary namespaces that are goog.required() and # annotated with @suppress {extraRequire}. self._suppressed_requires = [] # A list of goog.provide tokens which are duplicates. self._duplicate_provide_tokens = [] # A list of goog.require tokens which are duplicates. self._duplicate_require_tokens = [] # Whether this file is in a goog.scope. Someday, we may add support # for checking scopified namespaces, but for now let's just fail # in a more reasonable way. self._scopified_file = False # TODO(user): Handle the case where there are 2 different requires # that can satisfy the same dependency, but only one is necessary. def GetProvidedNamespaces(self): """Returns the namespaces which are already provided by this file. Returns: A set of strings where each string is a 'namespace' corresponding to an existing goog.provide statement in the file being checked. """ return set(self._provided_namespaces) def GetRequiredNamespaces(self): """Returns the namespaces which are already required by this file. Returns: A set of strings where each string is a 'namespace' corresponding to an existing goog.require statement in the file being checked. """ return set(self._required_namespaces) def IsExtraProvide(self, token): """Returns whether the given goog.provide token is unnecessary. Args: token: A goog.provide token. Returns: True if the given token corresponds to an unnecessary goog.provide statement, otherwise False. """ namespace = tokenutil.GetStringAfterToken(token) base_namespace = namespace.split('.', 1)[0] if base_namespace not in self._closurized_namespaces: return False if token in self._duplicate_provide_tokens: return True # TODO(user): There's probably a faster way to compute this. for created_namespace, created_identifier, _ in self._created_namespaces: if namespace == created_namespace or namespace == created_identifier: return False return True def IsExtraRequire(self, token): """Returns whether the given goog.require token is unnecessary. Args: token: A goog.require token. Returns: True if the given token corresponds to an unnecessary goog.require statement, otherwise False. """ namespace = tokenutil.GetStringAfterToken(token) base_namespace = namespace.split('.', 1)[0] if base_namespace not in self._closurized_namespaces: return False if namespace in self._ignored_extra_namespaces: return False if token in self._duplicate_require_tokens: return True if namespace in self._suppressed_requires: return False # If the namespace contains a component that is initial caps, then that # must be the last component of the namespace. parts = namespace.split('.') if len(parts) > 1 and parts[-2][0].isupper(): return True # TODO(user): There's probably a faster way to compute this. for used_namespace, used_identifier, _ in self._used_namespaces: if namespace == used_namespace or namespace == used_identifier: return False return True def GetMissingProvides(self): """Returns the dict of missing provided namespaces for the current file. Returns: A dictionary mapping each namespace (a string) that should be provided by this file but is not, to the line number (an integer) where it is first defined. 
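For example (illustrative): a file that assigns package.Foo.bar on line 3 without providing package.Foo would yield {'package.Foo': 3}.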
""" missing_provides = dict() for namespace, identifier, line_number in self._created_namespaces: if (not self._IsPrivateIdentifier(identifier) and namespace not in self._provided_namespaces and identifier not in self._provided_namespaces and namespace not in self._required_namespaces and namespace not in missing_provides): missing_provides[namespace] = line_number return missing_provides def GetMissingRequires(self): """Returns the dict of missing required namespaces for the current file. For each non-private identifier used in the file, find either a goog.require, goog.provide or a created identifier that satisfies it. goog.require statements can satisfy the identifier by requiring either the namespace of the identifier or the identifier itself. goog.provide statements can satisfy the identifier by providing the namespace of the identifier. A created identifier can only satisfy the used identifier if it matches it exactly (necessary since things can be defined on a namespace in more than one file). Note that provided namespaces should be a subset of created namespaces, but we check both because in some cases we can't always detect the creation of the namespace. Returns: Returns a dictionary of key as string and value integer where each string(key) is a namespace that should be required by this file, but is not and integer(value) is first line number where it's used. """ external_dependencies = set(self._required_namespaces) # Assume goog namespace is always available. external_dependencies.add('goog') created_identifiers = set() for namespace, identifier, line_number in self._created_namespaces: created_identifiers.add(identifier) missing_requires = dict() for namespace, identifier, line_number in self._used_namespaces: if (not self._IsPrivateIdentifier(identifier) and namespace not in external_dependencies and namespace not in self._provided_namespaces and identifier not in external_dependencies and identifier not in created_identifiers and namespace not in missing_requires): missing_requires[namespace] = line_number return missing_requires def _IsPrivateIdentifier(self, identifier): """Returns whether the given identifer is private.""" pieces = identifier.split('.') for piece in pieces: if piece.endswith('_'): return True return False def IsFirstProvide(self, token): """Returns whether token is the first provide token.""" return self._provide_tokens and token == self._provide_tokens[0] def IsFirstRequire(self, token): """Returns whether token is the first require token.""" return self._require_tokens and token == self._require_tokens[0] def IsLastProvide(self, token): """Returns whether token is the last provide token.""" return self._provide_tokens and token == self._provide_tokens[-1] def IsLastRequire(self, token): """Returns whether token is the last require token.""" return self._require_tokens and token == self._require_tokens[-1] def ProcessToken(self, token, state_tracker): """Processes the given token for dependency information. Args: token: The token to process. state_tracker: The JavaScript state tracker. """ # Note that this method is in the critical path for the linter and has been # optimized for performance in the following ways: # - Tokens are checked by type first to minimize the number of function # calls necessary to determine if action needs to be taken for the token. # - The most common tokens types are checked for first. # - The number of function calls has been minimized (thus the length of this # function. 
if token.type == TokenType.IDENTIFIER: # TODO(user): Consider saving the whole identifier in metadata. whole_identifier_string = tokenutil.GetIdentifierForToken(token) if whole_identifier_string is None: # We only want to process the identifier one time. If the whole string # identifier is None, that means this token was part of a multi-token # identifier, but it was not the first token of the identifier. return # In the odd case that a goog.require is encountered inside a function, # just ignore it (e.g. dynamic loading in test runners). if token.string == 'goog.require' and not state_tracker.InFunction(): self._require_tokens.append(token) namespace = tokenutil.GetStringAfterToken(token) if namespace in self._required_namespaces: self._duplicate_require_tokens.append(token) else: self._required_namespaces.append(namespace) # If there is a suppression for the require, add a usage for it so it # gets treated as a regular goog.require (i.e. still gets sorted). jsdoc = state_tracker.GetDocComment() if jsdoc and ('extraRequire' in jsdoc.suppressions): self._suppressed_requires.append(namespace) self._AddUsedNamespace(state_tracker, namespace, token.line_number) elif token.string == 'goog.provide': self._provide_tokens.append(token) namespace = tokenutil.GetStringAfterToken(token) if namespace in self._provided_namespaces: self._duplicate_provide_tokens.append(token) else: self._provided_namespaces.append(namespace) # If there is a suppression for the provide, add a creation for it so it # gets treated as a regular goog.provide (i.e. still gets sorted). jsdoc = state_tracker.GetDocComment() if jsdoc and ('extraProvide' in jsdoc.suppressions): self._AddCreatedNamespace(state_tracker, namespace, token.line_number) elif token.string == 'goog.scope': self._scopified_file = True elif token.string == 'goog.setTestOnly': # Since the message is optional, we don't want to scan to later lines. for t in tokenutil.GetAllTokensInSameLine(token): if t.type == TokenType.STRING_TEXT: message = t.string if re.match(r'^\w+(\.\w+)+$', message): # This looks like a namespace. If it's a Closurized namespace, # consider it created. base_namespace = message.split('.', 1)[0] if base_namespace in self._closurized_namespaces: self._AddCreatedNamespace(state_tracker, message, token.line_number) break else: jsdoc = state_tracker.GetDocComment() if token.metadata and token.metadata.aliased_symbol: whole_identifier_string = token.metadata.aliased_symbol if jsdoc and jsdoc.HasFlag('typedef'): self._AddCreatedNamespace(state_tracker, whole_identifier_string, token.line_number, namespace=self.GetClosurizedNamespace( whole_identifier_string)) else: if not (token.metadata and token.metadata.is_alias_definition): self._AddUsedNamespace(state_tracker, whole_identifier_string, token.line_number) elif token.type == TokenType.SIMPLE_LVALUE: identifier = token.values['identifier'] start_token = tokenutil.GetIdentifierStart(token) if start_token and start_token != token: # Multi-line identifier being assigned. Get the whole identifier. identifier = tokenutil.GetIdentifierForToken(start_token) else: start_token = token # If an alias is defined on the start_token, use it instead. 
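# Illustrative case (cf. testScope_provides in the unit tests): inside # goog.scope, "bar.Foo = {};" where bar aliases goog.bar counts as creating # goog.bar.Foo, because the identifier is expanded through aliased_symbol # below.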
if (start_token and start_token.metadata and start_token.metadata.aliased_symbol and not start_token.metadata.is_alias_definition): identifier = start_token.metadata.aliased_symbol if identifier: namespace = self.GetClosurizedNamespace(identifier) if state_tracker.InFunction(): self._AddUsedNamespace(state_tracker, identifier, token.line_number) elif namespace and namespace != 'goog': self._AddCreatedNamespace(state_tracker, identifier, token.line_number, namespace=namespace) elif token.type == TokenType.DOC_FLAG: flag_type = token.attached_object.flag_type is_interface = state_tracker.GetDocComment().HasFlag('interface') if flag_type == 'implements' or (flag_type == 'extends' and is_interface): # Interfaces should be goog.require'd. doc_start = tokenutil.Search(token, TokenType.DOC_START_BRACE) interface = tokenutil.Search(doc_start, TokenType.COMMENT) self._AddUsedNamespace(state_tracker, interface.string, token.line_number) def _AddCreatedNamespace(self, state_tracker, identifier, line_number, namespace=None): """Adds the namespace of an identifier to the list of created namespaces. If the identifier is annotated with a 'missingProvide' suppression, it is not added. Args: state_tracker: The JavaScriptStateTracker instance. identifier: The identifier to add. line_number: Line number where namespace is created. namespace: The namespace of the identifier or None if the identifier is also the namespace. """ if not namespace: namespace = identifier jsdoc = state_tracker.GetDocComment() if jsdoc and 'missingProvide' in jsdoc.suppressions: return self._created_namespaces.append([namespace, identifier, line_number]) def _AddUsedNamespace(self, state_tracker, identifier, line_number): """Adds the namespace of an identifier to the list of used namespaces. If the identifier is annotated with a 'missingRequire' suppression, it is not added. Args: state_tracker: The JavaScriptStateTracker instance. identifier: An identifier which has been used. line_number: Line number where namespace is used. """ jsdoc = state_tracker.GetDocComment() if jsdoc and 'missingRequire' in jsdoc.suppressions: return namespace = self.GetClosurizedNamespace(identifier) # b/5362203 If it's a variable in scope then it's not a required namespace. if namespace and not state_tracker.IsVariableInScope(namespace): self._used_namespaces.append([namespace, identifier, line_number]) def GetClosurizedNamespace(self, identifier): """Given an identifier, returns the namespace that identifier is from. Args: identifier: The identifier to extract a namespace from. Returns: The namespace the given identifier resides in, or None if one could not be found. """ if identifier.startswith('goog.global'): # Ignore goog.global, since it is, by definition, global. return None parts = identifier.split('.') for namespace in self._closurized_namespaces: if not identifier.startswith(namespace + '.'): continue last_part = parts[-1] if not last_part: # TODO(robbyw): Handle this: it's a multi-line identifier. return None # The namespace for a class is the shortest prefix ending in a class # name, which starts with a capital letter but is not a capitalized word. # # We ultimately do not want to allow requiring or providing of inner # classes/enums. Instead, a file should provide only the top-level class # and users should require only that. 
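# Illustrative results (hypothetical identifiers): both # 'goog.foo.MyClass.staticMethod' and 'goog.foo.MyClass.prototype.method' # map to the namespace 'goog.foo.MyClass', while 'goog.foo.CONSTANT' maps # to 'goog.foo'.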
namespace = [] for part in parts: if part == 'prototype' or part.isupper(): return '.'.join(namespace) namespace.append(part) if part[0].isupper(): return '.'.join(namespace) # At this point, we know there's no class or enum, so the namespace is # just the identifier with the last part removed. With the exception of # apply, inherits, and call, which should also be stripped. if parts[-1] in ('apply', 'inherits', 'call'): parts.pop() parts.pop() # If the last part ends with an underscore, it is a private variable, # method, or enum. The namespace is whatever is before it. if parts and parts[-1].endswith('_'): parts.pop() return '.'.join(parts) return None closure_linter-2.3.13/closure_linter/aliaspass.py0000640014730400116100000001674712247733554021612 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2012 The Closure Linter Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Pass that scans for goog.scope aliases and lint/usage errors.""" # Allow non-Google copyright # pylint: disable=g-bad-file-header __author__ = ('nnaze@google.com (Nathan Naze)') import itertools from closure_linter import ecmametadatapass from closure_linter import errors from closure_linter import javascripttokens from closure_linter import scopeutil from closure_linter import tokenutil from closure_linter.common import error # TODO(nnaze): Create a Pass interface and move this class, EcmaMetaDataPass, # and related classes onto it. def _GetAliasForIdentifier(identifier, alias_map): """Returns the aliased_symbol name for an identifier. Example usage: >>> alias_map = {'MyClass': 'goog.foo.MyClass'} >>> _GetAliasForIdentifier('MyClass.prototype.action', alias_map) 'goog.foo.MyClass.prototype.action' >>> _GetAliasForIdentifier('MyClass.prototype.action', {}) None Args: identifier: The identifier. alias_map: A dictionary mapping a symbol to an alias. Returns: The aliased symbol name or None if not found. """ ns = identifier.split('.', 1)[0] aliased_symbol = alias_map.get(ns) if aliased_symbol: return aliased_symbol + identifier[len(ns):] class AliasPass(object): """Pass to identify goog.scope() usages. Identifies goog.scope() usages and finds lint/usage errors. Notes any aliases of symbols in Closurized namespaces (that is, reassignments such as "var MyClass = goog.foo.MyClass;") and annotates identifiers when they're using an alias (so they may be expanded to the full symbol later -- that "MyClass.prototype.action" refers to "goog.foo.MyClass.prototype.action" when expanded.). """ def __init__(self, closurized_namespaces=None, error_handler=None): """Creates a new pass. Args: closurized_namespaces: A set of Closurized namespaces (e.g. 'goog'). error_handler: An error handler to report lint errors to. """ self._error_handler = error_handler # If we have namespaces, freeze the set. if closurized_namespaces: closurized_namespaces = frozenset(closurized_namespaces) self._closurized_namespaces = closurized_namespaces def Process(self, start_token): """Runs the pass on a token stream. 
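Note: assumes the EcmaMetaDataPass has already annotated the stream, since this pass reads token.metadata.context.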
Args: start_token: The first token in the stream. """ # TODO(nnaze): Add more goog.scope usage checks. self._CheckGoogScopeCalls(start_token) # If we have closurized namespaces, identify aliased identifiers. if self._closurized_namespaces: context = start_token.metadata.context root_context = context.GetRoot() self._ProcessRootContext(root_context) def _CheckGoogScopeCalls(self, start_token): """Check goog.scope calls for lint/usage errors.""" def IsScopeToken(token): return (token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER and token.string == 'goog.scope') # Find all the goog.scope tokens in the file. scope_tokens = [t for t in start_token if IsScopeToken(t)] for token in scope_tokens: scope_context = token.metadata.context if not (scope_context.type == ecmametadatapass.EcmaContext.STATEMENT and scope_context.parent.type == ecmametadatapass.EcmaContext.ROOT): self._MaybeReportError( error.Error(errors.INVALID_USE_OF_GOOG_SCOPE, 'goog.scope call not in global scope', token)) # There should be only one goog.scope reference. Register errors for # every instance after the first. for token in scope_tokens[1:]: self._MaybeReportError( error.Error(errors.EXTRA_GOOG_SCOPE_USAGE, 'More than one goog.scope call in file.', token)) def _MaybeReportError(self, err): """Report an error to the handler (if registered).""" if self._error_handler: self._error_handler.HandleError(err) @classmethod def _YieldAllContexts(cls, context): """Yields all contexts that are contained by the given context.""" yield context for child_context in context.children: for descendent_child in cls._YieldAllContexts(child_context): yield descendent_child @staticmethod def _IsTokenInParentBlock(token, parent_block): """Determines whether the given token is contained by the given block. Args: token: A token. parent_block: An EcmaContext. Returns: Whether the token is in a context that is or is a child of the given parent_block context. """ context = token.metadata.context while context: if context is parent_block: return True context = context.parent return False def _ProcessRootContext(self, root_context): """Processes all goog.scope blocks under the root context.""" assert root_context.type is ecmametadatapass.EcmaContext.ROOT # Identify all goog.scope blocks. goog_scope_blocks = itertools.ifilter( scopeutil.IsGoogScopeBlock, self._YieldAllContexts(root_context)) # Process each block to find aliases. for scope_block in goog_scope_blocks: self._ProcessGoogScopeBlock(scope_block) def _ProcessGoogScopeBlock(self, scope_block): """Scans a goog.scope block to find aliases and mark alias tokens.""" alias_map = dict() # Iterate over every token in the scope_block. Each token points to one # context, but multiple tokens may point to the same context. We only want # to check each context once, so keep track of those we've seen. seen_contexts = set() token = scope_block.start_token while token and self._IsTokenInParentBlock(token, scope_block): token_context = token.metadata.context # Check to see if this token is an alias. if token_context not in seen_contexts: seen_contexts.add(token_context) # If this is an alias statement in the goog.scope block. if (token_context.type == ecmametadatapass.EcmaContext.VAR and token_context.parent.parent is scope_block): match = scopeutil.MatchAlias(token_context.parent) # If this is an alias, remember it in the map. 
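# Illustrative case (hypothetical input): for "var XhrIo = goog.net.XhrIo;" # inside the goog.scope block, match unpacks to ('XhrIo', 'goog.net.XhrIo') # and alias_map['XhrIo'] becomes 'goog.net.XhrIo'.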
if match: alias, symbol = match symbol = _GetAliasForIdentifier(symbol, alias_map) or symbol if scopeutil.IsInClosurizedNamespace(symbol, self._closurized_namespaces): alias_map[alias] = symbol # If this token is an identifier that matches an alias, # mark the token as an alias to the original symbol. if (token.type is javascripttokens.JavaScriptTokenType.SIMPLE_LVALUE or token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER): identifier = tokenutil.GetIdentifierForToken(token) if identifier: aliased_symbol = _GetAliasForIdentifier(identifier, alias_map) if aliased_symbol: token.metadata.aliased_symbol = aliased_symbol token = token.next # Get next token closure_linter-2.3.13/closure_linter/ecmametadatapass.py0000750014730400116100000004754712247733554023127 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2010 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Metadata pass for annotating tokens in EcmaScript files.""" __author__ = ('robbyw@google.com (Robert Walker)') from closure_linter import javascripttokens from closure_linter import tokenutil TokenType = javascripttokens.JavaScriptTokenType class ParseError(Exception): """Exception indicating a parse error at the given token. Attributes: token: The token where the parse error occurred. """ def __init__(self, token, message=None): """Initialize a parse error at the given token with an optional message. Args: token: The token where the parse error occurred. message: A message describing the parse error. """ Exception.__init__(self, message) self.token = token class EcmaContext(object): """Context object for EcmaScript languages. Attributes: type: The context type. start_token: The token where this context starts. end_token: The token where this context ends. parent: The parent context. """ # The root context. ROOT = 'root' # A block of code. BLOCK = 'block' # A pseudo-block of code for a given case or default section. CASE_BLOCK = 'case_block' # Block of statements in a for loop's parentheses. FOR_GROUP_BLOCK = 'for_block' # An implied block of code for 1 line if, while, and for statements. IMPLIED_BLOCK = 'implied_block' # An index into an array or object. INDEX = 'index' # An array literal in []. ARRAY_LITERAL = 'array_literal' # An object literal in {}. OBJECT_LITERAL = 'object_literal' # An individual element in an array or object literal. LITERAL_ELEMENT = 'literal_element' # The portion of a ternary statement between ? and : TERNARY_TRUE = 'ternary_true' # The portion of a ternary statement after : TERNARY_FALSE = 'ternary_false' # The entire switch statement. This will contain a GROUP with the variable # and a BLOCK with the code. # Since that BLOCK is not a normal block, it cannot contain statements except # for case and default. SWITCH = 'switch' # A normal comment. COMMENT = 'comment' # A JsDoc comment. DOC = 'doc' # An individual statement. STATEMENT = 'statement' # Code within parentheses. GROUP = 'group' # Parameter names in a function declaration. 
PARAMETERS = 'parameters' # A set of variable declarations appearing after the 'var' keyword. VAR = 'var' # Context types that are blocks. BLOCK_TYPES = frozenset([ ROOT, BLOCK, CASE_BLOCK, FOR_GROUP_BLOCK, IMPLIED_BLOCK]) def __init__(self, context_type, start_token, parent=None): """Initializes the context object. Args: context_type: The context type. start_token: The token where this context starts. parent: The parent context. Attributes: type: The context type. start_token: The token where this context starts. end_token: The token where this context ends. parent: The parent context. children: The child contexts of this context, in order. """ self.type = context_type self.start_token = start_token self.end_token = None self.parent = None self.children = [] if parent: parent.AddChild(self) def __repr__(self): """Returns a string representation of the context object.""" stack = [] context = self while context: stack.append(context.type) context = context.parent return 'Context(%s)' % ' > '.join(stack) def AddChild(self, child): """Adds a child to this context and sets child's parent to this context. Args: child: A child EcmaContext. The child's parent will be set to this context. """ child.parent = self self.children.append(child) self.children.sort(EcmaContext._CompareContexts) def GetRoot(self): """Get the root context that contains this context, if any.""" context = self while context: if context.type is EcmaContext.ROOT: return context context = context.parent @staticmethod def _CompareContexts(context1, context2): """Sorts contexts 1 and 2 by start token document position.""" return tokenutil.Compare(context1.start_token, context2.start_token) class EcmaMetaData(object): """Token metadata for EcmaScript languages. Attributes: last_code: The last code token to appear before this one. context: The context this token appears in. operator_type: The operator type, will be one of the *_OPERATOR constants defined below. aliased_symbol: The full symbol being identified, as a string (e.g. an 'XhrIo' alias for 'goog.net.XhrIo'). Only applicable to identifier tokens. This is set in aliaspass.py and is a best guess. is_alias_definition: True if the symbol is part of an alias definition. If so, these symbols won't be counted towards goog.requires/provides. 
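For example (illustrative): after "var XhrIo = goog.net.XhrIo;" in a goog.scope block, later 'XhrIo' identifier tokens get aliased_symbol 'goog.net.XhrIo' (set by aliaspass.py).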
""" UNARY_OPERATOR = 'unary' UNARY_POST_OPERATOR = 'unary_post' BINARY_OPERATOR = 'binary' TERNARY_OPERATOR = 'ternary' def __init__(self): """Initializes a token metadata object.""" self.last_code = None self.context = None self.operator_type = None self.is_implied_semicolon = False self.is_implied_block = False self.is_implied_block_close = False self.aliased_symbol = None self.is_alias_definition = False def __repr__(self): """Returns a string representation of the context object.""" parts = ['%r' % self.context] if self.operator_type: parts.append('optype: %r' % self.operator_type) if self.is_implied_semicolon: parts.append('implied;') if self.aliased_symbol: parts.append('alias for: %s' % self.aliased_symbol) return 'MetaData(%s)' % ', '.join(parts) def IsUnaryOperator(self): return self.operator_type in (EcmaMetaData.UNARY_OPERATOR, EcmaMetaData.UNARY_POST_OPERATOR) def IsUnaryPostOperator(self): return self.operator_type == EcmaMetaData.UNARY_POST_OPERATOR class EcmaMetaDataPass(object): """A pass that iterates over all tokens and builds metadata about them.""" def __init__(self): """Initialize the meta data pass object.""" self.Reset() def Reset(self): """Resets the metadata pass to prepare for the next file.""" self._token = None self._context = None self._AddContext(EcmaContext.ROOT) self._last_code = None def _CreateContext(self, context_type): """Overridable by subclasses to create the appropriate context type.""" return EcmaContext(context_type, self._token, self._context) def _CreateMetaData(self): """Overridable by subclasses to create the appropriate metadata type.""" return EcmaMetaData() def _AddContext(self, context_type): """Adds a context of the given type to the context stack. Args: context_type: The type of context to create """ self._context = self._CreateContext(context_type) def _PopContext(self): """Moves up one level in the context stack. Returns: The former context. Raises: ParseError: If the root context is popped. """ top_context = self._context top_context.end_token = self._token self._context = top_context.parent if self._context: return top_context else: raise ParseError(self._token) def _PopContextType(self, *stop_types): """Pops the context stack until a context of the given type is popped. Args: *stop_types: The types of context to pop to - stops at the first match. Returns: The context object of the given type that was popped. """ last = None while not last or last.type not in stop_types: last = self._PopContext() return last def _EndStatement(self): """Process the end of a statement.""" self._PopContextType(EcmaContext.STATEMENT) if self._context.type == EcmaContext.IMPLIED_BLOCK: self._token.metadata.is_implied_block_close = True self._PopContext() def _ProcessContext(self): """Process the context at the current token. Returns: The context that should be assigned to the current token, or None if the current context after this method should be used. Raises: ParseError: When the token appears in an invalid context. """ token = self._token token_type = token.type if self._context.type in EcmaContext.BLOCK_TYPES: # Whenever we're in a block, we add a statement context. We make an # exception for switch statements since they can only contain case: and # default: and therefore don't directly contain statements. # The block we add here may be immediately removed in some cases, but # that causes no harm. 
parent = self._context.parent if not parent or parent.type != EcmaContext.SWITCH: self._AddContext(EcmaContext.STATEMENT) elif self._context.type == EcmaContext.ARRAY_LITERAL: self._AddContext(EcmaContext.LITERAL_ELEMENT) if token_type == TokenType.START_PAREN: if self._last_code and self._last_code.IsKeyword('for'): # for loops contain multiple statements in the group unlike while, # switch, if, etc. self._AddContext(EcmaContext.FOR_GROUP_BLOCK) else: self._AddContext(EcmaContext.GROUP) elif token_type == TokenType.END_PAREN: result = self._PopContextType(EcmaContext.GROUP, EcmaContext.FOR_GROUP_BLOCK) keyword_token = result.start_token.metadata.last_code # keyword_token will not exist if the open paren is the first line of the # file, for example if all code is wrapped in an immediately executed # anonymous function. if keyword_token and keyword_token.string in ('if', 'for', 'while'): next_code = tokenutil.SearchExcept(token, TokenType.NON_CODE_TYPES) if next_code.type != TokenType.START_BLOCK: # Check for do-while. is_do_while = False pre_keyword_token = keyword_token.metadata.last_code if (pre_keyword_token and pre_keyword_token.type == TokenType.END_BLOCK): start_block_token = pre_keyword_token.metadata.context.start_token is_do_while = start_block_token.metadata.last_code.string == 'do' # If it's not do-while, it's an implied block. if not is_do_while: self._AddContext(EcmaContext.IMPLIED_BLOCK) token.metadata.is_implied_block = True return result # else (not else if) with no open brace after it should be considered the # start of an implied block, similar to the case with if, for, and while # above. elif (token_type == TokenType.KEYWORD and token.string == 'else'): next_code = tokenutil.SearchExcept(token, TokenType.NON_CODE_TYPES) if (next_code.type != TokenType.START_BLOCK and (next_code.type != TokenType.KEYWORD or next_code.string != 'if')): self._AddContext(EcmaContext.IMPLIED_BLOCK) token.metadata.is_implied_block = True elif token_type == TokenType.START_PARAMETERS: self._AddContext(EcmaContext.PARAMETERS) elif token_type == TokenType.END_PARAMETERS: return self._PopContextType(EcmaContext.PARAMETERS) elif token_type == TokenType.START_BRACKET: if (self._last_code and self._last_code.type in TokenType.EXPRESSION_ENDER_TYPES): self._AddContext(EcmaContext.INDEX) else: self._AddContext(EcmaContext.ARRAY_LITERAL) elif token_type == TokenType.END_BRACKET: return self._PopContextType(EcmaContext.INDEX, EcmaContext.ARRAY_LITERAL) elif token_type == TokenType.START_BLOCK: if (self._last_code.type in (TokenType.END_PAREN, TokenType.END_PARAMETERS) or self._last_code.IsKeyword('else') or self._last_code.IsKeyword('do') or self._last_code.IsKeyword('try') or self._last_code.IsKeyword('finally') or (self._last_code.IsOperator(':') and self._last_code.metadata.context.type == EcmaContext.CASE_BLOCK)): # else, do, try, and finally all might have no () before {. # Also, handle the bizarre syntax case 10: {...}. self._AddContext(EcmaContext.BLOCK) else: self._AddContext(EcmaContext.OBJECT_LITERAL) elif token_type == TokenType.END_BLOCK: context = self._PopContextType(EcmaContext.BLOCK, EcmaContext.OBJECT_LITERAL) if self._context.type == EcmaContext.SWITCH: # The end of the block also means the end of the switch statement it # applies to. 
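# For example, in # switch (x) { case 1: break; } # the closing brace pops the block context here, and the switch # context that contained it is popped by the return below. 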
return self._PopContext() return context elif token.IsKeyword('switch'): self._AddContext(EcmaContext.SWITCH) elif (token_type == TokenType.KEYWORD and token.string in ('case', 'default') and self._context.type != EcmaContext.OBJECT_LITERAL): # Pop up to but not including the switch block. while self._context.parent.type != EcmaContext.SWITCH: self._PopContext() if self._context.parent is None: raise ParseError(token, 'Encountered case/default statement ' 'without switch statement') elif token.IsOperator('?'): self._AddContext(EcmaContext.TERNARY_TRUE) elif token.IsOperator(':'): if self._context.type == EcmaContext.OBJECT_LITERAL: self._AddContext(EcmaContext.LITERAL_ELEMENT) elif self._context.type == EcmaContext.TERNARY_TRUE: self._PopContext() self._AddContext(EcmaContext.TERNARY_FALSE) # Handle nested ternary statements like: # foo = bar ? baz ? 1 : 2 : 3 # When we encounter the second ":" the context is # ternary_false > ternary_true > statement > root elif (self._context.type == EcmaContext.TERNARY_FALSE and self._context.parent.type == EcmaContext.TERNARY_TRUE): self._PopContext() # Leave current ternary false context. self._PopContext() # Leave current parent ternary true. self._AddContext(EcmaContext.TERNARY_FALSE) elif self._context.parent.type == EcmaContext.SWITCH: self._AddContext(EcmaContext.CASE_BLOCK) elif token.IsKeyword('var'): self._AddContext(EcmaContext.VAR) elif token.IsOperator(','): while self._context.type not in (EcmaContext.VAR, EcmaContext.ARRAY_LITERAL, EcmaContext.OBJECT_LITERAL, EcmaContext.STATEMENT, EcmaContext.PARAMETERS, EcmaContext.GROUP): self._PopContext() elif token_type == TokenType.SEMICOLON: self._EndStatement() def Process(self, first_token): """Processes the token stream starting with the given token.""" self._token = first_token while self._token: self._ProcessToken() if self._token.IsCode(): self._last_code = self._token self._token = self._token.next try: self._PopContextType(EcmaContext.ROOT) except ParseError: # Ignore the "popped to root" error. pass def _ProcessToken(self): """Process the given token.""" token = self._token token.metadata = self._CreateMetaData() context = (self._ProcessContext() or self._context) token.metadata.context = context token.metadata.last_code = self._last_code # Determine the operator type of the token, if applicable. if token.type == TokenType.OPERATOR: token.metadata.operator_type = self._GetOperatorType(token) # Determine if there is an implied semicolon after the token. if token.type != TokenType.SEMICOLON: next_code = tokenutil.SearchExcept(token, TokenType.NON_CODE_TYPES) # A statement like if (x) does not need a semicolon after it. is_implied_block = self._context.type == EcmaContext.IMPLIED_BLOCK is_last_code_in_line = token.IsCode() and ( not next_code or next_code.line_number != token.line_number) is_continued_identifier = (token.type == TokenType.IDENTIFIER and token.string.endswith('.')) is_continued_operator = (token.type == TokenType.OPERATOR and not token.metadata.IsUnaryPostOperator()) is_continued_dot = token.string == '.' next_code_is_operator = next_code and next_code.type == TokenType.OPERATOR next_code_is_dot = next_code and next_code.string == '.' 
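# Roughly, a line ending # var x = 5 # gets an implied semicolon, while # var x = 5 + # does not, because the trailing operator sets is_continued_operator # above. 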
is_end_of_block = ( token.type == TokenType.END_BLOCK and token.metadata.context.type != EcmaContext.OBJECT_LITERAL) is_multiline_string = token.type == TokenType.STRING_TEXT is_continued_var_decl = (token.IsKeyword('var') and next_code and (next_code.type in [TokenType.IDENTIFIER, TokenType.SIMPLE_LVALUE]) and token.line_number < next_code.line_number) next_code_is_block = next_code and next_code.type == TokenType.START_BLOCK if (is_last_code_in_line and self._StatementCouldEndInContext() and not is_multiline_string and not is_end_of_block and not is_continued_var_decl and not is_continued_identifier and not is_continued_operator and not is_continued_dot and not next_code_is_dot and not next_code_is_operator and not is_implied_block and not next_code_is_block): token.metadata.is_implied_semicolon = True self._EndStatement() def _StatementCouldEndInContext(self): """Returns if the current statement (if any) may end in this context.""" # In the basic statement or variable declaration context, statement can # always end in this context. if self._context.type in (EcmaContext.STATEMENT, EcmaContext.VAR): return True # End of a ternary false branch inside a statement can also be the # end of the statement, for example: # var x = foo ? foo.bar() : null # In this case the statement ends after the null, when the context stack # looks like ternary_false > var > statement > root. if (self._context.type == EcmaContext.TERNARY_FALSE and self._context.parent.type in (EcmaContext.STATEMENT, EcmaContext.VAR)): return True # In all other contexts like object and array literals, ternary true, etc. # the statement can't yet end. return False def _GetOperatorType(self, token): """Returns the operator type of the given operator token. Args: token: The token to get arity for. Returns: The type of the operator. One of the *_OPERATOR constants defined in EcmaMetaData. """ if token.string == '?': return EcmaMetaData.TERNARY_OPERATOR if token.string in TokenType.UNARY_OPERATORS: return EcmaMetaData.UNARY_OPERATOR last_code = token.metadata.last_code if not last_code or last_code.type == TokenType.END_BLOCK: return EcmaMetaData.UNARY_OPERATOR if (token.string in TokenType.UNARY_POST_OPERATORS and last_code.type in TokenType.EXPRESSION_ENDER_TYPES): return EcmaMetaData.UNARY_POST_OPERATOR if (token.string in TokenType.UNARY_OK_OPERATORS and last_code.type not in TokenType.EXPRESSION_ENDER_TYPES and last_code.string not in TokenType.UNARY_POST_OPERATORS): return EcmaMetaData.UNARY_OPERATOR return EcmaMetaData.BINARY_OPERATOR closure_linter-2.3.13/closure_linter/testutil.py0000640014730400116100000000501612247733554021472 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2012 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
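# A rough sketch of how the helpers below are typically used: # # start_token = TokenizeSourceAndRunEcmaPass('var x = 1;') # functions, comments = ParseFunctionsAndComments('function f() {}') 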
"""Utility functions for testing gjslint components.""" # Allow non-Google copyright # pylint: disable=g-bad-file-header __author__ = ('nnaze@google.com (Nathan Naze)') import StringIO from closure_linter import ecmametadatapass from closure_linter import javascriptstatetracker from closure_linter import javascripttokenizer def TokenizeSource(source): """Convert a source into a string of tokens. Args: source: A source file as a string or file-like object (iterates lines). Returns: The first token of the resulting token stream. """ if isinstance(source, basestring): source = StringIO.StringIO(source) tokenizer = javascripttokenizer.JavaScriptTokenizer() return tokenizer.TokenizeFile(source) def TokenizeSourceAndRunEcmaPass(source): """Tokenize a source and run the EcmaMetaDataPass on it. Args: source: A source file as a string or file-like object (iterates lines). Returns: The first token of the resulting token stream. """ start_token = TokenizeSource(source) ecma_pass = ecmametadatapass.EcmaMetaDataPass() ecma_pass.Process(start_token) return start_token def ParseFunctionsAndComments(source): """Run the tokenizer and tracker and return comments and functions found. Args: source: A source file as a string or file-like object (iterates lines). Returns: The functions and comments as a tuple. """ start_token = TokenizeSourceAndRunEcmaPass(source) tracker = javascriptstatetracker.JavaScriptStateTracker() functions = [] comments = [] for token in start_token: tracker.HandleToken(token, tracker.GetLastNonSpaceToken()) function = tracker.GetFunction() if function and function not in functions: functions.append(function) comment = tracker.GetDocComment() if comment and comment not in comments: comments.append(comment) tracker.HandleAfterToken(token) return functions, comments closure_linter-2.3.13/closure_linter/errors.py0000750014730400116100000001013012247733554021124 0ustar ajpeng00000000000000#!/usr/bin/env python # Copyright 2007 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Error codes for JavaScript style checker.""" __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)') def ByName(name): """Get the error code for the given error name. 
Args: name: The name of the error Returns: The error code """ return globals()[name] # "File-fatal" errors - these errors stop further parsing of a single file FILE_NOT_FOUND = -1 FILE_DOES_NOT_PARSE = -2 # Spacing EXTRA_SPACE = 1 MISSING_SPACE = 2 EXTRA_LINE = 3 MISSING_LINE = 4 ILLEGAL_TAB = 5 WRONG_INDENTATION = 6 WRONG_BLANK_LINE_COUNT = 7 # Semicolons MISSING_SEMICOLON = 10 MISSING_SEMICOLON_AFTER_FUNCTION = 11 ILLEGAL_SEMICOLON_AFTER_FUNCTION = 12 REDUNDANT_SEMICOLON = 13 # Miscellaneous ILLEGAL_PROTOTYPE_MEMBER_VALUE = 100 LINE_TOO_LONG = 110 LINE_STARTS_WITH_OPERATOR = 120 COMMA_AT_END_OF_LITERAL = 121 MULTI_LINE_STRING = 130 UNNECESSARY_DOUBLE_QUOTED_STRING = 131 UNUSED_PRIVATE_MEMBER = 132 UNUSED_LOCAL_VARIABLE = 133 # Requires, provides GOOG_REQUIRES_NOT_ALPHABETIZED = 140 GOOG_PROVIDES_NOT_ALPHABETIZED = 141 MISSING_GOOG_REQUIRE = 142 MISSING_GOOG_PROVIDE = 143 EXTRA_GOOG_REQUIRE = 144 EXTRA_GOOG_PROVIDE = 145 # JsDoc INVALID_JSDOC_TAG = 200 INVALID_USE_OF_DESC_TAG = 201 NO_BUG_NUMBER_AFTER_BUG_TAG = 202 MISSING_PARAMETER_DOCUMENTATION = 210 EXTRA_PARAMETER_DOCUMENTATION = 211 WRONG_PARAMETER_DOCUMENTATION = 212 MISSING_JSDOC_TAG_TYPE = 213 MISSING_JSDOC_TAG_DESCRIPTION = 214 MISSING_JSDOC_PARAM_NAME = 215 OUT_OF_ORDER_JSDOC_TAG_TYPE = 216 MISSING_RETURN_DOCUMENTATION = 217 UNNECESSARY_RETURN_DOCUMENTATION = 218 MISSING_BRACES_AROUND_TYPE = 219 MISSING_MEMBER_DOCUMENTATION = 220 MISSING_PRIVATE = 221 EXTRA_PRIVATE = 222 INVALID_OVERRIDE_PRIVATE = 223 INVALID_INHERIT_DOC_PRIVATE = 224 MISSING_JSDOC_TAG_THIS = 225 UNNECESSARY_BRACES_AROUND_INHERIT_DOC = 226 INVALID_AUTHOR_TAG_DESCRIPTION = 227 JSDOC_PREFER_QUESTION_TO_PIPE_NULL = 230 JSDOC_ILLEGAL_QUESTION_WITH_PIPE = 231 JSDOC_MISSING_OPTIONAL_TYPE = 232 JSDOC_MISSING_OPTIONAL_PREFIX = 233 JSDOC_MISSING_VAR_ARGS_TYPE = 234 JSDOC_MISSING_VAR_ARGS_NAME = 235 # TODO(robbyw): Split this in to more specific syntax problems. INCORRECT_SUPPRESS_SYNTAX = 250 INVALID_SUPPRESS_TYPE = 251 UNNECESSARY_SUPPRESS = 252 # File ending FILE_MISSING_NEWLINE = 300 FILE_IN_BLOCK = 301 # Interfaces INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS = 400 INTERFACE_METHOD_CANNOT_HAVE_CODE = 401 # Comments MISSING_END_OF_SCOPE_COMMENT = 500 MALFORMED_END_OF_SCOPE_COMMENT = 501 # goog.scope - Namespace aliasing # TODO(nnaze) Add additional errors here and in aliaspass.py INVALID_USE_OF_GOOG_SCOPE = 600 EXTRA_GOOG_SCOPE_USAGE = 601 # ActionScript specific errors: # TODO(user): move these errors to their own file and move all JavaScript # specific errors to their own file as well. # All ActionScript specific errors should have error number at least 1000. FUNCTION_MISSING_RETURN_TYPE = 1132 PARAMETER_MISSING_TYPE = 1133 VAR_MISSING_TYPE = 1134 PARAMETER_MISSING_DEFAULT_VALUE = 1135 IMPORTS_NOT_ALPHABETIZED = 1140 IMPORT_CONTAINS_WILDCARD = 1141 UNUSED_IMPORT = 1142 INVALID_TRACE_SEVERITY_LEVEL = 1250 MISSING_TRACE_SEVERITY_LEVEL = 1251 MISSING_TRACE_MESSAGE = 1252 REMOVE_TRACE_BEFORE_SUBMIT = 1253 REMOVE_COMMENT_BEFORE_SUBMIT = 1254 # End of list of ActionScript specific errors. NEW_ERRORS = frozenset([ # Errors added after 2.0.2: WRONG_INDENTATION, MISSING_SEMICOLON, # Errors added after 2.3.9: JSDOC_MISSING_VAR_ARGS_TYPE, JSDOC_MISSING_VAR_ARGS_NAME, # Errors added after 2.3.13: ]) closure_linter-2.3.13/closure_linter/javascripttokens.py0000750014730400116100000001153312247733554023212 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2008 The Closure Linter Authors. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Classes to represent JavaScript tokens.""" __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)') from closure_linter.common import tokens class JavaScriptTokenType(tokens.TokenType): """Enumeration of JavaScript token types, and useful sets of token types.""" NUMBER = 'number' START_SINGLE_LINE_COMMENT = '//' START_BLOCK_COMMENT = '/*' START_DOC_COMMENT = '/**' END_BLOCK_COMMENT = '*/' END_DOC_COMMENT = 'doc */' COMMENT = 'comment' SINGLE_QUOTE_STRING_START = "'string" SINGLE_QUOTE_STRING_END = "string'" DOUBLE_QUOTE_STRING_START = '"string' DOUBLE_QUOTE_STRING_END = 'string"' STRING_TEXT = 'string' START_BLOCK = '{' END_BLOCK = '}' START_PAREN = '(' END_PAREN = ')' START_BRACKET = '[' END_BRACKET = ']' REGEX = '/regex/' FUNCTION_DECLARATION = 'function(...)' FUNCTION_NAME = 'function functionName(...)' START_PARAMETERS = 'startparams(' PARAMETERS = 'pa,ra,ms' END_PARAMETERS = ')endparams' SEMICOLON = ';' DOC_FLAG = '@flag' DOC_INLINE_FLAG = '{@flag ...}' DOC_START_BRACE = 'doc {' DOC_END_BRACE = 'doc }' DOC_PREFIX = 'comment prefix: * ' SIMPLE_LVALUE = 'lvalue=' KEYWORD = 'keyword' OPERATOR = 'operator' IDENTIFIER = 'identifier' STRING_TYPES = frozenset([ SINGLE_QUOTE_STRING_START, SINGLE_QUOTE_STRING_END, DOUBLE_QUOTE_STRING_START, DOUBLE_QUOTE_STRING_END, STRING_TEXT]) COMMENT_TYPES = frozenset([START_SINGLE_LINE_COMMENT, COMMENT, START_BLOCK_COMMENT, START_DOC_COMMENT, END_BLOCK_COMMENT, END_DOC_COMMENT, DOC_START_BRACE, DOC_END_BRACE, DOC_FLAG, DOC_INLINE_FLAG, DOC_PREFIX]) FLAG_DESCRIPTION_TYPES = frozenset([ DOC_INLINE_FLAG, COMMENT, DOC_START_BRACE, DOC_END_BRACE]) FLAG_ENDING_TYPES = frozenset([DOC_FLAG, END_DOC_COMMENT]) NON_CODE_TYPES = COMMENT_TYPES | frozenset([ tokens.TokenType.WHITESPACE, tokens.TokenType.BLANK_LINE]) UNARY_OPERATORS = ['!', 'new', 'delete', 'typeof', 'void'] UNARY_OK_OPERATORS = ['--', '++', '-', '+'] + UNARY_OPERATORS UNARY_POST_OPERATORS = ['--', '++'] # An expression ender is any token that can end an object - i.e. we could have # x.y or [1, 2], or (10 + 9) or {a: 10}. EXPRESSION_ENDER_TYPES = [tokens.TokenType.NORMAL, IDENTIFIER, NUMBER, SIMPLE_LVALUE, END_BRACKET, END_PAREN, END_BLOCK, SINGLE_QUOTE_STRING_END, DOUBLE_QUOTE_STRING_END] class JavaScriptToken(tokens.Token): """JavaScript token subclass of Token, provides extra instance checks. The following token types have data in attached_object: - All JsDoc flags: a parser.JsDocFlag object. """ def IsKeyword(self, keyword): """Tests if this token is the given keyword. Args: keyword: The keyword to compare to. Returns: True if this token is a keyword token with the given name. """ return self.type == JavaScriptTokenType.KEYWORD and self.string == keyword def IsOperator(self, operator): """Tests if this token is the given operator. Args: operator: The operator to compare to. Returns: True if this token is a operator token with the given name. 
""" return self.type == JavaScriptTokenType.OPERATOR and self.string == operator def IsAssignment(self): """Tests if this token is an assignment operator. Returns: True if this token is an assignment operator. """ return (self.type == JavaScriptTokenType.OPERATOR and self.string.endswith('=') and self.string not in ('==', '!=', '>=', '<=', '===', '!==')) def IsComment(self): """Tests if this token is any part of a comment. Returns: True if this token is any part of a comment. """ return self.type in JavaScriptTokenType.COMMENT_TYPES def IsCode(self): """Tests if this token is code, as opposed to a comment or whitespace.""" return self.type not in JavaScriptTokenType.NON_CODE_TYPES def __repr__(self): return '' % (self.line_number, self.type, self.string, self.values, self.metadata) closure_linter-2.3.13/closure_linter/indentation.py0000750014730400116100000005100612247733554022133 0ustar ajpeng00000000000000#!/usr/bin/env python # Copyright 2010 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Methods for checking EcmaScript files for indentation issues.""" __author__ = ('robbyw@google.com (Robert Walker)') import gflags as flags from closure_linter import ecmametadatapass from closure_linter import errors from closure_linter import javascripttokens from closure_linter import tokenutil from closure_linter.common import error from closure_linter.common import position flags.DEFINE_boolean('debug_indentation', False, 'Whether to print debugging information for indentation.') # Shorthand Context = ecmametadatapass.EcmaContext Error = error.Error Position = position.Position Type = javascripttokens.JavaScriptTokenType # The general approach: # # 1. Build a stack of tokens that can affect indentation. # For each token, we determine if it is a block or continuation token. # Some tokens need to be temporarily overwritten in case they are removed # before the end of the line. # Much of the work here is determining which tokens to keep on the stack # at each point. Operators, for example, should be removed once their # expression or line is gone, while parentheses must stay until the matching # end parentheses is found. # # 2. Given that stack, determine the allowable indentations. # Due to flexible indentation rules in JavaScript, there may be many # allowable indentations for each stack. We follows the general # "no false positives" approach of GJsLint and build the most permissive # set possible. class TokenInfo(object): """Stores information about a token. Attributes: token: The token is_block: Whether the token represents a block indentation. is_transient: Whether the token should be automatically removed without finding a matching end token. overridden_by: TokenInfo for a token that overrides the indentation that this token would require. is_permanent_override: Whether the override on this token should persist even after the overriding token is removed from the stack. 
For example: x([ 1], 2); needs this to be set so the last line is not required to be a continuation indent. line_number: The effective line number of this token. Will either be the actual line number or the one before it in the case of a mis-wrapped operator. """ def __init__(self, token, is_block=False): """Initializes a TokenInfo object. Args: token: The token is_block: Whether the token represents a block indentation. """ self.token = token self.overridden_by = None self.is_permanent_override = False self.is_block = is_block self.is_transient = not is_block and token.type not in ( Type.START_PAREN, Type.START_PARAMETERS) self.line_number = token.line_number def __repr__(self): result = '\n %s' % self.token if self.overridden_by: result = '%s OVERRIDDEN [by "%s"]' % ( result, self.overridden_by.token.string) result += ' {is_block: %s, is_transient: %s}' % ( self.is_block, self.is_transient) return result class IndentationRules(object): """EcmaScript indentation rules. Can be used to find common indentation errors in JavaScript, ActionScript and other Ecma-like scripting languages. """ def __init__(self): """Initializes the IndentationRules checker.""" self._stack = [] # Map from line number to number of characters it is off in indentation. self._start_index_offset = {} def Finalize(self): if self._stack: old_stack = self._stack self._stack = [] raise Exception('INTERNAL ERROR: indentation stack is not empty: %r' % old_stack) def CheckToken(self, token, state): """Checks a token for indentation errors. Args: token: The current token under consideration state: Additional information about the current tree state Returns: An error array [error code, error string, error token] if the token is improperly indented, or None if indentation is correct. """ token_type = token.type indentation_errors = [] stack = self._stack is_first = self._IsFirstNonWhitespaceTokenInLine(token) # Add tokens that could decrease indentation before checking. if token_type == Type.END_PAREN: self._PopTo(Type.START_PAREN) elif token_type == Type.END_PARAMETERS: self._PopTo(Type.START_PARAMETERS) elif token_type == Type.END_BRACKET: self._PopTo(Type.START_BRACKET) elif token_type == Type.END_BLOCK: start_token = self._PopTo(Type.START_BLOCK) # Check for required goog.scope comment. if start_token: goog_scope = tokenutil.GoogScopeOrNoneFromStartBlock(start_token.token) if goog_scope is not None: if not token.line.endswith('; // goog.scope\n'): if (token.line.find('//') > -1 and token.line.find('goog.scope') > token.line.find('//')): indentation_errors.append([ errors.MALFORMED_END_OF_SCOPE_COMMENT, ('Malformed end of goog.scope comment. Please use the ' 'exact following syntax to close the scope:\n' '}); // goog.scope'), token, Position(token.start_index, token.length)]) else: indentation_errors.append([ errors.MISSING_END_OF_SCOPE_COMMENT, ('Missing comment for end of goog.scope which opened at line ' '%d. End the scope with:\n' '}); // goog.scope' % (start_token.line_number)), token, Position(token.start_index, token.length)]) elif token_type == Type.KEYWORD and token.string in ('case', 'default'): self._Add(self._PopTo(Type.START_BLOCK)) elif is_first and token.string == '.': # This token should have been on the previous line, so treat it as if it # was there. info = TokenInfo(token) info.line_number = token.line_number - 1 self._Add(info) elif token_type == Type.SEMICOLON: self._PopTransient() not_binary_operator = (token_type != Type.OPERATOR or token.metadata.IsUnaryOperator()) not_dot = token.string != '.' 
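# Roughly, for input like # if (x) { # y(); # } # the START_BLOCK handled below pushes a block entry on the stack, so # code inside the braces is expected at the surrounding indentation # plus 2. 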
if is_first and not_binary_operator and not_dot and token.type not in ( Type.COMMENT, Type.DOC_PREFIX, Type.STRING_TEXT): if flags.FLAGS.debug_indentation: print 'Line #%d: stack %r' % (token.line_number, stack) # Ignore lines that start in JsDoc since we don't check them properly yet. # TODO(robbyw): Support checking JsDoc indentation. # Ignore lines that start as multi-line strings since indentation is N/A. # Ignore lines that start with operators since we report that already. # Ignore lines with tabs since we report that already. expected = self._GetAllowableIndentations() actual = self._GetActualIndentation(token) # Special case comments describing else, case, and default. Allow them # to outdent to the parent block. if token_type in Type.COMMENT_TYPES: next_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES) if next_code and next_code.type == Type.END_BLOCK: next_code = tokenutil.SearchExcept(next_code, Type.NON_CODE_TYPES) if next_code and next_code.string in ('else', 'case', 'default'): # TODO(robbyw): This almost certainly introduces false negatives. expected |= self._AddToEach(expected, -2) if actual >= 0 and actual not in expected: expected = sorted(expected) indentation_errors.append([ errors.WRONG_INDENTATION, 'Wrong indentation: expected any of {%s} but got %d' % ( ', '.join( ['%d' % x for x in expected]), actual), token, Position(actual, expected[0])]) self._start_index_offset[token.line_number] = expected[0] - actual # Add tokens that could increase indentation. if token_type == Type.START_BRACKET: self._Add(TokenInfo( token=token, is_block=token.metadata.context.type == Context.ARRAY_LITERAL)) elif token_type == Type.START_BLOCK or token.metadata.is_implied_block: self._Add(TokenInfo(token=token, is_block=True)) elif token_type in (Type.START_PAREN, Type.START_PARAMETERS): self._Add(TokenInfo(token=token, is_block=False)) elif token_type == Type.KEYWORD and token.string == 'return': self._Add(TokenInfo(token)) elif not token.IsLastInLine() and ( token.IsAssignment() or token.IsOperator('?')): self._Add(TokenInfo(token=token)) # Handle implied block closes. if token.metadata.is_implied_block_close: self._PopToImpliedBlock() # Add some tokens only if they appear at the end of the line. is_last = self._IsLastCodeInLine(token) if is_last: if token_type == Type.OPERATOR: if token.string == ':': if stack and stack[-1].token.string == '?': # When a ternary : is on a different line than its '?', it doesn't # add indentation. if token.line_number == stack[-1].token.line_number: self._Add(TokenInfo(token)) elif token.metadata.context.type == Context.CASE_BLOCK: # Pop transient tokens from say, line continuations, e.g., # case x. # y: # Want to pop the transient 4 space continuation indent. self._PopTransient() # Starting the body of the case statement, which is a type of # block. self._Add(TokenInfo(token=token, is_block=True)) elif token.metadata.context.type == Context.LITERAL_ELEMENT: # When in an object literal, acts as operator indicating line # continuations. self._Add(TokenInfo(token)) pass else: # ':' might also be a statement label, no effect on indentation in # this case. pass elif token.string != ',': self._Add(TokenInfo(token)) else: # The token is a comma. 
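# Its effect depends on context, e.g.: # var a = 1, # b = 2; # keeps the var continuation indent, while commas outside var # declarations and parameter lists pop transient entries instead. 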
if token.metadata.context.type == Context.VAR: self._Add(TokenInfo(token)) elif token.metadata.context.type != Context.PARAMETERS: self._PopTransient() elif (token.string.endswith('.') and token_type in (Type.IDENTIFIER, Type.NORMAL)): self._Add(TokenInfo(token)) elif token_type == Type.PARAMETERS and token.string.endswith(','): # Parameter lists. self._Add(TokenInfo(token)) elif token.IsKeyword('var'): self._Add(TokenInfo(token)) elif token.metadata.is_implied_semicolon: self._PopTransient() elif token.IsAssignment(): self._Add(TokenInfo(token)) return indentation_errors def _AddToEach(self, original, amount): """Returns a new set with the given amount added to each element. Args: original: The original set of numbers amount: The amount to add to each element Returns: A new set containing each element of the original set added to the amount. """ return set([x + amount for x in original]) _HARD_STOP_TYPES = (Type.START_PAREN, Type.START_PARAMETERS, Type.START_BRACKET) _HARD_STOP_STRINGS = ('return', '?') def _IsHardStop(self, token): """Determines if the given token can have a hard stop after it. Args: token: token to examine Returns: Whether the token can have a hard stop after it. Hard stops are indentations defined by the position of another token as in indentation lined up with return, (, [, and ?. """ return (token.type in self._HARD_STOP_TYPES or token.string in self._HARD_STOP_STRINGS or token.IsAssignment()) def _GetAllowableIndentations(self): """Computes the set of allowable indentations. Returns: The set of allowable indentations, given the current stack. """ expected = set([0]) hard_stops = set([]) # Whether the tokens are still in the same continuation, meaning additional # indentation is optional. As an example: # x = 5 + # 6 + # 7; # The second '+' does not add any required indentation. in_same_continuation = False for token_info in self._stack: token = token_info.token # Handle normal additive indentation tokens. if not token_info.overridden_by and token.string != 'return': if token_info.is_block: expected = self._AddToEach(expected, 2) hard_stops = self._AddToEach(hard_stops, 2) in_same_continuation = False elif in_same_continuation: expected |= self._AddToEach(expected, 4) hard_stops |= self._AddToEach(hard_stops, 4) else: expected = self._AddToEach(expected, 4) hard_stops |= self._AddToEach(hard_stops, 4) in_same_continuation = True # Handle hard stops after (, [, return, =, and ? if self._IsHardStop(token): override_is_hard_stop = (token_info.overridden_by and self._IsHardStop( token_info.overridden_by.token)) if not override_is_hard_stop: start_index = token.start_index if token.line_number in self._start_index_offset: start_index += self._start_index_offset[token.line_number] if (token.type in (Type.START_PAREN, Type.START_PARAMETERS) and not token_info.overridden_by): hard_stops.add(start_index + 1) elif token.string == 'return' and not token_info.overridden_by: hard_stops.add(start_index + 7) elif token.type == Type.START_BRACKET: hard_stops.add(start_index + 1) elif token.IsAssignment(): hard_stops.add(start_index + len(token.string) + 1) elif token.IsOperator('?') and not token_info.overridden_by: hard_stops.add(start_index + 2) return (expected | hard_stops) or set([0]) def _GetActualIndentation(self, token): """Gets the actual indentation of the line containing the given token. Args: token: Any token on the line. Returns: The actual indentation of the line containing the given token. Returns -1 if this line should be ignored due to the presence of tabs. 
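For example, a line indented with four leading spaces yields 4, while a line whose leading whitespace contains a tab yields -1. 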
""" # Move to the first token in the line token = tokenutil.GetFirstTokenInSameLine(token) # If it is whitespace, it is the indentation. if token.type == Type.WHITESPACE: if token.string.find('\t') >= 0: return -1 else: return len(token.string) elif token.type == Type.PARAMETERS: return len(token.string) - len(token.string.lstrip()) else: return 0 def _IsFirstNonWhitespaceTokenInLine(self, token): """Determines if the given token is the first non-space token on its line. Args: token: The token. Returns: True if the token is the first non-whitespace token on its line. """ if token.type in (Type.WHITESPACE, Type.BLANK_LINE): return False if token.IsFirstInLine(): return True return (token.previous and token.previous.IsFirstInLine() and token.previous.type == Type.WHITESPACE) def _IsLastCodeInLine(self, token): """Determines if the given token is the last code token on its line. Args: token: The token. Returns: True if the token is the last code token on its line. """ if token.type in Type.NON_CODE_TYPES: return False start_token = token while True: token = token.next if not token or token.line_number != start_token.line_number: return True if token.type not in Type.NON_CODE_TYPES: return False def _Add(self, token_info): """Adds the given token info to the stack. Args: token_info: The token information to add. """ if self._stack and self._stack[-1].token == token_info.token: # Don't add the same token twice. return if token_info.is_block or token_info.token.type == Type.START_PAREN: token_info.overridden_by = ( tokenutil.GoogScopeOrNoneFromStartBlock(token_info.token)) index = 1 while index <= len(self._stack): stack_info = self._stack[-index] stack_token = stack_info.token if stack_info.line_number == token_info.line_number: # In general, tokens only override each other when they are on # the same line. stack_info.overridden_by = token_info if (token_info.token.type == Type.START_BLOCK and (stack_token.IsAssignment() or stack_token.type in (Type.IDENTIFIER, Type.START_PAREN))): # Multi-line blocks have lasting overrides, as in: # callFn({ # a: 10 # }, # 30); # b/11450054. If a string is not closed properly then close_block # could be null. close_block = token_info.token.metadata.context.end_token stack_info.is_permanent_override = close_block and ( close_block.line_number != token_info.token.line_number) elif (token_info.token.type == Type.START_BLOCK and token_info.token.metadata.context.type == Context.BLOCK and (stack_token.IsAssignment() or stack_token.type == Type.IDENTIFIER)): # When starting a function block, the override can transcend lines. # For example # long.long.name = function( # a) { # In this case the { and the = are on different lines. But the # override should still apply. stack_info.overridden_by = token_info stack_info.is_permanent_override = True else: break index += 1 self._stack.append(token_info) def _Pop(self): """Pops the top token from the stack. Returns: The popped token info. """ token_info = self._stack.pop() if token_info.token.type not in (Type.START_BLOCK, Type.START_BRACKET): # Remove any temporary overrides. self._RemoveOverrides(token_info) else: # For braces and brackets, which can be object and array literals, remove # overrides when the literal is closed on the same line. 
token_check = token_info.token same_type = token_check.type goal_type = None if token_info.token.type == Type.START_BRACKET: goal_type = Type.END_BRACKET else: goal_type = Type.END_BLOCK line_number = token_info.token.line_number count = 0 while token_check and token_check.line_number == line_number: if token_check.type == goal_type: count -= 1 if not count: self._RemoveOverrides(token_info) break if token_check.type == same_type: count += 1 token_check = token_check.next return token_info def _PopToImpliedBlock(self): """Pops the stack until an implied block token is found.""" while not self._Pop().token.metadata.is_implied_block: pass def _PopTo(self, stop_type): """Pops the stack until a token of the given type is popped. Args: stop_type: The type of token to pop to. Returns: The token info of the given type that was popped. """ last = None while True: last = self._Pop() if last.token.type == stop_type: break return last def _RemoveOverrides(self, token_info): """Marks any token that was overridden by this token as active again. Args: token_info: The token that is being removed from the stack. """ for stack_token in self._stack: if (stack_token.overridden_by == token_info and not stack_token.is_permanent_override): stack_token.overridden_by = None def _PopTransient(self): """Pops all transient tokens - i.e. not blocks, literals, or parens.""" while self._stack and self._stack[-1].is_transient: self._Pop() closure_linter-2.3.13/closure_linter/javascriptlintrules.py0000750014730400116100000007565612247733554023740 0ustar ajpeng00000000000000#!/usr/bin/env python # Copyright 2011 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Methods for checking JS files for common style guide violations. These style guide violations should only apply to JavaScript and not all Ecma scripting languages. """ __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)', 'jacobr@google.com (Jacob Richman)') import re from closure_linter import ecmalintrules from closure_linter import error_check from closure_linter import errors from closure_linter import javascripttokenizer from closure_linter import javascripttokens from closure_linter import requireprovidesorter from closure_linter import tokenutil from closure_linter.common import error from closure_linter.common import position # Shorthand Error = error.Error Position = position.Position Rule = error_check.Rule Type = javascripttokens.JavaScriptTokenType class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules): """JavaScript lint rules that catch JavaScript specific style errors.""" def __init__(self, namespaces_info): """Initializes a JavaScriptLintRules instance.""" ecmalintrules.EcmaScriptLintRules.__init__(self) self._namespaces_info = namespaces_info self._declared_private_member_tokens = {} self._declared_private_members = set() self._used_private_members = set() # A stack of dictionaries, one for each function scope entered. 
Each # dictionary is keyed by an identifier that defines a local variable and has # a token as its value. self._unused_local_variables_by_scope = [] def HandleMissingParameterDoc(self, token, param_name): """Handle errors associated with a parameter missing a param tag.""" self._HandleError(errors.MISSING_PARAMETER_DOCUMENTATION, 'Missing docs for parameter: "%s"' % param_name, token) def __ContainsRecordType(self, token): """Check whether the given token contains a record type. Args: token: The token being checked Returns: True if the token contains a record type, False otherwise. """ # If we see more than one left-brace in the string of an annotation token, # then there's a record type in there. return ( token and token.type == Type.DOC_FLAG and token.attached_object.type is not None and token.attached_object.type.find('{') != token.string.rfind('{')) def CheckToken(self, token, state): """Checks a token, given the current parser_state, for warnings and errors. Args: token: The current token under consideration state: parser_state object that indicates the current state in the page """ # For @param don't ignore record type. if (self.__ContainsRecordType(token) and token.attached_object.flag_type != 'param'): # We should bail out and not emit any warnings for this annotation. # TODO(nicksantos): Support record types for real. state.GetDocComment().Invalidate() return # Call the base class's CheckToken function. super(JavaScriptLintRules, self).CheckToken(token, state) # Store some convenience variables namespaces_info = self._namespaces_info if error_check.ShouldCheck(Rule.UNUSED_LOCAL_VARIABLES): self._CheckUnusedLocalVariables(token, state) if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS): # Find all assignments to private members. if token.type == Type.SIMPLE_LVALUE: identifier = token.string if identifier.endswith('_') and not identifier.endswith('__'): doc_comment = state.GetDocComment() suppressed = (doc_comment and doc_comment.HasFlag('suppress') and (doc_comment.GetFlag('suppress').type == 'underscore' or doc_comment.GetFlag('suppress').type == 'unusedPrivateMembers')) if not suppressed: # Look for static members defined on a provided namespace. if namespaces_info: namespace = namespaces_info.GetClosurizedNamespace(identifier) provided_namespaces = namespaces_info.GetProvidedNamespaces() else: namespace = None provided_namespaces = set() # Skip cases of this.something_.somethingElse_. regex = re.compile(r'^this\.[a-zA-Z_]+$') if namespace in provided_namespaces or regex.match(identifier): variable = identifier.split('.')[-1] self._declared_private_member_tokens[variable] = token self._declared_private_members.add(variable) elif not identifier.endswith('__'): # Consider setting public members of private members to be a usage. for piece in identifier.split('.'): if piece.endswith('_'): self._used_private_members.add(piece) # Find all usages of private members. if token.type == Type.IDENTIFIER: for piece in token.string.split('.'): if piece.endswith('_'): self._used_private_members.add(piece) if token.type == Type.DOC_FLAG: flag = token.attached_object if flag.flag_type == 'param' and flag.name_token is not None: self._CheckForMissingSpaceBeforeToken( token.attached_object.name_token) if flag.type is not None and flag.name is not None: if error_check.ShouldCheck(Rule.VARIABLE_ARG_MARKER): # Check for variable arguments marker in type. 
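# For example: # @param {...number} var_args is accepted, while # @param {...number} values must be renamed to var_args, and # @param {number} var_args must have a type starting with '...'. 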
if (flag.type.startswith('...') and flag.name != 'var_args'): self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_NAME, 'Variable length argument %s must be renamed ' 'to var_args.' % flag.name, token) elif (not flag.type.startswith('...') and flag.name == 'var_args'): self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_TYPE, 'Variable length argument %s type must start ' 'with \'...\'.' % flag.name, token) if error_check.ShouldCheck(Rule.OPTIONAL_TYPE_MARKER): # Check for optional marker in type. if (flag.type.endswith('=') and not flag.name.startswith('opt_')): self._HandleError(errors.JSDOC_MISSING_OPTIONAL_PREFIX, 'Optional parameter name %s must be prefixed ' 'with opt_.' % flag.name, token) elif (not flag.type.endswith('=') and flag.name.startswith('opt_')): self._HandleError(errors.JSDOC_MISSING_OPTIONAL_TYPE, 'Optional parameter %s type must end with =.' % flag.name, token) if flag.flag_type in state.GetDocFlag().HAS_TYPE: # Check for both missing type token and empty type braces '{}' # Missing suppress types are reported separately and we allow enums # and const without types. if (flag.flag_type not in ('suppress', 'enum', 'const') and (not flag.type or flag.type.isspace())): self._HandleError(errors.MISSING_JSDOC_TAG_TYPE, 'Missing type in %s tag' % token.string, token) elif flag.name_token and flag.type_end_token and tokenutil.Compare( flag.type_end_token, flag.name_token) > 0: self._HandleError( errors.OUT_OF_ORDER_JSDOC_TAG_TYPE, 'Type should be immediately after %s tag' % token.string, token) elif token.type == Type.DOUBLE_QUOTE_STRING_START: next_token = token.next while next_token.type == Type.STRING_TEXT: if javascripttokenizer.JavaScriptTokenizer.SINGLE_QUOTE.search( next_token.string): break next_token = next_token.next else: self._HandleError( errors.UNNECESSARY_DOUBLE_QUOTED_STRING, 'Single-quoted string preferred over double-quoted string.', token, position=Position.All(token.string)) elif token.type == Type.END_DOC_COMMENT: doc_comment = state.GetDocComment() # When @externs appears in a @fileoverview comment, it should trigger # the same limited doc checks as a special filename like externs.js. if doc_comment.HasFlag('fileoverview') and doc_comment.HasFlag('externs'): self._SetLimitedDocChecks(True) if (error_check.ShouldCheck(Rule.BLANK_LINES_AT_TOP_LEVEL) and not self._is_html and state.InTopLevel() and not state.InNonScopeBlock()): # Check if we're in a fileoverview or constructor JsDoc. is_constructor = ( doc_comment.HasFlag('constructor') or doc_comment.HasFlag('interface')) # @fileoverview is an optional tag so if the docstring is the first # token in the file treat it as a file level docstring. is_file_level_comment = ( doc_comment.HasFlag('fileoverview') or not doc_comment.start_token.previous) # If the comment is not a file overview, and it does not immediately # precede some code, skip it. # NOTE: The tokenutil methods are not used here because of their # behavior at the top of a file. next_token = token.next if (not next_token or (not is_file_level_comment and next_token.type in Type.NON_CODE_TYPES)): return # Don't require extra blank lines around suppression of extra # goog.require errors. if (doc_comment.SuppressionOnly() and next_token.type == Type.IDENTIFIER and next_token.string in ['goog.provide', 'goog.require']): return # Find the start of this block (include comments above the block, unless # this is a file overview). 
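# For example, given # // A helper comment. # /** @constructor */ # the block is treated as starting at the line comment, so blank # lines are counted above it. 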
block_start = doc_comment.start_token if not is_file_level_comment: token = block_start.previous while token and token.type in Type.COMMENT_TYPES: block_start = token token = token.previous # Count the number of blank lines before this block. blank_lines = 0 token = block_start.previous while token and token.type in [Type.WHITESPACE, Type.BLANK_LINE]: if token.type == Type.BLANK_LINE: # A blank line. blank_lines += 1 elif token.type == Type.WHITESPACE and not token.line.strip(): # A line with only whitespace on it. blank_lines += 1 token = token.previous # Log errors. error_message = False expected_blank_lines = 0 # Only need blank line before file overview if it is not the beginning # of the file, e.g. copyright is first. if is_file_level_comment and blank_lines == 0 and block_start.previous: error_message = 'Should have a blank line before a file overview.' expected_blank_lines = 1 elif is_constructor and blank_lines != 3: error_message = ( 'Should have 3 blank lines before a constructor/interface.') expected_blank_lines = 3 elif (not is_file_level_comment and not is_constructor and blank_lines != 2): error_message = 'Should have 2 blank lines between top-level blocks.' expected_blank_lines = 2 if error_message: self._HandleError( errors.WRONG_BLANK_LINE_COUNT, error_message, block_start, position=Position.AtBeginning(), fix_data=expected_blank_lines - blank_lines) elif token.type == Type.END_BLOCK: if state.InFunction() and state.IsFunctionClose(): is_immediately_called = (token.next and token.next.type == Type.START_PAREN) function = state.GetFunction() if not self._limited_doc_checks: if (function.has_return and function.doc and not is_immediately_called and not function.doc.HasFlag('return') and not function.doc.InheritsDocumentation() and not function.doc.HasFlag('constructor')): # Check for proper documentation of return value. self._HandleError( errors.MISSING_RETURN_DOCUMENTATION, 'Missing @return JsDoc in function with non-trivial return', function.doc.end_token, position=Position.AtBeginning()) elif (not function.has_return and not function.has_throw and function.doc and function.doc.HasFlag('return') and not state.InInterfaceMethod()): return_flag = function.doc.GetFlag('return') if (return_flag.type is None or ( 'undefined' not in return_flag.type and 'void' not in return_flag.type and '*' not in return_flag.type)): self._HandleError( errors.UNNECESSARY_RETURN_DOCUMENTATION, 'Found @return JsDoc on function that returns nothing', return_flag.flag_token, position=Position.AtBeginning()) # b/4073735. Method in object literal definition of prototype can # safely reference 'this'. prototype_object_literal = False block_start = None previous_code = None previous_previous_code = None # Search for cases where prototype is defined as object literal. # previous_previous_code # | previous_code # | | block_start # | | | # a.b.prototype = { # c : function() { # this.d = 1; # } # } # If in object literal, find first token of block so to find previous # tokens to check above condition. if state.InObjectLiteral(): block_start = state.GetCurrentBlockStart() # If an object literal then get previous token (code type). For above # case it should be '='. if block_start: previous_code = tokenutil.SearchExcept(block_start, Type.NON_CODE_TYPES, reverse=True) # If previous token to block is '=' then get its previous token. 
if previous_code and previous_code.IsOperator('='): previous_previous_code = tokenutil.SearchExcept(previous_code, Type.NON_CODE_TYPES, reverse=True) # If variable/token before '=' ends with '.prototype' then its above # case of prototype defined with object literal. prototype_object_literal = (previous_previous_code and previous_previous_code.string.endswith( '.prototype')) if (function.has_this and function.doc and not function.doc.HasFlag('this') and not function.is_constructor and not function.is_interface and '.prototype.' not in function.name and not prototype_object_literal): self._HandleError( errors.MISSING_JSDOC_TAG_THIS, 'Missing @this JsDoc in function referencing "this". (' 'this usually means you are trying to reference "this" in ' 'a static function, or you have forgotten to mark a ' 'constructor with @constructor)', function.doc.end_token, position=Position.AtBeginning()) elif token.type == Type.IDENTIFIER: if token.string == 'goog.inherits' and not state.InFunction(): if state.GetLastNonSpaceToken().line_number == token.line_number: self._HandleError( errors.MISSING_LINE, 'Missing newline between constructor and goog.inherits', token, position=Position.AtBeginning()) extra_space = state.GetLastNonSpaceToken().next while extra_space != token: if extra_space.type == Type.BLANK_LINE: self._HandleError( errors.EXTRA_LINE, 'Extra line between constructor and goog.inherits', extra_space) extra_space = extra_space.next # TODO(robbyw): Test the last function was a constructor. # TODO(robbyw): Test correct @extends and @implements documentation. elif (token.string == 'goog.provide' and not state.InFunction() and namespaces_info is not None): namespace = tokenutil.GetStringAfterToken(token) # Report extra goog.provide statement. if not namespace or namespaces_info.IsExtraProvide(token): if not namespace: msg = 'Empty namespace in goog.provide' else: msg = 'Unnecessary goog.provide: ' + namespace # Hint to user if this is a Test namespace. if namespace.endswith('Test'): msg += (' *Test namespaces must be mentioned in the ' 'goog.setTestOnly() call') self._HandleError( errors.EXTRA_GOOG_PROVIDE, msg, token, position=Position.AtBeginning()) if namespaces_info.IsLastProvide(token): # Report missing provide statements after the last existing provide. missing_provides = namespaces_info.GetMissingProvides() if missing_provides: self._ReportMissingProvides( missing_provides, tokenutil.GetLastTokenInSameLine(token).next, False) # If there are no require statements, missing requires should be # reported after the last provide. if not namespaces_info.GetRequiredNamespaces(): missing_requires = namespaces_info.GetMissingRequires() if missing_requires: self._ReportMissingRequires( missing_requires, tokenutil.GetLastTokenInSameLine(token).next, True) elif (token.string == 'goog.require' and not state.InFunction() and namespaces_info is not None): namespace = tokenutil.GetStringAfterToken(token) # If there are no provide statements, missing provides should be # reported before the first require. if (namespaces_info.IsFirstRequire(token) and not namespaces_info.GetProvidedNamespaces()): missing_provides = namespaces_info.GetMissingProvides() if missing_provides: self._ReportMissingProvides( missing_provides, tokenutil.GetFirstTokenInSameLine(token), True) # Report extra goog.require statement. 
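# For example, a line like # goog.require('dummy.Unused'); # (a hypothetical namespace) is reported here when namespaces_info # considers it unnecessary. 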
if not namespace or namespaces_info.IsExtraRequire(token): if not namespace: msg = 'Empty namespace in goog.require' else: msg = 'Unnecessary goog.require: ' + namespace self._HandleError( errors.EXTRA_GOOG_REQUIRE, msg, token, position=Position.AtBeginning()) # Report missing goog.require statements. if namespaces_info.IsLastRequire(token): missing_requires = namespaces_info.GetMissingRequires() if missing_requires: self._ReportMissingRequires( missing_requires, tokenutil.GetLastTokenInSameLine(token).next, False) elif token.type == Type.OPERATOR: last_in_line = token.IsLastInLine() # If the token is unary and appears to be used in a unary context # it's ok. Otherwise, if it's at the end of the line or immediately # before a comment, it's ok. # Don't report an error before a start bracket - it will be reported # by that token's space checks. if (not token.metadata.IsUnaryOperator() and not last_in_line and not token.next.IsComment() and not token.next.IsOperator(',') and token.next.type not in (Type.WHITESPACE, Type.END_PAREN, Type.END_BRACKET, Type.SEMICOLON, Type.START_BRACKET)): self._HandleError( errors.MISSING_SPACE, 'Missing space after "%s"' % token.string, token, position=Position.AtEnd(token.string)) elif token.type == Type.WHITESPACE: first_in_line = token.IsFirstInLine() last_in_line = token.IsLastInLine() # Check whitespace length if it's not the first token of the line and # if it's not immediately before a comment. if not last_in_line and not first_in_line and not token.next.IsComment(): # Ensure there is no space after opening parentheses. if (token.previous.type in (Type.START_PAREN, Type.START_BRACKET, Type.FUNCTION_NAME) or token.next.type == Type.START_PARAMETERS): self._HandleError( errors.EXTRA_SPACE, 'Extra space after "%s"' % token.previous.string, token, position=Position.All(token.string)) elif token.type == Type.SEMICOLON: previous_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, reverse=True) if not previous_token: self._HandleError( errors.REDUNDANT_SEMICOLON, 'Semicolon without any statement', token, position=Position.AtEnd(token.string)) elif (previous_token.type == Type.KEYWORD and previous_token.string not in ['break', 'continue', 'return']): self._HandleError( errors.REDUNDANT_SEMICOLON, ('Semicolon after \'%s\' without any statement.' ' Looks like an error.' % previous_token.string), token, position=Position.AtEnd(token.string)) def _CheckUnusedLocalVariables(self, token, state): """Checks for unused local variables in function blocks. Args: token: The token to check. state: The state tracker. """ # We don't use state.InFunction because that disregards scope functions. in_function = state.FunctionDepth() > 0 if token.type == Type.SIMPLE_LVALUE or token.type == Type.IDENTIFIER: if in_function: identifier = token.string # Check whether the previous token was var. previous_code_token = tokenutil.CustomSearch( token, lambda t: t.type not in Type.NON_CODE_TYPES, reverse=True) if previous_code_token and previous_code_token.IsKeyword('var'): # Add local variable declaration to the top of the unused locals # stack. self._unused_local_variables_by_scope[-1][identifier] = token elif token.type == Type.IDENTIFIER: # This covers most cases where the variable is used as an identifier. self._MarkLocalVariableUsed(token) elif token.type == Type.SIMPLE_LVALUE and '.' in identifier: # This covers cases where a value is assigned to a property of the # variable. 

  def _CheckUnusedLocalVariables(self, token, state):
    """Checks for unused local variables in function blocks.

    Args:
      token: The token to check.
      state: The state tracker.
    """
    # We don't use state.InFunction because that disregards scope functions.
    in_function = state.FunctionDepth() > 0
    if token.type == Type.SIMPLE_LVALUE or token.type == Type.IDENTIFIER:
      if in_function:
        identifier = token.string
        # Check whether the previous token was var.
        previous_code_token = tokenutil.CustomSearch(
            token,
            lambda t: t.type not in Type.NON_CODE_TYPES,
            reverse=True)
        if previous_code_token and previous_code_token.IsKeyword('var'):
          # Add the local variable declaration to the top of the unused locals
          # stack.
          self._unused_local_variables_by_scope[-1][identifier] = token
        elif token.type == Type.IDENTIFIER:
          # This covers most cases where the variable is used as an identifier.
          self._MarkLocalVariableUsed(token)
        elif token.type == Type.SIMPLE_LVALUE and '.' in identifier:
          # This covers cases where a value is assigned to a property of the
          # variable.
          self._MarkLocalVariableUsed(token)
    elif token.type == Type.START_BLOCK:
      if in_function and state.IsFunctionOpen():
        # Push a new map onto the stack.
        self._unused_local_variables_by_scope.append({})
    elif token.type == Type.END_BLOCK:
      if state.IsFunctionClose():
        # Pop the stack and report any remaining locals as unused.
        unused_local_variables = self._unused_local_variables_by_scope.pop()
        for unused_token in unused_local_variables.values():
          self._HandleError(
              errors.UNUSED_LOCAL_VARIABLE,
              'Unused local variable: %s.' % unused_token.string,
              unused_token)

  def _MarkLocalVariableUsed(self, token):
    """Marks the local variable as used in the relevant scope.

    Marks the local variable as used in the scope nearest to the current scope
    that matches the given token.

    Args:
      token: The token representing the potential usage of a local variable.
    """
    identifier = token.string.split('.')[0]

    # Find the first instance of the identifier in the stack of function scopes
    # and mark it used.
    for unused_local_variables in reversed(
        self._unused_local_variables_by_scope):
      if identifier in unused_local_variables:
        del unused_local_variables[identifier]
        break

  def _ReportMissingProvides(self, missing_provides, token, need_blank_line):
    """Reports missing provide statements to the error handler.

    Args:
      missing_provides: A dictionary mapping each namespace (a string) that
          should be provided but is not to the first line number (an integer)
          where it is required.
      token: The token where the error was detected (also where the new
          provides will be inserted).
      need_blank_line: Whether a blank line needs to be inserted after the new
          provides are inserted. May be True, False, or None, where None
          indicates that the insert location is unknown.
    """
    missing_provides_msg = 'Missing the following goog.provide statements:\n'
    missing_provides_msg += '\n'.join(['goog.provide(\'%s\');' % x for x in
                                       sorted(missing_provides)])
    missing_provides_msg += '\n'

    missing_provides_msg += '\nFirst line where provided: \n'
    missing_provides_msg += '\n'.join(
        ['  %s : line %d' % (x, missing_provides[x]) for x in
         sorted(missing_provides)])
    missing_provides_msg += '\n'

    self._HandleError(
        errors.MISSING_GOOG_PROVIDE,
        missing_provides_msg,
        token, position=Position.AtBeginning(),
        fix_data=(missing_provides.keys(), need_blank_line))
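
  # Illustrative example (added, not in the original source): for
  # missing_provides == {'goog.Foo': 12}, the handler above receives roughly:
  #
  #   Missing the following goog.provide statements:
  #   goog.provide('goog.Foo');
  #
  #   First line where provided:
  #     goog.Foo : line 12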
""" missing_requires_msg = 'Missing the following goog.require statements:\n' missing_requires_msg += '\n'.join(['goog.require(\'%s\');' % x for x in sorted(missing_requires)]) missing_requires_msg += '\n' missing_requires_msg += '\nFirst line where required: \n' missing_requires_msg += '\n'.join( [' %s : line %d' % (x, missing_requires[x]) for x in sorted(missing_requires)]) missing_requires_msg += '\n' self._HandleError( errors.MISSING_GOOG_REQUIRE, missing_requires_msg, token, position=Position.AtBeginning(), fix_data=(missing_requires.keys(), need_blank_line)) def Finalize(self, state): """Perform all checks that need to occur after all lines are processed.""" # Call the base class's Finalize function. super(JavaScriptLintRules, self).Finalize(state) if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS): # Report an error for any declared private member that was never used. unused_private_members = (self._declared_private_members - self._used_private_members) for variable in unused_private_members: token = self._declared_private_member_tokens[variable] self._HandleError(errors.UNUSED_PRIVATE_MEMBER, 'Unused private member: %s.' % token.string, token) # Clear state to prepare for the next file. self._declared_private_member_tokens = {} self._declared_private_members = set() self._used_private_members = set() namespaces_info = self._namespaces_info if namespaces_info is not None: # If there are no provide or require statements, missing provides and # requires should be reported on line 1. if (not namespaces_info.GetProvidedNamespaces() and not namespaces_info.GetRequiredNamespaces()): missing_provides = namespaces_info.GetMissingProvides() if missing_provides: self._ReportMissingProvides( missing_provides, state.GetFirstToken(), None) missing_requires = namespaces_info.GetMissingRequires() if missing_requires: self._ReportMissingRequires( missing_requires, state.GetFirstToken(), None) self._CheckSortedRequiresProvides(state.GetFirstToken()) def _CheckSortedRequiresProvides(self, token): """Checks that all goog.require and goog.provide statements are sorted. Note that this method needs to be run after missing statements are added to preserve alphabetical order. Args: token: The first token in the token stream. """ sorter = requireprovidesorter.RequireProvideSorter() first_provide_token = sorter.CheckProvides(token) if first_provide_token: new_order = sorter.GetFixedProvideString(first_provide_token) self._HandleError( errors.GOOG_PROVIDES_NOT_ALPHABETIZED, 'goog.provide classes must be alphabetized. The correct code is:\n' + new_order, first_provide_token, position=Position.AtBeginning(), fix_data=first_provide_token) first_require_token = sorter.CheckRequires(token) if first_require_token: new_order = sorter.GetFixedRequireString(first_require_token) self._HandleError( errors.GOOG_REQUIRES_NOT_ALPHABETIZED, 'goog.require classes must be alphabetized. The correct code is:\n' + new_order, first_require_token, position=Position.AtBeginning(), fix_data=first_require_token) def GetLongLineExceptions(self): """Gets a list of regexps for lines which can be longer than the limit. Returns: A list of regexps, used as matches (rather than searches). """ return [ re.compile(r'goog\.require\(.+\);?\s*$'), re.compile(r'goog\.provide\(.+\);?\s*$'), re.compile(r'[\s/*]*@visibility\s*{.*}[\s*/]*$'), ] closure_linter-2.3.13/closure_linter/tokenutil.py0000750014730400116100000004671412247733554021647 0ustar ajpeng00000000000000#!/usr/bin/env python # # Copyright 2007 The Closure Linter Authors. 
closure_linter-2.3.13/closure_linter/tokenutil.py0000750014730400116100000004671412247733554021637 0ustar ajpeng00000000000000#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Token utility functions."""

__author__ = ('robbyw@google.com (Robert Walker)',
              'ajp@google.com (Andy Perelson)')

import copy
import StringIO

from closure_linter.common import tokens
from closure_linter.javascripttokens import JavaScriptToken
from closure_linter.javascripttokens import JavaScriptTokenType

# Shorthand
Type = tokens.TokenType


def GetFirstTokenInSameLine(token):
  """Returns the first token in the same line as token.

  Args:
    token: Any token in the line.

  Returns:
    The first token in the same line as token.
  """
  while not token.IsFirstInLine():
    token = token.previous
  return token


def GetFirstTokenInPreviousLine(token):
  """Returns the first token in the line preceding the line of token.

  Args:
    token: Any token in the line.

  Returns:
    The first token in the previous line of token, or None if token is on the
    first line.
  """
  first_in_line = GetFirstTokenInSameLine(token)
  if first_in_line.previous:
    return GetFirstTokenInSameLine(first_in_line.previous)

  return None


def GetLastTokenInSameLine(token):
  """Returns the last token in the same line as token.

  Args:
    token: Any token in the line.

  Returns:
    The last token in the same line as token.
  """
  while not token.IsLastInLine():
    token = token.next
  return token


def GetAllTokensInSameLine(token):
  """Returns all tokens in the same line as the given token.

  Args:
    token: Any token in the line.

  Returns:
    All tokens on the same line as the given token.
  """
  first_token = GetFirstTokenInSameLine(token)
  last_token = GetLastTokenInSameLine(token)

  tokens_in_line = []
  while first_token != last_token:
    tokens_in_line.append(first_token)
    first_token = first_token.next
  tokens_in_line.append(last_token)

  return tokens_in_line
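

# Illustrative sketch (added example, not part of the original module): the
# line-navigation helpers above operate on the doubly-linked token list that
# the tokenizer normally builds. Here the previous/next links are set by hand;
# the JavaScriptToken constructor arguments match its usage elsewhere in this
# file, and the token types are assumptions chosen for the demonstration.
def _ExampleLineNavigation():
  line = 'a b'
  a = JavaScriptToken('a', Type.NORMAL, line, 1)
  space = JavaScriptToken(' ', Type.WHITESPACE, line, 1)
  b = JavaScriptToken('b', Type.NORMAL, line, 1)
  a.next, space.previous = space, a
  space.next, b.previous = b, space

  # Walk to the ends of the line from any token within it.
  assert GetFirstTokenInSameLine(b) is a
  assert GetLastTokenInSameLine(a) is b
  assert GetAllTokensInSameLine(space) == [a, space, b]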
""" token = start_token if reverse: while token and (distance is None or distance > 0): previous = token.previous if previous: if func(previous): return previous if end_func and end_func(previous): return None token = previous if distance is not None: distance -= 1 else: while token and (distance is None or distance > 0): next_token = token.next if next_token: if func(next_token): return next_token if end_func and end_func(next_token): return None token = next_token if distance is not None: distance -= 1 return None def Search(start_token, token_types, distance=None, reverse=False): """Returns the first token of type in token_types within distance. Args: start_token: The token to start searching from token_types: The allowable types of the token being searched for distance: The number of tokens to look through before failing search. Must be positive. If unspecified, will search until the end of the token chain reverse: When true, search the tokens before this one instead of the tokens after it Returns: The first token of any type in token_types within distance of this token, or None if no such token is found. """ return CustomSearch(start_token, lambda token: token.IsAnyType(token_types), None, distance, reverse) def SearchExcept(start_token, token_types, distance=None, reverse=False): """Returns the first token not of any type in token_types within distance. Args: start_token: The token to start searching from token_types: The unallowable types of the token being searched for distance: The number of tokens to look through before failing search. Must be positive. If unspecified, will search until the end of the token chain reverse: When true, search the tokens before this one instead of the tokens after it Returns: The first token of any type in token_types within distance of this token, or None if no such token is found. """ return CustomSearch(start_token, lambda token: not token.IsAnyType(token_types), None, distance, reverse) def SearchUntil(start_token, token_types, end_types, distance=None, reverse=False): """Returns the first token of type in token_types before a token of end_type. Args: start_token: The token to start searching from. token_types: The allowable types of the token being searched for. end_types: Types of tokens to abort search if we find. distance: The number of tokens to look through before failing search. Must be positive. If unspecified, will search until the end of the token chain reverse: When true, search the tokens before this one instead of the tokens after it Returns: The first token of any type in token_types within distance of this token before any tokens of type in end_type, or None if no such token is found. """ return CustomSearch(start_token, lambda token: token.IsAnyType(token_types), lambda token: token.IsAnyType(end_types), distance, reverse) def DeleteToken(token): """Deletes the given token from the linked list. Args: token: The token to delete """ # When deleting a token, we do not update the deleted token itself to make # sure the previous and next pointers are still pointing to tokens which are # not deleted. Also it is very hard to keep track of all previously deleted # tokens to update them when their pointers become invalid. So we add this # flag that any token linked list iteration logic can skip deleted node safely # when its current token is deleted. 


def DeleteToken(token):
  """Deletes the given token from the linked list.

  Args:
    token: The token to delete.
  """
  # When deleting a token, we do not update the deleted token itself, to make
  # sure the previous and next pointers are still pointing to tokens which are
  # not deleted. Also, it is very hard to keep track of all previously deleted
  # tokens and update them when their pointers become invalid. So we add this
  # flag, so that any token linked-list iteration logic can safely skip a
  # deleted node when its current token is deleted.
  token.is_deleted = True
  if token.previous:
    token.previous.next = token.next

  if token.next:
    token.next.previous = token.previous

    following_token = token.next
    while following_token and following_token.metadata.last_code == token:
      following_token.metadata.last_code = token.metadata.last_code
      following_token = following_token.next


def DeleteTokens(token, token_count):
  """Deletes the given number of tokens starting with the given token.

  Args:
    token: The token to start deleting at.
    token_count: The total number of tokens to delete.
  """
  for i in xrange(1, token_count):
    DeleteToken(token.next)
  DeleteToken(token)


def InsertTokenBefore(new_token, token):
  """Insert new_token before token.

  Args:
    new_token: A token to be added to the stream.
    token: A token already in the stream.
  """
  new_token.next = token
  new_token.previous = token.previous

  new_token.metadata = copy.copy(token.metadata)

  if new_token.IsCode():
    old_last_code = token.metadata.last_code
    following_token = token
    while (following_token and
           following_token.metadata.last_code == old_last_code):
      following_token.metadata.last_code = new_token
      following_token = following_token.next

  token.previous = new_token
  if new_token.previous:
    new_token.previous.next = new_token

  if new_token.start_index is None:
    if new_token.line_number == token.line_number:
      new_token.start_index = token.start_index
    else:
      previous_token = new_token.previous
      if previous_token:
        new_token.start_index = (previous_token.start_index +
                                 len(previous_token.string))
      else:
        new_token.start_index = 0

    iterator = new_token.next
    while iterator and iterator.line_number == new_token.line_number:
      iterator.start_index += len(new_token.string)
      iterator = iterator.next


def InsertTokenAfter(new_token, token):
  """Insert new_token after token.

  Args:
    new_token: A token to be added to the stream.
    token: A token already in the stream.
  """
  new_token.previous = token
  new_token.next = token.next

  new_token.metadata = copy.copy(token.metadata)

  if token.IsCode():
    new_token.metadata.last_code = token

  if new_token.IsCode():
    following_token = token.next
    while following_token and following_token.metadata.last_code == token:
      following_token.metadata.last_code = new_token
      following_token = following_token.next

  token.next = new_token
  if new_token.next:
    new_token.next.previous = new_token

  if new_token.start_index is None:
    if new_token.line_number == token.line_number:
      new_token.start_index = token.start_index + len(token.string)
    else:
      new_token.start_index = 0

    iterator = new_token.next
    while iterator and iterator.line_number == new_token.line_number:
      iterator.start_index += len(new_token.string)
      iterator = iterator.next


def InsertTokensAfter(new_tokens, token):
  """Insert multiple tokens after token.

  Args:
    new_tokens: An array of tokens to be added to the stream.
    token: A token already in the stream.
  """
  # TODO(user): It would be nicer to have InsertTokenAfter defer to here
  # instead of vice-versa.
  current_token = token
  for new_token in new_tokens:
    InsertTokenAfter(new_token, current_token)
    current_token = new_token


def InsertSpaceTokenAfter(token):
  """Inserts a space token after the given token.

  Args:
    token: The token to insert a space token after.

  Returns:
    A single space token.
  """
  space_token = JavaScriptToken(' ', Type.WHITESPACE, token.line,
                                token.line_number)
  InsertTokenAfter(space_token, token)
  return space_token
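

# Illustrative note (added, not in the original source) on the last_code
# bookkeeping performed by the insert/delete helpers above. Each token's
# metadata caches the most recent code token before it, so splicing the linked
# list has to rewrite that cache for the tokens that follow, e.g.:
#
#   before DeleteToken(B):  A(code) -- B(code) -- C(comment, last_code=B)
#   after:                  A(code) -- C(comment, last_code=A)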


def InsertBlankLineAfter(token):
  """Inserts a blank line after the given token.

  Args:
    token: The token to insert a blank line after.
  """
  blank_token = JavaScriptToken('', Type.BLANK_LINE, '',
                                token.line_number + 1)
  InsertLineAfter(token, [blank_token])


def InsertLineAfter(token, new_tokens):
  """Inserts a new line consisting of new_tokens after the given token.

  Args:
    token: The token to insert after.
    new_tokens: The tokens that will make up the new line.
  """
  insert_location = token
  for new_token in new_tokens:
    InsertTokenAfter(new_token, insert_location)
    insert_location = new_token

  # Update all subsequent line numbers.
  next_token = new_tokens[-1].next
  while next_token:
    next_token.line_number += 1
    next_token = next_token.next


def SplitToken(token, position):
  """Splits the token into two tokens at position.

  Args:
    token: The token to split.
    position: The position to split at. Will be the beginning of second token.

  Returns:
    The new second token.
  """
  new_string = token.string[position:]
  token.string = token.string[:position]

  new_token = JavaScriptToken(new_string, token.type, token.line,
                              token.line_number)
  InsertTokenAfter(new_token, token)

  return new_token


def Compare(token1, token2):
  """Compares two tokens and determines their relative order.

  Args:
    token1: The first token to compare.
    token2: The second token to compare.

  Returns:
    A negative integer, zero, or a positive integer as the first token is
    before, equal, or after the second in the token stream.
  """
  if token2.line_number != token1.line_number:
    return token1.line_number - token2.line_number
  else:
    return token1.start_index - token2.start_index


def GoogScopeOrNoneFromStartBlock(token):
  """Determines if the given START_BLOCK is part of a goog.scope statement.

  Args:
    token: A token of type START_BLOCK.

  Returns:
    The goog.scope function call token, or None if such call doesn't exist.
  """
  if token.type != JavaScriptTokenType.START_BLOCK:
    return None

  # Search for a goog.scope statement, which will be 5 tokens before the
  # block. Illustration of the tokens found prior to the start block:
  # goog.scope(function() {
  #      5    4    3   21 ^
  maybe_goog_scope = token
  for unused_i in xrange(5):
    maybe_goog_scope = (maybe_goog_scope.previous if maybe_goog_scope and
                        maybe_goog_scope.previous else None)
  if maybe_goog_scope and maybe_goog_scope.string == 'goog.scope':
    return maybe_goog_scope


def GetTokenRange(start_token, end_token):
  """Returns a list of tokens between the two given, inclusive.

  Args:
    start_token: Start token in the range.
    end_token: End token in the range.

  Returns:
    A list of tokens, in order, from start_token to end_token (including start
    and end). Returns None if the tokens do not describe a valid range.
  """
  token_range = []
  token = start_token

  while token:
    token_range.append(token)

    if token == end_token:
      return token_range

    token = token.next


def TokensToString(token_iterable):
  """Convert a number of tokens into a string.

  Newlines will be inserted whenever the line_number of two neighboring
  strings differ.

  Args:
    token_iterable: The tokens to turn to a string.

  Returns:
    A string representation of the given tokens.
  """
  buf = StringIO.StringIO()
  token_list = list(token_iterable)
  if not token_list:
    return ''

  line_number = token_list[0].line_number

  for token in token_list:

    while line_number < token.line_number:
      line_number += 1
      buf.write('\n')

    if line_number > token.line_number:
      line_number = token.line_number
      buf.write('\n')

    buf.write(token.string)

  return buf.getvalue()
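

# Illustrative sketch (added example, not part of the original module) of the
# ordering and stringification helpers above. start_index is normally set by
# the tokenizer; here it is assigned by hand for the demonstration.
def _ExampleCompareAndStringify():
  first = JavaScriptToken('a', Type.NORMAL, 'a b', 1)
  second = JavaScriptToken('b', Type.NORMAL, 'a b', 1)
  first.start_index, second.start_index = 0, 2

  # 'a' comes before 'b' on the same line, so Compare is negative.
  assert Compare(first, second) < 0

  # A token on line 2 forces a newline in the rendered output; tokens on the
  # same line are concatenated directly.
  third = JavaScriptToken('c', Type.NORMAL, 'c', 2)
  assert TokensToString([first, second, third]) == 'ab\nc'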


def GetPreviousCodeToken(token):
  """Returns the code token before the specified token.

  Args:
    token: A token.

  Returns:
    The code token before the specified token, or None if no such token
    exists.
  """
  return CustomSearch(
      token,
      lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES,
      reverse=True)


def GetNextCodeToken(token):
  """Returns the next code token after the specified token.

  Args:
    token: A token.

  Returns:
    The next code token after the specified token, or None if no such token
    exists.
  """
  return CustomSearch(
      token,
      lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES,
      reverse=False)


def GetIdentifierStart(token):
  """Returns the first token in an identifier.

  Given a token which is part of an identifier, returns the token at the start
  of the identifier.

  Args:
    token: A token which is part of an identifier.

  Returns:
    The token at the start of the identifier, or None if the identifier was
    not of the form 'a.b.c' (e.g. "['a']['b'].c").
  """
  start_token = token
  previous_code_token = GetPreviousCodeToken(token)

  while (previous_code_token and (
      previous_code_token.IsType(JavaScriptTokenType.IDENTIFIER) or
      _IsDot(previous_code_token))):
    start_token = previous_code_token
    previous_code_token = GetPreviousCodeToken(previous_code_token)

  if _IsDot(start_token):
    return None

  return start_token


def GetIdentifierForToken(token):
  """Get the symbol specified by a token.

  Given a token, this function concatenates any parts of the identified symbol
  that are split by whitespace or a newline.

  The function will return None if the token is not the first token of an
  identifier.

  Args:
    token: The first token of a symbol.

  Returns:
    The whole symbol, as a string.
  """
  # Search backward to determine if this token is the first token of the
  # identifier. If it is not the first token, return None to signal that this
  # token should be ignored.
  prev_token = token.previous
  while prev_token:
    if (prev_token.IsType(JavaScriptTokenType.IDENTIFIER) or
        _IsDot(prev_token)):
      return None

    if (prev_token.IsType(tokens.TokenType.WHITESPACE) or
        prev_token.IsAnyType(JavaScriptTokenType.COMMENT_TYPES)):
      prev_token = prev_token.previous
    else:
      break

  # A "function foo()" declaration.
  if token.type is JavaScriptTokenType.FUNCTION_NAME:
    return token.string

  # A "var foo" declaration (if the previous token is 'var').
  previous_code_token = GetPreviousCodeToken(token)

  if previous_code_token and previous_code_token.IsKeyword('var'):
    return token.string

  # Otherwise, this is potentially a namespaced (goog.foo.bar) identifier that
  # could span multiple lines or be broken up by whitespace. We need
  # to concatenate.
  identifier_types = set([
      JavaScriptTokenType.IDENTIFIER,
      JavaScriptTokenType.SIMPLE_LVALUE
      ])

  assert token.type in identifier_types

  # Start with the first token.
  symbol_tokens = [token]

  if token.next:
    for t in token.next:
      last_symbol_token = symbol_tokens[-1]

      # An identifier is part of the previous symbol if it has a trailing
      # dot.
      if t.type in identifier_types:
        if last_symbol_token.string.endswith('.'):
          symbol_tokens.append(t)
          continue
        else:
          break

      # A dot is part of the previous symbol if it does not have a trailing
      # dot.
      if _IsDot(t):
        if not last_symbol_token.string.endswith('.'):
          symbol_tokens.append(t)
          continue
        else:
          break

      # Skip any whitespace.
      if t.type in JavaScriptTokenType.NON_CODE_TYPES:
        continue

      # This is the end of the identifier. Stop iterating.
      break

  if symbol_tokens:
    return ''.join([t.string for t in symbol_tokens])
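

# Illustrative note (added, not in the original source): GetIdentifierForToken
# lets the linter treat an assignment that is wrapped across lines, such as
#
#   goog.foo.
#       bar = function() {};
#
# as the single symbol 'goog.foo.bar', by concatenating identifier and dot
# tokens while skipping the intervening whitespace and newline tokens.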


def GetStringAfterToken(token):
  """Gets the string after the given token.

  The search is limited to the end of the string: in the case of an empty
  string there is no Type.STRING_TEXT token, and we do not want to return the
  text of some later string. E.g. in:

    a = '';
    b = 'test';

  when searching for the string after 'a', an unbounded search would find
  'test' even though an empty string comes first.

  Args:
    token: The search is done after this token.

  Returns:
    The string found after the token, or None if the string is empty or no
    string is found (in both cases there is no Type.STRING_TEXT token).
  """
  string_token = SearchUntil(token, JavaScriptTokenType.STRING_TEXT,
                             [JavaScriptTokenType.SINGLE_QUOTE_STRING_END,
                              JavaScriptTokenType.DOUBLE_QUOTE_STRING_END])
  if string_token:
    return string_token.string
  else:
    return None


def _IsDot(token):
  """Whether the token represents a "dot" operator (foo.bar)."""
  return token.type is tokens.TokenType.NORMAL and token.string == '.'
closure_linter-2.3.13/README0000640014730400116100000000051212247733554015066 0ustar ajpeng00000000000000This repository contains the Closure Linter - a style checker for JavaScript.

To install the application, run

  python ./setup.py install

After installing, you get two helper applications installed into
/usr/local/bin:

  gjslint.py - runs the linter and checks for errors
  fixjsstyle.py - tries to fix errors automatically
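
For example (assuming the install step above put the scripts on your PATH),
check a file with

  gjslint.py path/to/file.js

and automatically fix the simple errors it reports with

  fixjsstyle.py path/to/file.js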