summaryrefslogtreecommitdiff
path: root/tools/closure_linter
diff options
context:
space:
mode:
Diffstat (limited to 'tools/closure_linter')
-rw-r--r--tools/closure_linter/AUTHORS6
-rw-r--r--tools/closure_linter/LICENSE176
-rw-r--r--tools/closure_linter/README9
-rw-r--r--tools/closure_linter/build/lib/closure_linter/__init__.py16
-rw-r--r--tools/closure_linter/build/lib/closure_linter/aliaspass.py248
-rw-r--r--tools/closure_linter/build/lib/closure_linter/aliaspass_test.py191
-rw-r--r--tools/closure_linter/build/lib/closure_linter/checker.py108
-rw-r--r--tools/closure_linter/build/lib/closure_linter/checkerbase.py192
-rw-r--r--tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo.py578
-rw-r--r--tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo_test.py873
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/__init__.py16
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/error.py65
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/erroraccumulator.py46
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/errorhandler.py61
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/erroroutput.py52
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/filetestcase.py115
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/htmlutil.py170
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/lintrunner.py39
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/matcher.py60
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/position.py126
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/simplefileflags.py190
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/tokenizer.py185
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/tokens.py145
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/tokens_test.py113
-rw-r--r--tools/closure_linter/build/lib/closure_linter/ecmalintrules.py844
-rw-r--r--tools/closure_linter/build/lib/closure_linter/ecmametadatapass.py574
-rw-r--r--tools/closure_linter/build/lib/closure_linter/error_check.py95
-rw-r--r--tools/closure_linter/build/lib/closure_linter/error_fixer.py618
-rw-r--r--tools/closure_linter/build/lib/closure_linter/error_fixer_test.py57
-rw-r--r--tools/closure_linter/build/lib/closure_linter/errorrecord.py66
-rw-r--r--tools/closure_linter/build/lib/closure_linter/errorrules.py72
-rw-r--r--tools/closure_linter/build/lib/closure_linter/errorrules_test.py117
-rw-r--r--tools/closure_linter/build/lib/closure_linter/errors.py154
-rw-r--r--tools/closure_linter/build/lib/closure_linter/fixjsstyle.py66
-rw-r--r--tools/closure_linter/build/lib/closure_linter/fixjsstyle_test.py615
-rw-r--r--tools/closure_linter/build/lib/closure_linter/full_test.py121
-rw-r--r--tools/closure_linter/build/lib/closure_linter/gjslint.py319
-rw-r--r--tools/closure_linter/build/lib/closure_linter/indentation.py617
-rw-r--r--tools/closure_linter/build/lib/closure_linter/javascriptlintrules.py754
-rw-r--r--tools/closure_linter/build/lib/closure_linter/javascriptstatetracker.py150
-rw-r--r--tools/closure_linter/build/lib/closure_linter/javascriptstatetracker_test.py278
-rw-r--r--tools/closure_linter/build/lib/closure_linter/javascripttokenizer.py463
-rw-r--r--tools/closure_linter/build/lib/closure_linter/javascripttokens.py153
-rw-r--r--tools/closure_linter/build/lib/closure_linter/not_strict_test.py74
-rw-r--r--tools/closure_linter/build/lib/closure_linter/requireprovidesorter.py329
-rw-r--r--tools/closure_linter/build/lib/closure_linter/requireprovidesorter_test.py155
-rw-r--r--tools/closure_linter/build/lib/closure_linter/runner.py198
-rw-r--r--tools/closure_linter/build/lib/closure_linter/runner_test.py101
-rw-r--r--tools/closure_linter/build/lib/closure_linter/scopeutil.py206
-rw-r--r--tools/closure_linter/build/lib/closure_linter/scopeutil_test.py222
-rw-r--r--tools/closure_linter/build/lib/closure_linter/statetracker.py1294
-rw-r--r--tools/closure_linter/build/lib/closure_linter/statetracker_test.py123
-rw-r--r--tools/closure_linter/build/lib/closure_linter/strict_test.py67
-rw-r--r--tools/closure_linter/build/lib/closure_linter/testutil.py94
-rw-r--r--tools/closure_linter/build/lib/closure_linter/tokenutil.py697
-rw-r--r--tools/closure_linter/build/lib/closure_linter/tokenutil_test.py297
-rw-r--r--tools/closure_linter/build/lib/closure_linter/typeannotation.py401
-rw-r--r--tools/closure_linter/build/lib/closure_linter/typeannotation_test.py232
-rw-r--r--tools/closure_linter/closure_linter.egg-info/PKG-INFO10
-rw-r--r--tools/closure_linter/closure_linter.egg-info/SOURCES.txt63
-rw-r--r--tools/closure_linter/closure_linter.egg-info/dependency_links.txt1
-rw-r--r--tools/closure_linter/closure_linter.egg-info/entry_points.txt4
-rw-r--r--tools/closure_linter/closure_linter.egg-info/requires.txt1
-rw-r--r--tools/closure_linter/closure_linter.egg-info/top_level.txt1
-rwxr-xr-xtools/closure_linter/closure_linter/__init__.py16
-rw-r--r--tools/closure_linter/closure_linter/aliaspass.py248
-rwxr-xr-xtools/closure_linter/closure_linter/aliaspass_test.py191
-rwxr-xr-xtools/closure_linter/closure_linter/checker.py108
-rwxr-xr-xtools/closure_linter/closure_linter/checkerbase.py192
-rwxr-xr-xtools/closure_linter/closure_linter/closurizednamespacesinfo.py578
-rwxr-xr-xtools/closure_linter/closure_linter/closurizednamespacesinfo_test.py873
-rwxr-xr-xtools/closure_linter/closure_linter/common/__init__.py16
-rwxr-xr-xtools/closure_linter/closure_linter/common/error.py65
-rwxr-xr-xtools/closure_linter/closure_linter/common/erroraccumulator.py46
-rwxr-xr-xtools/closure_linter/closure_linter/common/errorhandler.py61
-rw-r--r--tools/closure_linter/closure_linter/common/erroroutput.py52
-rwxr-xr-xtools/closure_linter/closure_linter/common/filetestcase.py115
-rwxr-xr-xtools/closure_linter/closure_linter/common/htmlutil.py170
-rwxr-xr-xtools/closure_linter/closure_linter/common/lintrunner.py39
-rwxr-xr-xtools/closure_linter/closure_linter/common/matcher.py60
-rwxr-xr-xtools/closure_linter/closure_linter/common/position.py126
-rwxr-xr-xtools/closure_linter/closure_linter/common/simplefileflags.py190
-rwxr-xr-xtools/closure_linter/closure_linter/common/tokenizer.py185
-rwxr-xr-xtools/closure_linter/closure_linter/common/tokens.py145
-rw-r--r--tools/closure_linter/closure_linter/common/tokens_test.py113
-rwxr-xr-xtools/closure_linter/closure_linter/ecmalintrules.py844
-rwxr-xr-xtools/closure_linter/closure_linter/ecmametadatapass.py574
-rwxr-xr-xtools/closure_linter/closure_linter/error_check.py95
-rwxr-xr-xtools/closure_linter/closure_linter/error_fixer.py618
-rw-r--r--tools/closure_linter/closure_linter/error_fixer_test.py57
-rw-r--r--tools/closure_linter/closure_linter/errorrecord.py66
-rwxr-xr-xtools/closure_linter/closure_linter/errorrules.py72
-rw-r--r--tools/closure_linter/closure_linter/errorrules_test.py117
-rwxr-xr-xtools/closure_linter/closure_linter/errors.py154
-rwxr-xr-xtools/closure_linter/closure_linter/fixjsstyle.py66
-rwxr-xr-xtools/closure_linter/closure_linter/fixjsstyle_test.py615
-rwxr-xr-xtools/closure_linter/closure_linter/full_test.py121
-rwxr-xr-xtools/closure_linter/closure_linter/gjslint.py319
-rwxr-xr-xtools/closure_linter/closure_linter/indentation.py617
-rw-r--r--tools/closure_linter/closure_linter/javascriptlintrules.py754
-rwxr-xr-xtools/closure_linter/closure_linter/javascriptstatetracker.py150
-rw-r--r--tools/closure_linter/closure_linter/javascriptstatetracker_test.py278
-rwxr-xr-xtools/closure_linter/closure_linter/javascripttokenizer.py463
-rwxr-xr-xtools/closure_linter/closure_linter/javascripttokens.py153
-rwxr-xr-xtools/closure_linter/closure_linter/not_strict_test.py74
-rwxr-xr-xtools/closure_linter/closure_linter/requireprovidesorter.py329
-rw-r--r--tools/closure_linter/closure_linter/requireprovidesorter_test.py155
-rw-r--r--tools/closure_linter/closure_linter/runner.py198
-rw-r--r--tools/closure_linter/closure_linter/runner_test.py101
-rw-r--r--tools/closure_linter/closure_linter/scopeutil.py206
-rw-r--r--tools/closure_linter/closure_linter/scopeutil_test.py222
-rw-r--r--tools/closure_linter/closure_linter/statetracker.py1294
-rwxr-xr-xtools/closure_linter/closure_linter/statetracker_test.py123
-rwxr-xr-xtools/closure_linter/closure_linter/strict_test.py67
-rw-r--r--tools/closure_linter/closure_linter/testdata/all_js_wrapped.js5
-rw-r--r--tools/closure_linter/closure_linter/testdata/blank_lines.js104
-rw-r--r--tools/closure_linter/closure_linter/testdata/bugs.js43
-rw-r--r--tools/closure_linter/closure_linter/testdata/empty_file.js0
-rw-r--r--tools/closure_linter/closure_linter/testdata/ends_with_block.js19
-rw-r--r--tools/closure_linter/closure_linter/testdata/externs.js34
-rw-r--r--tools/closure_linter/closure_linter/testdata/externs_jsdoc.js37
-rw-r--r--tools/closure_linter/closure_linter/testdata/file_level_comment.js13
-rw-r--r--tools/closure_linter/closure_linter/testdata/fixjsstyle.html.in.html52
-rw-r--r--tools/closure_linter/closure_linter/testdata/fixjsstyle.html.out.html51
-rw-r--r--tools/closure_linter/closure_linter/testdata/fixjsstyle.in.js293
-rw-r--r--tools/closure_linter/closure_linter/testdata/fixjsstyle.indentation.out.js465
-rw-r--r--tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.in.js21
-rw-r--r--tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.out.js21
-rw-r--r--tools/closure_linter/closure_linter/testdata/fixjsstyle.out.js310
-rw-r--r--tools/closure_linter/closure_linter/testdata/goog_scope.js63
-rw-r--r--tools/closure_linter/closure_linter/testdata/html_parse_error.html32
-rw-r--r--tools/closure_linter/closure_linter/testdata/indentation.js465
-rw-r--r--tools/closure_linter/closure_linter/testdata/interface.js89
-rw-r--r--tools/closure_linter/closure_linter/testdata/jsdoc.js1455
-rw-r--r--tools/closure_linter/closure_linter/testdata/limited_doc_checks.js29
-rw-r--r--tools/closure_linter/closure_linter/testdata/minimal.js1
-rw-r--r--tools/closure_linter/closure_linter/testdata/not_strict.js42
-rw-r--r--tools/closure_linter/closure_linter/testdata/other.js459
-rw-r--r--tools/closure_linter/closure_linter/testdata/provide_blank.js29
-rw-r--r--tools/closure_linter/closure_linter/testdata/provide_extra.js39
-rw-r--r--tools/closure_linter/closure_linter/testdata/provide_missing.js40
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_alias.js14
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_all_caps.js30
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_blank.js29
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_extra.js35
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_function.js22
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_function_missing.js24
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_function_through_both.js23
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_function_through_namespace.js22
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_interface.js31
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_interface_alias.js34
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_interface_base.js31
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_lower_case.js30
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_missing.js40
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_numeric.js30
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_provide_blank.js31
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_provide_missing.js76
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_provide_ok.js214
-rw-r--r--tools/closure_linter/closure_linter/testdata/semicolon_missing.js18
-rw-r--r--tools/closure_linter/closure_linter/testdata/simple.html33
-rw-r--r--tools/closure_linter/closure_linter/testdata/spaces.js354
-rw-r--r--tools/closure_linter/closure_linter/testdata/tokenizer.js78
-rw-r--r--tools/closure_linter/closure_linter/testdata/unparseable.js44
-rw-r--r--tools/closure_linter/closure_linter/testdata/unused_local_variables.js88
-rw-r--r--tools/closure_linter/closure_linter/testdata/unused_private_members.js205
-rw-r--r--tools/closure_linter/closure_linter/testdata/utf8.html26
-rw-r--r--tools/closure_linter/closure_linter/testutil.py94
-rwxr-xr-xtools/closure_linter/closure_linter/tokenutil.py697
-rw-r--r--tools/closure_linter/closure_linter/tokenutil_test.py297
-rw-r--r--tools/closure_linter/closure_linter/typeannotation.py401
-rwxr-xr-xtools/closure_linter/closure_linter/typeannotation_test.py232
-rw-r--r--tools/closure_linter/dist/closure_linter-2.3.17-py2.7.eggbin315656 -> 0 bytes
-rwxr-xr-xtools/closure_linter/setup.py38
173 files changed, 0 insertions, 34506 deletions
diff --git a/tools/closure_linter/AUTHORS b/tools/closure_linter/AUTHORS
deleted file mode 100644
index 2f72bd6b2f..0000000000
--- a/tools/closure_linter/AUTHORS
+++ /dev/null
@@ -1,6 +0,0 @@
-# This is a list of contributors to the Closure Linter.
-
-# Names should be added to this file like so:
-# Name or Organization <email address>
-
-Google Inc.
diff --git a/tools/closure_linter/LICENSE b/tools/closure_linter/LICENSE
deleted file mode 100644
index d9a10c0d8e..0000000000
--- a/tools/closure_linter/LICENSE
+++ /dev/null
@@ -1,176 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
diff --git a/tools/closure_linter/README b/tools/closure_linter/README
deleted file mode 100644
index 4a21b2defc..0000000000
--- a/tools/closure_linter/README
+++ /dev/null
@@ -1,9 +0,0 @@
-This repository contains the Closure Linter - a style checker for JavaScript.
-
-To install the application, run
- python ./setup.py install
-
-After installing, you get two helper applications installed into /usr/local/bin:
-
- gjslint.py - runs the linter and checks for errors
- fixjsstyle.py - tries to fix errors automatically
diff --git a/tools/closure_linter/build/lib/closure_linter/__init__.py b/tools/closure_linter/build/lib/closure_linter/__init__.py
deleted file mode 100644
index 1798c8cfff..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Package indicator for gjslint."""
diff --git a/tools/closure_linter/build/lib/closure_linter/aliaspass.py b/tools/closure_linter/build/lib/closure_linter/aliaspass.py
deleted file mode 100644
index bb37bfa07b..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/aliaspass.py
+++ /dev/null
@@ -1,248 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Pass that scans for goog.scope aliases and lint/usage errors."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = ('nnaze@google.com (Nathan Naze)')
-
-from closure_linter import ecmametadatapass
-from closure_linter import errors
-from closure_linter import javascripttokens
-from closure_linter import scopeutil
-from closure_linter import tokenutil
-from closure_linter.common import error
-
-
-# TODO(nnaze): Create a Pass interface and move this class, EcmaMetaDataPass,
-# and related classes onto it.
-
-
-def _GetAliasForIdentifier(identifier, alias_map):
- """Returns the aliased_symbol name for an identifier.
-
- Example usage:
- >>> alias_map = {'MyClass': 'goog.foo.MyClass'}
- >>> _GetAliasForIdentifier('MyClass.prototype.action', alias_map)
- 'goog.foo.MyClass.prototype.action'
-
- >>> _GetAliasForIdentifier('MyClass.prototype.action', {})
- None
-
- Args:
- identifier: The identifier.
- alias_map: A dictionary mapping a symbol to an alias.
-
- Returns:
- The aliased symbol name or None if not found.
- """
- ns = identifier.split('.', 1)[0]
- aliased_symbol = alias_map.get(ns)
- if aliased_symbol:
- return aliased_symbol + identifier[len(ns):]
-
-
-def _SetTypeAlias(js_type, alias_map):
- """Updates the alias for identifiers in a type.
-
- Args:
- js_type: A typeannotation.TypeAnnotation instance.
- alias_map: A dictionary mapping a symbol to an alias.
- """
- aliased_symbol = _GetAliasForIdentifier(js_type.identifier, alias_map)
- if aliased_symbol:
- js_type.alias = aliased_symbol
- for sub_type in js_type.IterTypes():
- _SetTypeAlias(sub_type, alias_map)
-
-
-class AliasPass(object):
- """Pass to identify goog.scope() usages.
-
- Identifies goog.scope() usages and finds lint/usage errors. Notes any
- aliases of symbols in Closurized namespaces (that is, reassignments
- such as "var MyClass = goog.foo.MyClass;") and annotates identifiers
- when they're using an alias (so they may be expanded to the full symbol
- later -- that "MyClass.prototype.action" refers to
- "goog.foo.MyClass.prototype.action" when expanded.).
- """
-
- def __init__(self, closurized_namespaces=None, error_handler=None):
- """Creates a new pass.
-
- Args:
- closurized_namespaces: A set of Closurized namespaces (e.g. 'goog').
- error_handler: An error handler to report lint errors to.
- """
-
- self._error_handler = error_handler
-
- # If we have namespaces, freeze the set.
- if closurized_namespaces:
- closurized_namespaces = frozenset(closurized_namespaces)
-
- self._closurized_namespaces = closurized_namespaces
-
- def Process(self, start_token):
- """Runs the pass on a token stream.
-
- Args:
- start_token: The first token in the stream.
- """
-
- if start_token is None:
- return
-
- # TODO(nnaze): Add more goog.scope usage checks.
- self._CheckGoogScopeCalls(start_token)
-
- # If we have closurized namespaces, identify aliased identifiers.
- if self._closurized_namespaces:
- context = start_token.metadata.context
- root_context = context.GetRoot()
- self._ProcessRootContext(root_context)
-
- def _CheckGoogScopeCalls(self, start_token):
- """Check goog.scope calls for lint/usage errors."""
-
- def IsScopeToken(token):
- return (token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER and
- token.string == 'goog.scope')
-
- # Find all the goog.scope tokens in the file
- scope_tokens = [t for t in start_token if IsScopeToken(t)]
-
- for token in scope_tokens:
- scope_context = token.metadata.context
-
- if not (scope_context.type == ecmametadatapass.EcmaContext.STATEMENT and
- scope_context.parent.type == ecmametadatapass.EcmaContext.ROOT):
- self._MaybeReportError(
- error.Error(errors.INVALID_USE_OF_GOOG_SCOPE,
- 'goog.scope call not in global scope', token))
-
- # There should be only one goog.scope reference. Register errors for
- # every instance after the first.
- for token in scope_tokens[1:]:
- self._MaybeReportError(
- error.Error(errors.EXTRA_GOOG_SCOPE_USAGE,
- 'More than one goog.scope call in file.', token))
-
- def _MaybeReportError(self, err):
- """Report an error to the handler (if registered)."""
- if self._error_handler:
- self._error_handler.HandleError(err)
-
- @classmethod
- def _YieldAllContexts(cls, context):
- """Yields all contexts that are contained by the given context."""
- yield context
- for child_context in context.children:
- for descendent_child in cls._YieldAllContexts(child_context):
- yield descendent_child
-
- @staticmethod
- def _IsTokenInParentBlock(token, parent_block):
- """Determines whether the given token is contained by the given block.
-
- Args:
- token: A token
- parent_block: An EcmaContext.
-
- Returns:
- Whether the token is in a context that is or is a child of the given
- parent_block context.
- """
- context = token.metadata.context
-
- while context:
- if context is parent_block:
- return True
- context = context.parent
-
- return False
-
- def _ProcessRootContext(self, root_context):
- """Processes all goog.scope blocks under the root context."""
-
- assert root_context.type is ecmametadatapass.EcmaContext.ROOT
-
- # Process aliases in statements in the root scope for goog.module-style
- # aliases.
- global_alias_map = {}
- for context in root_context.children:
- if context.type == ecmametadatapass.EcmaContext.STATEMENT:
- for statement_child in context.children:
- if statement_child.type == ecmametadatapass.EcmaContext.VAR:
- match = scopeutil.MatchModuleAlias(statement_child)
- if match:
- # goog.require aliases cannot use further aliases, the symbol is
- # the second part of match, directly.
- symbol = match[1]
- if scopeutil.IsInClosurizedNamespace(symbol,
- self._closurized_namespaces):
- global_alias_map[match[0]] = symbol
-
- # Process each block to find aliases.
- for context in root_context.children:
- self._ProcessBlock(context, global_alias_map)
-
- def _ProcessBlock(self, context, global_alias_map):
- """Scans a goog.scope block to find aliases and mark alias tokens."""
- alias_map = global_alias_map.copy()
-
- # Iterate over every token in the context. Each token points to one
- # context, but multiple tokens may point to the same context. We only want
- # to check each context once, so keep track of those we've seen.
- seen_contexts = set()
- token = context.start_token
- while token and self._IsTokenInParentBlock(token, context):
- token_context = token.metadata.context if token.metadata else None
-
- # Check to see if this token is an alias.
- if token_context and token_context not in seen_contexts:
- seen_contexts.add(token_context)
-
- # If this is a alias statement in the goog.scope block.
- if (token_context.type == ecmametadatapass.EcmaContext.VAR and
- scopeutil.IsGoogScopeBlock(token_context.parent.parent)):
- match = scopeutil.MatchAlias(token_context)
-
- # If this is an alias, remember it in the map.
- if match:
- alias, symbol = match
- symbol = _GetAliasForIdentifier(symbol, alias_map) or symbol
- if scopeutil.IsInClosurizedNamespace(symbol,
- self._closurized_namespaces):
- alias_map[alias] = symbol
-
- # If this token is an identifier that matches an alias,
- # mark the token as an alias to the original symbol.
- if (token.type is javascripttokens.JavaScriptTokenType.SIMPLE_LVALUE or
- token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER):
- identifier = tokenutil.GetIdentifierForToken(token)
- if identifier:
- aliased_symbol = _GetAliasForIdentifier(identifier, alias_map)
- if aliased_symbol:
- token.metadata.aliased_symbol = aliased_symbol
-
- elif token.type == javascripttokens.JavaScriptTokenType.DOC_FLAG:
- flag = token.attached_object
- if flag and flag.HasType() and flag.jstype:
- _SetTypeAlias(flag.jstype, alias_map)
-
- token = token.next # Get next token
diff --git a/tools/closure_linter/build/lib/closure_linter/aliaspass_test.py b/tools/closure_linter/build/lib/closure_linter/aliaspass_test.py
deleted file mode 100644
index 7042e53487..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/aliaspass_test.py
+++ /dev/null
@@ -1,191 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unit tests for the aliaspass module."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = ('nnaze@google.com (Nathan Naze)')
-
-import unittest as googletest
-
-from closure_linter import aliaspass
-from closure_linter import errors
-from closure_linter import javascriptstatetracker
-from closure_linter import testutil
-from closure_linter.common import erroraccumulator
-
-
-def _GetTokenByLineAndString(start_token, string, line_number):
- for token in start_token:
- if token.line_number == line_number and token.string == string:
- return token
-
-
-class AliasPassTest(googletest.TestCase):
-
- def testInvalidGoogScopeCall(self):
- start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCOPE_SCRIPT)
-
- error_accumulator = erroraccumulator.ErrorAccumulator()
- alias_pass = aliaspass.AliasPass(
- error_handler=error_accumulator)
- alias_pass.Process(start_token)
-
- alias_errors = error_accumulator.GetErrors()
- self.assertEquals(1, len(alias_errors))
-
- alias_error = alias_errors[0]
-
- self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, alias_error.code)
- self.assertEquals('goog.scope', alias_error.token.string)
-
- def testAliasedIdentifiers(self):
- start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_ALIAS_SCRIPT)
- alias_pass = aliaspass.AliasPass(set(['goog', 'myproject']))
- alias_pass.Process(start_token)
-
- alias_token = _GetTokenByLineAndString(start_token, 'Event', 4)
- self.assertTrue(alias_token.metadata.is_alias_definition)
-
- my_class_token = _GetTokenByLineAndString(start_token, 'myClass', 9)
- self.assertIsNone(my_class_token.metadata.aliased_symbol)
-
- component_token = _GetTokenByLineAndString(start_token, 'Component', 17)
- self.assertEquals('goog.ui.Component',
- component_token.metadata.aliased_symbol)
-
- event_token = _GetTokenByLineAndString(start_token, 'Event.Something', 17)
- self.assertEquals('goog.events.Event.Something',
- event_token.metadata.aliased_symbol)
-
- non_closurized_token = _GetTokenByLineAndString(
- start_token, 'NonClosurizedClass', 18)
- self.assertIsNone(non_closurized_token.metadata.aliased_symbol)
-
- long_start_token = _GetTokenByLineAndString(start_token, 'Event', 24)
- self.assertEquals('goog.events.Event.MultilineIdentifier.someMethod',
- long_start_token.metadata.aliased_symbol)
-
- def testAliasedDoctypes(self):
- """Tests that aliases are correctly expanded within type annotations."""
- start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_ALIAS_SCRIPT)
- tracker = javascriptstatetracker.JavaScriptStateTracker()
- tracker.DocFlagPass(start_token, error_handler=None)
-
- alias_pass = aliaspass.AliasPass(set(['goog', 'myproject']))
- alias_pass.Process(start_token)
-
- flag_token = _GetTokenByLineAndString(start_token, '@type', 22)
- self.assertEquals(
- 'goog.events.Event.<goog.ui.Component,Array<myproject.foo.MyClass>>',
- repr(flag_token.attached_object.jstype))
-
- def testModuleAlias(self):
- start_token = testutil.TokenizeSourceAndRunEcmaPass("""
-goog.module('goog.test');
-var Alias = goog.require('goog.Alias');
-Alias.use();
-""")
- alias_pass = aliaspass.AliasPass(set(['goog']))
- alias_pass.Process(start_token)
- alias_token = _GetTokenByLineAndString(start_token, 'Alias', 3)
- self.assertTrue(alias_token.metadata.is_alias_definition)
-
- def testMultipleGoogScopeCalls(self):
- start_token = testutil.TokenizeSourceAndRunEcmaPass(
- _TEST_MULTIPLE_SCOPE_SCRIPT)
-
- error_accumulator = erroraccumulator.ErrorAccumulator()
-
- alias_pass = aliaspass.AliasPass(
- set(['goog', 'myproject']),
- error_handler=error_accumulator)
- alias_pass.Process(start_token)
-
- alias_errors = error_accumulator.GetErrors()
-
- self.assertEquals(3, len(alias_errors))
-
- error = alias_errors[0]
- self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, error.code)
- self.assertEquals(7, error.token.line_number)
-
- error = alias_errors[1]
- self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code)
- self.assertEquals(7, error.token.line_number)
-
- error = alias_errors[2]
- self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code)
- self.assertEquals(11, error.token.line_number)
-
-
-_TEST_ALIAS_SCRIPT = """
-goog.scope(function() {
-var events = goog.events; // scope alias
-var Event = events.
- Event; // nested multiline scope alias
-
-// This should not be registered as an aliased identifier because
-// it appears before the alias.
-var myClass = new MyClass();
-
-var Component = goog.ui.Component; // scope alias
-var MyClass = myproject.foo.MyClass; // scope alias
-
-// Scope alias of non-Closurized namespace.
-var NonClosurizedClass = aaa.bbb.NonClosurizedClass;
-
-var component = new Component(Event.Something);
-var nonClosurized = NonClosurizedClass();
-
-/**
- * A created namespace with a really long identifier.
- * @type {events.Event.<Component,Array<MyClass>}
- */
-Event.
- MultilineIdentifier.
- someMethod = function() {};
-});
-"""
-
-_TEST_SCOPE_SCRIPT = """
-function foo () {
- // This goog.scope call is invalid.
- goog.scope(function() {
-
- });
-}
-"""
-
-_TEST_MULTIPLE_SCOPE_SCRIPT = """
-goog.scope(function() {
- // do nothing
-});
-
-function foo() {
- var test = goog.scope; // We should not see goog.scope mentioned.
-}
-
-// This goog.scope invalid. There can be only one.
-goog.scope(function() {
-
-});
-"""
-
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/checker.py b/tools/closure_linter/build/lib/closure_linter/checker.py
deleted file mode 100644
index 1c984173b0..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/checker.py
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Core methods for checking JS files for common style guide violations."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-import gflags as flags
-
-from closure_linter import aliaspass
-from closure_linter import checkerbase
-from closure_linter import closurizednamespacesinfo
-from closure_linter import javascriptlintrules
-
-
-flags.DEFINE_list('closurized_namespaces', '',
- 'Namespace prefixes, used for testing of'
- 'goog.provide/require')
-flags.DEFINE_list('ignored_extra_namespaces', '',
- 'Fully qualified namespaces that should be not be reported '
- 'as extra by the linter.')
-
-
-class JavaScriptStyleChecker(checkerbase.CheckerBase):
- """Checker that applies JavaScriptLintRules."""
-
- def __init__(self, state_tracker, error_handler):
- """Initialize an JavaScriptStyleChecker object.
-
- Args:
- state_tracker: State tracker.
- error_handler: Error handler to pass all errors to.
- """
- self._namespaces_info = None
- self._alias_pass = None
- if flags.FLAGS.closurized_namespaces:
- self._namespaces_info = (
- closurizednamespacesinfo.ClosurizedNamespacesInfo(
- flags.FLAGS.closurized_namespaces,
- flags.FLAGS.ignored_extra_namespaces))
-
- self._alias_pass = aliaspass.AliasPass(
- flags.FLAGS.closurized_namespaces, error_handler)
-
- checkerbase.CheckerBase.__init__(
- self,
- error_handler=error_handler,
- lint_rules=javascriptlintrules.JavaScriptLintRules(
- self._namespaces_info),
- state_tracker=state_tracker)
-
- def Check(self, start_token, limited_doc_checks=False, is_html=False,
- stop_token=None):
- """Checks a token stream for lint warnings/errors.
-
- Adds a separate pass for computing dependency information based on
- goog.require and goog.provide statements prior to the main linting pass.
-
- Args:
- start_token: The first token in the token stream.
- limited_doc_checks: Whether to perform limited checks.
- is_html: Whether this token stream is HTML.
- stop_token: If given, checks should stop at this token.
- """
- self._lint_rules.Initialize(self, limited_doc_checks, is_html)
-
- self._state_tracker.DocFlagPass(start_token, self._error_handler)
-
- if self._alias_pass:
- self._alias_pass.Process(start_token)
-
- # To maximize the amount of errors that get reported before a parse error
- # is displayed, don't run the dependency pass if a parse error exists.
- if self._namespaces_info:
- self._namespaces_info.Reset()
- self._ExecutePass(start_token, self._DependencyPass, stop_token)
-
- self._ExecutePass(start_token, self._LintPass, stop_token)
-
- # If we have a stop_token, we didn't end up reading the whole file and,
- # thus, don't call Finalize to do end-of-file checks.
- if not stop_token:
- self._lint_rules.Finalize(self._state_tracker)
-
- def _DependencyPass(self, token):
- """Processes an individual token for dependency information.
-
- Used to encapsulate the logic needed to process an individual token so that
- it can be passed to _ExecutePass.
-
- Args:
- token: The token to process.
- """
- self._namespaces_info.ProcessToken(token, self._state_tracker)
diff --git a/tools/closure_linter/build/lib/closure_linter/checkerbase.py b/tools/closure_linter/build/lib/closure_linter/checkerbase.py
deleted file mode 100644
index 6679ded05b..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/checkerbase.py
+++ /dev/null
@@ -1,192 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Base classes for writing checkers that operate on tokens."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)',
- 'jacobr@google.com (Jacob Richman)')
-
-from closure_linter import errorrules
-from closure_linter.common import error
-
-
-class LintRulesBase(object):
- """Base class for all classes defining the lint rules for a language."""
-
- def __init__(self):
- self.__checker = None
-
- def Initialize(self, checker, limited_doc_checks, is_html):
- """Initializes to prepare to check a file.
-
- Args:
- checker: Class to report errors to.
- limited_doc_checks: Whether doc checking is relaxed for this file.
- is_html: Whether the file is an HTML file with extracted contents.
- """
- self.__checker = checker
- self._limited_doc_checks = limited_doc_checks
- self._is_html = is_html
-
- def _HandleError(self, code, message, token, position=None,
- fix_data=None):
- """Call the HandleError function for the checker we are associated with."""
- if errorrules.ShouldReportError(code):
- self.__checker.HandleError(code, message, token, position, fix_data)
-
- def _SetLimitedDocChecks(self, limited_doc_checks):
- """Sets whether doc checking is relaxed for this file.
-
- Args:
- limited_doc_checks: Whether doc checking is relaxed for this file.
- """
- self._limited_doc_checks = limited_doc_checks
-
- def CheckToken(self, token, parser_state):
- """Checks a token, given the current parser_state, for warnings and errors.
-
- Args:
- token: The current token under consideration.
- parser_state: Object that indicates the parser state in the page.
-
- Raises:
- TypeError: If not overridden.
- """
- raise TypeError('Abstract method CheckToken not implemented')
-
- def Finalize(self, parser_state):
- """Perform all checks that need to occur after all lines are processed.
-
- Args:
- parser_state: State of the parser after parsing all tokens
-
- Raises:
- TypeError: If not overridden.
- """
- raise TypeError('Abstract method Finalize not implemented')
-
-
-class CheckerBase(object):
- """This class handles checking a LintRules object against a file."""
-
- def __init__(self, error_handler, lint_rules, state_tracker):
- """Initialize a checker object.
-
- Args:
- error_handler: Object that handles errors.
- lint_rules: LintRules object defining lint errors given a token
- and state_tracker object.
- state_tracker: Object that tracks the current state in the token stream.
-
- """
- self._error_handler = error_handler
- self._lint_rules = lint_rules
- self._state_tracker = state_tracker
-
- self._has_errors = False
-
- def HandleError(self, code, message, token, position=None,
- fix_data=None):
- """Prints out the given error message including a line number.
-
- Args:
- code: The error code.
- message: The error to print.
- token: The token where the error occurred, or None if it was a file-wide
- issue.
- position: The position of the error, defaults to None.
- fix_data: Metadata used for fixing the error.
- """
- self._has_errors = True
- self._error_handler.HandleError(
- error.Error(code, message, token, position, fix_data))
-
- def HasErrors(self):
- """Returns true if the style checker has found any errors.
-
- Returns:
- True if the style checker has found any errors.
- """
- return self._has_errors
-
- def Check(self, start_token, limited_doc_checks=False, is_html=False,
- stop_token=None):
- """Checks a token stream, reporting errors to the error reporter.
-
- Args:
- start_token: First token in token stream.
- limited_doc_checks: Whether doc checking is relaxed for this file.
- is_html: Whether the file being checked is an HTML file with extracted
- contents.
- stop_token: If given, check should stop at this token.
- """
-
- self._lint_rules.Initialize(self, limited_doc_checks, is_html)
- self._ExecutePass(start_token, self._LintPass, stop_token=stop_token)
- self._lint_rules.Finalize(self._state_tracker)
-
- def _LintPass(self, token):
- """Checks an individual token for lint warnings/errors.
-
- Used to encapsulate the logic needed to check an individual token so that it
- can be passed to _ExecutePass.
-
- Args:
- token: The token to check.
- """
- self._lint_rules.CheckToken(token, self._state_tracker)
-
- def _ExecutePass(self, token, pass_function, stop_token=None):
- """Calls the given function for every token in the given token stream.
-
- As each token is passed to the given function, state is kept up to date and,
- depending on the error_trace flag, errors are either caught and reported, or
- allowed to bubble up so developers can see the full stack trace. If a parse
- error is specified, the pass will proceed as normal until the token causing
- the parse error is reached.
-
- Args:
- token: The first token in the token stream.
- pass_function: The function to call for each token in the token stream.
- stop_token: The last token to check (if given).
-
- Raises:
- Exception: If any error occurred while calling the given function.
- """
-
- self._state_tracker.Reset()
- while token:
- # When we are looking at a token and decided to delete the whole line, we
- # will delete all of them in the "HandleToken()" below. So the current
- # token and subsequent ones may already be deleted here. The way we
- # delete a token does not wipe out the previous and next pointers of the
- # deleted token. So we need to check the token itself to make sure it is
- # not deleted.
- if not token.is_deleted:
- # End the pass at the stop token
- if stop_token and token is stop_token:
- return
-
- self._state_tracker.HandleToken(
- token, self._state_tracker.GetLastNonSpaceToken())
- pass_function(token)
- self._state_tracker.HandleAfterToken(token)
-
- token = token.next
diff --git a/tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo.py b/tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo.py
deleted file mode 100644
index e7cbfd3318..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo.py
+++ /dev/null
@@ -1,578 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Logic for computing dependency information for closurized JavaScript files.
-
-Closurized JavaScript files express dependencies using goog.require and
-goog.provide statements. In order for the linter to detect when a statement is
-missing or unnecessary, all identifiers in the JavaScript file must first be
-processed to determine if they constitute the creation or usage of a dependency.
-"""
-
-
-
-import re
-
-from closure_linter import javascripttokens
-from closure_linter import tokenutil
-
-# pylint: disable=g-bad-name
-TokenType = javascripttokens.JavaScriptTokenType
-
-DEFAULT_EXTRA_NAMESPACES = [
- 'goog.testing.asserts',
- 'goog.testing.jsunit',
-]
-
-
-class UsedNamespace(object):
- """A type for information about a used namespace."""
-
- def __init__(self, namespace, identifier, token, alias_definition):
- """Initializes the instance.
-
- Args:
- namespace: the namespace of an identifier used in the file
- identifier: the complete identifier
- token: the token that uses the namespace
- alias_definition: a boolean stating whether the namespace is only to used
- for an alias definition and should not be required.
- """
- self.namespace = namespace
- self.identifier = identifier
- self.token = token
- self.alias_definition = alias_definition
-
- def GetLine(self):
- return self.token.line_number
-
- def __repr__(self):
- return 'UsedNamespace(%s)' % ', '.join(
- ['%s=%s' % (k, repr(v)) for k, v in self.__dict__.iteritems()])
-
-
-class ClosurizedNamespacesInfo(object):
- """Dependency information for closurized JavaScript files.
-
- Processes token streams for dependency creation or usage and provides logic
- for determining if a given require or provide statement is unnecessary or if
- there are missing require or provide statements.
- """
-
- def __init__(self, closurized_namespaces, ignored_extra_namespaces):
- """Initializes an instance the ClosurizedNamespacesInfo class.
-
- Args:
- closurized_namespaces: A list of namespace prefixes that should be
- processed for dependency information. Non-matching namespaces are
- ignored.
- ignored_extra_namespaces: A list of namespaces that should not be reported
- as extra regardless of whether they are actually used.
- """
- self._closurized_namespaces = closurized_namespaces
- self._ignored_extra_namespaces = (ignored_extra_namespaces +
- DEFAULT_EXTRA_NAMESPACES)
- self.Reset()
-
- def Reset(self):
- """Resets the internal state to prepare for processing a new file."""
-
- # A list of goog.provide tokens in the order they appeared in the file.
- self._provide_tokens = []
-
- # A list of goog.require tokens in the order they appeared in the file.
- self._require_tokens = []
-
- # Namespaces that are already goog.provided.
- self._provided_namespaces = []
-
- # Namespaces that are already goog.required.
- self._required_namespaces = []
-
- # Note that created_namespaces and used_namespaces contain both namespaces
- # and identifiers because there are many existing cases where a method or
- # constant is provided directly instead of its namespace. Ideally, these
- # two lists would only have to contain namespaces.
-
- # A list of tuples where the first element is the namespace of an identifier
- # created in the file, the second is the identifier itself and the third is
- # the line number where it's created.
- self._created_namespaces = []
-
- # A list of UsedNamespace instances.
- self._used_namespaces = []
-
- # A list of seemingly-unnecessary namespaces that are goog.required() and
- # annotated with @suppress {extraRequire}.
- self._suppressed_requires = []
-
- # A list of goog.provide tokens which are duplicates.
- self._duplicate_provide_tokens = []
-
- # A list of goog.require tokens which are duplicates.
- self._duplicate_require_tokens = []
-
- # Whether this file is in a goog.scope. Someday, we may add support
- # for checking scopified namespaces, but for now let's just fail
- # in a more reasonable way.
- self._scopified_file = False
-
- # TODO(user): Handle the case where there are 2 different requires
- # that can satisfy the same dependency, but only one is necessary.
-
- def GetProvidedNamespaces(self):
- """Returns the namespaces which are already provided by this file.
-
- Returns:
- A list of strings where each string is a 'namespace' corresponding to an
- existing goog.provide statement in the file being checked.
- """
- return set(self._provided_namespaces)
-
- def GetRequiredNamespaces(self):
- """Returns the namespaces which are already required by this file.
-
- Returns:
- A list of strings where each string is a 'namespace' corresponding to an
- existing goog.require statement in the file being checked.
- """
- return set(self._required_namespaces)
-
- def IsExtraProvide(self, token):
- """Returns whether the given goog.provide token is unnecessary.
-
- Args:
- token: A goog.provide token.
-
- Returns:
- True if the given token corresponds to an unnecessary goog.provide
- statement, otherwise False.
- """
- namespace = tokenutil.GetStringAfterToken(token)
-
- if self.GetClosurizedNamespace(namespace) is None:
- return False
-
- if token in self._duplicate_provide_tokens:
- return True
-
- # TODO(user): There's probably a faster way to compute this.
- for created_namespace, created_identifier, _ in self._created_namespaces:
- if namespace == created_namespace or namespace == created_identifier:
- return False
-
- return True
-
- def IsExtraRequire(self, token):
- """Returns whether the given goog.require token is unnecessary.
-
- Args:
- token: A goog.require token.
-
- Returns:
- True if the given token corresponds to an unnecessary goog.require
- statement, otherwise False.
- """
- namespace = tokenutil.GetStringAfterToken(token)
-
- if self.GetClosurizedNamespace(namespace) is None:
- return False
-
- if namespace in self._ignored_extra_namespaces:
- return False
-
- if token in self._duplicate_require_tokens:
- return True
-
- if namespace in self._suppressed_requires:
- return False
-
- # If the namespace contains a component that is initial caps, then that
- # must be the last component of the namespace.
- parts = namespace.split('.')
- if len(parts) > 1 and parts[-2][0].isupper():
- return True
-
- # TODO(user): There's probably a faster way to compute this.
- for ns in self._used_namespaces:
- if (not ns.alias_definition and (
- namespace == ns.namespace or namespace == ns.identifier)):
- return False
-
- return True
-
- def GetMissingProvides(self):
- """Returns the dict of missing provided namespaces for the current file.
-
- Returns:
- Returns a dictionary of key as string and value as integer where each
- string(key) is a namespace that should be provided by this file, but is
- not and integer(value) is first line number where it's defined.
- """
- missing_provides = dict()
- for namespace, identifier, line_number in self._created_namespaces:
- if (not self._IsPrivateIdentifier(identifier) and
- namespace not in self._provided_namespaces and
- identifier not in self._provided_namespaces and
- namespace not in self._required_namespaces and
- namespace not in missing_provides):
- missing_provides[namespace] = line_number
-
- return missing_provides
-
- def GetMissingRequires(self):
- """Returns the dict of missing required namespaces for the current file.
-
- For each non-private identifier used in the file, find either a
- goog.require, goog.provide or a created identifier that satisfies it.
- goog.require statements can satisfy the identifier by requiring either the
- namespace of the identifier or the identifier itself. goog.provide
- statements can satisfy the identifier by providing the namespace of the
- identifier. A created identifier can only satisfy the used identifier if
- it matches it exactly (necessary since things can be defined on a
- namespace in more than one file). Note that provided namespaces should be
- a subset of created namespaces, but we check both because in some cases we
- can't always detect the creation of the namespace.
-
- Returns:
- Returns a dictionary of key as string and value integer where each
- string(key) is a namespace that should be required by this file, but is
- not and integer(value) is first line number where it's used.
- """
- external_dependencies = set(self._required_namespaces)
-
- # Assume goog namespace is always available.
- external_dependencies.add('goog')
- # goog.module is treated as a builtin, too (for goog.module.get).
- external_dependencies.add('goog.module')
-
- created_identifiers = set()
- for unused_namespace, identifier, unused_line_number in (
- self._created_namespaces):
- created_identifiers.add(identifier)
-
- missing_requires = dict()
- illegal_alias_statements = dict()
-
- def ShouldRequireNamespace(namespace, identifier):
- """Checks if a namespace would normally be required."""
- return (
- not self._IsPrivateIdentifier(identifier) and
- namespace not in external_dependencies and
- namespace not in self._provided_namespaces and
- identifier not in external_dependencies and
- identifier not in created_identifiers and
- namespace not in missing_requires)
-
- # First check all the used identifiers where we know that their namespace
- # needs to be provided (unless they are optional).
- for ns in self._used_namespaces:
- namespace = ns.namespace
- identifier = ns.identifier
- if (not ns.alias_definition and
- ShouldRequireNamespace(namespace, identifier)):
- missing_requires[namespace] = ns.GetLine()
-
- # Now that all required namespaces are known, we can check if the alias
- # definitions (that are likely being used for typeannotations that don't
- # need explicit goog.require statements) are already covered. If not
- # the user shouldn't use the alias.
- for ns in self._used_namespaces:
- if (not ns.alias_definition or
- not ShouldRequireNamespace(ns.namespace, ns.identifier)):
- continue
- if self._FindNamespace(ns.identifier, self._provided_namespaces,
- created_identifiers, external_dependencies,
- missing_requires):
- continue
- namespace = ns.identifier.rsplit('.', 1)[0]
- illegal_alias_statements[namespace] = ns.token
-
- return missing_requires, illegal_alias_statements
-
- def _FindNamespace(self, identifier, *namespaces_list):
- """Finds the namespace of an identifier given a list of other namespaces.
-
- Args:
- identifier: An identifier whose parent needs to be defined.
- e.g. for goog.bar.foo we search something that provides
- goog.bar.
- *namespaces_list: var args of iterables of namespace identifiers
- Returns:
- The namespace that the given identifier is part of or None.
- """
- identifier = identifier.rsplit('.', 1)[0]
- identifier_prefix = identifier + '.'
- for namespaces in namespaces_list:
- for namespace in namespaces:
- if namespace == identifier or namespace.startswith(identifier_prefix):
- return namespace
- return None
-
- def _IsPrivateIdentifier(self, identifier):
- """Returns whether the given identifier is private."""
- pieces = identifier.split('.')
- for piece in pieces:
- if piece.endswith('_'):
- return True
- return False
-
- def IsFirstProvide(self, token):
- """Returns whether token is the first provide token."""
- return self._provide_tokens and token == self._provide_tokens[0]
-
- def IsFirstRequire(self, token):
- """Returns whether token is the first require token."""
- return self._require_tokens and token == self._require_tokens[0]
-
- def IsLastProvide(self, token):
- """Returns whether token is the last provide token."""
- return self._provide_tokens and token == self._provide_tokens[-1]
-
- def IsLastRequire(self, token):
- """Returns whether token is the last require token."""
- return self._require_tokens and token == self._require_tokens[-1]
-
- def ProcessToken(self, token, state_tracker):
- """Processes the given token for dependency information.
-
- Args:
- token: The token to process.
- state_tracker: The JavaScript state tracker.
- """
-
- # Note that this method is in the critical path for the linter and has been
- # optimized for performance in the following ways:
- # - Tokens are checked by type first to minimize the number of function
- # calls necessary to determine if action needs to be taken for the token.
- # - The most common tokens types are checked for first.
- # - The number of function calls has been minimized (thus the length of this
- # function.
-
- if token.type == TokenType.IDENTIFIER:
- # TODO(user): Consider saving the whole identifier in metadata.
- whole_identifier_string = tokenutil.GetIdentifierForToken(token)
- if whole_identifier_string is None:
- # We only want to process the identifier one time. If the whole string
- # identifier is None, that means this token was part of a multi-token
- # identifier, but it was not the first token of the identifier.
- return
-
- # In the odd case that a goog.require is encountered inside a function,
- # just ignore it (e.g. dynamic loading in test runners).
- if token.string == 'goog.require' and not state_tracker.InFunction():
- self._require_tokens.append(token)
- namespace = tokenutil.GetStringAfterToken(token)
- if namespace in self._required_namespaces:
- self._duplicate_require_tokens.append(token)
- else:
- self._required_namespaces.append(namespace)
-
- # If there is a suppression for the require, add a usage for it so it
- # gets treated as a regular goog.require (i.e. still gets sorted).
- if self._HasSuppression(state_tracker, 'extraRequire'):
- self._suppressed_requires.append(namespace)
- self._AddUsedNamespace(state_tracker, namespace, token)
-
- elif token.string == 'goog.provide':
- self._provide_tokens.append(token)
- namespace = tokenutil.GetStringAfterToken(token)
- if namespace in self._provided_namespaces:
- self._duplicate_provide_tokens.append(token)
- else:
- self._provided_namespaces.append(namespace)
-
- # If there is a suppression for the provide, add a creation for it so it
- # gets treated as a regular goog.provide (i.e. still gets sorted).
- if self._HasSuppression(state_tracker, 'extraProvide'):
- self._AddCreatedNamespace(state_tracker, namespace, token.line_number)
-
- elif token.string == 'goog.scope':
- self._scopified_file = True
-
- elif token.string == 'goog.setTestOnly':
-
- # Since the message is optional, we don't want to scan to later lines.
- for t in tokenutil.GetAllTokensInSameLine(token):
- if t.type == TokenType.STRING_TEXT:
- message = t.string
-
- if re.match(r'^\w+(\.\w+)+$', message):
- # This looks like a namespace. If it's a Closurized namespace,
- # consider it created.
- base_namespace = message.split('.', 1)[0]
- if base_namespace in self._closurized_namespaces:
- self._AddCreatedNamespace(state_tracker, message,
- token.line_number)
-
- break
- else:
- jsdoc = state_tracker.GetDocComment()
- if token.metadata and token.metadata.aliased_symbol:
- whole_identifier_string = token.metadata.aliased_symbol
- elif (token.string == 'goog.module.get' and
- not self._HasSuppression(state_tracker, 'extraRequire')):
- # Cannot use _AddUsedNamespace as this is not an identifier, but
- # already the entire namespace that's required.
- namespace = tokenutil.GetStringAfterToken(token)
- namespace = UsedNamespace(namespace, namespace, token,
- alias_definition=False)
- self._used_namespaces.append(namespace)
- if jsdoc and jsdoc.HasFlag('typedef'):
- self._AddCreatedNamespace(state_tracker, whole_identifier_string,
- token.line_number,
- namespace=self.GetClosurizedNamespace(
- whole_identifier_string))
- else:
- is_alias_definition = (token.metadata and
- token.metadata.is_alias_definition)
- self._AddUsedNamespace(state_tracker, whole_identifier_string,
- token, is_alias_definition)
-
- elif token.type == TokenType.SIMPLE_LVALUE:
- identifier = token.values['identifier']
- start_token = tokenutil.GetIdentifierStart(token)
- if start_token and start_token != token:
- # Multi-line identifier being assigned. Get the whole identifier.
- identifier = tokenutil.GetIdentifierForToken(start_token)
- else:
- start_token = token
- # If an alias is defined on the start_token, use it instead.
- if (start_token and
- start_token.metadata and
- start_token.metadata.aliased_symbol and
- not start_token.metadata.is_alias_definition):
- identifier = start_token.metadata.aliased_symbol
-
- if identifier:
- namespace = self.GetClosurizedNamespace(identifier)
- if state_tracker.InFunction():
- self._AddUsedNamespace(state_tracker, identifier, token)
- elif namespace and namespace != 'goog':
- self._AddCreatedNamespace(state_tracker, identifier,
- token.line_number, namespace=namespace)
-
- elif token.type == TokenType.DOC_FLAG:
- flag = token.attached_object
- flag_type = flag.flag_type
- if flag and flag.HasType() and flag.jstype:
- is_interface = state_tracker.GetDocComment().HasFlag('interface')
- if flag_type == 'implements' or (flag_type == 'extends'
- and is_interface):
- identifier = flag.jstype.alias or flag.jstype.identifier
- self._AddUsedNamespace(state_tracker, identifier, token)
- # Since we process doctypes only for implements and extends, the
- # type is a simple one and we don't need any iteration for subtypes.
-
- def _AddCreatedNamespace(self, state_tracker, identifier, line_number,
- namespace=None):
- """Adds the namespace of an identifier to the list of created namespaces.
-
- If the identifier is annotated with a 'missingProvide' suppression, it is
- not added.
-
- Args:
- state_tracker: The JavaScriptStateTracker instance.
- identifier: The identifier to add.
- line_number: Line number where namespace is created.
- namespace: The namespace of the identifier or None if the identifier is
- also the namespace.
- """
- if not namespace:
- namespace = identifier
-
- if self._HasSuppression(state_tracker, 'missingProvide'):
- return
-
- self._created_namespaces.append([namespace, identifier, line_number])
-
- def _AddUsedNamespace(self, state_tracker, identifier, token,
- is_alias_definition=False):
- """Adds the namespace of an identifier to the list of used namespaces.
-
- If the identifier is annotated with a 'missingRequire' suppression, it is
- not added.
-
- Args:
- state_tracker: The JavaScriptStateTracker instance.
- identifier: An identifier which has been used.
- token: The token in which the namespace is used.
- is_alias_definition: If the used namespace is part of an alias_definition.
- Aliased symbols need their parent namespace to be available, if it is
- not yet required through another symbol, an error will be thrown.
- """
- if self._HasSuppression(state_tracker, 'missingRequire'):
- return
-
- namespace = self.GetClosurizedNamespace(identifier)
- # b/5362203 If its a variable in scope then its not a required namespace.
- if namespace and not state_tracker.IsVariableInScope(namespace):
- namespace = UsedNamespace(namespace, identifier, token,
- is_alias_definition)
- self._used_namespaces.append(namespace)
-
- def _HasSuppression(self, state_tracker, suppression):
- jsdoc = state_tracker.GetDocComment()
- return jsdoc and suppression in jsdoc.suppressions
-
- def GetClosurizedNamespace(self, identifier):
- """Given an identifier, returns the namespace that identifier is from.
-
- Args:
- identifier: The identifier to extract a namespace from.
-
- Returns:
- The namespace the given identifier resides in, or None if one could not
- be found.
- """
- if identifier.startswith('goog.global'):
- # Ignore goog.global, since it is, by definition, global.
- return None
-
- parts = identifier.split('.')
- for namespace in self._closurized_namespaces:
- if not identifier.startswith(namespace + '.'):
- continue
-
- # The namespace for a class is the shortest prefix ending in a class
- # name, which starts with a capital letter but is not a capitalized word.
- #
- # We ultimately do not want to allow requiring or providing of inner
- # classes/enums. Instead, a file should provide only the top-level class
- # and users should require only that.
- namespace = []
- for part in parts:
- if part == 'prototype' or part.isupper():
- return '.'.join(namespace)
- namespace.append(part)
- if part[0].isupper():
- return '.'.join(namespace)
-
- # At this point, we know there's no class or enum, so the namespace is
- # just the identifier with the last part removed. With the exception of
- # apply, inherits, and call, which should also be stripped.
- if parts[-1] in ('apply', 'inherits', 'call'):
- parts.pop()
- parts.pop()
-
- # If the last part ends with an underscore, it is a private variable,
- # method, or enum. The namespace is whatever is before it.
- if parts and parts[-1].endswith('_'):
- parts.pop()
-
- return '.'.join(parts)
-
- return None
diff --git a/tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo_test.py b/tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo_test.py
deleted file mode 100644
index 7aeae21956..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo_test.py
+++ /dev/null
@@ -1,873 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unit tests for ClosurizedNamespacesInfo."""
-
-
-
-import unittest as googletest
-from closure_linter import aliaspass
-from closure_linter import closurizednamespacesinfo
-from closure_linter import ecmametadatapass
-from closure_linter import javascriptstatetracker
-from closure_linter import javascripttokens
-from closure_linter import testutil
-from closure_linter import tokenutil
-
-# pylint: disable=g-bad-name
-TokenType = javascripttokens.JavaScriptTokenType
-
-
-def _ToLineDict(illegal_alias_stmts):
- """Replaces tokens with the respective line number."""
- return {k: v.line_number for k, v in illegal_alias_stmts.iteritems()}
-
-
-class ClosurizedNamespacesInfoTest(googletest.TestCase):
- """Tests for ClosurizedNamespacesInfo."""
-
- _test_cases = {
- 'goog.global.anything': None,
- 'package.CONSTANT': 'package',
- 'package.methodName': 'package',
- 'package.subpackage.methodName': 'package.subpackage',
- 'package.subpackage.methodName.apply': 'package.subpackage',
- 'package.ClassName.something': 'package.ClassName',
- 'package.ClassName.Enum.VALUE.methodName': 'package.ClassName',
- 'package.ClassName.CONSTANT': 'package.ClassName',
- 'package.namespace.CONSTANT.methodName': 'package.namespace',
- 'package.ClassName.inherits': 'package.ClassName',
- 'package.ClassName.apply': 'package.ClassName',
- 'package.ClassName.methodName.apply': 'package.ClassName',
- 'package.ClassName.methodName.call': 'package.ClassName',
- 'package.ClassName.prototype.methodName': 'package.ClassName',
- 'package.ClassName.privateMethod_': 'package.ClassName',
- 'package.className.privateProperty_': 'package.className',
- 'package.className.privateProperty_.methodName': 'package.className',
- 'package.ClassName.PrivateEnum_': 'package.ClassName',
- 'package.ClassName.prototype.methodName.apply': 'package.ClassName',
- 'package.ClassName.property.subProperty': 'package.ClassName',
- 'package.className.prototype.something.somethingElse': 'package.className'
- }
-
- def testGetClosurizedNamespace(self):
- """Tests that the correct namespace is returned for various identifiers."""
- namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
- closurized_namespaces=['package'], ignored_extra_namespaces=[])
- for identifier, expected_namespace in self._test_cases.items():
- actual_namespace = namespaces_info.GetClosurizedNamespace(identifier)
- self.assertEqual(
- expected_namespace,
- actual_namespace,
- 'expected namespace "' + str(expected_namespace) +
- '" for identifier "' + str(identifier) + '" but was "' +
- str(actual_namespace) + '"')
-
- def testIgnoredExtraNamespaces(self):
- """Tests that ignored_extra_namespaces are ignored."""
- token = self._GetRequireTokens('package.Something')
- namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
- closurized_namespaces=['package'],
- ignored_extra_namespaces=['package.Something'])
-
- self.assertFalse(namespaces_info.IsExtraRequire(token),
- 'Should be valid since it is in ignored namespaces.')
-
- namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
- ['package'], [])
-
- self.assertTrue(namespaces_info.IsExtraRequire(token),
- 'Should be invalid since it is not in ignored namespaces.')
-
- def testIsExtraProvide_created(self):
- """Tests that provides for created namespaces are not extra."""
- input_lines = [
- 'goog.provide(\'package.Foo\');',
- 'package.Foo = function() {};'
- ]
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertFalse(namespaces_info.IsExtraProvide(token),
- 'Should not be extra since it is created.')
-
- def testIsExtraProvide_createdIdentifier(self):
- """Tests that provides for created identifiers are not extra."""
- input_lines = [
- 'goog.provide(\'package.Foo.methodName\');',
- 'package.Foo.methodName = function() {};'
- ]
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertFalse(namespaces_info.IsExtraProvide(token),
- 'Should not be extra since it is created.')
-
- def testIsExtraProvide_notCreated(self):
- """Tests that provides for non-created namespaces are extra."""
- input_lines = ['goog.provide(\'package.Foo\');']
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertTrue(namespaces_info.IsExtraProvide(token),
- 'Should be extra since it is not created.')
-
- def testIsExtraProvide_notCreatedMultipartClosurizedNamespace(self):
- """Tests that provides for non-created namespaces are extra."""
- input_lines = ['goog.provide(\'multi.part.namespace.Foo\');']
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['multi.part'])
-
- self.assertTrue(namespaces_info.IsExtraProvide(token),
- 'Should be extra since it is not created.')
-
- def testIsExtraProvide_duplicate(self):
- """Tests that providing a namespace twice makes the second one extra."""
- input_lines = [
- 'goog.provide(\'package.Foo\');',
- 'goog.provide(\'package.Foo\');',
- 'package.Foo = function() {};'
- ]
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- # Advance to the second goog.provide token.
- token = tokenutil.Search(token.next, TokenType.IDENTIFIER)
-
- self.assertTrue(namespaces_info.IsExtraProvide(token),
- 'Should be extra since it is already provided.')
-
- def testIsExtraProvide_notClosurized(self):
- """Tests that provides of non-closurized namespaces are not extra."""
- input_lines = ['goog.provide(\'notclosurized.Foo\');']
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertFalse(namespaces_info.IsExtraProvide(token),
- 'Should not be extra since it is not closurized.')
-
- def testIsExtraRequire_used(self):
- """Tests that requires for used namespaces are not extra."""
- input_lines = [
- 'goog.require(\'package.Foo\');',
- 'var x = package.Foo.methodName();'
- ]
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertFalse(namespaces_info.IsExtraRequire(token),
- 'Should not be extra since it is used.')
-
- def testIsExtraRequire_usedIdentifier(self):
- """Tests that requires for used methods on classes are extra."""
- input_lines = [
- 'goog.require(\'package.Foo.methodName\');',
- 'var x = package.Foo.methodName();'
- ]
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertTrue(namespaces_info.IsExtraRequire(token),
- 'Should require the package, not the method specifically.')
-
- def testIsExtraRequire_notUsed(self):
- """Tests that requires for unused namespaces are extra."""
- input_lines = ['goog.require(\'package.Foo\');']
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertTrue(namespaces_info.IsExtraRequire(token),
- 'Should be extra since it is not used.')
-
- def testIsExtraRequire_notUsedMultiPartClosurizedNamespace(self):
- """Tests unused require with multi-part closurized namespaces."""
-
- input_lines = ['goog.require(\'multi.part.namespace.Foo\');']
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['multi.part'])
-
- self.assertTrue(namespaces_info.IsExtraRequire(token),
- 'Should be extra since it is not used.')
-
- def testIsExtraRequire_notClosurized(self):
- """Tests that requires of non-closurized namespaces are not extra."""
- input_lines = ['goog.require(\'notclosurized.Foo\');']
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertFalse(namespaces_info.IsExtraRequire(token),
- 'Should not be extra since it is not closurized.')
-
- def testIsExtraRequire_objectOnClass(self):
- """Tests that requiring an object on a class is extra."""
- input_lines = [
- 'goog.require(\'package.Foo.Enum\');',
- 'var x = package.Foo.Enum.VALUE1;',
- ]
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertTrue(namespaces_info.IsExtraRequire(token),
- 'The whole class, not the object, should be required.');
-
- def testIsExtraRequire_constantOnClass(self):
- """Tests that requiring a constant on a class is extra."""
- input_lines = [
- 'goog.require(\'package.Foo.CONSTANT\');',
- 'var x = package.Foo.CONSTANT',
- ]
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertTrue(namespaces_info.IsExtraRequire(token),
- 'The class, not the constant, should be required.');
-
- def testIsExtraRequire_constantNotOnClass(self):
- """Tests that requiring a constant not on a class is OK."""
- input_lines = [
- 'goog.require(\'package.subpackage.CONSTANT\');',
- 'var x = package.subpackage.CONSTANT',
- ]
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertFalse(namespaces_info.IsExtraRequire(token),
- 'Constants can be required except on classes.');
-
- def testIsExtraRequire_methodNotOnClass(self):
- """Tests that requiring a method not on a class is OK."""
- input_lines = [
- 'goog.require(\'package.subpackage.method\');',
- 'var x = package.subpackage.method()',
- ]
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertFalse(namespaces_info.IsExtraRequire(token),
- 'Methods can be required except on classes.');
-
- def testIsExtraRequire_defaults(self):
- """Tests that there are no warnings about extra requires for test utils"""
- input_lines = ['goog.require(\'goog.testing.jsunit\');']
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['goog'])
-
- self.assertFalse(namespaces_info.IsExtraRequire(token),
- 'Should not be extra since it is for testing.')
-
- def testGetMissingProvides_provided(self):
- """Tests that provided functions don't cause a missing provide."""
- input_lines = [
- 'goog.provide(\'package.Foo\');',
- 'package.Foo = function() {};'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
-
- def testGetMissingProvides_providedIdentifier(self):
- """Tests that provided identifiers don't cause a missing provide."""
- input_lines = [
- 'goog.provide(\'package.Foo.methodName\');',
- 'package.Foo.methodName = function() {};'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
-
- def testGetMissingProvides_providedParentIdentifier(self):
- """Tests that provided identifiers on a class don't cause a missing provide
- on objects attached to that class."""
- input_lines = [
- 'goog.provide(\'package.foo.ClassName\');',
- 'package.foo.ClassName.methodName = function() {};',
- 'package.foo.ClassName.ObjectName = 1;',
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
-
- def testGetMissingProvides_unprovided(self):
- """Tests that unprovided functions cause a missing provide."""
- input_lines = ['package.Foo = function() {};']
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
-
- missing_provides = namespaces_info.GetMissingProvides()
- self.assertEquals(1, len(missing_provides))
- missing_provide = missing_provides.popitem()
- self.assertEquals('package.Foo', missing_provide[0])
- self.assertEquals(1, missing_provide[1])
-
- def testGetMissingProvides_privatefunction(self):
- """Tests that unprovided private functions don't cause a missing provide."""
- input_lines = ['package.Foo_ = function() {};']
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
-
- def testGetMissingProvides_required(self):
- """Tests that required namespaces don't cause a missing provide."""
- input_lines = [
- 'goog.require(\'package.Foo\');',
- 'package.Foo.methodName = function() {};'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
-
- def testGetMissingRequires_required(self):
- """Tests that required namespaces don't cause a missing require."""
- input_lines = [
- 'goog.require(\'package.Foo\');',
- 'package.Foo();'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals(0, len(missing_requires))
-
- def testGetMissingRequires_requiredIdentifier(self):
- """Tests that required namespaces satisfy identifiers on that namespace."""
- input_lines = [
- 'goog.require(\'package.Foo\');',
- 'package.Foo.methodName();'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals(0, len(missing_requires))
-
- def testGetMissingRequires_requiredNamespace(self):
- """Tests that required namespaces satisfy the namespace."""
- input_lines = [
- 'goog.require(\'package.soy.fooTemplate\');',
- 'render(package.soy.fooTemplate);'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals(0, len(missing_requires))
-
- def testGetMissingRequires_requiredParentClass(self):
- """Tests that requiring a parent class of an object is sufficient to prevent
- a missing require on that object."""
- input_lines = [
- 'goog.require(\'package.Foo\');',
- 'package.Foo.methodName();',
- 'package.Foo.methodName(package.Foo.ObjectName);'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals(0, len(missing_requires))
-
- def testGetMissingRequires_unrequired(self):
- """Tests that unrequired namespaces cause a missing require."""
- input_lines = ['package.Foo();']
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
-
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals(1, len(missing_requires))
- missing_req = missing_requires.popitem()
- self.assertEquals('package.Foo', missing_req[0])
- self.assertEquals(1, missing_req[1])
-
- def testGetMissingRequires_provided(self):
- """Tests that provided namespaces satisfy identifiers on that namespace."""
- input_lines = [
- 'goog.provide(\'package.Foo\');',
- 'package.Foo.methodName();'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals(0, len(missing_requires))
-
- def testGetMissingRequires_created(self):
- """Tests that created namespaces do not satisfy usage of an identifier."""
- input_lines = [
- 'package.Foo = function();',
- 'package.Foo.methodName();',
- 'package.Foo.anotherMethodName1();',
- 'package.Foo.anotherMethodName2();'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
-
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals(1, len(missing_requires))
- missing_require = missing_requires.popitem()
- self.assertEquals('package.Foo', missing_require[0])
- # Make sure line number of first occurrence is reported
- self.assertEquals(2, missing_require[1])
-
- def testGetMissingRequires_createdIdentifier(self):
- """Tests that created identifiers satisfy usage of the identifier."""
- input_lines = [
- 'package.Foo.methodName = function();',
- 'package.Foo.methodName();'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals(0, len(missing_requires))
-
- def testGetMissingRequires_implements(self):
- """Tests that a parametrized type requires the correct identifier."""
- input_lines = [
- '/** @constructor @implements {package.Bar<T>} */',
- 'package.Foo = function();',
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertItemsEqual({'package.Bar': 1}, missing_requires)
-
- def testGetMissingRequires_objectOnClass(self):
- """Tests that we should require a class, not the object on the class."""
- input_lines = [
- 'goog.require(\'package.Foo.Enum\');',
- 'var x = package.Foo.Enum.VALUE1;',
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals(1, len(missing_requires),
- 'The whole class, not the object, should be required.')
-
- def testGetMissingRequires_variableWithSameName(self):
- """Tests that we should not goog.require variables and parameters.
-
- b/5362203 Variables in scope are not missing namespaces.
- """
- input_lines = [
- 'goog.provide(\'Foo\');',
- 'Foo.A = function();',
- 'Foo.A.prototype.method = function(ab) {',
- ' if (ab) {',
- ' var docs;',
- ' var lvalue = new Obj();',
- ' // Variable in scope hence not goog.require here.',
- ' docs.foo.abc = 1;',
- ' lvalue.next();',
- ' }',
- ' // Since js is function scope this should also not goog.require.',
- ' docs.foo.func();',
- ' // Its not a variable in scope hence goog.require.',
- ' dummy.xyz.reset();',
- ' return this.method2();',
- '};',
- 'Foo.A.prototype.method1 = function(docs, abcd, xyz) {',
- ' // Parameter hence not goog.require.',
- ' docs.nodes.length = 2;',
- ' lvalue.abc.reset();',
- '};'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['Foo',
- 'docs',
- 'lvalue',
- 'dummy'])
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals(2, len(missing_requires))
- self.assertItemsEqual(
- {'dummy.xyz': 14,
- 'lvalue.abc': 20}, missing_requires)
-
- def testIsFirstProvide(self):
- """Tests operation of the isFirstProvide method."""
- input_lines = [
- 'goog.provide(\'package.Foo\');',
- 'package.Foo.methodName();'
- ]
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
- self.assertTrue(namespaces_info.IsFirstProvide(token))
-
- def testGetWholeIdentifierString(self):
- """Tests that created identifiers satisfy usage of the identifier."""
- input_lines = [
- 'package.Foo.',
- ' veryLong.',
- ' identifier;'
- ]
-
- token = testutil.TokenizeSource(input_lines)
-
- self.assertEquals('package.Foo.veryLong.identifier',
- tokenutil.GetIdentifierForToken(token))
-
- self.assertEquals(None,
- tokenutil.GetIdentifierForToken(token.next))
-
- def testScopified(self):
- """Tests that a goog.scope call is noticed."""
- input_lines = [
- 'goog.scope(function() {',
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- self.assertTrue(namespaces_info._scopified_file)
-
- def testScope_unusedAlias(self):
- """Tests that an unused alias symbol is illegal."""
- input_lines = [
- 'goog.scope(function() {',
- 'var Event = goog.events.Event;',
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({}, missing_requires)
- self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
-
- def testScope_usedMultilevelAlias(self):
- """Tests that an used alias symbol in a deep namespace is ok."""
- input_lines = [
- 'goog.require(\'goog.Events\');',
- 'goog.scope(function() {',
- 'var Event = goog.Events.DeepNamespace.Event;',
- 'Event();',
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({}, missing_requires)
- self.assertEquals({}, illegal_alias_stmts)
-
- def testScope_usedAlias(self):
- """Tests that aliased symbols result in correct requires."""
- input_lines = [
- 'goog.scope(function() {',
- 'var Event = goog.events.Event;',
- 'var dom = goog.dom;',
- 'Event(dom.classes.get);',
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({}, illegal_alias_stmts)
- self.assertEquals({'goog.dom.classes': 4, 'goog.events.Event': 4},
- missing_requires)
-
- def testModule_alias(self):
- """Tests that goog.module style aliases are supported."""
- input_lines = [
- 'goog.module(\'test.module\');',
- 'var Unused = goog.require(\'goog.Unused\');',
- 'var AliasedClass = goog.require(\'goog.AliasedClass\');',
- 'var x = new AliasedClass();',
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- namespaceToken = self._GetRequireTokens('goog.AliasedClass')
- self.assertFalse(namespaces_info.IsExtraRequire(namespaceToken),
- 'AliasedClass should be marked as used')
- unusedToken = self._GetRequireTokens('goog.Unused')
- self.assertTrue(namespaces_info.IsExtraRequire(unusedToken),
- 'Unused should be marked as not used')
-
- def testModule_aliasInScope(self):
- """Tests that goog.module style aliases are supported."""
- input_lines = [
- 'goog.module(\'test.module\');',
- 'var AliasedClass = goog.require(\'goog.AliasedClass\');',
- 'goog.scope(function() {',
- 'var x = new AliasedClass();',
- '});',
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- namespaceToken = self._GetRequireTokens('goog.AliasedClass')
- self.assertFalse(namespaces_info.IsExtraRequire(namespaceToken),
- 'AliasedClass should be marked as used')
-
- def testModule_getAlwaysProvided(self):
- """Tests that goog.module.get is recognized as a built-in."""
- input_lines = [
- 'goog.provide(\'test.MyClass\');',
- 'goog.require(\'goog.someModule\');',
- 'goog.scope(function() {',
- 'var someModule = goog.module.get(\'goog.someModule\');',
- 'test.MyClass = function() {};',
- '});',
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- self.assertEquals({}, namespaces_info.GetMissingRequires()[0])
-
- def testModule_requireForGet(self):
- """Tests that goog.module.get needs a goog.require call."""
- input_lines = [
- 'goog.provide(\'test.MyClass\');',
- 'function foo() {',
- ' var someModule = goog.module.get(\'goog.someModule\');',
- ' someModule.doSth();',
- '}',
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- self.assertEquals({'goog.someModule': 3},
- namespaces_info.GetMissingRequires()[0])
-
- def testScope_usedTypeAlias(self):
- """Tests aliased symbols in type annotations."""
- input_lines = [
- 'goog.scope(function() {',
- 'var Event = goog.events.Event;',
- '/** @type {Event} */;',
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({}, missing_requires)
- self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
-
- def testScope_partialAlias_typeOnly(self):
- """Tests a partial alias only used in type annotations.
-
- In this example, some goog.events namespace would need to be required
- so that evaluating goog.events.bar doesn't throw an error.
- """
- input_lines = [
- 'goog.scope(function() {',
- 'var bar = goog.events.bar;',
- '/** @type {bar.Foo} */;',
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({}, missing_requires)
- self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
-
- def testScope_partialAlias(self):
- """Tests a partial alias in conjunction with a type annotation.
-
- In this example, the partial alias is already defined by another type,
- therefore the doc-only type doesn't need to be required.
- """
- input_lines = [
- 'goog.scope(function() {',
- 'var bar = goog.events.bar;',
- '/** @type {bar.Event} */;',
- 'bar.EventType();'
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({'goog.events.bar.EventType': 4}, missing_requires)
- self.assertEquals({}, illegal_alias_stmts)
-
- def testScope_partialAliasRequires(self):
- """Tests partial aliases with correct requires."""
- input_lines = [
- 'goog.require(\'goog.events.bar.EventType\');',
- 'goog.scope(function() {',
- 'var bar = goog.events.bar;',
- '/** @type {bar.Event} */;',
- 'bar.EventType();'
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({}, missing_requires)
- self.assertEquals({}, illegal_alias_stmts)
-
- def testScope_partialAliasRequiresBoth(self):
- """Tests partial aliases with correct requires."""
- input_lines = [
- 'goog.require(\'goog.events.bar.Event\');',
- 'goog.require(\'goog.events.bar.EventType\');',
- 'goog.scope(function() {',
- 'var bar = goog.events.bar;',
- '/** @type {bar.Event} */;',
- 'bar.EventType();'
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({}, missing_requires)
- self.assertEquals({}, illegal_alias_stmts)
- event_token = self._GetRequireTokens('goog.events.bar.Event')
- self.assertTrue(namespaces_info.IsExtraRequire(event_token))
-
- def testScope_partialAliasNoSubtypeRequires(self):
- """Tests that partial aliases don't yield subtype requires (regression)."""
- input_lines = [
- 'goog.provide(\'goog.events.Foo\');',
- 'goog.scope(function() {',
- 'goog.events.Foo = {};',
- 'var Foo = goog.events.Foo;'
- 'Foo.CssName_ = {};'
- 'var CssName_ = Foo.CssName_;'
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals({}, missing_requires)
-
- def testScope_aliasNamespace(self):
- """Tests that an unused alias namespace is not required when available.
-
- In the example goog.events.Bar is not required, because the namespace
- goog.events is already defined because goog.events.Foo is required.
- """
- input_lines = [
- 'goog.require(\'goog.events.Foo\');',
- 'goog.scope(function() {',
- 'var Bar = goog.events.Bar;',
- '/** @type {Bar} */;',
- 'goog.events.Foo;',
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({}, missing_requires)
- self.assertEquals({}, illegal_alias_stmts)
-
- def testScope_aliasNamespaceIllegal(self):
- """Tests that an unused alias namespace is not required when available."""
- input_lines = [
- 'goog.scope(function() {',
- 'var Bar = goog.events.Bar;',
- '/** @type {Bar} */;',
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({}, missing_requires)
- self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
-
- def testScope_provides(self):
- """Tests that aliased symbols result in correct provides."""
- input_lines = [
- 'goog.scope(function() {',
- 'goog.bar = {};',
- 'var bar = goog.bar;',
- 'bar.Foo = {};',
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_provides = namespaces_info.GetMissingProvides()
- self.assertEquals({'goog.bar.Foo': 4}, missing_provides)
- _, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({}, illegal_alias_stmts)
-
- def testSetTestOnlyNamespaces(self):
- """Tests that a namespace in setTestOnly makes it a valid provide."""
- namespaces_info = self._GetNamespacesInfoForScript([
- 'goog.setTestOnly(\'goog.foo.barTest\');'
- ], ['goog'])
-
- token = self._GetProvideTokens('goog.foo.barTest')
- self.assertFalse(namespaces_info.IsExtraProvide(token))
-
- token = self._GetProvideTokens('goog.foo.bazTest')
- self.assertTrue(namespaces_info.IsExtraProvide(token))
-
- def testSetTestOnlyComment(self):
- """Ensure a comment in setTestOnly does not cause a created namespace."""
- namespaces_info = self._GetNamespacesInfoForScript([
- 'goog.setTestOnly(\'this is a comment\');'
- ], ['goog'])
-
- self.assertEquals(
- [], namespaces_info._created_namespaces,
- 'A comment in setTestOnly should not modify created namespaces.')
-
- def _GetNamespacesInfoForScript(self, script, closurized_namespaces=None):
- _, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- script, closurized_namespaces)
-
- return namespaces_info
-
- def _GetStartTokenAndNamespacesInfoForScript(
- self, script, closurized_namespaces):
-
- token = testutil.TokenizeSource(script)
- return token, self._GetInitializedNamespacesInfo(
- token, closurized_namespaces, [])
-
- def _GetInitializedNamespacesInfo(self, token, closurized_namespaces,
- ignored_extra_namespaces):
- """Returns a namespaces info initialized with the given token stream."""
- namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
- closurized_namespaces=closurized_namespaces,
- ignored_extra_namespaces=ignored_extra_namespaces)
- state_tracker = javascriptstatetracker.JavaScriptStateTracker()
-
- ecma_pass = ecmametadatapass.EcmaMetaDataPass()
- ecma_pass.Process(token)
-
- state_tracker.DocFlagPass(token, error_handler=None)
-
- alias_pass = aliaspass.AliasPass(closurized_namespaces)
- alias_pass.Process(token)
-
- while token:
- state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken())
- namespaces_info.ProcessToken(token, state_tracker)
- state_tracker.HandleAfterToken(token)
- token = token.next
-
- return namespaces_info
-
- def _GetProvideTokens(self, namespace):
- """Returns a list of tokens for a goog.require of the given namespace."""
- line_text = 'goog.require(\'' + namespace + '\');\n'
- return testutil.TokenizeSource([line_text])
-
- def _GetRequireTokens(self, namespace):
- """Returns a list of tokens for a goog.require of the given namespace."""
- line_text = 'goog.require(\'' + namespace + '\');\n'
- return testutil.TokenizeSource([line_text])
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/common/__init__.py b/tools/closure_linter/build/lib/closure_linter/common/__init__.py
deleted file mode 100644
index 57930436ce..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/common/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Package indicator for gjslint.common."""
diff --git a/tools/closure_linter/build/lib/closure_linter/common/error.py b/tools/closure_linter/build/lib/closure_linter/common/error.py
deleted file mode 100644
index 4209c235b8..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/common/error.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Error object commonly used in linters."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-
-class Error(object):
- """Object representing a style error."""
-
- def __init__(self, code, message, token=None, position=None, fix_data=None):
- """Initialize the error object.
-
- Args:
- code: The numeric error code.
- message: The error message string.
- token: The tokens.Token where the error occurred.
- position: The position of the error within the token.
- fix_data: Data to be used in autofixing. Codes with fix_data are:
- GOOG_REQUIRES_NOT_ALPHABETIZED - List of string value tokens that are
- class names in goog.requires calls.
- """
- self.code = code
- self.message = message
- self.token = token
- self.position = position
- if token:
- self.start_index = token.start_index
- else:
- self.start_index = 0
- self.fix_data = fix_data
- if self.position:
- self.start_index += self.position.start
-
- def Compare(a, b):
- """Compare two error objects, by source code order.
-
- Args:
- a: First error object.
- b: Second error object.
-
- Returns:
- A Negative/0/Positive number when a is before/the same as/after b.
- """
- line_diff = a.token.line_number - b.token.line_number
- if line_diff:
- return line_diff
-
- return a.start_index - b.start_index
- Compare = staticmethod(Compare)
diff --git a/tools/closure_linter/build/lib/closure_linter/common/erroraccumulator.py b/tools/closure_linter/build/lib/closure_linter/common/erroraccumulator.py
deleted file mode 100644
index 55844ba603..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/common/erroraccumulator.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Linter error handler class that accumulates an array of errors."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-
-from closure_linter.common import errorhandler
-
-
-class ErrorAccumulator(errorhandler.ErrorHandler):
- """Error handler object that accumulates errors in a list."""
-
- def __init__(self):
- self._errors = []
-
- def HandleError(self, error):
- """Append the error to the list.
-
- Args:
- error: The error object
- """
- self._errors.append(error)
-
- def GetErrors(self):
- """Returns the accumulated errors.
-
- Returns:
- A sequence of errors.
- """
- return self._errors
diff --git a/tools/closure_linter/build/lib/closure_linter/common/errorhandler.py b/tools/closure_linter/build/lib/closure_linter/common/errorhandler.py
deleted file mode 100644
index 764d54d84c..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/common/errorhandler.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Interface for a linter error handler.
-
-Error handlers aggregate a set of errors from multiple files and can optionally
-perform some action based on the reported errors, for example, logging the error
-or automatically fixing it.
-"""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-
-class ErrorHandler(object):
- """Error handler interface."""
-
- def __init__(self):
- if self.__class__ == ErrorHandler:
- raise NotImplementedError('class ErrorHandler is abstract')
-
- def HandleFile(self, filename, first_token):
- """Notifies this ErrorHandler that subsequent errors are in filename.
-
- Args:
- filename: The file being linted.
- first_token: The first token of the file.
- """
-
- def HandleError(self, error):
- """Append the error to the list.
-
- Args:
- error: The error object
- """
-
- def FinishFile(self):
- """Finishes handling the current file.
-
- Should be called after all errors in a file have been handled.
- """
-
- def GetErrors(self):
- """Returns the accumulated errors.
-
- Returns:
- A sequence of errors.
- """
diff --git a/tools/closure_linter/build/lib/closure_linter/common/erroroutput.py b/tools/closure_linter/build/lib/closure_linter/common/erroroutput.py
deleted file mode 100644
index 149738b5d4..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/common/erroroutput.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Utility functions to format errors."""
-
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)',
- 'nnaze@google.com (Nathan Naze)')
-
-
-def GetUnixErrorOutput(filename, error, new_error=False):
- """Get a output line for an error in UNIX format."""
-
- line = ''
-
- if error.token:
- line = '%d' % error.token.line_number
-
- error_code = '%04d' % error.code
- if new_error:
- error_code = 'New Error ' + error_code
- return '%s:%s:(%s) %s' % (filename, line, error_code, error.message)
-
-
-def GetErrorOutput(error, new_error=False):
- """Get a output line for an error in regular format."""
-
- line = ''
- if error.token:
- line = 'Line %d, ' % error.token.line_number
-
- code = 'E:%04d' % error.code
-
- error_message = error.message
- if new_error:
- error_message = 'New Error ' + error_message
-
- return '%s%s: %s' % (line, code, error.message)
diff --git a/tools/closure_linter/build/lib/closure_linter/common/filetestcase.py b/tools/closure_linter/build/lib/closure_linter/common/filetestcase.py
deleted file mode 100644
index 7cd83cd1dc..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/common/filetestcase.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Test case that runs a checker on a file, matching errors against annotations.
-
-Runs the given checker on the given file, accumulating all errors. The list
-of errors is then matched against those annotated in the file. Based heavily
-on devtools/javascript/gpylint/full_test.py.
-"""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-import re
-
-import gflags as flags
-import unittest as googletest
-from closure_linter.common import erroraccumulator
-
-
-class AnnotatedFileTestCase(googletest.TestCase):
- """Test case to run a linter against a single file."""
-
- # Matches an all caps letters + underscores error identifer
- _MESSAGE = {'msg': '[A-Z][A-Z_]+'}
- # Matches a //, followed by an optional line number with a +/-, followed by a
- # list of message IDs. Used to extract expected messages from testdata files.
- # TODO(robbyw): Generalize to use different commenting patterns.
- _EXPECTED_RE = re.compile(r'\s*//\s*(?:(?P<line>[+-]?[0-9]+):)?'
- r'\s*(?P<msgs>%(msg)s(?:,\s*%(msg)s)*)' % _MESSAGE)
-
- def __init__(self, filename, lint_callable, converter):
- """Create a single file lint test case.
-
- Args:
- filename: Filename to test.
- lint_callable: Callable that lints a file. This is usually runner.Run().
- converter: Function taking an error string and returning an error code.
- """
-
- googletest.TestCase.__init__(self, 'runTest')
- self._filename = filename
- self._messages = []
- self._lint_callable = lint_callable
- self._converter = converter
-
- def setUp(self):
- flags.FLAGS.dot_on_next_line = True
-
- def tearDown(self):
- flags.FLAGS.dot_on_next_line = False
-
- def shortDescription(self):
- """Provides a description for the test."""
- return 'Run linter on %s' % self._filename
-
- def runTest(self):
- """Runs the test."""
- try:
- filename = self._filename
- stream = open(filename)
- except IOError as ex:
- raise IOError('Could not find testdata resource for %s: %s' %
- (self._filename, ex))
-
- expected = self._GetExpectedMessages(stream)
- got = self._ProcessFileAndGetMessages(filename)
- self.assertEqual(expected, got)
-
- def _GetExpectedMessages(self, stream):
- """Parse a file and get a sorted list of expected messages."""
- messages = []
- for i, line in enumerate(stream):
- match = self._EXPECTED_RE.search(line)
- if match:
- line = match.group('line')
- msg_ids = match.group('msgs')
- if line is None:
- line = i + 1
- elif line.startswith('+') or line.startswith('-'):
- line = i + 1 + int(line)
- else:
- line = int(line)
- for msg_id in msg_ids.split(','):
- # Ignore a spurious message from the license preamble.
- if msg_id != 'WITHOUT':
- messages.append((line, self._converter(msg_id.strip())))
- stream.seek(0)
- messages.sort()
- return messages
-
- def _ProcessFileAndGetMessages(self, filename):
- """Trap gjslint's output parse it to get messages added."""
- error_accumulator = erroraccumulator.ErrorAccumulator()
- self._lint_callable(filename, error_accumulator)
-
- errors = error_accumulator.GetErrors()
-
- # Convert to expected tuple format.
-
- error_msgs = [(error.token.line_number, error.code) for error in errors]
- error_msgs.sort()
- return error_msgs
diff --git a/tools/closure_linter/build/lib/closure_linter/common/htmlutil.py b/tools/closure_linter/build/lib/closure_linter/common/htmlutil.py
deleted file mode 100644
index 26d44c5908..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/common/htmlutil.py
+++ /dev/null
@@ -1,170 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Utilities for dealing with HTML."""
-
-__author__ = ('robbyw@google.com (Robert Walker)')
-
-import cStringIO
-import formatter
-import htmllib
-import HTMLParser
-import re
-
-
-class ScriptExtractor(htmllib.HTMLParser):
- """Subclass of HTMLParser that extracts script contents from an HTML file.
-
- Also inserts appropriate blank lines so that line numbers in the extracted
- code match the line numbers in the original HTML.
- """
-
- def __init__(self):
- """Initialize a ScriptExtractor."""
- htmllib.HTMLParser.__init__(self, formatter.NullFormatter())
- self._in_script = False
- self._text = ''
-
- def start_script(self, attrs):
- """Internal handler for the start of a script tag.
-
- Args:
- attrs: The attributes of the script tag, as a list of tuples.
- """
- for attribute in attrs:
- if attribute[0].lower() == 'src':
- # Skip script tags with a src specified.
- return
- self._in_script = True
-
- def end_script(self):
- """Internal handler for the end of a script tag."""
- self._in_script = False
-
- def handle_data(self, data):
- """Internal handler for character data.
-
- Args:
- data: The character data from the HTML file.
- """
- if self._in_script:
- # If the last line contains whitespace only, i.e. is just there to
- # properly align a </script> tag, strip the whitespace.
- if data.rstrip(' \t') != data.rstrip(' \t\n\r\f'):
- data = data.rstrip(' \t')
- self._text += data
- else:
- self._AppendNewlines(data)
-
- def handle_comment(self, data):
- """Internal handler for HTML comments.
-
- Args:
- data: The text of the comment.
- """
- self._AppendNewlines(data)
-
- def _AppendNewlines(self, data):
- """Count the number of newlines in the given string and append them.
-
- This ensures line numbers are correct for reported errors.
-
- Args:
- data: The data to count newlines in.
- """
- # We append 'x' to both sides of the string to ensure that splitlines
- # gives us an accurate count.
- for i in xrange(len(('x' + data + 'x').splitlines()) - 1):
- self._text += '\n'
-
- def GetScriptLines(self):
- """Return the extracted script lines.
-
- Returns:
- The extracted script lines as a list of strings.
- """
- return self._text.splitlines()
-
-
-def GetScriptLines(f):
- """Extract script tag contents from the given HTML file.
-
- Args:
- f: The HTML file.
-
- Returns:
- Lines in the HTML file that are from script tags.
- """
- extractor = ScriptExtractor()
-
- # The HTML parser chokes on text like Array.<!string>, so we patch
- # that bug by replacing the < with &lt; - escaping all text inside script
- # tags would be better but it's a bit of a catch 22.
- contents = f.read()
- contents = re.sub(r'<([^\s\w/])',
- lambda x: '&lt;%s' % x.group(1),
- contents)
-
- extractor.feed(contents)
- extractor.close()
- return extractor.GetScriptLines()
-
-
-def StripTags(str):
- """Returns the string with HTML tags stripped.
-
- Args:
- str: An html string.
-
- Returns:
- The html string with all tags stripped. If there was a parse error, returns
- the text successfully parsed so far.
- """
- # Brute force approach to stripping as much HTML as possible. If there is a
- # parsing error, don't strip text before parse error position, and continue
- # trying from there.
- final_text = ''
- finished = False
- while not finished:
- try:
- strip = _HtmlStripper()
- strip.feed(str)
- strip.close()
- str = strip.get_output()
- final_text += str
- finished = True
- except HTMLParser.HTMLParseError, e:
- final_text += str[:e.offset]
- str = str[e.offset + 1:]
-
- return final_text
-
-
-class _HtmlStripper(HTMLParser.HTMLParser):
- """Simple class to strip tags from HTML.
-
- Does so by doing nothing when encountering tags, and appending character data
- to a buffer when that is encountered.
- """
- def __init__(self):
- self.reset()
- self.__output = cStringIO.StringIO()
-
- def handle_data(self, d):
- self.__output.write(d)
-
- def get_output(self):
- return self.__output.getvalue()
diff --git a/tools/closure_linter/build/lib/closure_linter/common/lintrunner.py b/tools/closure_linter/build/lib/closure_linter/common/lintrunner.py
deleted file mode 100644
index 07842c7bfe..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/common/lintrunner.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Interface for a lint running wrapper."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-
-class LintRunner(object):
- """Interface for a lint running wrapper."""
-
- def __init__(self):
- if self.__class__ == LintRunner:
- raise NotImplementedError('class LintRunner is abstract')
-
- def Run(self, filenames, error_handler):
- """Run a linter on the given filenames.
-
- Args:
- filenames: The filenames to check
- error_handler: An ErrorHandler object
-
- Returns:
- The error handler, which may have been used to collect error info.
- """
diff --git a/tools/closure_linter/build/lib/closure_linter/common/matcher.py b/tools/closure_linter/build/lib/closure_linter/common/matcher.py
deleted file mode 100644
index 9b4402c671..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/common/matcher.py
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Regular expression based JavaScript matcher classes."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-from closure_linter.common import position
-from closure_linter.common import tokens
-
-# Shorthand
-Token = tokens.Token
-Position = position.Position
-
-
-class Matcher(object):
- """A token matcher.
-
- Specifies a pattern to match, the type of token it represents, what mode the
- token changes to, and what mode the token applies to.
-
- Modes allow more advanced grammars to be incorporated, and are also necessary
- to tokenize line by line. We can have different patterns apply to different
- modes - i.e. looking for documentation while in comment mode.
-
- Attributes:
- regex: The regular expression representing this matcher.
- type: The type of token indicated by a successful match.
- result_mode: The mode to move to after a successful match.
- """
-
- def __init__(self, regex, token_type, result_mode=None, line_start=False):
- """Create a new matcher template.
-
- Args:
- regex: The regular expression to match.
- token_type: The type of token a successful match indicates.
- result_mode: What mode to change to after a successful match. Defaults to
- None, which means to not change the current mode.
- line_start: Whether this matcher should only match string at the start
- of a line.
- """
- self.regex = regex
- self.type = token_type
- self.result_mode = result_mode
- self.line_start = line_start
diff --git a/tools/closure_linter/build/lib/closure_linter/common/position.py b/tools/closure_linter/build/lib/closure_linter/common/position.py
deleted file mode 100644
index cebf17ef36..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/common/position.py
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Classes to represent positions within strings."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-
-class Position(object):
- """Object representing a segment of a string.
-
- Attributes:
- start: The index in to the string where the segment starts.
- length: The length of the string segment.
- """
-
- def __init__(self, start, length):
- """Initialize the position object.
-
- Args:
- start: The start index.
- length: The number of characters to include.
- """
- self.start = start
- self.length = length
-
- def Get(self, string):
- """Returns this range of the given string.
-
- Args:
- string: The string to slice.
-
- Returns:
- The string within the range specified by this object.
- """
- return string[self.start:self.start + self.length]
-
- def Set(self, target, source):
- """Sets this range within the target string to the source string.
-
- Args:
- target: The target string.
- source: The source string.
-
- Returns:
- The resulting string
- """
- return target[:self.start] + source + target[self.start + self.length:]
-
- def AtEnd(string):
- """Create a Position representing the end of the given string.
-
- Args:
- string: The string to represent the end of.
-
- Returns:
- The created Position object.
- """
- return Position(len(string), 0)
- AtEnd = staticmethod(AtEnd)
-
- def IsAtEnd(self, string):
- """Returns whether this position is at the end of the given string.
-
- Args:
- string: The string to test for the end of.
-
- Returns:
- Whether this position is at the end of the given string.
- """
- return self.start == len(string) and self.length == 0
-
- def AtBeginning():
- """Create a Position representing the beginning of any string.
-
- Returns:
- The created Position object.
- """
- return Position(0, 0)
- AtBeginning = staticmethod(AtBeginning)
-
- def IsAtBeginning(self):
- """Returns whether this position is at the beginning of any string.
-
- Returns:
- Whether this position is at the beginning of any string.
- """
- return self.start == 0 and self.length == 0
-
- def All(string):
- """Create a Position representing the entire string.
-
- Args:
- string: The string to represent the entirety of.
-
- Returns:
- The created Position object.
- """
- return Position(0, len(string))
- All = staticmethod(All)
-
- def Index(index):
- """Returns a Position object for the specified index.
-
- Args:
- index: The index to select, inclusively.
-
- Returns:
- The created Position object.
- """
- return Position(index, 1)
- Index = staticmethod(Index)
diff --git a/tools/closure_linter/build/lib/closure_linter/common/simplefileflags.py b/tools/closure_linter/build/lib/closure_linter/common/simplefileflags.py
deleted file mode 100644
index 3402bef3a1..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/common/simplefileflags.py
+++ /dev/null
@@ -1,190 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Determines the list of files to be checked from command line arguments."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-import glob
-import os
-import re
-
-import gflags as flags
-
-
-FLAGS = flags.FLAGS
-
-flags.DEFINE_multistring(
- 'recurse',
- None,
- 'Recurse in to the subdirectories of the given path',
- short_name='r')
-flags.DEFINE_list(
- 'exclude_directories',
- ('_demos'),
- 'Exclude the specified directories (only applicable along with -r or '
- '--presubmit)',
- short_name='e')
-flags.DEFINE_list(
- 'exclude_files',
- ('deps.js'),
- 'Exclude the specified files',
- short_name='x')
-
-
-def MatchesSuffixes(filename, suffixes):
- """Returns whether the given filename matches one of the given suffixes.
-
- Args:
- filename: Filename to check.
- suffixes: Sequence of suffixes to check.
-
- Returns:
- Whether the given filename matches one of the given suffixes.
- """
- suffix = filename[filename.rfind('.'):]
- return suffix in suffixes
-
-
-def _GetUserSpecifiedFiles(argv, suffixes):
- """Returns files to be linted, specified directly on the command line.
-
- Can handle the '*' wildcard in filenames, but no other wildcards.
-
- Args:
- argv: Sequence of command line arguments. The second and following arguments
- are assumed to be files that should be linted.
- suffixes: Expected suffixes for the file type being checked.
-
- Returns:
- A sequence of files to be linted.
- """
- files = argv[1:] or []
- all_files = []
- lint_files = []
-
- # Perform any necessary globs.
- for f in files:
- if f.find('*') != -1:
- for result in glob.glob(f):
- all_files.append(result)
- else:
- all_files.append(f)
-
- for f in all_files:
- if MatchesSuffixes(f, suffixes):
- lint_files.append(f)
- return lint_files
-
-
-def _GetRecursiveFiles(suffixes):
- """Returns files to be checked specified by the --recurse flag.
-
- Args:
- suffixes: Expected suffixes for the file type being checked.
-
- Returns:
- A list of files to be checked.
- """
- lint_files = []
- # Perform any request recursion
- if FLAGS.recurse:
- for start in FLAGS.recurse:
- for root, subdirs, files in os.walk(start):
- for f in files:
- if MatchesSuffixes(f, suffixes):
- lint_files.append(os.path.join(root, f))
- return lint_files
-
-
-def GetAllSpecifiedFiles(argv, suffixes):
- """Returns all files specified by the user on the commandline.
-
- Args:
- argv: Sequence of command line arguments. The second and following arguments
- are assumed to be files that should be linted.
- suffixes: Expected suffixes for the file type
-
- Returns:
- A list of all files specified directly or indirectly (via flags) on the
- command line by the user.
- """
- files = _GetUserSpecifiedFiles(argv, suffixes)
-
- if FLAGS.recurse:
- files += _GetRecursiveFiles(suffixes)
-
- return FilterFiles(files)
-
-
-def FilterFiles(files):
- """Filters the list of files to be linted be removing any excluded files.
-
- Filters out files excluded using --exclude_files and --exclude_directories.
-
- Args:
- files: Sequence of files that needs filtering.
-
- Returns:
- Filtered list of files to be linted.
- """
- num_files = len(files)
-
- ignore_dirs_regexs = []
- for ignore in FLAGS.exclude_directories:
- ignore_dirs_regexs.append(re.compile(r'(^|[\\/])%s[\\/]' % ignore))
-
- result_files = []
- for f in files:
- add_file = True
- for exclude in FLAGS.exclude_files:
- if f.endswith('/' + exclude) or f == exclude:
- add_file = False
- break
- for ignore in ignore_dirs_regexs:
- if ignore.search(f):
- # Break out of ignore loop so we don't add to
- # filtered files.
- add_file = False
- break
- if add_file:
- # Convert everything to absolute paths so we can easily remove duplicates
- # using a set.
- result_files.append(os.path.abspath(f))
-
- skipped = num_files - len(result_files)
- if skipped:
- print 'Skipping %d file(s).' % skipped
-
- return set(result_files)
-
-
-def GetFileList(argv, file_type, suffixes):
- """Parse the flags and return the list of files to check.
-
- Args:
- argv: Sequence of command line arguments.
- suffixes: Sequence of acceptable suffixes for the file type.
-
- Returns:
- The list of files to check.
- """
- return sorted(GetAllSpecifiedFiles(argv, suffixes))
-
-
-def IsEmptyArgumentList(argv):
- return not (len(argv[1:]) or FLAGS.recurse)
diff --git a/tools/closure_linter/build/lib/closure_linter/common/tokenizer.py b/tools/closure_linter/build/lib/closure_linter/common/tokenizer.py
deleted file mode 100644
index 9420ea3267..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/common/tokenizer.py
+++ /dev/null
@@ -1,185 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Regular expression based lexer."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-from closure_linter.common import tokens
-
-# Shorthand
-Type = tokens.TokenType
-
-
-class Tokenizer(object):
- """General purpose tokenizer.
-
- Attributes:
- mode: The latest mode of the tokenizer. This allows patterns to distinguish
- if they are mid-comment, mid-parameter list, etc.
- matchers: Dictionary of modes to sequences of matchers that define the
- patterns to check at any given time.
- default_types: Dictionary of modes to types, defining what type to give
- non-matched text when in the given mode. Defaults to Type.NORMAL.
- """
-
- def __init__(self, starting_mode, matchers, default_types):
- """Initialize the tokenizer.
-
- Args:
- starting_mode: Mode to start in.
- matchers: Dictionary of modes to sequences of matchers that defines the
- patterns to check at any given time.
- default_types: Dictionary of modes to types, defining what type to give
- non-matched text when in the given mode. Defaults to Type.NORMAL.
- """
- self.__starting_mode = starting_mode
- self.matchers = matchers
- self.default_types = default_types
-
- def TokenizeFile(self, file):
- """Tokenizes the given file.
-
- Args:
- file: An iterable that yields one line of the file at a time.
-
- Returns:
- The first token in the file
- """
- # The current mode.
- self.mode = self.__starting_mode
- # The first token in the stream.
- self.__first_token = None
- # The last token added to the token stream.
- self.__last_token = None
- # The current line number.
- self.__line_number = 0
-
- for line in file:
- self.__line_number += 1
- self.__TokenizeLine(line)
-
- return self.__first_token
-
- def _CreateToken(self, string, token_type, line, line_number, values=None):
- """Creates a new Token object (or subclass).
-
- Args:
- string: The string of input the token represents.
- token_type: The type of token.
- line: The text of the line this token is in.
- line_number: The line number of the token.
- values: A dict of named values within the token. For instance, a
- function declaration may have a value called 'name' which captures the
- name of the function.
-
- Returns:
- The newly created Token object.
- """
- return tokens.Token(string, token_type, line, line_number, values,
- line_number)
-
- def __TokenizeLine(self, line):
- """Tokenizes the given line.
-
- Args:
- line: The contents of the line.
- """
- string = line.rstrip('\n\r\f')
- line_number = self.__line_number
- self.__start_index = 0
-
- if not string:
- self.__AddToken(self._CreateToken('', Type.BLANK_LINE, line, line_number))
- return
-
- normal_token = ''
- index = 0
- while index < len(string):
- for matcher in self.matchers[self.mode]:
- if matcher.line_start and index > 0:
- continue
-
- match = matcher.regex.match(string, index)
-
- if match:
- if normal_token:
- self.__AddToken(
- self.__CreateNormalToken(self.mode, normal_token, line,
- line_number))
- normal_token = ''
-
- # Add the match.
- self.__AddToken(self._CreateToken(match.group(), matcher.type, line,
- line_number, match.groupdict()))
-
- # Change the mode to the correct one for after this match.
- self.mode = matcher.result_mode or self.mode
-
- # Shorten the string to be matched.
- index = match.end()
-
- break
-
- else:
- # If the for loop finishes naturally (i.e. no matches) we just add the
- # first character to the string of consecutive non match characters.
- # These will constitute a NORMAL token.
- if string:
- normal_token += string[index:index + 1]
- index += 1
-
- if normal_token:
- self.__AddToken(
- self.__CreateNormalToken(self.mode, normal_token, line, line_number))
-
- def __CreateNormalToken(self, mode, string, line, line_number):
- """Creates a normal token.
-
- Args:
- mode: The current mode.
- string: The string to tokenize.
- line: The line of text.
- line_number: The line number within the file.
-
- Returns:
- A Token object, of the default type for the current mode.
- """
- type = Type.NORMAL
- if mode in self.default_types:
- type = self.default_types[mode]
- return self._CreateToken(string, type, line, line_number)
-
- def __AddToken(self, token):
- """Add the given token to the token stream.
-
- Args:
- token: The token to add.
- """
- # Store the first token, or point the previous token to this one.
- if not self.__first_token:
- self.__first_token = token
- else:
- self.__last_token.next = token
-
- # Establish the doubly linked list
- token.previous = self.__last_token
- self.__last_token = token
-
- # Compute the character indices
- token.start_index = self.__start_index
- self.__start_index += token.length
diff --git a/tools/closure_linter/build/lib/closure_linter/common/tokens.py b/tools/closure_linter/build/lib/closure_linter/common/tokens.py
deleted file mode 100644
index 4703998752..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/common/tokens.py
+++ /dev/null
@@ -1,145 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Classes to represent tokens and positions within them."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-
-class TokenType(object):
- """Token types common to all languages."""
- NORMAL = 'normal'
- WHITESPACE = 'whitespace'
- BLANK_LINE = 'blank line'
-
-
-class Token(object):
- """Token class for intelligent text splitting.
-
- The token class represents a string of characters and an identifying type.
-
- Attributes:
- type: The type of token.
- string: The characters the token comprises.
- length: The length of the token.
- line: The text of the line the token is found in.
- line_number: The number of the line the token is found in.
- values: Dictionary of values returned from the tokens regex match.
- previous: The token before this one.
- next: The token after this one.
- start_index: The character index in the line where this token starts.
- attached_object: Object containing more information about this token.
- metadata: Object containing metadata about this token. Must be added by
- a separate metadata pass.
- """
-
- def __init__(self, string, token_type, line, line_number, values=None,
- orig_line_number=None):
- """Creates a new Token object.
-
- Args:
- string: The string of input the token contains.
- token_type: The type of token.
- line: The text of the line this token is in.
- line_number: The line number of the token.
- values: A dict of named values within the token. For instance, a
- function declaration may have a value called 'name' which captures the
- name of the function.
- orig_line_number: The line number of the original file this token comes
- from. This should be only set during the tokenization process. For newly
- created error fix tokens after that, it should be None.
- """
- self.type = token_type
- self.string = string
- self.length = len(string)
- self.line = line
- self.line_number = line_number
- self.orig_line_number = orig_line_number
- self.values = values
- self.is_deleted = False
-
- # These parts can only be computed when the file is fully tokenized
- self.previous = None
- self.next = None
- self.start_index = None
-
- # This part is set in statetracker.py
- # TODO(robbyw): Wrap this in to metadata
- self.attached_object = None
-
- # This part is set in *metadatapass.py
- self.metadata = None
-
- def IsFirstInLine(self):
- """Tests if this token is the first token in its line.
-
- Returns:
- Whether the token is the first token in its line.
- """
- return not self.previous or self.previous.line_number != self.line_number
-
- def IsLastInLine(self):
- """Tests if this token is the last token in its line.
-
- Returns:
- Whether the token is the last token in its line.
- """
- return not self.next or self.next.line_number != self.line_number
-
- def IsType(self, token_type):
- """Tests if this token is of the given type.
-
- Args:
- token_type: The type to test for.
-
- Returns:
- True if the type of this token matches the type passed in.
- """
- return self.type == token_type
-
- def IsAnyType(self, *token_types):
- """Tests if this token is any of the given types.
-
- Args:
- token_types: The types to check. Also accepts a single array.
-
- Returns:
- True if the type of this token is any of the types passed in.
- """
- if not isinstance(token_types[0], basestring):
- return self.type in token_types[0]
- else:
- return self.type in token_types
-
- def __repr__(self):
- return '<Token: %s, "%s", %r, %d, %r>' % (self.type, self.string,
- self.values, self.line_number,
- self.metadata)
-
- def __iter__(self):
- """Returns a token iterator."""
- node = self
- while node:
- yield node
- node = node.next
-
- def __reversed__(self):
- """Returns a reverse-direction token iterator."""
- node = self
- while node:
- yield node
- node = node.previous
diff --git a/tools/closure_linter/build/lib/closure_linter/common/tokens_test.py b/tools/closure_linter/build/lib/closure_linter/common/tokens_test.py
deleted file mode 100644
index 01ec89d01b..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/common/tokens_test.py
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-__author__ = 'nnaze@google.com (Nathan Naze)'
-
-import unittest as googletest
-from closure_linter.common import tokens
-
-
-def _CreateDummyToken():
- return tokens.Token('foo', None, 1, 1)
-
-
-def _CreateDummyTokens(count):
- dummy_tokens = []
- for _ in xrange(count):
- dummy_tokens.append(_CreateDummyToken())
- return dummy_tokens
-
-
-def _SetTokensAsNeighbors(neighbor_tokens):
- for i in xrange(len(neighbor_tokens)):
- prev_index = i - 1
- next_index = i + 1
-
- if prev_index >= 0:
- neighbor_tokens[i].previous = neighbor_tokens[prev_index]
-
- if next_index < len(neighbor_tokens):
- neighbor_tokens[i].next = neighbor_tokens[next_index]
-
-
-class TokensTest(googletest.TestCase):
-
- def testIsFirstInLine(self):
-
- # First token in file (has no previous).
- self.assertTrue(_CreateDummyToken().IsFirstInLine())
-
- a, b = _CreateDummyTokens(2)
- _SetTokensAsNeighbors([a, b])
-
- # Tokens on same line
- a.line_number = 30
- b.line_number = 30
-
- self.assertFalse(b.IsFirstInLine())
-
- # Tokens on different lines
- b.line_number = 31
- self.assertTrue(b.IsFirstInLine())
-
- def testIsLastInLine(self):
- # Last token in file (has no next).
- self.assertTrue(_CreateDummyToken().IsLastInLine())
-
- a, b = _CreateDummyTokens(2)
- _SetTokensAsNeighbors([a, b])
-
- # Tokens on same line
- a.line_number = 30
- b.line_number = 30
- self.assertFalse(a.IsLastInLine())
-
- b.line_number = 31
- self.assertTrue(a.IsLastInLine())
-
- def testIsType(self):
- a = tokens.Token('foo', 'fakeType1', 1, 1)
- self.assertTrue(a.IsType('fakeType1'))
- self.assertFalse(a.IsType('fakeType2'))
-
- def testIsAnyType(self):
- a = tokens.Token('foo', 'fakeType1', 1, 1)
- self.assertTrue(a.IsAnyType(['fakeType1', 'fakeType2']))
- self.assertFalse(a.IsAnyType(['fakeType3', 'fakeType4']))
-
- def testRepr(self):
- a = tokens.Token('foo', 'fakeType1', 1, 1)
- self.assertEquals('<Token: fakeType1, "foo", None, 1, None>', str(a))
-
- def testIter(self):
- dummy_tokens = _CreateDummyTokens(5)
- _SetTokensAsNeighbors(dummy_tokens)
- a, b, c, d, e = dummy_tokens
-
- i = iter(a)
- self.assertListEqual([a, b, c, d, e], list(i))
-
- def testReverseIter(self):
- dummy_tokens = _CreateDummyTokens(5)
- _SetTokensAsNeighbors(dummy_tokens)
- a, b, c, d, e = dummy_tokens
-
- ri = reversed(e)
- self.assertListEqual([e, d, c, b, a], list(ri))
-
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/ecmalintrules.py b/tools/closure_linter/build/lib/closure_linter/ecmalintrules.py
deleted file mode 100644
index c07dffc86e..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/ecmalintrules.py
+++ /dev/null
@@ -1,844 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Core methods for checking EcmaScript files for common style guide violations.
-"""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)',
- 'jacobr@google.com (Jacob Richman)')
-
-import re
-
-import gflags as flags
-
-from closure_linter import checkerbase
-from closure_linter import ecmametadatapass
-from closure_linter import error_check
-from closure_linter import errorrules
-from closure_linter import errors
-from closure_linter import indentation
-from closure_linter import javascripttokenizer
-from closure_linter import javascripttokens
-from closure_linter import statetracker
-from closure_linter import tokenutil
-from closure_linter.common import error
-from closure_linter.common import position
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_list('custom_jsdoc_tags', '', 'Extra jsdoc tags to allow')
-# TODO(user): When flipping this to True, remove logic from unit tests
-# that overrides this flag.
-flags.DEFINE_boolean('dot_on_next_line', False, 'Require dots to be'
- 'placed on the next line for wrapped expressions')
-
-# TODO(robbyw): Check for extra parens on return statements
-# TODO(robbyw): Check for 0px in strings
-# TODO(robbyw): Ensure inline jsDoc is in {}
-# TODO(robbyw): Check for valid JS types in parameter docs
-
-# Shorthand
-Context = ecmametadatapass.EcmaContext
-Error = error.Error
-Modes = javascripttokenizer.JavaScriptModes
-Position = position.Position
-Rule = error_check.Rule
-Type = javascripttokens.JavaScriptTokenType
-
-
-class EcmaScriptLintRules(checkerbase.LintRulesBase):
- """EmcaScript lint style checking rules.
-
- Can be used to find common style errors in JavaScript, ActionScript and other
- Ecma like scripting languages. Style checkers for Ecma scripting languages
- should inherit from this style checker.
- Please do not add any state to EcmaScriptLintRules or to any subclasses.
-
- All state should be added to the StateTracker subclass used for a particular
- language.
- """
-
- # It will be initialized in constructor so the flags are initialized.
- max_line_length = -1
-
- # Static constants.
- MISSING_PARAMETER_SPACE = re.compile(r',\S')
-
- EXTRA_SPACE = re.compile(r'(\(\s|\s\))')
-
- ENDS_WITH_SPACE = re.compile(r'\s$')
-
- ILLEGAL_TAB = re.compile(r'\t')
-
- # Regex used to split up complex types to check for invalid use of ? and |.
- TYPE_SPLIT = re.compile(r'[,<>()]')
-
- # Regex for form of author lines after the @author tag.
- AUTHOR_SPEC = re.compile(r'(\s*)[^\s]+@[^(\s]+(\s*)\(.+\)')
-
- # Acceptable tokens to remove for line too long testing.
- LONG_LINE_IGNORE = frozenset(
- ['*', '//', '@see'] +
- ['@%s' % tag for tag in statetracker.DocFlag.HAS_TYPE])
-
- JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED = frozenset([
- '@fileoverview', '@param', '@return', '@returns'])
-
- def __init__(self):
- """Initialize this lint rule object."""
- checkerbase.LintRulesBase.__init__(self)
- if EcmaScriptLintRules.max_line_length == -1:
- EcmaScriptLintRules.max_line_length = errorrules.GetMaxLineLength()
-
- def Initialize(self, checker, limited_doc_checks, is_html):
- """Initialize this lint rule object before parsing a new file."""
- checkerbase.LintRulesBase.Initialize(self, checker, limited_doc_checks,
- is_html)
- self._indentation = indentation.IndentationRules()
-
- def HandleMissingParameterDoc(self, token, param_name):
- """Handle errors associated with a parameter missing a @param tag."""
- raise TypeError('Abstract method HandleMissingParameterDoc not implemented')
-
- def _CheckLineLength(self, last_token, state):
- """Checks whether the line is too long.
-
- Args:
- last_token: The last token in the line.
- state: parser_state object that indicates the current state in the page
- """
- # Start from the last token so that we have the flag object attached to
- # and DOC_FLAG tokens.
- line_number = last_token.line_number
- token = last_token
-
- # Build a representation of the string where spaces indicate potential
- # line-break locations.
- line = []
- while token and token.line_number == line_number:
- if state.IsTypeToken(token):
- line.insert(0, 'x' * len(token.string))
- elif token.type in (Type.IDENTIFIER, Type.OPERATOR):
- # Dots are acceptable places to wrap (may be tokenized as identifiers).
- line.insert(0, token.string.replace('.', ' '))
- else:
- line.insert(0, token.string)
- token = token.previous
-
- line = ''.join(line)
- line = line.rstrip('\n\r\f')
- try:
- length = len(unicode(line, 'utf-8'))
- except (LookupError, UnicodeDecodeError):
- # Unknown encoding. The line length may be wrong, as was originally the
- # case for utf-8 (see bug 1735846). For now just accept the default
- # length, but as we find problems we can either add test for other
- # possible encodings or return without an error to protect against
- # false positives at the cost of more false negatives.
- length = len(line)
-
- if length > EcmaScriptLintRules.max_line_length:
-
- # If the line matches one of the exceptions, then it's ok.
- for long_line_regexp in self.GetLongLineExceptions():
- if long_line_regexp.match(last_token.line):
- return
-
- # If the line consists of only one "word", or multiple words but all
- # except one are ignoreable, then it's ok.
- parts = set(line.split())
-
- # We allow two "words" (type and name) when the line contains @param
- max_parts = 1
- if '@param' in parts:
- max_parts = 2
-
- # Custom tags like @requires may have url like descriptions, so ignore
- # the tag, similar to how we handle @see.
- custom_tags = set(['@%s' % f for f in FLAGS.custom_jsdoc_tags])
- if (len(parts.difference(self.LONG_LINE_IGNORE | custom_tags))
- > max_parts):
- self._HandleError(
- errors.LINE_TOO_LONG,
- 'Line too long (%d characters).' % len(line), last_token)
-
- def _CheckJsDocType(self, token, js_type):
- """Checks the given type for style errors.
-
- Args:
- token: The DOC_FLAG token for the flag whose type to check.
- js_type: The flag's typeannotation.TypeAnnotation instance.
- """
- if not js_type: return
-
- if js_type.type_group and len(js_type.sub_types) == 2:
- identifiers = [t.identifier for t in js_type.sub_types]
- if 'null' in identifiers:
- # Don't warn if the identifier is a template type (e.g. {TYPE|null}.
- if not identifiers[0].isupper() and not identifiers[1].isupper():
- self._HandleError(
- errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL,
- 'Prefer "?Type" to "Type|null": "%s"' % js_type, token)
-
- # TODO(user): We should report an error for wrong usage of '?' and '|'
- # e.g. {?number|string|null} etc.
-
- for sub_type in js_type.IterTypes():
- self._CheckJsDocType(token, sub_type)
-
- def _CheckForMissingSpaceBeforeToken(self, token):
- """Checks for a missing space at the beginning of a token.
-
- Reports a MISSING_SPACE error if the token does not begin with a space or
- the previous token doesn't end with a space and the previous token is on the
- same line as the token.
-
- Args:
- token: The token being checked
- """
- # TODO(user): Check if too many spaces?
- if (len(token.string) == len(token.string.lstrip()) and
- token.previous and token.line_number == token.previous.line_number and
- len(token.previous.string) - len(token.previous.string.rstrip()) == 0):
- self._HandleError(
- errors.MISSING_SPACE,
- 'Missing space before "%s"' % token.string,
- token,
- position=Position.AtBeginning())
-
- def _CheckOperator(self, token):
- """Checks an operator for spacing and line style.
-
- Args:
- token: The operator token.
- """
- last_code = token.metadata.last_code
-
- if not self._ExpectSpaceBeforeOperator(token):
- if (token.previous and token.previous.type == Type.WHITESPACE and
- last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER) and
- last_code.line_number == token.line_number):
- self._HandleError(
- errors.EXTRA_SPACE, 'Extra space before "%s"' % token.string,
- token.previous, position=Position.All(token.previous.string))
-
- elif (token.previous and
- not token.previous.IsComment() and
- not tokenutil.IsDot(token) and
- token.previous.type in Type.EXPRESSION_ENDER_TYPES):
- self._HandleError(errors.MISSING_SPACE,
- 'Missing space before "%s"' % token.string, token,
- position=Position.AtBeginning())
-
- # Check wrapping of operators.
- next_code = tokenutil.GetNextCodeToken(token)
-
- is_dot = tokenutil.IsDot(token)
- wrapped_before = last_code and last_code.line_number != token.line_number
- wrapped_after = next_code and next_code.line_number != token.line_number
-
- if FLAGS.dot_on_next_line and is_dot and wrapped_after:
- self._HandleError(
- errors.LINE_ENDS_WITH_DOT,
- '"." must go on the following line',
- token)
- if (not is_dot and wrapped_before and
- not token.metadata.IsUnaryOperator()):
- self._HandleError(
- errors.LINE_STARTS_WITH_OPERATOR,
- 'Binary operator must go on previous line "%s"' % token.string,
- token)
-
- def _IsLabel(self, token):
- # A ':' token is considered part of a label if it occurs in a case
- # statement, a plain label, or an object literal, i.e. is not part of a
- # ternary.
-
- return (token.string == ':' and
- token.metadata.context.type in (Context.LITERAL_ELEMENT,
- Context.CASE_BLOCK,
- Context.STATEMENT))
-
- def _ExpectSpaceBeforeOperator(self, token):
- """Returns whether a space should appear before the given operator token.
-
- Args:
- token: The operator token.
-
- Returns:
- Whether there should be a space before the token.
- """
- if token.string == ',' or token.metadata.IsUnaryPostOperator():
- return False
-
- if tokenutil.IsDot(token):
- return False
-
- # Colons should appear in labels, object literals, the case of a switch
- # statement, and ternary operator. Only want a space in the case of the
- # ternary operator.
- if self._IsLabel(token):
- return False
-
- if token.metadata.IsUnaryOperator() and token.IsFirstInLine():
- return False
-
- return True
-
- def CheckToken(self, token, state):
- """Checks a token, given the current parser_state, for warnings and errors.
-
- Args:
- token: The current token under consideration
- state: parser_state object that indicates the current state in the page
- """
- # Store some convenience variables
- first_in_line = token.IsFirstInLine()
- last_in_line = token.IsLastInLine()
- last_non_space_token = state.GetLastNonSpaceToken()
-
- token_type = token.type
-
- # Process the line change.
- if not self._is_html and error_check.ShouldCheck(Rule.INDENTATION):
- # TODO(robbyw): Support checking indentation in HTML files.
- indentation_errors = self._indentation.CheckToken(token, state)
- for indentation_error in indentation_errors:
- self._HandleError(*indentation_error)
-
- if last_in_line:
- self._CheckLineLength(token, state)
-
- if token_type == Type.PARAMETERS:
- # Find missing spaces in parameter lists.
- if self.MISSING_PARAMETER_SPACE.search(token.string):
- fix_data = ', '.join([s.strip() for s in token.string.split(',')])
- self._HandleError(errors.MISSING_SPACE, 'Missing space after ","',
- token, position=None, fix_data=fix_data.strip())
-
- # Find extra spaces at the beginning of parameter lists. Make sure
- # we aren't at the beginning of a continuing multi-line list.
- if not first_in_line:
- space_count = len(token.string) - len(token.string.lstrip())
- if space_count:
- self._HandleError(errors.EXTRA_SPACE, 'Extra space after "("',
- token, position=Position(0, space_count))
-
- elif (token_type == Type.START_BLOCK and
- token.metadata.context.type == Context.BLOCK):
- self._CheckForMissingSpaceBeforeToken(token)
-
- elif token_type == Type.END_BLOCK:
- last_code = token.metadata.last_code
- if state.InFunction() and state.IsFunctionClose():
- if state.InTopLevelFunction():
- # A semicolons should not be included at the end of a function
- # declaration.
- if not state.InAssignedFunction():
- if not last_in_line and token.next.type == Type.SEMICOLON:
- self._HandleError(
- errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
- 'Illegal semicolon after function declaration',
- token.next, position=Position.All(token.next.string))
-
- # A semicolon should be included at the end of a function expression
- # that is not immediately called or used by a dot operator.
- if (state.InAssignedFunction() and token.next
- and token.next.type != Type.SEMICOLON):
- next_token = tokenutil.GetNextCodeToken(token)
- is_immediately_used = (next_token.type == Type.START_PAREN or
- tokenutil.IsDot(next_token))
- if not is_immediately_used:
- self._HandleError(
- errors.MISSING_SEMICOLON_AFTER_FUNCTION,
- 'Missing semicolon after function assigned to a variable',
- token, position=Position.AtEnd(token.string))
-
- if state.InInterfaceMethod() and last_code.type != Type.START_BLOCK:
- self._HandleError(errors.INTERFACE_METHOD_CANNOT_HAVE_CODE,
- 'Interface methods cannot contain code', last_code)
-
- elif (state.IsBlockClose() and
- token.next and token.next.type == Type.SEMICOLON):
- if (last_code.metadata.context.parent.type != Context.OBJECT_LITERAL
- and last_code.metadata.context.type != Context.OBJECT_LITERAL):
- self._HandleError(
- errors.REDUNDANT_SEMICOLON,
- 'No semicolon is required to end a code block',
- token.next, position=Position.All(token.next.string))
-
- elif token_type == Type.SEMICOLON:
- if token.previous and token.previous.type == Type.WHITESPACE:
- self._HandleError(
- errors.EXTRA_SPACE, 'Extra space before ";"',
- token.previous, position=Position.All(token.previous.string))
-
- if token.next and token.next.line_number == token.line_number:
- if token.metadata.context.type != Context.FOR_GROUP_BLOCK:
- # TODO(robbyw): Error about no multi-statement lines.
- pass
-
- elif token.next.type not in (
- Type.WHITESPACE, Type.SEMICOLON, Type.END_PAREN):
- self._HandleError(
- errors.MISSING_SPACE,
- 'Missing space after ";" in for statement',
- token.next,
- position=Position.AtBeginning())
-
- last_code = token.metadata.last_code
- if last_code and last_code.type == Type.SEMICOLON:
- # Allow a single double semi colon in for loops for cases like:
- # for (;;) { }.
- # NOTE(user): This is not a perfect check, and will not throw an error
- # for cases like: for (var i = 0;; i < n; i++) {}, but then your code
- # probably won't work either.
- for_token = tokenutil.CustomSearch(
- last_code,
- lambda token: token.type == Type.KEYWORD and token.string == 'for',
- end_func=lambda token: token.type == Type.SEMICOLON,
- distance=None,
- reverse=True)
-
- if not for_token:
- self._HandleError(errors.REDUNDANT_SEMICOLON, 'Redundant semicolon',
- token, position=Position.All(token.string))
-
- elif token_type == Type.START_PAREN:
- # Ensure that opening parentheses have a space before any keyword
- # that is not being invoked like a member function.
- if (token.previous and token.previous.type == Type.KEYWORD and
- (not token.previous.metadata or
- not token.previous.metadata.last_code or
- not token.previous.metadata.last_code.string or
- token.previous.metadata.last_code.string[-1:] != '.')):
- self._HandleError(errors.MISSING_SPACE, 'Missing space before "("',
- token, position=Position.AtBeginning())
- elif token.previous and token.previous.type == Type.WHITESPACE:
- before_space = token.previous.previous
- # Ensure that there is no extra space before a function invocation,
- # even if the function being invoked happens to be a keyword.
- if (before_space and before_space.line_number == token.line_number and
- before_space.type == Type.IDENTIFIER or
- (before_space.type == Type.KEYWORD and before_space.metadata and
- before_space.metadata.last_code and
- before_space.metadata.last_code.string and
- before_space.metadata.last_code.string[-1:] == '.')):
- self._HandleError(
- errors.EXTRA_SPACE, 'Extra space before "("',
- token.previous, position=Position.All(token.previous.string))
-
- elif token_type == Type.START_BRACKET:
- self._HandleStartBracket(token, last_non_space_token)
- elif token_type in (Type.END_PAREN, Type.END_BRACKET):
- # Ensure there is no space before closing parentheses, except when
- # it's in a for statement with an omitted section, or when it's at the
- # beginning of a line.
- if (token.previous and token.previous.type == Type.WHITESPACE and
- not token.previous.IsFirstInLine() and
- not (last_non_space_token and last_non_space_token.line_number ==
- token.line_number and
- last_non_space_token.type == Type.SEMICOLON)):
- self._HandleError(
- errors.EXTRA_SPACE, 'Extra space before "%s"' %
- token.string, token.previous,
- position=Position.All(token.previous.string))
-
- elif token_type == Type.WHITESPACE:
- if self.ILLEGAL_TAB.search(token.string):
- if token.IsFirstInLine():
- if token.next:
- self._HandleError(
- errors.ILLEGAL_TAB,
- 'Illegal tab in whitespace before "%s"' % token.next.string,
- token, position=Position.All(token.string))
- else:
- self._HandleError(
- errors.ILLEGAL_TAB,
- 'Illegal tab in whitespace',
- token, position=Position.All(token.string))
- else:
- self._HandleError(
- errors.ILLEGAL_TAB,
- 'Illegal tab in whitespace after "%s"' % token.previous.string,
- token, position=Position.All(token.string))
-
- # Check whitespace length if it's not the first token of the line and
- # if it's not immediately before a comment.
- if last_in_line:
- # Check for extra whitespace at the end of a line.
- self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',
- token, position=Position.All(token.string))
- elif not first_in_line and not token.next.IsComment():
- if token.length > 1:
- self._HandleError(
- errors.EXTRA_SPACE, 'Extra space after "%s"' %
- token.previous.string, token,
- position=Position(1, len(token.string) - 1))
-
- elif token_type == Type.OPERATOR:
- self._CheckOperator(token)
- elif token_type == Type.DOC_FLAG:
- flag = token.attached_object
-
- if flag.flag_type == 'bug':
- # TODO(robbyw): Check for exactly 1 space on the left.
- string = token.next.string.lstrip()
- string = string.split(' ', 1)[0]
-
- if not string.isdigit():
- self._HandleError(errors.NO_BUG_NUMBER_AFTER_BUG_TAG,
- '@bug should be followed by a bug number', token)
-
- elif flag.flag_type == 'suppress':
- if flag.type is None:
- # A syntactically invalid suppress tag will get tokenized as a normal
- # flag, indicating an error.
- self._HandleError(
- errors.INCORRECT_SUPPRESS_SYNTAX,
- 'Invalid suppress syntax: should be @suppress {errortype}. '
- 'Spaces matter.', token)
- else:
- for suppress_type in flag.jstype.IterIdentifiers():
- if suppress_type not in state.GetDocFlag().SUPPRESS_TYPES:
- self._HandleError(
- errors.INVALID_SUPPRESS_TYPE,
- 'Invalid suppression type: %s' % suppress_type, token)
-
- elif (error_check.ShouldCheck(Rule.WELL_FORMED_AUTHOR) and
- flag.flag_type == 'author'):
- # TODO(user): In non strict mode check the author tag for as much as
- # it exists, though the full form checked below isn't required.
- string = token.next.string
- result = self.AUTHOR_SPEC.match(string)
- if not result:
- self._HandleError(errors.INVALID_AUTHOR_TAG_DESCRIPTION,
- 'Author tag line should be of the form: '
- '@author foo@somewhere.com (Your Name)',
- token.next)
- else:
- # Check spacing between email address and name. Do this before
- # checking earlier spacing so positions are easier to calculate for
- # autofixing.
- num_spaces = len(result.group(2))
- if num_spaces < 1:
- self._HandleError(errors.MISSING_SPACE,
- 'Missing space after email address',
- token.next, position=Position(result.start(2), 0))
- elif num_spaces > 1:
- self._HandleError(
- errors.EXTRA_SPACE, 'Extra space after email address',
- token.next,
- position=Position(result.start(2) + 1, num_spaces - 1))
-
- # Check for extra spaces before email address. Can't be too few, if
- # not at least one we wouldn't match @author tag.
- num_spaces = len(result.group(1))
- if num_spaces > 1:
- self._HandleError(errors.EXTRA_SPACE,
- 'Extra space before email address',
- token.next, position=Position(1, num_spaces - 1))
-
- elif (flag.flag_type in state.GetDocFlag().HAS_DESCRIPTION and
- not self._limited_doc_checks):
- if flag.flag_type == 'param':
- if flag.name is None:
- self._HandleError(errors.MISSING_JSDOC_PARAM_NAME,
- 'Missing name in @param tag', token)
-
- if not flag.description or flag.description is None:
- flag_name = token.type
- if 'name' in token.values:
- flag_name = '@' + token.values['name']
-
- if flag_name not in self.JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED:
- self._HandleError(
- errors.MISSING_JSDOC_TAG_DESCRIPTION,
- 'Missing description in %s tag' % flag_name, token)
- else:
- self._CheckForMissingSpaceBeforeToken(flag.description_start_token)
-
- if flag.HasType():
- if flag.type_start_token is not None:
- self._CheckForMissingSpaceBeforeToken(
- token.attached_object.type_start_token)
-
- if flag.jstype and not flag.jstype.IsEmpty():
- self._CheckJsDocType(token, flag.jstype)
-
- if error_check.ShouldCheck(Rule.BRACES_AROUND_TYPE) and (
- flag.type_start_token.type != Type.DOC_START_BRACE or
- flag.type_end_token.type != Type.DOC_END_BRACE):
- self._HandleError(
- errors.MISSING_BRACES_AROUND_TYPE,
- 'Type must always be surrounded by curly braces.', token)
-
- if token_type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
- if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and
- token.values['name'] not in FLAGS.custom_jsdoc_tags):
- self._HandleError(
- errors.INVALID_JSDOC_TAG,
- 'Invalid JsDoc tag: %s' % token.values['name'], token)
-
- if (error_check.ShouldCheck(Rule.NO_BRACES_AROUND_INHERIT_DOC) and
- token.values['name'] == 'inheritDoc' and
- token_type == Type.DOC_INLINE_FLAG):
- self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,
- 'Unnecessary braces around @inheritDoc',
- token)
-
- elif token_type == Type.SIMPLE_LVALUE:
- identifier = token.values['identifier']
-
- if ((not state.InFunction() or state.InConstructor()) and
- state.InTopLevel() and not state.InObjectLiteralDescendant()):
- jsdoc = state.GetDocComment()
- if not state.HasDocComment(identifier):
- # Only test for documentation on identifiers with .s in them to
- # avoid checking things like simple variables. We don't require
- # documenting assignments to .prototype itself (bug 1880803).
- if (not state.InConstructor() and
- identifier.find('.') != -1 and not
- identifier.endswith('.prototype') and not
- self._limited_doc_checks):
- comment = state.GetLastComment()
- if not (comment and comment.lower().count('jsdoc inherited')):
- self._HandleError(
- errors.MISSING_MEMBER_DOCUMENTATION,
- "No docs found for member '%s'" % identifier,
- token)
- elif jsdoc and (not state.InConstructor() or
- identifier.startswith('this.')):
- # We are at the top level and the function/member is documented.
- if identifier.endswith('_') and not identifier.endswith('__'):
- # Can have a private class which inherits documentation from a
- # public superclass.
- #
- # @inheritDoc is deprecated in favor of using @override, and they
- if (jsdoc.HasFlag('override') and not jsdoc.HasFlag('constructor')
- and ('accessControls' not in jsdoc.suppressions)):
- self._HandleError(
- errors.INVALID_OVERRIDE_PRIVATE,
- '%s should not override a private member.' % identifier,
- jsdoc.GetFlag('override').flag_token)
- if (jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor')
- and ('accessControls' not in jsdoc.suppressions)):
- self._HandleError(
- errors.INVALID_INHERIT_DOC_PRIVATE,
- '%s should not inherit from a private member.' % identifier,
- jsdoc.GetFlag('inheritDoc').flag_token)
- if (not jsdoc.HasFlag('private') and
- ('underscore' not in jsdoc.suppressions) and not
- ((jsdoc.HasFlag('inheritDoc') or jsdoc.HasFlag('override')) and
- ('accessControls' in jsdoc.suppressions))):
- self._HandleError(
- errors.MISSING_PRIVATE,
- 'Member "%s" must have @private JsDoc.' %
- identifier, token)
- if jsdoc.HasFlag('private') and 'underscore' in jsdoc.suppressions:
- self._HandleError(
- errors.UNNECESSARY_SUPPRESS,
- '@suppress {underscore} is not necessary with @private',
- jsdoc.suppressions['underscore'])
- elif (jsdoc.HasFlag('private') and
- not self.InExplicitlyTypedLanguage()):
- # It is convention to hide public fields in some ECMA
- # implementations from documentation using the @private tag.
- self._HandleError(
- errors.EXTRA_PRIVATE,
- 'Member "%s" must not have @private JsDoc' %
- identifier, token)
-
- # These flags are only legal on localizable message definitions;
- # such variables always begin with the prefix MSG_.
- for f in ('desc', 'hidden', 'meaning'):
- if (jsdoc.HasFlag(f)
- and not identifier.startswith('MSG_')
- and identifier.find('.MSG_') == -1):
- self._HandleError(
- errors.INVALID_USE_OF_DESC_TAG,
- 'Member "%s" should not have @%s JsDoc' % (identifier, f),
- token)
-
- # Check for illegaly assigning live objects as prototype property values.
- index = identifier.find('.prototype.')
- # Ignore anything with additional .s after the prototype.
- if index != -1 and identifier.find('.', index + 11) == -1:
- equal_operator = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
- next_code = tokenutil.SearchExcept(equal_operator, Type.NON_CODE_TYPES)
- if next_code and (
- next_code.type in (Type.START_BRACKET, Type.START_BLOCK) or
- next_code.IsOperator('new')):
- self._HandleError(
- errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE,
- 'Member %s cannot have a non-primitive value' % identifier,
- token)
-
- elif token_type == Type.END_PARAMETERS:
- # Find extra space at the end of parameter lists. We check the token
- # prior to the current one when it is a closing paren.
- if (token.previous and token.previous.type == Type.PARAMETERS
- and self.ENDS_WITH_SPACE.search(token.previous.string)):
- self._HandleError(errors.EXTRA_SPACE, 'Extra space before ")"',
- token.previous)
-
- jsdoc = state.GetDocComment()
- if state.GetFunction().is_interface:
- if token.previous and token.previous.type == Type.PARAMETERS:
- self._HandleError(
- errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS,
- 'Interface constructor cannot have parameters',
- token.previous)
- elif (state.InTopLevel() and jsdoc and not jsdoc.HasFlag('see')
- and not jsdoc.InheritsDocumentation()
- and not state.InObjectLiteralDescendant() and not
- jsdoc.IsInvalidated()):
- distance, edit = jsdoc.CompareParameters(state.GetParams())
- if distance:
- params_iter = iter(state.GetParams())
- docs_iter = iter(jsdoc.ordered_params)
-
- for op in edit:
- if op == 'I':
- # Insertion.
- # Parsing doc comments is the same for all languages
- # but some languages care about parameters that don't have
- # doc comments and some languages don't care.
- # Languages that don't allow variables to by typed such as
- # JavaScript care but languages such as ActionScript or Java
- # that allow variables to be typed don't care.
- if not self._limited_doc_checks:
- self.HandleMissingParameterDoc(token, params_iter.next())
-
- elif op == 'D':
- # Deletion
- self._HandleError(errors.EXTRA_PARAMETER_DOCUMENTATION,
- 'Found docs for non-existing parameter: "%s"' %
- docs_iter.next(), token)
- elif op == 'S':
- # Substitution
- if not self._limited_doc_checks:
- self._HandleError(
- errors.WRONG_PARAMETER_DOCUMENTATION,
- 'Parameter mismatch: got "%s", expected "%s"' %
- (params_iter.next(), docs_iter.next()), token)
-
- else:
- # Equality - just advance the iterators
- params_iter.next()
- docs_iter.next()
-
- elif token_type == Type.STRING_TEXT:
- # If this is the first token after the start of the string, but it's at
- # the end of a line, we know we have a multi-line string.
- if token.previous.type in (
- Type.SINGLE_QUOTE_STRING_START,
- Type.DOUBLE_QUOTE_STRING_START) and last_in_line:
- self._HandleError(errors.MULTI_LINE_STRING,
- 'Multi-line strings are not allowed', token)
-
- # This check is orthogonal to the ones above, and repeats some types, so
- # it is a plain if and not an elif.
- if token.type in Type.COMMENT_TYPES:
- if self.ILLEGAL_TAB.search(token.string):
- self._HandleError(errors.ILLEGAL_TAB,
- 'Illegal tab in comment "%s"' % token.string, token)
-
- trimmed = token.string.rstrip()
- if last_in_line and token.string != trimmed:
- # Check for extra whitespace at the end of a line.
- self._HandleError(
- errors.EXTRA_SPACE, 'Extra space at end of line', token,
- position=Position(len(trimmed), len(token.string) - len(trimmed)))
-
- # This check is also orthogonal since it is based on metadata.
- if token.metadata.is_implied_semicolon:
- self._HandleError(errors.MISSING_SEMICOLON,
- 'Missing semicolon at end of line', token)
-
- def _HandleStartBracket(self, token, last_non_space_token):
- """Handles a token that is an open bracket.
-
- Args:
- token: The token to handle.
- last_non_space_token: The last token that was not a space.
- """
- if (not token.IsFirstInLine() and token.previous.type == Type.WHITESPACE and
- last_non_space_token and
- last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):
- self._HandleError(
- errors.EXTRA_SPACE, 'Extra space before "["',
- token.previous, position=Position.All(token.previous.string))
- # If the [ token is the first token in a line we shouldn't complain
- # about a missing space before [. This is because some Ecma script
- # languages allow syntax like:
- # [Annotation]
- # class MyClass {...}
- # So we don't want to blindly warn about missing spaces before [.
- # In the the future, when rules for computing exactly how many spaces
- # lines should be indented are added, then we can return errors for
- # [ tokens that are improperly indented.
- # For example:
- # var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =
- # [a,b,c];
- # should trigger a proper indentation warning message as [ is not indented
- # by four spaces.
- elif (not token.IsFirstInLine() and token.previous and
- token.previous.type not in (
- [Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +
- Type.EXPRESSION_ENDER_TYPES)):
- self._HandleError(errors.MISSING_SPACE, 'Missing space before "["',
- token, position=Position.AtBeginning())
-
- def Finalize(self, state):
- """Perform all checks that need to occur after all lines are processed.
-
- Args:
- state: State of the parser after parsing all tokens
-
- Raises:
- TypeError: If not overridden.
- """
- last_non_space_token = state.GetLastNonSpaceToken()
- # Check last line for ending with newline.
- if state.GetLastLine() and not (
- state.GetLastLine().isspace() or
- state.GetLastLine().rstrip('\n\r\f') != state.GetLastLine()):
- self._HandleError(
- errors.FILE_MISSING_NEWLINE,
- 'File does not end with new line. (%s)' % state.GetLastLine(),
- last_non_space_token)
-
- try:
- self._indentation.Finalize()
- except Exception, e:
- self._HandleError(
- errors.FILE_DOES_NOT_PARSE,
- str(e),
- last_non_space_token)
-
- def GetLongLineExceptions(self):
- """Gets a list of regexps for lines which can be longer than the limit.
-
- Returns:
- A list of regexps, used as matches (rather than searches).
- """
- return []
-
- def InExplicitlyTypedLanguage(self):
- """Returns whether this ecma implementation is explicitly typed."""
- return False
diff --git a/tools/closure_linter/build/lib/closure_linter/ecmametadatapass.py b/tools/closure_linter/build/lib/closure_linter/ecmametadatapass.py
deleted file mode 100644
index 50621610ef..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/ecmametadatapass.py
+++ /dev/null
@@ -1,574 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Metadata pass for annotating tokens in EcmaScript files."""
-
-__author__ = ('robbyw@google.com (Robert Walker)')
-
-from closure_linter import javascripttokens
-from closure_linter import tokenutil
-
-
-TokenType = javascripttokens.JavaScriptTokenType
-
-
class ParseError(Exception):
  """Exception indicating a parse error at the given token.

  Attributes:
    token: The token where the parse error occurred.
  """

  def __init__(self, token, message=None):
    """Initializes a parse error at the given token.

    Args:
      token: The token where the parse error occurred.
      message: An optional message describing the parse error.
    """
    super(ParseError, self).__init__(message)
    self.token = token
-
-
class EcmaContext(object):
  """Context object for EcmaScript languages.

  A context describes one syntactic region of a file (a block, a group of
  parentheses, an object literal, ...).  Contexts form a tree: each holds a
  parent pointer and an ordered list of children.

  Attributes:
    type: The context type, one of the string constants below.
    start_token: The token where this context starts.
    end_token: The token where this context ends.
    parent: The parent context.
  """

  # The root context.
  ROOT = 'root'

  # A block of code.
  BLOCK = 'block'

  # A pseudo-block of code for a given case or default section.
  CASE_BLOCK = 'case_block'

  # Block of statements in a for loop's parentheses.
  FOR_GROUP_BLOCK = 'for_block'

  # An implied block of code for 1 line if, while, and for statements
  IMPLIED_BLOCK = 'implied_block'

  # An index in to an array or object.
  INDEX = 'index'

  # An array literal in [].
  ARRAY_LITERAL = 'array_literal'

  # An object literal in {}.
  OBJECT_LITERAL = 'object_literal'

  # An individual element in an array or object literal.
  LITERAL_ELEMENT = 'literal_element'

  # The portion of a ternary statement between ? and :
  TERNARY_TRUE = 'ternary_true'

  # The portion of a ternary statement after :
  TERNARY_FALSE = 'ternary_false'

  # The entire switch statement.  This will contain a GROUP with the variable
  # and a BLOCK with the code.
  # Since that BLOCK is not a normal block, it can not contain statements
  # except for case and default.
  SWITCH = 'switch'

  # A normal comment.
  COMMENT = 'comment'

  # A JsDoc comment.
  DOC = 'doc'

  # An individual statement.
  STATEMENT = 'statement'

  # Code within parentheses.
  GROUP = 'group'

  # Parameter names in a function declaration.
  PARAMETERS = 'parameters'

  # A set of variable declarations appearing after the 'var' keyword.
  VAR = 'var'

  # Context types that are blocks.
  BLOCK_TYPES = frozenset([
      ROOT, BLOCK, CASE_BLOCK, FOR_GROUP_BLOCK, IMPLIED_BLOCK])

  def __init__(self, context_type, start_token, parent=None):
    """Initializes the context object.

    Args:
      context_type: The context type, one of the class constants above.
      start_token: The token where this context starts.
      parent: The parent context; if given, this context registers itself
          as a child of it (which also sets self.parent).

    Attributes:
      type: The context type.
      start_token: The token where this context starts.
      end_token: The token where this context ends; None until the context
          is closed (see EcmaMetaDataPass._PopContext).
      parent: The parent context.
      children: The child contexts of this context, in order.
    """
    self.type = context_type
    self.start_token = start_token
    self.end_token = None

    # Set to None here; AddChild (called below when a parent is supplied)
    # is the single place that links parent and child.
    self.parent = None
    self.children = []

    if parent:
      parent.AddChild(self)

  def __repr__(self):
    """Returns a string representation of the context object.

    The representation lists the context-type chain from this context up to
    the root, e.g. 'Context(statement > block > root)'.
    """
    stack = []
    context = self
    while context:
      stack.append(context.type)
      context = context.parent
    return 'Context(%s)' % ' > '.join(stack)

  def AddChild(self, child):
    """Adds a child to this context and sets child's parent to this context.

    Args:
      child: A child EcmaContext.  The child's parent will be set to this
          context.
    """

    child.parent = self

    self.children.append(child)
    # NOTE: comparator-style sort (cmp function passed positionally) is
    # Python-2-only; under Python 3 this would need
    # key=functools.cmp_to_key(EcmaContext._CompareContexts).
    self.children.sort(EcmaContext._CompareContexts)

  def GetRoot(self):
    """Get the root context that contains this context, if any.

    Returns None implicitly when no ancestor has type ROOT.
    """
    context = self
    while context:
      # NOTE(review): identity test ('is') relies on every context's type
      # being assigned from the class constants above — confirm '==' would
      # not be safer for subclass-created contexts.
      if context.type is EcmaContext.ROOT:
        return context
      context = context.parent

  @staticmethod
  def _CompareContexts(context1, context2):
    """Sorts contexts 1 and 2 by start token document position."""
    return tokenutil.Compare(context1.start_token, context2.start_token)
-
-
class EcmaMetaData(object):
  """Token metadata for EcmaScript languages.

  Attributes:
    last_code: The last code token to appear before this one.
    context: The context this token appears in.
    operator_type: The operator type, will be one of the *_OPERATOR constants
        defined below.
    aliased_symbol: The full symbol being identified, as a string (e.g. an
        'XhrIo' alias for 'goog.net.XhrIo').  Only applicable to identifier
        tokens.  This is set in aliaspass.py and is a best guess.
    is_alias_definition: True if the symbol is part of an alias definition.
        If so, these symbols won't be counted towards goog.requires/provides.
  """

  UNARY_OPERATOR = 'unary'
  UNARY_POST_OPERATOR = 'unary_post'
  BINARY_OPERATOR = 'binary'
  TERNARY_OPERATOR = 'ternary'

  def __init__(self):
    """Initializes a token metadata object with all fields unset."""
    self.last_code = None
    self.context = None
    self.operator_type = None
    self.is_implied_semicolon = False
    self.is_implied_block = False
    self.is_implied_block_close = False
    self.aliased_symbol = None
    self.is_alias_definition = False

  def __repr__(self):
    """Returns a compact string representation of this metadata."""
    # Always show the context; append only the flags that are set.
    parts = ['%r' % self.context]
    if self.operator_type:
      parts.append('optype: %r' % self.operator_type)
    if self.is_implied_semicolon:
      parts.append('implied;')
    if self.aliased_symbol:
      parts.append('alias for: %s' % self.aliased_symbol)
    return 'MetaData(%s)' % ', '.join(parts)

  def IsUnaryOperator(self):
    """Returns whether the token is a prefix or postfix unary operator."""
    unary_types = (EcmaMetaData.UNARY_OPERATOR,
                   EcmaMetaData.UNARY_POST_OPERATOR)
    return self.operator_type in unary_types

  def IsUnaryPostOperator(self):
    """Returns whether the token is a postfix unary operator."""
    return self.operator_type == EcmaMetaData.UNARY_POST_OPERATOR
-
-
-class EcmaMetaDataPass(object):
- """A pass that iterates over all tokens and builds metadata about them."""
-
- def __init__(self):
- """Initialize the meta data pass object."""
- self.Reset()
-
- def Reset(self):
- """Resets the metadata pass to prepare for the next file."""
- self._token = None
- self._context = None
- self._AddContext(EcmaContext.ROOT)
- self._last_code = None
-
- def _CreateContext(self, context_type):
- """Overridable by subclasses to create the appropriate context type."""
- return EcmaContext(context_type, self._token, self._context)
-
- def _CreateMetaData(self):
- """Overridable by subclasses to create the appropriate metadata type."""
- return EcmaMetaData()
-
- def _AddContext(self, context_type):
- """Adds a context of the given type to the context stack.
-
- Args:
- context_type: The type of context to create
- """
- self._context = self._CreateContext(context_type)
-
- def _PopContext(self):
- """Moves up one level in the context stack.
-
- Returns:
- The former context.
-
- Raises:
- ParseError: If the root context is popped.
- """
- top_context = self._context
- top_context.end_token = self._token
- self._context = top_context.parent
- if self._context:
- return top_context
- else:
- raise ParseError(self._token)
-
- def _PopContextType(self, *stop_types):
- """Pops the context stack until a context of the given type is popped.
-
- Args:
- *stop_types: The types of context to pop to - stops at the first match.
-
- Returns:
- The context object of the given type that was popped.
- """
- last = None
- while not last or last.type not in stop_types:
- last = self._PopContext()
- return last
-
- def _EndStatement(self):
- """Process the end of a statement."""
- self._PopContextType(EcmaContext.STATEMENT)
- if self._context.type == EcmaContext.IMPLIED_BLOCK:
- self._token.metadata.is_implied_block_close = True
- self._PopContext()
-
  def _ProcessContext(self):
    """Process the context at the current token.

    This is the core state machine of the pass: it pushes and pops contexts
    as structural tokens (parens, brackets, braces, keywords, operators)
    go by.

    Returns:
      The context that should be assigned to the current token, or None if
      the current context after this method should be used.

    Raises:
      ParseError: When the token appears in an invalid context.
    """
    token = self._token
    token_type = token.type

    if self._context.type in EcmaContext.BLOCK_TYPES:
      # Whenever we're in a block, we add a statement context.  We make an
      # exception for switch statements since they can only contain case: and
      # default: and therefore don't directly contain statements.
      # The block we add here may be immediately removed in some cases, but
      # that causes no harm.
      parent = self._context.parent
      if not parent or parent.type != EcmaContext.SWITCH:
        self._AddContext(EcmaContext.STATEMENT)

    elif self._context.type == EcmaContext.ARRAY_LITERAL:
      self._AddContext(EcmaContext.LITERAL_ELEMENT)

    if token_type == TokenType.START_PAREN:
      if self._last_code and self._last_code.IsKeyword('for'):
        # for loops contain multiple statements in the group unlike while,
        # switch, if, etc.
        self._AddContext(EcmaContext.FOR_GROUP_BLOCK)
      else:
        self._AddContext(EcmaContext.GROUP)

    elif token_type == TokenType.END_PAREN:
      result = self._PopContextType(EcmaContext.GROUP,
                                    EcmaContext.FOR_GROUP_BLOCK)
      keyword_token = result.start_token.metadata.last_code
      # keyword_token will not exist if the open paren is the first line of
      # the file, for example if all code is wrapped in an immediately
      # executed anonymous function.
      if keyword_token and keyword_token.string in ('if', 'for', 'while'):
        next_code = tokenutil.SearchExcept(token, TokenType.NON_CODE_TYPES)
        if next_code.type != TokenType.START_BLOCK:
          # Check for do-while: "while (...)" with no "{" after it can be the
          # tail of "do { ... } while (...)", which is not an implied block.
          is_do_while = False
          pre_keyword_token = keyword_token.metadata.last_code
          if (pre_keyword_token and
              pre_keyword_token.type == TokenType.END_BLOCK):
            start_block_token = pre_keyword_token.metadata.context.start_token
            is_do_while = start_block_token.metadata.last_code.string == 'do'

          # If it's not do-while, it's an implied block.
          if not is_do_while:
            self._AddContext(EcmaContext.IMPLIED_BLOCK)
            token.metadata.is_implied_block = True

      return result

    # else (not else if) with no open brace after it should be considered the
    # start of an implied block, similar to the case with if, for, and while
    # above.
    elif (token_type == TokenType.KEYWORD and
          token.string == 'else'):
      next_code = tokenutil.SearchExcept(token, TokenType.NON_CODE_TYPES)
      if (next_code.type != TokenType.START_BLOCK and
          (next_code.type != TokenType.KEYWORD or next_code.string != 'if')):
        self._AddContext(EcmaContext.IMPLIED_BLOCK)
        token.metadata.is_implied_block = True

    elif token_type == TokenType.START_PARAMETERS:
      self._AddContext(EcmaContext.PARAMETERS)

    elif token_type == TokenType.END_PARAMETERS:
      return self._PopContextType(EcmaContext.PARAMETERS)

    elif token_type == TokenType.START_BRACKET:
      # "[" after an expression is an index; otherwise an array literal.
      if (self._last_code and
          self._last_code.type in TokenType.EXPRESSION_ENDER_TYPES):
        self._AddContext(EcmaContext.INDEX)
      else:
        self._AddContext(EcmaContext.ARRAY_LITERAL)

    elif token_type == TokenType.END_BRACKET:
      return self._PopContextType(EcmaContext.INDEX, EcmaContext.ARRAY_LITERAL)

    elif token_type == TokenType.START_BLOCK:
      if (self._last_code.type in (TokenType.END_PAREN,
                                   TokenType.END_PARAMETERS) or
          self._last_code.IsKeyword('else') or
          self._last_code.IsKeyword('do') or
          self._last_code.IsKeyword('try') or
          self._last_code.IsKeyword('finally') or
          (self._last_code.IsOperator(':') and
           self._last_code.metadata.context.type == EcmaContext.CASE_BLOCK)):
        # else, do, try, and finally all might have no () before {.
        # Also, handle the bizarre syntax case 10: {...}.
        self._AddContext(EcmaContext.BLOCK)
      else:
        self._AddContext(EcmaContext.OBJECT_LITERAL)

    elif token_type == TokenType.END_BLOCK:
      context = self._PopContextType(EcmaContext.BLOCK,
                                     EcmaContext.OBJECT_LITERAL)
      if self._context.type == EcmaContext.SWITCH:
        # The end of the block also means the end of the switch statement it
        # applies to.
        return self._PopContext()
      return context

    elif token.IsKeyword('switch'):
      self._AddContext(EcmaContext.SWITCH)

    elif (token_type == TokenType.KEYWORD and
          token.string in ('case', 'default') and
          self._context.type != EcmaContext.OBJECT_LITERAL):
      # Pop up to but not including the switch block.
      while self._context.parent.type != EcmaContext.SWITCH:
        self._PopContext()
      # NOTE(review): if no enclosing switch exists, the loop above
      # dereferences a None parent (AttributeError) before this guard can
      # fire — the guard appears unreachable; confirm intended behavior.
      if self._context.parent is None:
        raise ParseError(token, 'Encountered case/default statement '
                         'without switch statement')

    elif token.IsOperator('?'):
      self._AddContext(EcmaContext.TERNARY_TRUE)

    elif token.IsOperator(':'):
      if self._context.type == EcmaContext.OBJECT_LITERAL:
        self._AddContext(EcmaContext.LITERAL_ELEMENT)

      elif self._context.type == EcmaContext.TERNARY_TRUE:
        self._PopContext()
        self._AddContext(EcmaContext.TERNARY_FALSE)

      # Handle nested ternary statements like:
      #   foo = bar ? baz ? 1 : 2 : 3
      # When we encounter the second ":" the context is
      #   ternary_false > ternary_true > statement > root
      elif (self._context.type == EcmaContext.TERNARY_FALSE and
            self._context.parent.type == EcmaContext.TERNARY_TRUE):
        self._PopContext()  # Leave current ternary false context.
        self._PopContext()  # Leave current parent ternary true
        self._AddContext(EcmaContext.TERNARY_FALSE)

      elif self._context.parent.type == EcmaContext.SWITCH:
        self._AddContext(EcmaContext.CASE_BLOCK)

    elif token.IsKeyword('var'):
      self._AddContext(EcmaContext.VAR)

    elif token.IsOperator(','):
      # A comma ends the innermost element/declaration; pop back to the
      # nearest context that can legally contain a comma-separated list.
      while self._context.type not in (EcmaContext.VAR,
                                       EcmaContext.ARRAY_LITERAL,
                                       EcmaContext.OBJECT_LITERAL,
                                       EcmaContext.STATEMENT,
                                       EcmaContext.PARAMETERS,
                                       EcmaContext.GROUP):
        self._PopContext()

    elif token_type == TokenType.SEMICOLON:
      self._EndStatement()
-
- def Process(self, first_token):
- """Processes the token stream starting with the given token."""
- self._token = first_token
- while self._token:
- self._ProcessToken()
-
- if self._token.IsCode():
- self._last_code = self._token
-
- self._token = self._token.next
-
- try:
- self._PopContextType(self, EcmaContext.ROOT)
- except ParseError:
- # Ignore the "popped to root" error.
- pass
-
  def _ProcessToken(self):
    """Process the given token.

    Attaches metadata (context, last code token, operator type) to the
    current token and decides whether an implied semicolon follows it.
    """
    token = self._token
    token.metadata = self._CreateMetaData()
    # _ProcessContext may return a popped context for closing tokens;
    # otherwise the token belongs to the current context.
    context = (self._ProcessContext() or self._context)
    token.metadata.context = context
    token.metadata.last_code = self._last_code

    # Determine the operator type of the token, if applicable.
    if token.type == TokenType.OPERATOR:
      token.metadata.operator_type = self._GetOperatorType(token)

    # Determine if there is an implied semicolon after the token.
    if token.type != TokenType.SEMICOLON:
      next_code = tokenutil.SearchExcept(token, TokenType.NON_CODE_TYPES)
      # A statement like if (x) does not need a semicolon after it
      # NOTE(review): this compares the context *object* to a context type
      # constant; it looks like it should be self._context.type — confirm.
      is_implied_block = self._context == EcmaContext.IMPLIED_BLOCK
      is_last_code_in_line = token.IsCode() and (
          not next_code or next_code.line_number != token.line_number)
      is_continued_operator = (token.type == TokenType.OPERATOR and
                               not token.metadata.IsUnaryPostOperator())
      is_continued_dot = token.string == '.'
      next_code_is_operator = next_code and next_code.type == TokenType.OPERATOR
      is_end_of_block = (
          token.type == TokenType.END_BLOCK and
          token.metadata.context.type != EcmaContext.OBJECT_LITERAL)
      is_multiline_string = token.type == TokenType.STRING_TEXT
      # A `var` keyword with its first identifier on a later line continues
      # the declaration, so no semicolon is implied after it.
      is_continued_var_decl = (token.IsKeyword('var') and
                               next_code and
                               (next_code.type in [TokenType.IDENTIFIER,
                                                   TokenType.SIMPLE_LVALUE]) and
                               token.line_number < next_code.line_number)
      next_code_is_block = next_code and next_code.type == TokenType.START_BLOCK
      # An implied semicolon requires the token to end the line AND none of
      # the continuation conditions above to hold.
      if (is_last_code_in_line and
          self._StatementCouldEndInContext() and
          not is_multiline_string and
          not is_end_of_block and
          not is_continued_var_decl and
          not is_continued_operator and
          not is_continued_dot and
          not next_code_is_operator and
          not is_implied_block and
          not next_code_is_block):
        token.metadata.is_implied_semicolon = True
        self._EndStatement()
-
- def _StatementCouldEndInContext(self):
- """Returns if the current statement (if any) may end in this context."""
- # In the basic statement or variable declaration context, statement can
- # always end in this context.
- if self._context.type in (EcmaContext.STATEMENT, EcmaContext.VAR):
- return True
-
- # End of a ternary false branch inside a statement can also be the
- # end of the statement, for example:
- # var x = foo ? foo.bar() : null
- # In this case the statement ends after the null, when the context stack
- # looks like ternary_false > var > statement > root.
- if (self._context.type == EcmaContext.TERNARY_FALSE and
- self._context.parent.type in (EcmaContext.STATEMENT, EcmaContext.VAR)):
- return True
-
- # In all other contexts like object and array literals, ternary true, etc.
- # the statement can't yet end.
- return False
-
- def _GetOperatorType(self, token):
- """Returns the operator type of the given operator token.
-
- Args:
- token: The token to get arity for.
-
- Returns:
- The type of the operator. One of the *_OPERATOR constants defined in
- EcmaMetaData.
- """
- if token.string == '?':
- return EcmaMetaData.TERNARY_OPERATOR
-
- if token.string in TokenType.UNARY_OPERATORS:
- return EcmaMetaData.UNARY_OPERATOR
-
- last_code = token.metadata.last_code
- if not last_code or last_code.type == TokenType.END_BLOCK:
- return EcmaMetaData.UNARY_OPERATOR
-
- if (token.string in TokenType.UNARY_POST_OPERATORS and
- last_code.type in TokenType.EXPRESSION_ENDER_TYPES):
- return EcmaMetaData.UNARY_POST_OPERATOR
-
- if (token.string in TokenType.UNARY_OK_OPERATORS and
- last_code.type not in TokenType.EXPRESSION_ENDER_TYPES and
- last_code.string not in TokenType.UNARY_POST_OPERATORS):
- return EcmaMetaData.UNARY_OPERATOR
-
- return EcmaMetaData.BINARY_OPERATOR
diff --git a/tools/closure_linter/build/lib/closure_linter/error_check.py b/tools/closure_linter/build/lib/closure_linter/error_check.py
deleted file mode 100644
index 8d657fe917..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/error_check.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Specific JSLint errors checker."""
-
-
-
-import gflags as flags
-
-FLAGS = flags.FLAGS
-
-
class Rule(object):
  """Names of the optional lint rules that may be enabled.

  Full documentation for each rule lives in the corresponding flag
  definition below.
  """

  BLANK_LINES_AT_TOP_LEVEL = 'blank_lines_at_top_level'
  INDENTATION = 'indentation'
  WELL_FORMED_AUTHOR = 'well_formed_author'
  NO_BRACES_AROUND_INHERIT_DOC = 'no_braces_around_inherit_doc'
  BRACES_AROUND_TYPE = 'braces_around_type'
  OPTIONAL_TYPE_MARKER = 'optional_type_marker'
  VARIABLE_ARG_MARKER = 'variable_arg_marker'
  UNUSED_PRIVATE_MEMBERS = 'unused_private_members'
  UNUSED_LOCAL_VARIABLES = 'unused_local_variables'

  # Sentinel rule name that enables every known optional error.
  ALL = 'all'

  # Rules checked when the --strict flag is set, i.e. the rules specific to
  # the stricter Closure style.
  CLOSURE_RULES = frozenset({
      BLANK_LINES_AT_TOP_LEVEL,
      INDENTATION,
      WELL_FORMED_AUTHOR,
      NO_BRACES_AROUND_INHERIT_DOC,
      BRACES_AROUND_TYPE,
      OPTIONAL_TYPE_MARKER,
      VARIABLE_ARG_MARKER,
  })
-
-
# Flag definitions double as the user-facing documentation for each rule.
flags.DEFINE_boolean('strict', False,
                     'Whether to validate against the stricter Closure style. '
                     'This includes ' + (', '.join(Rule.CLOSURE_RULES)) + '.')
flags.DEFINE_multistring('jslint_error', [],
                         'List of specific lint errors to check. Here is a list'
                         ' of accepted values:\n'
                         ' - ' + Rule.ALL + ': enables all following errors.\n'
                         ' - ' + Rule.BLANK_LINES_AT_TOP_LEVEL + ': validates'
                         'number of blank lines between blocks at top level.\n'
                         ' - ' + Rule.INDENTATION + ': checks correct '
                         'indentation of code.\n'
                         ' - ' + Rule.WELL_FORMED_AUTHOR + ': validates the '
                         '@author JsDoc tags.\n'
                         ' - ' + Rule.NO_BRACES_AROUND_INHERIT_DOC + ': '
                         'forbids braces around @inheritdoc JsDoc tags.\n'
                         ' - ' + Rule.BRACES_AROUND_TYPE + ': enforces braces '
                         'around types in JsDoc tags.\n'
                         ' - ' + Rule.OPTIONAL_TYPE_MARKER + ': checks correct '
                         'use of optional marker = in param types.\n'
                         # Bug fix: variable_arg_marker is an accepted value
                         # (it is even part of CLOSURE_RULES) but was missing
                         # from this help text.
                         ' - ' + Rule.VARIABLE_ARG_MARKER + ': checks correct '
                         'use of the variable args marker in param types.\n'
                         ' - ' + Rule.UNUSED_PRIVATE_MEMBERS + ': checks for '
                         'unused private variables.\n'
                         ' - ' + Rule.UNUSED_LOCAL_VARIABLES + ': checks for '
                         'unused local variables.\n')
-
-
def ShouldCheck(rule):
  """Returns whether the optional rule should be checked.

  Consults the --strict and --jslint_error flags to find out if this
  specific rule should be checked.

  Args:
    rule: Name of the rule (see Rule).

  Returns:
    True if the rule should be checked according to the flags, otherwise False.
  """
  # Explicitly requested, either by name or via the 'all' sentinel.
  if rule in FLAGS.jslint_error or Rule.ALL in FLAGS.jslint_error:
    return True
  # Checks strict rules.
  return FLAGS.strict and rule in Rule.CLOSURE_RULES
diff --git a/tools/closure_linter/build/lib/closure_linter/error_fixer.py b/tools/closure_linter/build/lib/closure_linter/error_fixer.py
deleted file mode 100644
index 88f9c720ab..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/error_fixer.py
+++ /dev/null
@@ -1,618 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Main class responsible for automatically fixing simple style violations."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = 'robbyw@google.com (Robert Walker)'
-
-import re
-
-import gflags as flags
-from closure_linter import errors
-from closure_linter import javascriptstatetracker
-from closure_linter import javascripttokens
-from closure_linter import requireprovidesorter
-from closure_linter import tokenutil
-from closure_linter.common import errorhandler
-
# Shorthand
Token = javascripttokens.JavaScriptToken
Type = javascripttokens.JavaScriptTokenType

# Matches an optional closing brace plus trailing whitespace at the end of a
# JsDoc flag's type annotation.
END_OF_FLAG_TYPE = re.compile(r'(}?\s*)$')

# Regex to represent common mistake inverting author name and email as
# @author User Name (user@company)
INVERTED_AUTHOR_SPEC = re.compile(r'(?P<leading_whitespace>\s*)'
                                  r'(?P<name>[^(]+)'
                                  r'(?P<whitespace_after_name>\s+)'
                                  r'\('
                                  r'(?P<email>[^\s]+@[^)\s]+)'
                                  r'\)'
                                  r'(?P<trailing_characters>.*)')

FLAGS = flags.FLAGS
flags.DEFINE_boolean('disable_indentation_fixing', False,
                     'Whether to disable automatic fixing of indentation.')
flags.DEFINE_list('fix_error_codes', [], 'A list of specific error codes to '
                  'fix. Defaults to all supported error codes when empty. '
                  'See errors.py for a list of error codes.')
-
-
-class ErrorFixer(errorhandler.ErrorHandler):
- """Object that fixes simple style errors."""
-
- def __init__(self, external_file=None):
- """Initialize the error fixer.
-
- Args:
- external_file: If included, all output will be directed to this file
- instead of overwriting the files the errors are found in.
- """
- errorhandler.ErrorHandler.__init__(self)
-
- self._file_name = None
- self._file_token = None
- self._external_file = external_file
-
- try:
- self._fix_error_codes = set([errors.ByName(error.upper()) for error in
- FLAGS.fix_error_codes])
- except KeyError as ke:
- raise ValueError('Unknown error code ' + ke.args[0])
-
- def HandleFile(self, filename, first_token):
- """Notifies this ErrorPrinter that subsequent errors are in filename.
-
- Args:
- filename: The name of the file about to be checked.
- first_token: The first token in the file.
- """
- self._file_name = filename
- self._file_is_html = filename.endswith('.html') or filename.endswith('.htm')
- self._file_token = first_token
- self._file_fix_count = 0
- self._file_changed_lines = set()
-
- def _AddFix(self, tokens):
- """Adds the fix to the internal count.
-
- Args:
- tokens: The token or sequence of tokens changed to fix an error.
- """
- self._file_fix_count += 1
- if hasattr(tokens, 'line_number'):
- self._file_changed_lines.add(tokens.line_number)
- else:
- for token in tokens:
- self._file_changed_lines.add(token.line_number)
-
- def _FixJsDocPipeNull(self, js_type):
- """Change number|null or null|number to ?number.
-
- Args:
- js_type: The typeannotation.TypeAnnotation instance to fix.
- """
-
- # Recurse into all sub_types if the error was at a deeper level.
- map(self._FixJsDocPipeNull, js_type.IterTypes())
-
- if js_type.type_group and len(js_type.sub_types) == 2:
- # Find and remove the null sub_type:
- sub_type = None
- for sub_type in js_type.sub_types:
- if sub_type.identifier == 'null':
- map(tokenutil.DeleteToken, sub_type.tokens)
- self._AddFix(sub_type.tokens)
- break
- else:
- return
-
- first_token = js_type.FirstToken()
- question_mark = Token('?', Type.DOC_TYPE_MODIFIER, first_token.line,
- first_token.line_number)
- tokenutil.InsertTokenBefore(question_mark, first_token)
- js_type.tokens.insert(0, question_mark)
- js_type.tokens.remove(sub_type)
- js_type.or_null = True
-
- # Now also remove the separator, which is in the parent's token list,
- # either before or after the sub_type, there is exactly one. Scan for it.
- for token in js_type.tokens:
- if (token and isinstance(token, Token) and
- token.type == Type.DOC_TYPE_MODIFIER and token.string == '|'):
- tokenutil.DeleteToken(token)
- self._AddFix(token)
- break
-
- def HandleError(self, error):
- """Attempts to fix the error.
-
- Args:
- error: The error object
- """
- code = error.code
- token = error.token
-
- if self._fix_error_codes and code not in self._fix_error_codes:
- return
-
- if code == errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL:
- self._FixJsDocPipeNull(token.attached_object.jstype)
-
- elif code == errors.JSDOC_MISSING_OPTIONAL_TYPE:
- iterator = token.attached_object.type_end_token
- if iterator.type == Type.DOC_END_BRACE or iterator.string.isspace():
- iterator = iterator.previous
-
- ending_space = len(iterator.string) - len(iterator.string.rstrip())
- iterator.string = '%s=%s' % (iterator.string.rstrip(),
- ' ' * ending_space)
-
- # Create a new flag object with updated type info.
- token.attached_object = javascriptstatetracker.JsDocFlag(token)
- self._AddFix(token)
-
- elif code == errors.JSDOC_MISSING_VAR_ARGS_TYPE:
- iterator = token.attached_object.type_start_token
- if iterator.type == Type.DOC_START_BRACE or iterator.string.isspace():
- iterator = iterator.next
-
- starting_space = len(iterator.string) - len(iterator.string.lstrip())
- iterator.string = '%s...%s' % (' ' * starting_space,
- iterator.string.lstrip())
-
- # Create a new flag object with updated type info.
- token.attached_object = javascriptstatetracker.JsDocFlag(token)
- self._AddFix(token)
-
- elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION,
- errors.MISSING_SEMICOLON):
- semicolon_token = Token(';', Type.SEMICOLON, token.line,
- token.line_number)
- tokenutil.InsertTokenAfter(semicolon_token, token)
- token.metadata.is_implied_semicolon = False
- semicolon_token.metadata.is_implied_semicolon = False
- self._AddFix(token)
-
- elif code in (errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
- errors.REDUNDANT_SEMICOLON,
- errors.COMMA_AT_END_OF_LITERAL):
- self._DeleteToken(token)
- self._AddFix(token)
-
- elif code == errors.INVALID_JSDOC_TAG:
- if token.string == '@returns':
- token.string = '@return'
- self._AddFix(token)
-
- elif code == errors.FILE_MISSING_NEWLINE:
- # This error is fixed implicitly by the way we restore the file
- self._AddFix(token)
-
- elif code == errors.MISSING_SPACE:
- if error.fix_data:
- token.string = error.fix_data
- self._AddFix(token)
- elif error.position:
- if error.position.IsAtBeginning():
- tokenutil.InsertSpaceTokenAfter(token.previous)
- elif error.position.IsAtEnd(token.string):
- tokenutil.InsertSpaceTokenAfter(token)
- else:
- token.string = error.position.Set(token.string, ' ')
- self._AddFix(token)
-
- elif code == errors.EXTRA_SPACE:
- if error.position:
- token.string = error.position.Set(token.string, '')
- self._AddFix(token)
-
- elif code == errors.MISSING_LINE:
- if error.position.IsAtBeginning():
- tokenutil.InsertBlankLineAfter(token.previous)
- else:
- tokenutil.InsertBlankLineAfter(token)
- self._AddFix(token)
-
- elif code == errors.EXTRA_LINE:
- self._DeleteToken(token)
- self._AddFix(token)
-
- elif code == errors.WRONG_BLANK_LINE_COUNT:
- if not token.previous:
- # TODO(user): Add an insertBefore method to tokenutil.
- return
-
- num_lines = error.fix_data
- should_delete = False
-
- if num_lines < 0:
- num_lines *= -1
- should_delete = True
-
- for unused_i in xrange(1, num_lines + 1):
- if should_delete:
- # TODO(user): DeleteToken should update line numbers.
- self._DeleteToken(token.previous)
- else:
- tokenutil.InsertBlankLineAfter(token.previous)
- self._AddFix(token)
-
- elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING:
- end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END)
- if end_quote:
- single_quote_start = Token(
- "'", Type.SINGLE_QUOTE_STRING_START, token.line, token.line_number)
- single_quote_end = Token(
- "'", Type.SINGLE_QUOTE_STRING_START, end_quote.line,
- token.line_number)
-
- tokenutil.InsertTokenAfter(single_quote_start, token)
- tokenutil.InsertTokenAfter(single_quote_end, end_quote)
- self._DeleteToken(token)
- self._DeleteToken(end_quote)
- self._AddFix([token, end_quote])
-
- elif code == errors.MISSING_BRACES_AROUND_TYPE:
- fixed_tokens = []
- start_token = token.attached_object.type_start_token
-
- if start_token.type != Type.DOC_START_BRACE:
- leading_space = (
- len(start_token.string) - len(start_token.string.lstrip()))
- if leading_space:
- start_token = tokenutil.SplitToken(start_token, leading_space)
- # Fix case where start and end token were the same.
- if token.attached_object.type_end_token == start_token.previous:
- token.attached_object.type_end_token = start_token
-
- new_token = Token('{', Type.DOC_START_BRACE, start_token.line,
- start_token.line_number)
- tokenutil.InsertTokenAfter(new_token, start_token.previous)
- token.attached_object.type_start_token = new_token
- fixed_tokens.append(new_token)
-
- end_token = token.attached_object.type_end_token
- if end_token.type != Type.DOC_END_BRACE:
- # If the start token was a brace, the end token will be a
- # FLAG_ENDING_TYPE token, if there wasn't a starting brace then
- # the end token is the last token of the actual type.
- last_type = end_token
- if not fixed_tokens:
- last_type = end_token.previous
-
- while last_type.string.isspace():
- last_type = last_type.previous
-
- # If there was no starting brace then a lone end brace wouldn't have
- # been type end token. Now that we've added any missing start brace,
- # see if the last effective type token was an end brace.
- if last_type.type != Type.DOC_END_BRACE:
- trailing_space = (len(last_type.string) -
- len(last_type.string.rstrip()))
- if trailing_space:
- tokenutil.SplitToken(last_type,
- len(last_type.string) - trailing_space)
-
- new_token = Token('}', Type.DOC_END_BRACE, last_type.line,
- last_type.line_number)
- tokenutil.InsertTokenAfter(new_token, last_type)
- token.attached_object.type_end_token = new_token
- fixed_tokens.append(new_token)
-
- self._AddFix(fixed_tokens)
-
- elif code == errors.LINE_STARTS_WITH_OPERATOR:
- # Remove whitespace following the operator so the line starts clean.
- self._StripSpace(token, before=False)
-
- # Remove the operator.
- tokenutil.DeleteToken(token)
- self._AddFix(token)
-
- insertion_point = tokenutil.GetPreviousCodeToken(token)
-
- # Insert a space between the previous token and the new operator.
- space = Token(' ', Type.WHITESPACE, insertion_point.line,
- insertion_point.line_number)
- tokenutil.InsertTokenAfter(space, insertion_point)
-
- # Insert the operator on the end of the previous line.
- new_token = Token(token.string, token.type, insertion_point.line,
- insertion_point.line_number)
- tokenutil.InsertTokenAfter(new_token, space)
- self._AddFix(new_token)
-
- elif code == errors.LINE_ENDS_WITH_DOT:
- # Remove whitespace preceding the operator to remove trailing whitespace.
- self._StripSpace(token, before=True)
-
- # Remove the dot.
- tokenutil.DeleteToken(token)
- self._AddFix(token)
-
- insertion_point = tokenutil.GetNextCodeToken(token)
-
- # Insert the dot at the beginning of the next line of code.
- new_token = Token(token.string, token.type, insertion_point.line,
- insertion_point.line_number)
- tokenutil.InsertTokenBefore(new_token, insertion_point)
- self._AddFix(new_token)
-
- elif code == errors.GOOG_REQUIRES_NOT_ALPHABETIZED:
- require_start_token = error.fix_data
- sorter = requireprovidesorter.RequireProvideSorter()
- sorter.FixRequires(require_start_token)
-
- self._AddFix(require_start_token)
-
- elif code == errors.GOOG_PROVIDES_NOT_ALPHABETIZED:
- provide_start_token = error.fix_data
- sorter = requireprovidesorter.RequireProvideSorter()
- sorter.FixProvides(provide_start_token)
-
- self._AddFix(provide_start_token)
-
- elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC:
- if token.previous.string == '{' and token.next.string == '}':
- self._DeleteToken(token.previous)
- self._DeleteToken(token.next)
- self._AddFix([token])
-
- elif code == errors.INVALID_AUTHOR_TAG_DESCRIPTION:
- match = INVERTED_AUTHOR_SPEC.match(token.string)
- if match:
- token.string = '%s%s%s(%s)%s' % (match.group('leading_whitespace'),
- match.group('email'),
- match.group('whitespace_after_name'),
- match.group('name'),
- match.group('trailing_characters'))
- self._AddFix(token)
-
- elif (code == errors.WRONG_INDENTATION and
- not FLAGS.disable_indentation_fixing):
- token = tokenutil.GetFirstTokenInSameLine(token)
- actual = error.position.start
- expected = error.position.length
-
- # Cases where first token is param but with leading spaces.
- if (len(token.string.lstrip()) == len(token.string) - actual and
- token.string.lstrip()):
- token.string = token.string.lstrip()
- actual = 0
-
- if token.type in (Type.WHITESPACE, Type.PARAMETERS) and actual != 0:
- token.string = token.string.lstrip() + (' ' * expected)
- self._AddFix([token])
- else:
- # We need to add indentation.
- new_token = Token(' ' * expected, Type.WHITESPACE,
- token.line, token.line_number)
- # Note that we'll never need to add indentation at the first line,
- # since it will always not be indented. Therefore it's safe to assume
- # token.previous exists.
- tokenutil.InsertTokenAfter(new_token, token.previous)
- self._AddFix([token])
-
- elif code in [errors.MALFORMED_END_OF_SCOPE_COMMENT,
- errors.MISSING_END_OF_SCOPE_COMMENT]:
- # Only fix cases where }); is found with no trailing content on the line
- # other than a comment. Value of 'token' is set to } for this error.
- if (token.type == Type.END_BLOCK and
- token.next.type == Type.END_PAREN and
- token.next.next.type == Type.SEMICOLON):
- current_token = token.next.next.next
- removed_tokens = []
- while current_token and current_token.line_number == token.line_number:
- if current_token.IsAnyType(Type.WHITESPACE,
- Type.START_SINGLE_LINE_COMMENT,
- Type.COMMENT):
- removed_tokens.append(current_token)
- current_token = current_token.next
- else:
- return
-
- if removed_tokens:
- self._DeleteTokens(removed_tokens[0], len(removed_tokens))
-
- whitespace_token = Token(' ', Type.WHITESPACE, token.line,
- token.line_number)
- start_comment_token = Token('//', Type.START_SINGLE_LINE_COMMENT,
- token.line, token.line_number)
- comment_token = Token(' goog.scope', Type.COMMENT, token.line,
- token.line_number)
- insertion_tokens = [whitespace_token, start_comment_token,
- comment_token]
-
- tokenutil.InsertTokensAfter(insertion_tokens, token.next.next)
- self._AddFix(removed_tokens + insertion_tokens)
-
- elif code in [errors.EXTRA_GOOG_PROVIDE, errors.EXTRA_GOOG_REQUIRE]:
- tokens_in_line = tokenutil.GetAllTokensInSameLine(token)
- num_delete_tokens = len(tokens_in_line)
- # If line being deleted is preceded and succeed with blank lines then
- # delete one blank line also.
- if (tokens_in_line[0].previous and tokens_in_line[-1].next
- and tokens_in_line[0].previous.type == Type.BLANK_LINE
- and tokens_in_line[-1].next.type == Type.BLANK_LINE):
- num_delete_tokens += 1
- self._DeleteTokens(tokens_in_line[0], num_delete_tokens)
- self._AddFix(tokens_in_line)
-
- elif code in [errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_REQUIRE]:
- missing_namespaces = error.fix_data[0]
- need_blank_line = error.fix_data[1] or (not token.previous)
-
- insert_location = Token('', Type.NORMAL, '', token.line_number - 1)
- dummy_first_token = insert_location
- tokenutil.InsertTokenBefore(insert_location, token)
-
- # If inserting a blank line check blank line does not exist before
- # token to avoid extra blank lines.
- if (need_blank_line and insert_location.previous
- and insert_location.previous.type != Type.BLANK_LINE):
- tokenutil.InsertBlankLineAfter(insert_location)
- insert_location = insert_location.next
-
- for missing_namespace in missing_namespaces:
- new_tokens = self._GetNewRequireOrProvideTokens(
- code == errors.MISSING_GOOG_PROVIDE,
- missing_namespace, insert_location.line_number + 1)
- tokenutil.InsertLineAfter(insert_location, new_tokens)
- insert_location = new_tokens[-1]
- self._AddFix(new_tokens)
-
- # If inserting a blank line check blank line does not exist after
- # token to avoid extra blank lines.
- if (need_blank_line and insert_location.next
- and insert_location.next.type != Type.BLANK_LINE):
- tokenutil.InsertBlankLineAfter(insert_location)
-
- tokenutil.DeleteToken(dummy_first_token)
-
- def _StripSpace(self, token, before):
- """Strip whitespace tokens either preceding or following the given token.
-
- Args:
- token: The token.
- before: If true, strip space before the token, if false, after it.
- """
- token = token.previous if before else token.next
- while token and token.type == Type.WHITESPACE:
- tokenutil.DeleteToken(token)
- token = token.previous if before else token.next
-
- def _GetNewRequireOrProvideTokens(self, is_provide, namespace, line_number):
- """Returns a list of tokens to create a goog.require/provide statement.
-
- Args:
- is_provide: True if getting tokens for a provide, False for require.
- namespace: The required or provided namespaces to get tokens for.
- line_number: The line number the new require or provide statement will be
- on.
-
- Returns:
- Tokens to create a new goog.require or goog.provide statement.
- """
- string = 'goog.require'
- if is_provide:
- string = 'goog.provide'
- line_text = string + '(\'' + namespace + '\');\n'
- return [
- Token(string, Type.IDENTIFIER, line_text, line_number),
- Token('(', Type.START_PAREN, line_text, line_number),
- Token('\'', Type.SINGLE_QUOTE_STRING_START, line_text, line_number),
- Token(namespace, Type.STRING_TEXT, line_text, line_number),
- Token('\'', Type.SINGLE_QUOTE_STRING_END, line_text, line_number),
- Token(')', Type.END_PAREN, line_text, line_number),
- Token(';', Type.SEMICOLON, line_text, line_number)
- ]
-
- def _DeleteToken(self, token):
- """Deletes the specified token from the linked list of tokens.
-
- Updates instance variables pointing to tokens such as _file_token if
- they reference the deleted token.
-
- Args:
- token: The token to delete.
- """
- if token == self._file_token:
- self._file_token = token.next
-
- tokenutil.DeleteToken(token)
-
- def _DeleteTokens(self, token, token_count):
- """Deletes the given number of tokens starting with the given token.
-
- Updates instance variables pointing to tokens such as _file_token if
- they reference the deleted token.
-
- Args:
- token: The first token to delete.
- token_count: The total number of tokens to delete.
- """
- if token == self._file_token:
- for unused_i in xrange(token_count):
- self._file_token = self._file_token.next
-
- tokenutil.DeleteTokens(token, token_count)
-
- def FinishFile(self):
- """Called when the current file has finished style checking.
-
- Used to go back and fix any errors in the file. It currently supports both
- js and html files. For js files it does a simple dump of all tokens, but in
- order to support html file, we need to merge the original file with the new
- token set back together. This works because the tokenized html file is the
- original html file with all non js lines kept but blanked out with one blank
- line token per line of html.
- """
- if self._file_fix_count:
- # Get the original file content for html.
- if self._file_is_html:
- f = open(self._file_name, 'r')
- original_lines = f.readlines()
- f.close()
-
- f = self._external_file
- if not f:
- error_noun = 'error' if self._file_fix_count == 1 else 'errors'
- print 'Fixed %d %s in %s' % (
- self._file_fix_count, error_noun, self._file_name)
- f = open(self._file_name, 'w')
-
- token = self._file_token
- # Finding the first not deleted token.
- while token.is_deleted:
- token = token.next
- # If something got inserted before first token (e.g. due to sorting)
- # then move to start. Bug 8398202.
- while token.previous:
- token = token.previous
- char_count = 0
- line = ''
- while token:
- line += token.string
- char_count += len(token.string)
-
- if token.IsLastInLine():
- # We distinguish if a blank line in html was from stripped original
- # file or newly added error fix by looking at the "org_line_number"
- # field on the token. It is only set in the tokenizer, so for all
- # error fixes, the value should be None.
- if (line or not self._file_is_html or
- token.orig_line_number is None):
- f.write(line)
- f.write('\n')
- else:
- f.write(original_lines[token.orig_line_number - 1])
- line = ''
- if char_count > 80 and token.line_number in self._file_changed_lines:
- print 'WARNING: Line %d of %s is now longer than 80 characters.' % (
- token.line_number, self._file_name)
-
- char_count = 0
-
- token = token.next
-
- if not self._external_file:
- # Close the file if we created it
- f.close()
diff --git a/tools/closure_linter/build/lib/closure_linter/error_fixer_test.py b/tools/closure_linter/build/lib/closure_linter/error_fixer_test.py
deleted file mode 100644
index 49f449de42..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/error_fixer_test.py
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unit tests for the error_fixer module."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-
-
-import unittest as googletest
-from closure_linter import error_fixer
-from closure_linter import testutil
-
-
class ErrorFixerTest(googletest.TestCase):
  """Unit tests for error_fixer."""

  def setUp(self):
    # Fresh fixer per test; writes in place by default (no external file).
    self.error_fixer = error_fixer.ErrorFixer()

  def testDeleteToken(self):
    # _DeleteToken must advance _file_token when the file's first token is
    # the one being deleted.
    start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
    second_token = start_token.next
    self.error_fixer.HandleFile('test_file', start_token)

    self.error_fixer._DeleteToken(start_token)

    self.assertEqual(second_token, self.error_fixer._file_token)

  def testDeleteTokens(self):
    # Deleting N tokens starting at _file_token must move it N tokens ahead.
    start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
    fourth_token = start_token.next.next.next
    self.error_fixer.HandleFile('test_file', start_token)

    self.error_fixer._DeleteTokens(start_token, 3)

    self.assertEqual(fourth_token, self.error_fixer._file_token)
-
# Minimal JavaScript source used as tokenizer input by the tests above.
_TEST_SCRIPT = """\
var x = 3;
"""

if __name__ == '__main__':
  googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/errorrecord.py b/tools/closure_linter/build/lib/closure_linter/errorrecord.py
deleted file mode 100644
index ce9fb908c7..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/errorrecord.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""A simple, pickle-serializable class to represent a lint error."""
-
-__author__ = 'nnaze@google.com (Nathan Naze)'
-
-import gflags as flags
-
-from closure_linter import errors
-from closure_linter.common import erroroutput
-
-FLAGS = flags.FLAGS
-
-
-class ErrorRecord(object):
- """Record-keeping struct that can be serialized back from a process.
-
- Attributes:
- path: Path to the file.
- error_string: Error string for the user.
- new_error: Whether this is a "new error" (see errors.NEW_ERRORS).
- """
-
- def __init__(self, path, error_string, new_error):
- self.path = path
- self.error_string = error_string
- self.new_error = new_error
-
-
-def MakeErrorRecord(path, error):
- """Make an error record with correctly formatted error string.
-
- Errors are not able to be serialized (pickled) over processes because of
- their pointers to the complex token/context graph. We use an intermediary
- serializable class to pass back just the relevant information.
-
- Args:
- path: Path of file the error was found in.
- error: An error.Error instance.
-
- Returns:
- _ErrorRecord instance.
- """
- new_error = error.code in errors.NEW_ERRORS
-
- if FLAGS.unix_mode:
- error_string = erroroutput.GetUnixErrorOutput(
- path, error, new_error=new_error)
- else:
- error_string = erroroutput.GetErrorOutput(error, new_error=new_error)
-
- return ErrorRecord(path, error_string, new_error)
diff --git a/tools/closure_linter/build/lib/closure_linter/errorrules.py b/tools/closure_linter/build/lib/closure_linter/errorrules.py
deleted file mode 100644
index b1b72aab6d..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/errorrules.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Linter error rules class for Closure Linter."""
-
-__author__ = 'robbyw@google.com (Robert Walker)'
-
-import gflags as flags
-from closure_linter import errors
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_boolean('jsdoc', True,
- 'Whether to report errors for missing JsDoc.')
-flags.DEFINE_list('disable', None,
- 'Disable specific error. Usage Ex.: gjslint --disable 1,'
- '0011 foo.js.')
-flags.DEFINE_integer('max_line_length', 80, 'Maximum line length allowed '
- 'without warning.', lower_bound=1)
-
-disabled_error_nums = None
-
-
-def GetMaxLineLength():
- """Returns allowed maximum length of line.
-
- Returns:
- Length of line allowed without any warning.
- """
- return FLAGS.max_line_length
-
-
-def ShouldReportError(error):
- """Whether the given error should be reported.
-
- Returns:
- True for all errors except missing documentation errors and disabled
- errors. For missing documentation, it returns the value of the
- jsdoc flag.
- """
- global disabled_error_nums
- if disabled_error_nums is None:
- disabled_error_nums = []
- if FLAGS.disable:
- for error_str in FLAGS.disable:
- error_num = 0
- try:
- error_num = int(error_str)
- except ValueError:
- pass
- disabled_error_nums.append(error_num)
-
- return ((FLAGS.jsdoc or error not in (
- errors.MISSING_PARAMETER_DOCUMENTATION,
- errors.MISSING_RETURN_DOCUMENTATION,
- errors.MISSING_MEMBER_DOCUMENTATION,
- errors.MISSING_PRIVATE,
- errors.MISSING_JSDOC_TAG_THIS)) and
- (not FLAGS.disable or error not in disabled_error_nums))
diff --git a/tools/closure_linter/build/lib/closure_linter/errorrules_test.py b/tools/closure_linter/build/lib/closure_linter/errorrules_test.py
deleted file mode 100644
index cb903785e6..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/errorrules_test.py
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2013 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Medium tests for the gjslint errorrules.
-
-Currently its just verifying that warnings can't be disabled.
-"""
-
-
-
-import gflags as flags
-import unittest as googletest
-
-from closure_linter import errors
-from closure_linter import runner
-from closure_linter.common import erroraccumulator
-
-flags.FLAGS.strict = True
-flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js')
-flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
-
-
-class ErrorRulesTest(googletest.TestCase):
- """Test case to for gjslint errorrules."""
-
- def testNoMaxLineLengthFlagExists(self):
- """Tests that --max_line_length flag does not exists."""
- self.assertTrue('max_line_length' not in flags.FLAGS.FlagDict())
-
- def testGetMaxLineLength(self):
- """Tests warning are reported for line greater than 80.
- """
-
- # One line > 100 and one line > 80 and < 100. So should produce two
- # line too long error.
- original = [
- 'goog.require(\'dummy.aa\');',
- '',
- 'function a() {',
- ' dummy.aa.i = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13'
- ' + 14 + 15 + 16 + 17 + 18 + 19 + 20;',
- ' dummy.aa.j = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13'
- ' + 14 + 15 + 16 + 17 + 18;',
- '}',
- ''
- ]
-
- # Expect line too long.
- expected = [errors.LINE_TOO_LONG, errors.LINE_TOO_LONG]
-
- self._AssertErrors(original, expected)
-
- def testNoDisableFlagExists(self):
- """Tests that --disable flag does not exists."""
- self.assertTrue('disable' not in flags.FLAGS.FlagDict())
-
- def testWarningsNotDisabled(self):
- """Tests warnings are reported when nothing is disabled.
- """
- original = [
- 'goog.require(\'dummy.aa\');',
- 'goog.require(\'dummy.Cc\');',
- 'goog.require(\'dummy.Dd\');',
- '',
- 'function a() {',
- ' dummy.aa.i = 1;',
- ' dummy.Cc.i = 1;',
- ' dummy.Dd.i = 1;',
- '}',
- ]
-
- expected = [errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
- errors.FILE_MISSING_NEWLINE]
-
- self._AssertErrors(original, expected)
-
- def _AssertErrors(self, original, expected_errors, include_header=True):
- """Asserts that the error fixer corrects original to expected."""
- if include_header:
- original = self._GetHeader() + original
-
- # Trap gjslint's output parse it to get messages added.
- error_accumulator = erroraccumulator.ErrorAccumulator()
- runner.Run('testing.js', error_accumulator, source=original)
- error_nums = [e.code for e in error_accumulator.GetErrors()]
-
- error_nums.sort()
- expected_errors.sort()
- self.assertListEqual(error_nums, expected_errors)
-
- def _GetHeader(self):
- """Returns a fake header for a JavaScript file."""
- return [
- '// Copyright 2011 Google Inc. All Rights Reserved.',
- '',
- '/**',
- ' * @fileoverview Fake file overview.',
- ' * @author fake@google.com (Fake Person)',
- ' */',
- ''
- ]
-
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/errors.py b/tools/closure_linter/build/lib/closure_linter/errors.py
deleted file mode 100644
index 356ee0c5a6..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/errors.py
+++ /dev/null
@@ -1,154 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Error codes for JavaScript style checker."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-
-def ByName(name):
- """Get the error code for the given error name.
-
- Args:
- name: The name of the error
-
- Returns:
- The error code
- """
- return globals()[name]
-
-
-# "File-fatal" errors - these errors stop further parsing of a single file
-FILE_NOT_FOUND = -1
-FILE_DOES_NOT_PARSE = -2
-
-# Spacing
-EXTRA_SPACE = 1
-MISSING_SPACE = 2
-EXTRA_LINE = 3
-MISSING_LINE = 4
-ILLEGAL_TAB = 5
-WRONG_INDENTATION = 6
-WRONG_BLANK_LINE_COUNT = 7
-
-# Semicolons
-MISSING_SEMICOLON = 10
-MISSING_SEMICOLON_AFTER_FUNCTION = 11
-ILLEGAL_SEMICOLON_AFTER_FUNCTION = 12
-REDUNDANT_SEMICOLON = 13
-
-# Miscellaneous
-ILLEGAL_PROTOTYPE_MEMBER_VALUE = 100
-LINE_TOO_LONG = 110
-LINE_STARTS_WITH_OPERATOR = 120
-COMMA_AT_END_OF_LITERAL = 121
-LINE_ENDS_WITH_DOT = 122
-MULTI_LINE_STRING = 130
-UNNECESSARY_DOUBLE_QUOTED_STRING = 131
-UNUSED_PRIVATE_MEMBER = 132
-UNUSED_LOCAL_VARIABLE = 133
-
-# Requires, provides
-GOOG_REQUIRES_NOT_ALPHABETIZED = 140
-GOOG_PROVIDES_NOT_ALPHABETIZED = 141
-MISSING_GOOG_REQUIRE = 142
-MISSING_GOOG_PROVIDE = 143
-EXTRA_GOOG_REQUIRE = 144
-EXTRA_GOOG_PROVIDE = 145
-ALIAS_STMT_NEEDS_GOOG_REQUIRE = 146
-
-# JsDoc
-INVALID_JSDOC_TAG = 200
-INVALID_USE_OF_DESC_TAG = 201
-NO_BUG_NUMBER_AFTER_BUG_TAG = 202
-MISSING_PARAMETER_DOCUMENTATION = 210
-EXTRA_PARAMETER_DOCUMENTATION = 211
-WRONG_PARAMETER_DOCUMENTATION = 212
-MISSING_JSDOC_TAG_TYPE = 213
-MISSING_JSDOC_TAG_DESCRIPTION = 214
-MISSING_JSDOC_PARAM_NAME = 215
-OUT_OF_ORDER_JSDOC_TAG_TYPE = 216
-MISSING_RETURN_DOCUMENTATION = 217
-UNNECESSARY_RETURN_DOCUMENTATION = 218
-MISSING_BRACES_AROUND_TYPE = 219
-MISSING_MEMBER_DOCUMENTATION = 220
-MISSING_PRIVATE = 221
-EXTRA_PRIVATE = 222
-INVALID_OVERRIDE_PRIVATE = 223
-INVALID_INHERIT_DOC_PRIVATE = 224
-MISSING_JSDOC_TAG_THIS = 225
-UNNECESSARY_BRACES_AROUND_INHERIT_DOC = 226
-INVALID_AUTHOR_TAG_DESCRIPTION = 227
-JSDOC_PREFER_QUESTION_TO_PIPE_NULL = 230
-JSDOC_ILLEGAL_QUESTION_WITH_PIPE = 231
-JSDOC_MISSING_OPTIONAL_TYPE = 232
-JSDOC_MISSING_OPTIONAL_PREFIX = 233
-JSDOC_MISSING_VAR_ARGS_TYPE = 234
-JSDOC_MISSING_VAR_ARGS_NAME = 235
-JSDOC_DOES_NOT_PARSE = 236
-# TODO(robbyw): Split this in to more specific syntax problems.
-INCORRECT_SUPPRESS_SYNTAX = 250
-INVALID_SUPPRESS_TYPE = 251
-UNNECESSARY_SUPPRESS = 252
-
-# File ending
-FILE_MISSING_NEWLINE = 300
-FILE_IN_BLOCK = 301
-
-# Interfaces
-INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS = 400
-INTERFACE_METHOD_CANNOT_HAVE_CODE = 401
-
-# Comments
-MISSING_END_OF_SCOPE_COMMENT = 500
-MALFORMED_END_OF_SCOPE_COMMENT = 501
-
-# goog.scope - Namespace aliasing
-# TODO(nnaze) Add additional errors here and in aliaspass.py
-INVALID_USE_OF_GOOG_SCOPE = 600
-EXTRA_GOOG_SCOPE_USAGE = 601
-
-# ActionScript specific errors:
-# TODO(user): move these errors to their own file and move all JavaScript
-# specific errors to their own file as well.
-# All ActionScript specific errors should have error number at least 1000.
-FUNCTION_MISSING_RETURN_TYPE = 1132
-PARAMETER_MISSING_TYPE = 1133
-VAR_MISSING_TYPE = 1134
-PARAMETER_MISSING_DEFAULT_VALUE = 1135
-IMPORTS_NOT_ALPHABETIZED = 1140
-IMPORT_CONTAINS_WILDCARD = 1141
-UNUSED_IMPORT = 1142
-INVALID_TRACE_SEVERITY_LEVEL = 1250
-MISSING_TRACE_SEVERITY_LEVEL = 1251
-MISSING_TRACE_MESSAGE = 1252
-REMOVE_TRACE_BEFORE_SUBMIT = 1253
-REMOVE_COMMENT_BEFORE_SUBMIT = 1254
-# End of list of ActionScript specific errors.
-
-NEW_ERRORS = frozenset([
- # Errors added after 2.0.2:
- WRONG_INDENTATION,
- MISSING_SEMICOLON,
- # Errors added after 2.3.9:
- JSDOC_MISSING_VAR_ARGS_TYPE,
- JSDOC_MISSING_VAR_ARGS_NAME,
- # Errors added after 2.3.15:
- ALIAS_STMT_NEEDS_GOOG_REQUIRE,
- JSDOC_DOES_NOT_PARSE,
- LINE_ENDS_WITH_DOT,
- # Errors added after 2.3.17:
- ])
diff --git a/tools/closure_linter/build/lib/closure_linter/fixjsstyle.py b/tools/closure_linter/build/lib/closure_linter/fixjsstyle.py
deleted file mode 100644
index 2d65e0398f..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/fixjsstyle.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Automatically fix simple style guide violations."""
-
-__author__ = 'robbyw@google.com (Robert Walker)'
-
-import StringIO
-import sys
-
-import gflags as flags
-
-from closure_linter import error_fixer
-from closure_linter import runner
-from closure_linter.common import simplefileflags as fileflags
-
-FLAGS = flags.FLAGS
-flags.DEFINE_list('additional_extensions', None, 'List of additional file '
- 'extensions (not js) that should be treated as '
- 'JavaScript files.')
-flags.DEFINE_boolean('dry_run', False, 'Do not modify the file, only print it.')
-
-
-def main(argv=None):
- """Main function.
-
- Args:
- argv: Sequence of command line arguments.
- """
- if argv is None:
- argv = flags.FLAGS(sys.argv)
-
- suffixes = ['.js']
- if FLAGS.additional_extensions:
- suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
-
- files = fileflags.GetFileList(argv, 'JavaScript', suffixes)
-
- output_buffer = None
- if FLAGS.dry_run:
- output_buffer = StringIO.StringIO()
-
- fixer = error_fixer.ErrorFixer(output_buffer)
-
- # Check the list of files.
- for filename in files:
- runner.Run(filename, fixer)
- if FLAGS.dry_run:
- print output_buffer.getvalue()
-
-
-if __name__ == '__main__':
- main()
diff --git a/tools/closure_linter/build/lib/closure_linter/fixjsstyle_test.py b/tools/closure_linter/build/lib/closure_linter/fixjsstyle_test.py
deleted file mode 100644
index 34de3f8488..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/fixjsstyle_test.py
+++ /dev/null
@@ -1,615 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Medium tests for the gpylint auto-fixer."""
-
-__author__ = 'robbyw@google.com (Robby Walker)'
-
-import StringIO
-
-import gflags as flags
-import unittest as googletest
-from closure_linter import error_fixer
-from closure_linter import runner
-
-
-_RESOURCE_PREFIX = 'closure_linter/testdata'
-
-flags.FLAGS.strict = True
-flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js')
-flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
-
-
-class FixJsStyleTest(googletest.TestCase):
- """Test case to for gjslint auto-fixing."""
-
- def setUp(self):
- flags.FLAGS.dot_on_next_line = True
-
- def tearDown(self):
- flags.FLAGS.dot_on_next_line = False
-
- def testFixJsStyle(self):
- test_cases = [
- ['fixjsstyle.in.js', 'fixjsstyle.out.js'],
- ['indentation.js', 'fixjsstyle.indentation.out.js'],
- ['fixjsstyle.html.in.html', 'fixjsstyle.html.out.html'],
- ['fixjsstyle.oplineend.in.js', 'fixjsstyle.oplineend.out.js']]
- for [running_input_file, running_output_file] in test_cases:
- print 'Checking %s vs %s' % (running_input_file, running_output_file)
- input_filename = None
- golden_filename = None
- current_filename = None
- try:
- input_filename = '%s/%s' % (_RESOURCE_PREFIX, running_input_file)
- current_filename = input_filename
-
- golden_filename = '%s/%s' % (_RESOURCE_PREFIX, running_output_file)
- current_filename = golden_filename
- except IOError as ex:
- raise IOError('Could not find testdata resource for %s: %s' %
- (current_filename, ex))
-
- if running_input_file == 'fixjsstyle.in.js':
- with open(input_filename) as f:
- for line in f:
- # Go to last line.
- pass
- self.assertTrue(line == line.rstrip(), '%s file should not end '
- 'with a new line.' % (input_filename))
-
- # Autofix the file, sending output to a fake file.
- actual = StringIO.StringIO()
- runner.Run(input_filename, error_fixer.ErrorFixer(actual))
-
- # Now compare the files.
- actual.seek(0)
- expected = open(golden_filename, 'r')
-
- # Uncomment to generate new golden files and run
- # open('/'.join(golden_filename.split('/')[4:]), 'w').write(actual.read())
- # actual.seek(0)
-
- self.assertEqual(actual.readlines(), expected.readlines())
-
- def testAddProvideFirstLine(self):
- """Tests handling of case where goog.provide is added."""
- original = [
- 'dummy.bb.cc = 1;',
- ]
-
- expected = [
- 'goog.provide(\'dummy.bb\');',
- '',
- 'dummy.bb.cc = 1;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- original = [
- '',
- 'dummy.bb.cc = 1;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testAddRequireFirstLine(self):
- """Tests handling of case where goog.require is added."""
- original = [
- 'a = dummy.bb.cc;',
- ]
-
- expected = [
- 'goog.require(\'dummy.bb\');',
- '',
- 'a = dummy.bb.cc;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- original = [
- '',
- 'a = dummy.bb.cc;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testDeleteProvideAndAddProvideFirstLine(self):
- """Tests handling of case where goog.provide is deleted and added.
-
- Bug 14832597.
- """
- original = [
- 'goog.provide(\'dummy.aa\');',
- '',
- 'dummy.bb.cc = 1;',
- ]
-
- expected = [
- 'goog.provide(\'dummy.bb\');',
- '',
- 'dummy.bb.cc = 1;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- original = [
- 'goog.provide(\'dummy.aa\');',
- 'dummy.bb.cc = 1;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testDeleteProvideAndAddRequireFirstLine(self):
- """Tests handling where goog.provide is deleted and goog.require added.
-
- Bug 14832597.
- """
- original = [
- 'goog.provide(\'dummy.aa\');',
- '',
- 'a = dummy.bb.cc;',
- ]
-
- expected = [
- 'goog.require(\'dummy.bb\');',
- '',
- 'a = dummy.bb.cc;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- original = [
- 'goog.provide(\'dummy.aa\');',
- 'a = dummy.bb.cc;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testDeleteRequireAndAddRequireFirstLine(self):
- """Tests handling of case where goog.require is deleted and added.
-
- Bug 14832597.
- """
- original = [
- 'goog.require(\'dummy.aa\');',
- '',
- 'a = dummy.bb.cc;',
- ]
-
- expected = [
- 'goog.require(\'dummy.bb\');',
- '',
- 'a = dummy.bb.cc;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- original = [
- 'goog.require(\'dummy.aa\');',
- 'a = dummy.bb.cc;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testDeleteRequireAndAddProvideFirstLine(self):
- """Tests handling where goog.require is deleted and goog.provide added.
-
- Bug 14832597.
- """
- original = [
- 'goog.require(\'dummy.aa\');',
- '',
- 'dummy.bb.cc = 1;',
- ]
-
- expected = [
- 'goog.provide(\'dummy.bb\');',
- '',
- 'dummy.bb.cc = 1;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- original = [
- 'goog.require(\'dummy.aa\');',
- 'dummy.bb.cc = 1;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testMultipleProvideInsert(self):
- original = [
- 'goog.provide(\'dummy.bb\');',
- 'goog.provide(\'dummy.dd\');',
- '',
- 'dummy.aa.ff = 1;',
- 'dummy.bb.ff = 1;',
- 'dummy.cc.ff = 1;',
- 'dummy.dd.ff = 1;',
- 'dummy.ee.ff = 1;',
- ]
-
- expected = [
- 'goog.provide(\'dummy.aa\');',
- 'goog.provide(\'dummy.bb\');',
- 'goog.provide(\'dummy.cc\');',
- 'goog.provide(\'dummy.dd\');',
- 'goog.provide(\'dummy.ee\');',
- '',
- 'dummy.aa.ff = 1;',
- 'dummy.bb.ff = 1;',
- 'dummy.cc.ff = 1;',
- 'dummy.dd.ff = 1;',
- 'dummy.ee.ff = 1;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testMultipleRequireInsert(self):
- original = [
- 'goog.require(\'dummy.bb\');',
- 'goog.require(\'dummy.dd\');',
- '',
- 'a = dummy.aa.ff;',
- 'b = dummy.bb.ff;',
- 'c = dummy.cc.ff;',
- 'd = dummy.dd.ff;',
- 'e = dummy.ee.ff;',
- ]
-
- expected = [
- 'goog.require(\'dummy.aa\');',
- 'goog.require(\'dummy.bb\');',
- 'goog.require(\'dummy.cc\');',
- 'goog.require(\'dummy.dd\');',
- 'goog.require(\'dummy.ee\');',
- '',
- 'a = dummy.aa.ff;',
- 'b = dummy.bb.ff;',
- 'c = dummy.cc.ff;',
- 'd = dummy.dd.ff;',
- 'e = dummy.ee.ff;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testUnsortedRequires(self):
- """Tests handling of unsorted goog.require statements without header.
-
- Bug 8398202.
- """
- original = [
- 'goog.require(\'dummy.aa\');',
- 'goog.require(\'dummy.Cc\');',
- 'goog.require(\'dummy.Dd\');',
- '',
- 'function a() {',
- ' dummy.aa.i = 1;',
- ' dummy.Cc.i = 1;',
- ' dummy.Dd.i = 1;',
- '}',
- ]
-
- expected = [
- 'goog.require(\'dummy.Cc\');',
- 'goog.require(\'dummy.Dd\');',
- 'goog.require(\'dummy.aa\');',
- '',
- 'function a() {',
- ' dummy.aa.i = 1;',
- ' dummy.Cc.i = 1;',
- ' dummy.Dd.i = 1;',
- '}',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testMissingExtraAndUnsortedRequires(self):
- """Tests handling of missing extra and unsorted goog.require statements."""
- original = [
- 'goog.require(\'dummy.aa\');',
- 'goog.require(\'dummy.Cc\');',
- 'goog.require(\'dummy.Dd\');',
- '',
- 'var x = new dummy.Bb();',
- 'dummy.Cc.someMethod();',
- 'dummy.aa.someMethod();',
- ]
-
- expected = [
- 'goog.require(\'dummy.Bb\');',
- 'goog.require(\'dummy.Cc\');',
- 'goog.require(\'dummy.aa\');',
- '',
- 'var x = new dummy.Bb();',
- 'dummy.Cc.someMethod();',
- 'dummy.aa.someMethod();',
- ]
-
- self._AssertFixes(original, expected)
-
- def testExtraRequireOnFirstLine(self):
- """Tests handling of extra goog.require statement on the first line.
-
- There was a bug when fixjsstyle quits with an exception. It happened if
- - the first line of the file is an extra goog.require() statement,
- - goog.require() statements are not sorted.
- """
- original = [
- 'goog.require(\'dummy.aa\');',
- 'goog.require(\'dummy.cc\');',
- 'goog.require(\'dummy.bb\');',
- '',
- 'var x = new dummy.bb();',
- 'var y = new dummy.cc();',
- ]
-
- expected = [
- 'goog.require(\'dummy.bb\');',
- 'goog.require(\'dummy.cc\');',
- '',
- 'var x = new dummy.bb();',
- 'var y = new dummy.cc();',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testUnsortedProvides(self):
- """Tests handling of unsorted goog.provide statements without header.
-
- Bug 8398202.
- """
- original = [
- 'goog.provide(\'dummy.aa\');',
- 'goog.provide(\'dummy.Cc\');',
- 'goog.provide(\'dummy.Dd\');',
- '',
- 'dummy.aa = function() {};'
- 'dummy.Cc = function() {};'
- 'dummy.Dd = function() {};'
- ]
-
- expected = [
- 'goog.provide(\'dummy.Cc\');',
- 'goog.provide(\'dummy.Dd\');',
- 'goog.provide(\'dummy.aa\');',
- '',
- 'dummy.aa = function() {};'
- 'dummy.Cc = function() {};'
- 'dummy.Dd = function() {};'
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testMissingExtraAndUnsortedProvides(self):
- """Tests handling of missing extra and unsorted goog.provide statements."""
- original = [
- 'goog.provide(\'dummy.aa\');',
- 'goog.provide(\'dummy.Cc\');',
- 'goog.provide(\'dummy.Dd\');',
- '',
- 'dummy.Cc = function() {};',
- 'dummy.Bb = function() {};',
- 'dummy.aa.someMethod = function();',
- ]
-
- expected = [
- 'goog.provide(\'dummy.Bb\');',
- 'goog.provide(\'dummy.Cc\');',
- 'goog.provide(\'dummy.aa\');',
- '',
- 'dummy.Cc = function() {};',
- 'dummy.Bb = function() {};',
- 'dummy.aa.someMethod = function();',
- ]
-
- self._AssertFixes(original, expected)
-
- def testNoRequires(self):
- """Tests positioning of missing requires without existing requires."""
- original = [
- 'goog.provide(\'dummy.Something\');',
- '',
- 'dummy.Something = function() {};',
- '',
- 'var x = new dummy.Bb();',
- ]
-
- expected = [
- 'goog.provide(\'dummy.Something\');',
- '',
- 'goog.require(\'dummy.Bb\');',
- '',
- 'dummy.Something = function() {};',
- '',
- 'var x = new dummy.Bb();',
- ]
-
- self._AssertFixes(original, expected)
-
- def testNoProvides(self):
- """Tests positioning of missing provides without existing provides."""
- original = [
- 'goog.require(\'dummy.Bb\');',
- '',
- 'dummy.Something = function() {};',
- '',
- 'var x = new dummy.Bb();',
- ]
-
- expected = [
- 'goog.provide(\'dummy.Something\');',
- '',
- 'goog.require(\'dummy.Bb\');',
- '',
- 'dummy.Something = function() {};',
- '',
- 'var x = new dummy.Bb();',
- ]
-
- self._AssertFixes(original, expected)
-
- def testOutputOkayWhenFirstTokenIsDeleted(self):
- """Tests that autofix output is is correct when first token is deleted.
-
- Regression test for bug 4581567
- """
- original = ['"use strict";']
- expected = ["'use strict';"]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testGoogScopeIndentation(self):
- """Tests Handling a typical end-of-scope indentation fix."""
- original = [
- 'goog.scope(function() {',
- ' // TODO(brain): Take over the world.',
- '}); // goog.scope',
- ]
-
- expected = [
- 'goog.scope(function() {',
- '// TODO(brain): Take over the world.',
- '}); // goog.scope',
- ]
-
- self._AssertFixes(original, expected)
-
- def testMissingEndOfScopeComment(self):
- """Tests Handling a missing comment at end of goog.scope."""
- original = [
- 'goog.scope(function() {',
- '});',
- ]
-
- expected = [
- 'goog.scope(function() {',
- '}); // goog.scope',
- ]
-
- self._AssertFixes(original, expected)
-
- def testMissingEndOfScopeCommentWithOtherComment(self):
- """Tests handling an irrelevant comment at end of goog.scope."""
- original = [
- 'goog.scope(function() {',
- "}); // I don't belong here!",
- ]
-
- expected = [
- 'goog.scope(function() {',
- '}); // goog.scope',
- ]
-
- self._AssertFixes(original, expected)
-
- def testMalformedEndOfScopeComment(self):
- """Tests Handling a malformed comment at end of goog.scope."""
- original = [
- 'goog.scope(function() {',
- '}); // goog.scope FTW',
- ]
-
- expected = [
- 'goog.scope(function() {',
- '}); // goog.scope',
- ]
-
- self._AssertFixes(original, expected)
-
- def testEndsWithIdentifier(self):
- """Tests Handling case where script ends with identifier. Bug 7643404."""
- original = [
- 'goog.provide(\'xyz\');',
- '',
- 'abc'
- ]
-
- expected = [
- 'goog.provide(\'xyz\');',
- '',
- 'abc;'
- ]
-
- self._AssertFixes(original, expected)
-
- def testFileStartsWithSemicolon(self):
- """Tests handling files starting with semicolon.
-
- b/10062516
- """
- original = [
- ';goog.provide(\'xyz\');',
- '',
- 'abc;'
- ]
-
- expected = [
- 'goog.provide(\'xyz\');',
- '',
- 'abc;'
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testCodeStartsWithSemicolon(self):
- """Tests handling code in starting with semicolon after comments.
-
- b/10062516
- """
- original = [
- ';goog.provide(\'xyz\');',
- '',
- 'abc;'
- ]
-
- expected = [
- 'goog.provide(\'xyz\');',
- '',
- 'abc;'
- ]
-
- self._AssertFixes(original, expected)
-
- def _AssertFixes(self, original, expected, include_header=True):
- """Asserts that the error fixer corrects original to expected."""
- if include_header:
- original = self._GetHeader() + original
- expected = self._GetHeader() + expected
-
- actual = StringIO.StringIO()
- runner.Run('testing.js', error_fixer.ErrorFixer(actual), original)
- actual.seek(0)
-
- expected = [x + '\n' for x in expected]
-
- self.assertListEqual(actual.readlines(), expected)
-
- def _GetHeader(self):
- """Returns a fake header for a JavaScript file."""
- return [
- '// Copyright 2011 Google Inc. All Rights Reserved.',
- '',
- '/**',
- ' * @fileoverview Fake file overview.',
- ' * @author fake@google.com (Fake Person)',
- ' */',
- ''
- ]
-
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/full_test.py b/tools/closure_linter/build/lib/closure_linter/full_test.py
deleted file mode 100644
index d0a1557dc2..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/full_test.py
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Full regression-type (Medium) tests for gjslint.
-
-Tests every error that can be thrown by gjslint. Based heavily on
-devtools/javascript/gpylint/full_test.py
-"""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-import os
-import sys
-import unittest
-
-import gflags as flags
-import unittest as googletest
-
-from closure_linter import error_check
-from closure_linter import errors
-from closure_linter import runner
-from closure_linter.common import filetestcase
-
-_RESOURCE_PREFIX = 'closure_linter/testdata'
-
-flags.FLAGS.strict = True
-flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
-flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
-flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
- 'limited_doc_checks.js')
-flags.FLAGS.jslint_error = error_check.Rule.ALL
-
-# List of files under testdata to test.
-# We need to list files explicitly since pyglib can't list directories.
-# TODO(user): Figure out how to list the directory.
-_TEST_FILES = [
- 'all_js_wrapped.js',
- 'blank_lines.js',
- 'ends_with_block.js',
- 'empty_file.js',
- 'externs.js',
- 'externs_jsdoc.js',
- 'goog_scope.js',
- 'html_parse_error.html',
- 'indentation.js',
- 'interface.js',
- 'jsdoc.js',
- 'limited_doc_checks.js',
- 'minimal.js',
- 'other.js',
- 'provide_blank.js',
- 'provide_extra.js',
- 'provide_missing.js',
- 'require_alias.js',
- 'require_all_caps.js',
- 'require_blank.js',
- 'require_extra.js',
- 'require_function.js',
- 'require_function_missing.js',
- 'require_function_through_both.js',
- 'require_function_through_namespace.js',
- 'require_interface.js',
- 'require_interface_alias.js',
- 'require_interface_base.js',
- 'require_lower_case.js',
- 'require_missing.js',
- 'require_numeric.js',
- 'require_provide_blank.js',
- 'require_provide_missing.js',
- 'require_provide_ok.js',
- 'semicolon_missing.js',
- 'simple.html',
- 'spaces.js',
- 'tokenizer.js',
- 'unparseable.js',
- 'unused_local_variables.js',
- 'unused_private_members.js',
- 'utf8.html',
-]
-
-
-class GJsLintTestSuite(unittest.TestSuite):
- """Test suite to run a GJsLintTest for each of several files.
-
- If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
- testdata to test. Otherwise, _TEST_FILES is used.
- """
-
- def __init__(self, tests=()):
- unittest.TestSuite.__init__(self, tests)
-
- argv = sys.argv and sys.argv[1:] or []
- if argv:
- test_files = argv
- else:
- test_files = _TEST_FILES
- for test_file in test_files:
- resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
- self.addTest(
- filetestcase.AnnotatedFileTestCase(
- resource_path,
- runner.Run,
- errors.ByName))
-
-if __name__ == '__main__':
- # Don't let main parse args; it happens in the TestSuite.
- googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')
diff --git a/tools/closure_linter/build/lib/closure_linter/gjslint.py b/tools/closure_linter/build/lib/closure_linter/gjslint.py
deleted file mode 100644
index 824e025dcb..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/gjslint.py
+++ /dev/null
@@ -1,319 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Checks JavaScript files for common style guide violations.
-
-gjslint.py is designed to be used as a PRESUBMIT script to check for javascript
-style guide violations. As of now, it checks for the following violations:
-
- * Missing and extra spaces
- * Lines longer than 80 characters
- * Missing newline at end of file
- * Missing semicolon after function declaration
- * Valid JsDoc including parameter matching
-
-Someday it will validate to the best of its ability against the entirety of the
-JavaScript style guide.
-
-This file is a front end that parses arguments and flags. The core of the code
-is in tokenizer.py and checker.py.
-"""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)',
- 'nnaze@google.com (Nathan Naze)',)
-
-import errno
-import itertools
-import os
-import platform
-import re
-import sys
-import time
-
-import gflags as flags
-
-from closure_linter import errorrecord
-from closure_linter import runner
-from closure_linter.common import erroraccumulator
-from closure_linter.common import simplefileflags as fileflags
-
-# Attempt import of multiprocessing (should be available in Python 2.6 and up).
-try:
- # pylint: disable=g-import-not-at-top
- import multiprocessing
-except ImportError:
- multiprocessing = None
-
-FLAGS = flags.FLAGS
-flags.DEFINE_boolean('unix_mode', False,
- 'Whether to emit warnings in standard unix format.')
-flags.DEFINE_boolean('beep', True, 'Whether to beep when errors are found.')
-flags.DEFINE_boolean('time', False, 'Whether to emit timing statistics.')
-flags.DEFINE_boolean('quiet', False, 'Whether to minimize logged messages. '
- 'Most useful for per-file linting, such as that performed '
- 'by the presubmit linter service.')
-flags.DEFINE_boolean('check_html', False,
- 'Whether to check javascript in html files.')
-flags.DEFINE_boolean('summary', False,
- 'Whether to show an error count summary.')
-flags.DEFINE_list('additional_extensions', None, 'List of additional file '
- 'extensions (not js) that should be treated as '
- 'JavaScript files.')
-flags.DEFINE_boolean('multiprocess',
- platform.system() is 'Linux' and bool(multiprocessing),
- 'Whether to attempt parallelized linting using the '
- 'multiprocessing module. Enabled by default on Linux '
- 'if the multiprocessing module is present (Python 2.6+). '
- 'Otherwise disabled by default. '
- 'Disabling may make debugging easier.')
-flags.ADOPT_module_key_flags(fileflags)
-flags.ADOPT_module_key_flags(runner)
-
-
-GJSLINT_ONLY_FLAGS = ['--unix_mode', '--beep', '--nobeep', '--time',
- '--check_html', '--summary', '--quiet']
-
-
-
-def _MultiprocessCheckPaths(paths):
- """Run _CheckPath over mutltiple processes.
-
- Tokenization, passes, and checks are expensive operations. Running in a
- single process, they can only run on one CPU/core. Instead,
- shard out linting over all CPUs with multiprocessing to parallelize.
-
- Args:
- paths: paths to check.
-
- Yields:
- errorrecord.ErrorRecords for any found errors.
- """
-
- pool = multiprocessing.Pool()
-
- path_results = pool.imap(_CheckPath, paths)
- for results in path_results:
- for result in results:
- yield result
-
- # Force destruct before returning, as this can sometimes raise spurious
- # "interrupted system call" (EINTR), which we can ignore.
- try:
- pool.close()
- pool.join()
- del pool
- except OSError as err:
- if err.errno is not errno.EINTR:
- raise err
-
-
-def _CheckPaths(paths):
- """Run _CheckPath on all paths in one thread.
-
- Args:
- paths: paths to check.
-
- Yields:
- errorrecord.ErrorRecords for any found errors.
- """
-
- for path in paths:
- results = _CheckPath(path)
- for record in results:
- yield record
-
-
-def _CheckPath(path):
- """Check a path and return any errors.
-
- Args:
- path: paths to check.
-
- Returns:
- A list of errorrecord.ErrorRecords for any found errors.
- """
-
- error_handler = erroraccumulator.ErrorAccumulator()
- runner.Run(path, error_handler)
-
- make_error_record = lambda err: errorrecord.MakeErrorRecord(path, err)
- return map(make_error_record, error_handler.GetErrors())
-
-
-def _GetFilePaths(argv):
- suffixes = ['.js']
- if FLAGS.additional_extensions:
- suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
- if FLAGS.check_html:
- suffixes += ['.html', '.htm']
- return fileflags.GetFileList(argv, 'JavaScript', suffixes)
-
-
-# Error printing functions
-
-
-def _PrintFileSummary(paths, records):
- """Print a detailed summary of the number of errors in each file."""
-
- paths = list(paths)
- paths.sort()
-
- for path in paths:
- path_errors = [e for e in records if e.path == path]
- print '%s: %d' % (path, len(path_errors))
-
-
-def _PrintFileSeparator(path):
- print '----- FILE : %s -----' % path
-
-
-def _PrintSummary(paths, error_records):
- """Print a summary of the number of errors and files."""
-
- error_count = len(error_records)
- all_paths = set(paths)
- all_paths_count = len(all_paths)
-
- if error_count is 0:
- print '%d files checked, no errors found.' % all_paths_count
-
- new_error_count = len([e for e in error_records if e.new_error])
-
- error_paths = set([e.path for e in error_records])
- error_paths_count = len(error_paths)
- no_error_paths_count = all_paths_count - error_paths_count
-
- if (error_count or new_error_count) and not FLAGS.quiet:
- error_noun = 'error' if error_count == 1 else 'errors'
- new_error_noun = 'error' if new_error_count == 1 else 'errors'
- error_file_noun = 'file' if error_paths_count == 1 else 'files'
- ok_file_noun = 'file' if no_error_paths_count == 1 else 'files'
- print ('Found %d %s, including %d new %s, in %d %s (%d %s OK).' %
- (error_count,
- error_noun,
- new_error_count,
- new_error_noun,
- error_paths_count,
- error_file_noun,
- no_error_paths_count,
- ok_file_noun))
-
-
-def _PrintErrorRecords(error_records):
- """Print error records strings in the expected format."""
-
- current_path = None
- for record in error_records:
-
- if current_path != record.path:
- current_path = record.path
- if not FLAGS.unix_mode:
- _PrintFileSeparator(current_path)
-
- print record.error_string
-
-
-def _FormatTime(t):
- """Formats a duration as a human-readable string.
-
- Args:
- t: A duration in seconds.
-
- Returns:
- A formatted duration string.
- """
- if t < 1:
- return '%dms' % round(t * 1000)
- else:
- return '%.2fs' % t
-
-
-
-
-def main(argv=None):
- """Main function.
-
- Args:
- argv: Sequence of command line arguments.
- """
- if argv is None:
- argv = flags.FLAGS(sys.argv)
-
- if FLAGS.time:
- start_time = time.time()
-
- suffixes = ['.js']
- if FLAGS.additional_extensions:
- suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
- if FLAGS.check_html:
- suffixes += ['.html', '.htm']
- paths = fileflags.GetFileList(argv, 'JavaScript', suffixes)
-
- if FLAGS.multiprocess:
- records_iter = _MultiprocessCheckPaths(paths)
- else:
- records_iter = _CheckPaths(paths)
-
- records_iter, records_iter_copy = itertools.tee(records_iter, 2)
- _PrintErrorRecords(records_iter_copy)
-
- error_records = list(records_iter)
- _PrintSummary(paths, error_records)
-
- exit_code = 0
-
- # If there are any errors
- if error_records:
- exit_code += 1
-
- # If there are any new errors
- if [r for r in error_records if r.new_error]:
- exit_code += 2
-
- if exit_code:
- if FLAGS.summary:
- _PrintFileSummary(paths, error_records)
-
- if FLAGS.beep:
- # Make a beep noise.
- sys.stdout.write(chr(7))
-
- # Write out instructions for using fixjsstyle script to fix some of the
- # reported errors.
- fix_args = []
- for flag in sys.argv[1:]:
- for f in GJSLINT_ONLY_FLAGS:
- if flag.startswith(f):
- break
- else:
- fix_args.append(flag)
-
- if not FLAGS.quiet:
- print """
-Some of the errors reported by GJsLint may be auto-fixable using the script
-fixjsstyle. Please double check any changes it makes and report any bugs. The
-script can be run by executing:
-
-fixjsstyle %s """ % ' '.join(fix_args)
-
- if FLAGS.time:
- print 'Done in %s.' % _FormatTime(time.time() - start_time)
-
- sys.exit(exit_code)
-
-
-if __name__ == '__main__':
- main()
diff --git a/tools/closure_linter/build/lib/closure_linter/indentation.py b/tools/closure_linter/build/lib/closure_linter/indentation.py
deleted file mode 100644
index d48ad2b862..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/indentation.py
+++ /dev/null
@@ -1,617 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Methods for checking EcmaScript files for indentation issues."""
-
-__author__ = ('robbyw@google.com (Robert Walker)')
-
-import gflags as flags
-
-from closure_linter import ecmametadatapass
-from closure_linter import errors
-from closure_linter import javascripttokens
-from closure_linter import tokenutil
-from closure_linter.common import error
-from closure_linter.common import position
-
-
-flags.DEFINE_boolean('debug_indentation', False,
- 'Whether to print debugging information for indentation.')
-
-
-# Shorthand
-Context = ecmametadatapass.EcmaContext
-Error = error.Error
-Position = position.Position
-Type = javascripttokens.JavaScriptTokenType
-
-
-# The general approach:
-#
-# 1. Build a stack of tokens that can affect indentation.
-# For each token, we determine if it is a block or continuation token.
-# Some tokens need to be temporarily overwritten in case they are removed
-# before the end of the line.
-# Much of the work here is determining which tokens to keep on the stack
-# at each point. Operators, for example, should be removed once their
-# expression or line is gone, while parentheses must stay until the matching
-# end parentheses is found.
-#
-# 2. Given that stack, determine the allowable indentations.
-# Due to flexible indentation rules in JavaScript, there may be many
-# allowable indentations for each stack. We follows the general
-# "no false positives" approach of GJsLint and build the most permissive
-# set possible.
-
-
-class TokenInfo(object):
- """Stores information about a token.
-
- Attributes:
- token: The token
- is_block: Whether the token represents a block indentation.
- is_transient: Whether the token should be automatically removed without
- finding a matching end token.
- overridden_by: TokenInfo for a token that overrides the indentation that
- this token would require.
- is_permanent_override: Whether the override on this token should persist
- even after the overriding token is removed from the stack. For example:
- x([
- 1],
- 2);
- needs this to be set so the last line is not required to be a continuation
- indent.
- line_number: The effective line number of this token. Will either be the
- actual line number or the one before it in the case of a mis-wrapped
- operator.
- """
-
- def __init__(self, token, is_block=False):
- """Initializes a TokenInfo object.
-
- Args:
- token: The token
- is_block: Whether the token represents a block indentation.
- """
- self.token = token
- self.overridden_by = None
- self.is_permanent_override = False
- self.is_block = is_block
- self.is_transient = not is_block and token.type not in (
- Type.START_PAREN, Type.START_PARAMETERS)
- self.line_number = token.line_number
-
- def __repr__(self):
- result = '\n %s' % self.token
- if self.overridden_by:
- result = '%s OVERRIDDEN [by "%s"]' % (
- result, self.overridden_by.token.string)
- result += ' {is_block: %s, is_transient: %s}' % (
- self.is_block, self.is_transient)
- return result
-
-
-class IndentationRules(object):
- """EmcaScript indentation rules.
-
- Can be used to find common indentation errors in JavaScript, ActionScript and
- other Ecma like scripting languages.
- """
-
- def __init__(self):
- """Initializes the IndentationRules checker."""
- self._stack = []
-
- # Map from line number to number of characters it is off in indentation.
- self._start_index_offset = {}
-
- def Finalize(self):
- if self._stack:
- old_stack = self._stack
- self._stack = []
- raise Exception('INTERNAL ERROR: indentation stack is not empty: %r' %
- old_stack)
-
- def CheckToken(self, token, state):
- """Checks a token for indentation errors.
-
- Args:
- token: The current token under consideration
- state: Additional information about the current tree state
-
- Returns:
- An error array [error code, error string, error token] if the token is
- improperly indented, or None if indentation is correct.
- """
-
- token_type = token.type
- indentation_errors = []
- stack = self._stack
- is_first = self._IsFirstNonWhitespaceTokenInLine(token)
-
- # Add tokens that could decrease indentation before checking.
- if token_type == Type.END_PAREN:
- self._PopTo(Type.START_PAREN)
-
- elif token_type == Type.END_PARAMETERS:
- self._PopTo(Type.START_PARAMETERS)
-
- elif token_type == Type.END_BRACKET:
- self._PopTo(Type.START_BRACKET)
-
- elif token_type == Type.END_BLOCK:
- start_token = self._PopTo(Type.START_BLOCK)
- # Check for required goog.scope comment.
- if start_token:
- goog_scope = tokenutil.GoogScopeOrNoneFromStartBlock(start_token.token)
- if goog_scope is not None:
- if not token.line.endswith('; // goog.scope\n'):
- if (token.line.find('//') > -1 and
- token.line.find('goog.scope') >
- token.line.find('//')):
- indentation_errors.append([
- errors.MALFORMED_END_OF_SCOPE_COMMENT,
- ('Malformed end of goog.scope comment. Please use the '
- 'exact following syntax to close the scope:\n'
- '}); // goog.scope'),
- token,
- Position(token.start_index, token.length)])
- else:
- indentation_errors.append([
- errors.MISSING_END_OF_SCOPE_COMMENT,
- ('Missing comment for end of goog.scope which opened at line '
- '%d. End the scope with:\n'
- '}); // goog.scope' %
- (start_token.line_number)),
- token,
- Position(token.start_index, token.length)])
-
- elif token_type == Type.KEYWORD and token.string in ('case', 'default'):
- self._Add(self._PopTo(Type.START_BLOCK))
-
- elif token_type == Type.SEMICOLON:
- self._PopTransient()
-
- if (is_first and
- token_type not in (Type.COMMENT, Type.DOC_PREFIX, Type.STRING_TEXT)):
- if flags.FLAGS.debug_indentation:
- print 'Line #%d: stack %r' % (token.line_number, stack)
-
- # Ignore lines that start in JsDoc since we don't check them properly yet.
- # TODO(robbyw): Support checking JsDoc indentation.
- # Ignore lines that start as multi-line strings since indentation is N/A.
- # Ignore lines that start with operators since we report that already.
- # Ignore lines with tabs since we report that already.
- expected = self._GetAllowableIndentations()
- actual = self._GetActualIndentation(token)
-
- # Special case comments describing else, case, and default. Allow them
- # to outdent to the parent block.
- if token_type in Type.COMMENT_TYPES:
- next_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
- if next_code and next_code.type == Type.END_BLOCK:
- next_code = tokenutil.SearchExcept(next_code, Type.NON_CODE_TYPES)
- if next_code and next_code.string in ('else', 'case', 'default'):
- # TODO(robbyw): This almost certainly introduces false negatives.
- expected |= self._AddToEach(expected, -2)
-
- if actual >= 0 and actual not in expected:
- expected = sorted(expected)
- indentation_errors.append([
- errors.WRONG_INDENTATION,
- 'Wrong indentation: expected any of {%s} but got %d' % (
- ', '.join('%d' % x for x in expected if x < 80), actual),
- token,
- Position(actual, expected[0])])
- self._start_index_offset[token.line_number] = expected[0] - actual
-
- # Add tokens that could increase indentation.
- if token_type == Type.START_BRACKET:
- self._Add(TokenInfo(
- token=token,
- is_block=token.metadata.context.type == Context.ARRAY_LITERAL))
-
- elif token_type == Type.START_BLOCK or token.metadata.is_implied_block:
- self._Add(TokenInfo(token=token, is_block=True))
-
- elif token_type in (Type.START_PAREN, Type.START_PARAMETERS):
- self._Add(TokenInfo(token=token, is_block=False))
-
- elif token_type == Type.KEYWORD and token.string == 'return':
- self._Add(TokenInfo(token))
-
- elif not token.IsLastInLine() and (
- token.IsAssignment() or token.IsOperator('?')):
- self._Add(TokenInfo(token=token))
-
- # Handle implied block closes.
- if token.metadata.is_implied_block_close:
- self._PopToImpliedBlock()
-
- # Add some tokens only if they appear at the end of the line.
- is_last = self._IsLastCodeInLine(token)
- if is_last:
- next_code_token = tokenutil.GetNextCodeToken(token)
- # Increase required indentation if this is an overlong wrapped statement
- # ending in an operator.
- if token_type == Type.OPERATOR:
- if token.string == ':':
- if stack and stack[-1].token.string == '?':
- # When a ternary : is on a different line than its '?', it doesn't
- # add indentation.
- if token.line_number == stack[-1].token.line_number:
- self._Add(TokenInfo(token))
- elif token.metadata.context.type == Context.CASE_BLOCK:
- # Pop transient tokens from say, line continuations, e.g.,
- # case x.
- # y:
- # Want to pop the transient 4 space continuation indent.
- self._PopTransient()
- # Starting the body of the case statement, which is a type of
- # block.
- self._Add(TokenInfo(token=token, is_block=True))
- elif token.metadata.context.type == Context.LITERAL_ELEMENT:
- # When in an object literal, acts as operator indicating line
- # continuations.
- self._Add(TokenInfo(token))
- else:
- # ':' might also be a statement label, no effect on indentation in
- # this case.
- pass
-
- elif token.string != ',':
- self._Add(TokenInfo(token))
- else:
- # The token is a comma.
- if token.metadata.context.type == Context.VAR:
- self._Add(TokenInfo(token))
- elif token.metadata.context.type != Context.PARAMETERS:
- self._PopTransient()
- # Increase required indentation if this is the end of a statement that's
- # continued with an operator on the next line (e.g. the '.').
- elif (next_code_token and next_code_token.type == Type.OPERATOR and
- not next_code_token.metadata.IsUnaryOperator()):
- self._Add(TokenInfo(token))
- elif token_type == Type.PARAMETERS and token.string.endswith(','):
- # Parameter lists.
- self._Add(TokenInfo(token))
- elif token.IsKeyword('var'):
- self._Add(TokenInfo(token))
- elif token.metadata.is_implied_semicolon:
- self._PopTransient()
- elif token.IsAssignment():
- self._Add(TokenInfo(token))
-
- return indentation_errors
-
- def _AddToEach(self, original, amount):
- """Returns a new set with the given amount added to each element.
-
- Args:
- original: The original set of numbers
- amount: The amount to add to each element
-
- Returns:
- A new set containing each element of the original set added to the amount.
- """
- return set([x + amount for x in original])
-
- _HARD_STOP_TYPES = (Type.START_PAREN, Type.START_PARAMETERS,
- Type.START_BRACKET)
-
- _HARD_STOP_STRINGS = ('return', '?')
-
- def _IsHardStop(self, token):
- """Determines if the given token can have a hard stop after it.
-
- Args:
- token: token to examine
-
- Returns:
- Whether the token can have a hard stop after it.
-
- Hard stops are indentations defined by the position of another token as in
- indentation lined up with return, (, [, and ?.
- """
- return (token.type in self._HARD_STOP_TYPES or
- token.string in self._HARD_STOP_STRINGS or
- token.IsAssignment())
-
- def _GetAllowableIndentations(self):
- """Computes the set of allowable indentations.
-
- Returns:
- The set of allowable indentations, given the current stack.
- """
- expected = set([0])
- hard_stops = set([])
-
- # Whether the tokens are still in the same continuation, meaning additional
- # indentation is optional. As an example:
- # x = 5 +
- # 6 +
- # 7;
- # The second '+' does not add any required indentation.
- in_same_continuation = False
-
- for token_info in self._stack:
- token = token_info.token
-
- # Handle normal additive indentation tokens.
- if not token_info.overridden_by and token.string != 'return':
- if token_info.is_block:
- expected = self._AddToEach(expected, 2)
- hard_stops = self._AddToEach(hard_stops, 2)
- in_same_continuation = False
- elif in_same_continuation:
- expected |= self._AddToEach(expected, 4)
- hard_stops |= self._AddToEach(hard_stops, 4)
- else:
- expected = self._AddToEach(expected, 4)
- hard_stops |= self._AddToEach(hard_stops, 4)
- in_same_continuation = True
-
- # Handle hard stops after (, [, return, =, and ?
- if self._IsHardStop(token):
- override_is_hard_stop = (token_info.overridden_by and
- self._IsHardStop(
- token_info.overridden_by.token))
- if token.type == Type.START_PAREN and token.previous:
- # For someFunction(...) we allow to indent at the beginning of the
- # identifier +4
- prev = token.previous
- if (prev.type == Type.IDENTIFIER and
- prev.line_number == token.line_number):
- hard_stops.add(prev.start_index + 4)
- if not override_is_hard_stop:
- start_index = token.start_index
- if token.line_number in self._start_index_offset:
- start_index += self._start_index_offset[token.line_number]
- if (token.type in (Type.START_PAREN, Type.START_PARAMETERS) and
- not token_info.overridden_by):
- hard_stops.add(start_index + 1)
-
- elif token.string == 'return' and not token_info.overridden_by:
- hard_stops.add(start_index + 7)
-
- elif token.type == Type.START_BRACKET:
- hard_stops.add(start_index + 1)
-
- elif token.IsAssignment():
- hard_stops.add(start_index + len(token.string) + 1)
-
- elif token.IsOperator('?') and not token_info.overridden_by:
- hard_stops.add(start_index + 2)
-
- return (expected | hard_stops) or set([0])
-
- def _GetActualIndentation(self, token):
- """Gets the actual indentation of the line containing the given token.
-
- Args:
- token: Any token on the line.
-
- Returns:
- The actual indentation of the line containing the given token. Returns
- -1 if this line should be ignored due to the presence of tabs.
- """
- # Move to the first token in the line
- token = tokenutil.GetFirstTokenInSameLine(token)
-
- # If it is whitespace, it is the indentation.
- if token.type == Type.WHITESPACE:
- if token.string.find('\t') >= 0:
- return -1
- else:
- return len(token.string)
- elif token.type == Type.PARAMETERS:
- return len(token.string) - len(token.string.lstrip())
- else:
- return 0
-
- def _IsFirstNonWhitespaceTokenInLine(self, token):
- """Determines if the given token is the first non-space token on its line.
-
- Args:
- token: The token.
-
- Returns:
- True if the token is the first non-whitespace token on its line.
- """
- if token.type in (Type.WHITESPACE, Type.BLANK_LINE):
- return False
- if token.IsFirstInLine():
- return True
- return (token.previous and token.previous.IsFirstInLine() and
- token.previous.type == Type.WHITESPACE)
-
- def _IsLastCodeInLine(self, token):
- """Determines if the given token is the last code token on its line.
-
- Args:
- token: The token.
-
- Returns:
- True if the token is the last code token on its line.
- """
- if token.type in Type.NON_CODE_TYPES:
- return False
- start_token = token
- while True:
- token = token.next
- if not token or token.line_number != start_token.line_number:
- return True
- if token.type not in Type.NON_CODE_TYPES:
- return False
-
- def _AllFunctionPropertyAssignTokens(self, start_token, end_token):
- """Checks if tokens are (likely) a valid function property assignment.
-
- Args:
- start_token: Start of the token range.
- end_token: End of the token range.
-
- Returns:
- True if all tokens between start_token and end_token are legal tokens
- within a function declaration and assignment into a property.
- """
- for token in tokenutil.GetTokenRange(start_token, end_token):
- fn_decl_tokens = (Type.FUNCTION_DECLARATION,
- Type.PARAMETERS,
- Type.START_PARAMETERS,
- Type.END_PARAMETERS,
- Type.END_PAREN)
- if (token.type not in fn_decl_tokens and
- token.IsCode() and
- not tokenutil.IsIdentifierOrDot(token) and
- not token.IsAssignment() and
- not (token.type == Type.OPERATOR and token.string == ',')):
- return False
- return True
-
- def _Add(self, token_info):
- """Adds the given token info to the stack.
-
- Args:
- token_info: The token information to add.
- """
- if self._stack and self._stack[-1].token == token_info.token:
- # Don't add the same token twice.
- return
-
- if token_info.is_block or token_info.token.type == Type.START_PAREN:
- scope_token = tokenutil.GoogScopeOrNoneFromStartBlock(token_info.token)
- token_info.overridden_by = TokenInfo(scope_token) if scope_token else None
-
- if (token_info.token.type == Type.START_BLOCK and
- token_info.token.metadata.context.type == Context.BLOCK):
- # Handle function() {} assignments: their block contents get special
- # treatment and are allowed to just indent by two whitespace.
- # For example
- # long.long.name = function(
- # a) {
- # In this case the { and the = are on different lines. But the
- # override should still apply for all previous stack tokens that are
- # part of an assignment of a block.
-
- has_assignment = any(x for x in self._stack if x.token.IsAssignment())
- if has_assignment:
- last_token = token_info.token.previous
- for stack_info in reversed(self._stack):
- if (last_token and
- not self._AllFunctionPropertyAssignTokens(stack_info.token,
- last_token)):
- break
- stack_info.overridden_by = token_info
- stack_info.is_permanent_override = True
- last_token = stack_info.token
-
- index = len(self._stack) - 1
- while index >= 0:
- stack_info = self._stack[index]
- stack_token = stack_info.token
-
- if stack_info.line_number == token_info.line_number:
- # In general, tokens only override each other when they are on
- # the same line.
- stack_info.overridden_by = token_info
- if (token_info.token.type == Type.START_BLOCK and
- (stack_token.IsAssignment() or
- stack_token.type in (Type.IDENTIFIER, Type.START_PAREN))):
- # Multi-line blocks have lasting overrides, as in:
- # callFn({
- # a: 10
- # },
- # 30);
- # b/11450054. If a string is not closed properly then close_block
- # could be null.
- close_block = token_info.token.metadata.context.end_token
- stack_info.is_permanent_override = close_block and (
- close_block.line_number != token_info.token.line_number)
- else:
- break
- index -= 1
-
- self._stack.append(token_info)
-
- def _Pop(self):
- """Pops the top token from the stack.
-
- Returns:
- The popped token info.
- """
- token_info = self._stack.pop()
- if token_info.token.type not in (Type.START_BLOCK, Type.START_BRACKET):
- # Remove any temporary overrides.
- self._RemoveOverrides(token_info)
- else:
- # For braces and brackets, which can be object and array literals, remove
- # overrides when the literal is closed on the same line.
- token_check = token_info.token
- same_type = token_check.type
- goal_type = None
- if token_info.token.type == Type.START_BRACKET:
- goal_type = Type.END_BRACKET
- else:
- goal_type = Type.END_BLOCK
- line_number = token_info.token.line_number
- count = 0
- while token_check and token_check.line_number == line_number:
- if token_check.type == goal_type:
- count -= 1
- if not count:
- self._RemoveOverrides(token_info)
- break
- if token_check.type == same_type:
- count += 1
- token_check = token_check.next
- return token_info
-
- def _PopToImpliedBlock(self):
- """Pops the stack until an implied block token is found."""
- while not self._Pop().token.metadata.is_implied_block:
- pass
-
- def _PopTo(self, stop_type):
- """Pops the stack until a token of the given type is popped.
-
- Args:
- stop_type: The type of token to pop to.
-
- Returns:
- The token info of the given type that was popped.
- """
- last = None
- while True:
- last = self._Pop()
- if last.token.type == stop_type:
- break
- return last
-
- def _RemoveOverrides(self, token_info):
- """Marks any token that was overridden by this token as active again.
-
- Args:
- token_info: The token that is being removed from the stack.
- """
- for stack_token in self._stack:
- if (stack_token.overridden_by == token_info and
- not stack_token.is_permanent_override):
- stack_token.overridden_by = None
-
- def _PopTransient(self):
- """Pops all transient tokens - i.e. not blocks, literals, or parens."""
- while self._stack and self._stack[-1].is_transient:
- self._Pop()
diff --git a/tools/closure_linter/build/lib/closure_linter/javascriptlintrules.py b/tools/closure_linter/build/lib/closure_linter/javascriptlintrules.py
deleted file mode 100644
index 9578009daa..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/javascriptlintrules.py
+++ /dev/null
@@ -1,754 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Methods for checking JS files for common style guide violations.
-
-These style guide violations should only apply to JavaScript and not an Ecma
-scripting languages.
-"""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)',
- 'jacobr@google.com (Jacob Richman)')
-
-import re
-
-from closure_linter import ecmalintrules
-from closure_linter import error_check
-from closure_linter import errors
-from closure_linter import javascripttokenizer
-from closure_linter import javascripttokens
-from closure_linter import requireprovidesorter
-from closure_linter import tokenutil
-from closure_linter.common import error
-from closure_linter.common import position
-
-# Shorthand
-Error = error.Error
-Position = position.Position
-Rule = error_check.Rule
-Type = javascripttokens.JavaScriptTokenType
-
-
-class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
- """JavaScript lint rules that catch JavaScript specific style errors."""
-
- def __init__(self, namespaces_info):
- """Initializes a JavaScriptLintRules instance."""
- ecmalintrules.EcmaScriptLintRules.__init__(self)
- self._namespaces_info = namespaces_info
- self._declared_private_member_tokens = {}
- self._declared_private_members = set()
- self._used_private_members = set()
- # A stack of dictionaries, one for each function scope entered. Each
- # dictionary is keyed by an identifier that defines a local variable and has
- # a token as its value.
- self._unused_local_variables_by_scope = []
-
- def HandleMissingParameterDoc(self, token, param_name):
- """Handle errors associated with a parameter missing a param tag."""
- self._HandleError(errors.MISSING_PARAMETER_DOCUMENTATION,
- 'Missing docs for parameter: "%s"' % param_name, token)
-
- # pylint: disable=too-many-statements
- def CheckToken(self, token, state):
- """Checks a token, given the current parser_state, for warnings and errors.
-
- Args:
- token: The current token under consideration
- state: parser_state object that indicates the current state in the page
- """
-
- # Call the base class's CheckToken function.
- super(JavaScriptLintRules, self).CheckToken(token, state)
-
- # Store some convenience variables
- namespaces_info = self._namespaces_info
-
- if error_check.ShouldCheck(Rule.UNUSED_LOCAL_VARIABLES):
- self._CheckUnusedLocalVariables(token, state)
-
- if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
- # Find all assignments to private members.
- if token.type == Type.SIMPLE_LVALUE:
- identifier = token.string
- if identifier.endswith('_') and not identifier.endswith('__'):
- doc_comment = state.GetDocComment()
- suppressed = doc_comment and (
- 'underscore' in doc_comment.suppressions or
- 'unusedPrivateMembers' in doc_comment.suppressions)
- if not suppressed:
- # Look for static members defined on a provided namespace.
- if namespaces_info:
- namespace = namespaces_info.GetClosurizedNamespace(identifier)
- provided_namespaces = namespaces_info.GetProvidedNamespaces()
- else:
- namespace = None
- provided_namespaces = set()
-
- # Skip cases of this.something_.somethingElse_.
- regex = re.compile(r'^this\.[a-zA-Z_]+$')
- if namespace in provided_namespaces or regex.match(identifier):
- variable = identifier.split('.')[-1]
- self._declared_private_member_tokens[variable] = token
- self._declared_private_members.add(variable)
- elif not identifier.endswith('__'):
- # Consider setting public members of private members to be a usage.
- for piece in identifier.split('.'):
- if piece.endswith('_'):
- self._used_private_members.add(piece)
-
- # Find all usages of private members.
- if token.type == Type.IDENTIFIER:
- for piece in token.string.split('.'):
- if piece.endswith('_'):
- self._used_private_members.add(piece)
-
- if token.type == Type.DOC_FLAG:
- flag = token.attached_object
-
- if flag.flag_type == 'param' and flag.name_token is not None:
- self._CheckForMissingSpaceBeforeToken(
- token.attached_object.name_token)
-
- if flag.type is not None and flag.name is not None:
- if error_check.ShouldCheck(Rule.VARIABLE_ARG_MARKER):
- # Check for variable arguments marker in type.
- if flag.jstype.IsVarArgsType() and flag.name != 'var_args':
- self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_NAME,
- 'Variable length argument %s must be renamed '
- 'to var_args.' % flag.name,
- token)
- elif not flag.jstype.IsVarArgsType() and flag.name == 'var_args':
- self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_TYPE,
- 'Variable length argument %s type must start '
- 'with \'...\'.' % flag.name,
- token)
-
- if error_check.ShouldCheck(Rule.OPTIONAL_TYPE_MARKER):
- # Check for optional marker in type.
- if (flag.jstype.opt_arg and
- not flag.name.startswith('opt_')):
- self._HandleError(errors.JSDOC_MISSING_OPTIONAL_PREFIX,
- 'Optional parameter name %s must be prefixed '
- 'with opt_.' % flag.name,
- token)
- elif (not flag.jstype.opt_arg and
- flag.name.startswith('opt_')):
- self._HandleError(errors.JSDOC_MISSING_OPTIONAL_TYPE,
- 'Optional parameter %s type must end with =.' %
- flag.name,
- token)
-
- if flag.flag_type in state.GetDocFlag().HAS_TYPE:
- # Check for both missing type token and empty type braces '{}'
- # Missing suppress types are reported separately and we allow enums,
- # const, private, public and protected without types.
- if (flag.flag_type not in state.GetDocFlag().CAN_OMIT_TYPE
- and (not flag.jstype or flag.jstype.IsEmpty())):
- self._HandleError(errors.MISSING_JSDOC_TAG_TYPE,
- 'Missing type in %s tag' % token.string, token)
-
- elif flag.name_token and flag.type_end_token and tokenutil.Compare(
- flag.type_end_token, flag.name_token) > 0:
- self._HandleError(
- errors.OUT_OF_ORDER_JSDOC_TAG_TYPE,
- 'Type should be immediately after %s tag' % token.string,
- token)
-
- elif token.type == Type.DOUBLE_QUOTE_STRING_START:
- next_token = token.next
- while next_token.type == Type.STRING_TEXT:
- if javascripttokenizer.JavaScriptTokenizer.SINGLE_QUOTE.search(
- next_token.string):
- break
- next_token = next_token.next
- else:
- self._HandleError(
- errors.UNNECESSARY_DOUBLE_QUOTED_STRING,
- 'Single-quoted string preferred over double-quoted string.',
- token,
- position=Position.All(token.string))
-
- elif token.type == Type.END_DOC_COMMENT:
- doc_comment = state.GetDocComment()
-
- # When @externs appears in a @fileoverview comment, it should trigger
- # the same limited doc checks as a special filename like externs.js.
- if doc_comment.HasFlag('fileoverview') and doc_comment.HasFlag('externs'):
- self._SetLimitedDocChecks(True)
-
- if (error_check.ShouldCheck(Rule.BLANK_LINES_AT_TOP_LEVEL) and
- not self._is_html and
- state.InTopLevel() and
- not state.InNonScopeBlock()):
-
- # Check if we're in a fileoverview or constructor JsDoc.
- is_constructor = (
- doc_comment.HasFlag('constructor') or
- doc_comment.HasFlag('interface'))
- # @fileoverview is an optional tag so if the dosctring is the first
- # token in the file treat it as a file level docstring.
- is_file_level_comment = (
- doc_comment.HasFlag('fileoverview') or
- not doc_comment.start_token.previous)
-
- # If the comment is not a file overview, and it does not immediately
- # precede some code, skip it.
- # NOTE: The tokenutil methods are not used here because of their
- # behavior at the top of a file.
- next_token = token.next
- if (not next_token or
- (not is_file_level_comment and
- next_token.type in Type.NON_CODE_TYPES)):
- return
-
- # Don't require extra blank lines around suppression of extra
- # goog.require errors.
- if (doc_comment.SuppressionOnly() and
- next_token.type == Type.IDENTIFIER and
- next_token.string in ['goog.provide', 'goog.require']):
- return
-
- # Find the start of this block (include comments above the block, unless
- # this is a file overview).
- block_start = doc_comment.start_token
- if not is_file_level_comment:
- token = block_start.previous
- while token and token.type in Type.COMMENT_TYPES:
- block_start = token
- token = token.previous
-
- # Count the number of blank lines before this block.
- blank_lines = 0
- token = block_start.previous
- while token and token.type in [Type.WHITESPACE, Type.BLANK_LINE]:
- if token.type == Type.BLANK_LINE:
- # A blank line.
- blank_lines += 1
- elif token.type == Type.WHITESPACE and not token.line.strip():
- # A line with only whitespace on it.
- blank_lines += 1
- token = token.previous
-
- # Log errors.
- error_message = False
- expected_blank_lines = 0
-
- # Only need blank line before file overview if it is not the beginning
- # of the file, e.g. copyright is first.
- if is_file_level_comment and blank_lines == 0 and block_start.previous:
- error_message = 'Should have a blank line before a file overview.'
- expected_blank_lines = 1
- elif is_constructor and blank_lines != 3:
- error_message = (
- 'Should have 3 blank lines before a constructor/interface.')
- expected_blank_lines = 3
- elif (not is_file_level_comment and not is_constructor and
- blank_lines != 2):
- error_message = 'Should have 2 blank lines between top-level blocks.'
- expected_blank_lines = 2
-
- if error_message:
- self._HandleError(
- errors.WRONG_BLANK_LINE_COUNT, error_message,
- block_start, position=Position.AtBeginning(),
- fix_data=expected_blank_lines - blank_lines)
-
- elif token.type == Type.END_BLOCK:
- if state.InFunction() and state.IsFunctionClose():
- is_immediately_called = (token.next and
- token.next.type == Type.START_PAREN)
-
- function = state.GetFunction()
- if not self._limited_doc_checks:
- if (function.has_return and function.doc and
- not is_immediately_called and
- not function.doc.HasFlag('return') and
- not function.doc.InheritsDocumentation() and
- not function.doc.HasFlag('constructor')):
- # Check for proper documentation of return value.
- self._HandleError(
- errors.MISSING_RETURN_DOCUMENTATION,
- 'Missing @return JsDoc in function with non-trivial return',
- function.doc.end_token, position=Position.AtBeginning())
- elif (not function.has_return and
- not function.has_throw and
- function.doc and
- function.doc.HasFlag('return') and
- not state.InInterfaceMethod()):
- flag = function.doc.GetFlag('return')
- valid_no_return_names = ['undefined', 'void', '*']
- invalid_return = flag.jstype is None or not any(
- sub_type.identifier in valid_no_return_names
- for sub_type in flag.jstype.IterTypeGroup())
-
- if invalid_return:
- self._HandleError(
- errors.UNNECESSARY_RETURN_DOCUMENTATION,
- 'Found @return JsDoc on function that returns nothing',
- flag.flag_token, position=Position.AtBeginning())
-
- # b/4073735. Method in object literal definition of prototype can
- # safely reference 'this'.
- prototype_object_literal = False
- block_start = None
- previous_code = None
- previous_previous_code = None
-
- # Search for cases where prototype is defined as object literal.
- # previous_previous_code
- # | previous_code
- # | | block_start
- # | | |
- # a.b.prototype = {
- # c : function() {
- # this.d = 1;
- # }
- # }
-
- # If in object literal, find first token of block so to find previous
- # tokens to check above condition.
- if state.InObjectLiteral():
- block_start = state.GetCurrentBlockStart()
-
- # If an object literal then get previous token (code type). For above
- # case it should be '='.
- if block_start:
- previous_code = tokenutil.SearchExcept(block_start,
- Type.NON_CODE_TYPES,
- reverse=True)
-
- # If previous token to block is '=' then get its previous token.
- if previous_code and previous_code.IsOperator('='):
- previous_previous_code = tokenutil.SearchExcept(previous_code,
- Type.NON_CODE_TYPES,
- reverse=True)
-
- # If variable/token before '=' ends with '.prototype' then its above
- # case of prototype defined with object literal.
- prototype_object_literal = (previous_previous_code and
- previous_previous_code.string.endswith(
- '.prototype'))
-
- if (function.has_this and function.doc and
- not function.doc.HasFlag('this') and
- not function.is_constructor and
- not function.is_interface and
- '.prototype.' not in function.name and
- not prototype_object_literal):
- self._HandleError(
- errors.MISSING_JSDOC_TAG_THIS,
- 'Missing @this JsDoc in function referencing "this". ('
- 'this usually means you are trying to reference "this" in '
- 'a static function, or you have forgotten to mark a '
- 'constructor with @constructor)',
- function.doc.end_token, position=Position.AtBeginning())
-
- elif token.type == Type.IDENTIFIER:
- if token.string == 'goog.inherits' and not state.InFunction():
- if state.GetLastNonSpaceToken().line_number == token.line_number:
- self._HandleError(
- errors.MISSING_LINE,
- 'Missing newline between constructor and goog.inherits',
- token,
- position=Position.AtBeginning())
-
- extra_space = state.GetLastNonSpaceToken().next
- while extra_space != token:
- if extra_space.type == Type.BLANK_LINE:
- self._HandleError(
- errors.EXTRA_LINE,
- 'Extra line between constructor and goog.inherits',
- extra_space)
- extra_space = extra_space.next
-
- # TODO(robbyw): Test the last function was a constructor.
- # TODO(robbyw): Test correct @extends and @implements documentation.
-
- elif (token.string == 'goog.provide' and
- not state.InFunction() and
- namespaces_info is not None):
- namespace = tokenutil.GetStringAfterToken(token)
-
- # Report extra goog.provide statement.
- if not namespace or namespaces_info.IsExtraProvide(token):
- if not namespace:
- msg = 'Empty namespace in goog.provide'
- else:
- msg = 'Unnecessary goog.provide: ' + namespace
-
- # Hint to user if this is a Test namespace.
- if namespace.endswith('Test'):
- msg += (' *Test namespaces must be mentioned in the '
- 'goog.setTestOnly() call')
-
- self._HandleError(
- errors.EXTRA_GOOG_PROVIDE,
- msg,
- token, position=Position.AtBeginning())
-
- if namespaces_info.IsLastProvide(token):
- # Report missing provide statements after the last existing provide.
- missing_provides = namespaces_info.GetMissingProvides()
- if missing_provides:
- self._ReportMissingProvides(
- missing_provides,
- tokenutil.GetLastTokenInSameLine(token).next,
- False)
-
- # If there are no require statements, missing requires should be
- # reported after the last provide.
- if not namespaces_info.GetRequiredNamespaces():
- missing_requires, illegal_alias_statements = (
- namespaces_info.GetMissingRequires())
- if missing_requires:
- self._ReportMissingRequires(
- missing_requires,
- tokenutil.GetLastTokenInSameLine(token).next,
- True)
- if illegal_alias_statements:
- self._ReportIllegalAliasStatement(illegal_alias_statements)
-
- elif (token.string == 'goog.require' and
- not state.InFunction() and
- namespaces_info is not None):
- namespace = tokenutil.GetStringAfterToken(token)
-
- # If there are no provide statements, missing provides should be
- # reported before the first require.
- if (namespaces_info.IsFirstRequire(token) and
- not namespaces_info.GetProvidedNamespaces()):
- missing_provides = namespaces_info.GetMissingProvides()
- if missing_provides:
- self._ReportMissingProvides(
- missing_provides,
- tokenutil.GetFirstTokenInSameLine(token),
- True)
-
- # Report extra goog.require statement.
- if not namespace or namespaces_info.IsExtraRequire(token):
- if not namespace:
- msg = 'Empty namespace in goog.require'
- else:
- msg = 'Unnecessary goog.require: ' + namespace
-
- self._HandleError(
- errors.EXTRA_GOOG_REQUIRE,
- msg,
- token, position=Position.AtBeginning())
-
- # Report missing goog.require statements.
- if namespaces_info.IsLastRequire(token):
- missing_requires, illegal_alias_statements = (
- namespaces_info.GetMissingRequires())
- if missing_requires:
- self._ReportMissingRequires(
- missing_requires,
- tokenutil.GetLastTokenInSameLine(token).next,
- False)
- if illegal_alias_statements:
- self._ReportIllegalAliasStatement(illegal_alias_statements)
-
- elif token.type == Type.OPERATOR:
- last_in_line = token.IsLastInLine()
- # If the token is unary and appears to be used in a unary context
- # it's ok. Otherwise, if it's at the end of the line or immediately
- # before a comment, it's ok.
- # Don't report an error before a start bracket - it will be reported
- # by that token's space checks.
- if (not token.metadata.IsUnaryOperator() and not last_in_line
- and not token.next.IsComment()
- and not token.next.IsOperator(',')
- and not tokenutil.IsDot(token)
- and token.next.type not in (Type.WHITESPACE, Type.END_PAREN,
- Type.END_BRACKET, Type.SEMICOLON,
- Type.START_BRACKET)):
- self._HandleError(
- errors.MISSING_SPACE,
- 'Missing space after "%s"' % token.string,
- token,
- position=Position.AtEnd(token.string))
- elif token.type == Type.WHITESPACE:
- first_in_line = token.IsFirstInLine()
- last_in_line = token.IsLastInLine()
- # Check whitespace length if it's not the first token of the line and
- # if it's not immediately before a comment.
- if not last_in_line and not first_in_line and not token.next.IsComment():
- # Ensure there is no space after opening parentheses.
- if (token.previous.type in (Type.START_PAREN, Type.START_BRACKET,
- Type.FUNCTION_NAME)
- or token.next.type == Type.START_PARAMETERS):
- self._HandleError(
- errors.EXTRA_SPACE,
- 'Extra space after "%s"' % token.previous.string,
- token,
- position=Position.All(token.string))
- elif token.type == Type.SEMICOLON:
- previous_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES,
- reverse=True)
- if not previous_token:
- self._HandleError(
- errors.REDUNDANT_SEMICOLON,
- 'Semicolon without any statement',
- token,
- position=Position.AtEnd(token.string))
- elif (previous_token.type == Type.KEYWORD and
- previous_token.string not in ['break', 'continue', 'return']):
- self._HandleError(
- errors.REDUNDANT_SEMICOLON,
- ('Semicolon after \'%s\' without any statement.'
- ' Looks like an error.' % previous_token.string),
- token,
- position=Position.AtEnd(token.string))
-
- def _CheckUnusedLocalVariables(self, token, state):
- """Checks for unused local variables in function blocks.
-
- Args:
- token: The token to check.
- state: The state tracker.
- """
- # We don't use state.InFunction because that disregards scope functions.
- in_function = state.FunctionDepth() > 0
- if token.type == Type.SIMPLE_LVALUE or token.type == Type.IDENTIFIER:
- if in_function:
- identifier = token.string
- # Check whether the previous token was var.
- previous_code_token = tokenutil.CustomSearch(
- token,
- lambda t: t.type not in Type.NON_CODE_TYPES,
- reverse=True)
- if previous_code_token and previous_code_token.IsKeyword('var'):
- # Add local variable declaration to the top of the unused locals
- # stack.
- self._unused_local_variables_by_scope[-1][identifier] = token
- elif token.type == Type.IDENTIFIER:
- # This covers most cases where the variable is used as an identifier.
- self._MarkLocalVariableUsed(token.string)
- elif token.type == Type.SIMPLE_LVALUE and '.' in identifier:
- # This covers cases where a value is assigned to a property of the
- # variable.
- self._MarkLocalVariableUsed(token.string)
- elif token.type == Type.START_BLOCK:
- if in_function and state.IsFunctionOpen():
- # Push a new map onto the stack
- self._unused_local_variables_by_scope.append({})
- elif token.type == Type.END_BLOCK:
- if state.IsFunctionClose():
- # Pop the stack and report any remaining locals as unused.
- unused_local_variables = self._unused_local_variables_by_scope.pop()
- for unused_token in unused_local_variables.values():
- self._HandleError(
- errors.UNUSED_LOCAL_VARIABLE,
- 'Unused local variable: %s.' % unused_token.string,
- unused_token)
- elif token.type == Type.DOC_FLAG:
- # Flags that use aliased symbols should be counted.
- flag = token.attached_object
- js_type = flag and flag.jstype
- if flag and flag.flag_type in state.GetDocFlag().HAS_TYPE and js_type:
- self._MarkAliasUsed(js_type)
-
- def _MarkAliasUsed(self, js_type):
- """Marks aliases in a type as used.
-
- Recursively iterates over all subtypes in a jsdoc type annotation and
- tracks usage of aliased symbols (which may be local variables).
- Marks the local variable as used in the scope nearest to the current
- scope that matches the given token.
-
- Args:
- js_type: The jsdoc type, a typeannotation.TypeAnnotation object.
- """
- if js_type.alias:
- self._MarkLocalVariableUsed(js_type.identifier)
- for sub_type in js_type.IterTypes():
- self._MarkAliasUsed(sub_type)
-
- def _MarkLocalVariableUsed(self, identifier):
- """Marks the local variable as used in the relevant scope.
-
- Marks the local variable in the scope nearest to the current scope that
- matches the given identifier as used.
-
- Args:
- identifier: The identifier representing the potential usage of a local
- variable.
- """
- identifier = identifier.split('.', 1)[0]
- # Find the first instance of the identifier in the stack of function scopes
- # and mark it used.
- for unused_local_variables in reversed(
- self._unused_local_variables_by_scope):
- if identifier in unused_local_variables:
- del unused_local_variables[identifier]
- break
-
- def _ReportMissingProvides(self, missing_provides, token, need_blank_line):
- """Reports missing provide statements to the error handler.
-
- Args:
- missing_provides: A dictionary of string(key) and integer(value) where
- each string(key) is a namespace that should be provided, but is not
- and integer(value) is first line number where it's required.
- token: The token where the error was detected (also where the new provides
- will be inserted.
- need_blank_line: Whether a blank line needs to be inserted after the new
- provides are inserted. May be True, False, or None, where None
- indicates that the insert location is unknown.
- """
-
- missing_provides_msg = 'Missing the following goog.provide statements:\n'
- missing_provides_msg += '\n'.join(['goog.provide(\'%s\');' % x for x in
- sorted(missing_provides)])
- missing_provides_msg += '\n'
-
- missing_provides_msg += '\nFirst line where provided: \n'
- missing_provides_msg += '\n'.join(
- [' %s : line %d' % (x, missing_provides[x]) for x in
- sorted(missing_provides)])
- missing_provides_msg += '\n'
-
- self._HandleError(
- errors.MISSING_GOOG_PROVIDE,
- missing_provides_msg,
- token, position=Position.AtBeginning(),
- fix_data=(missing_provides.keys(), need_blank_line))
-
- def _ReportMissingRequires(self, missing_requires, token, need_blank_line):
- """Reports missing require statements to the error handler.
-
- Args:
- missing_requires: A dictionary of string(key) and integer(value) where
- each string(key) is a namespace that should be required, but is not
- and integer(value) is first line number where it's required.
- token: The token where the error was detected (also where the new requires
- will be inserted.
- need_blank_line: Whether a blank line needs to be inserted before the new
- requires are inserted. May be True, False, or None, where None
- indicates that the insert location is unknown.
- """
-
- missing_requires_msg = 'Missing the following goog.require statements:\n'
- missing_requires_msg += '\n'.join(['goog.require(\'%s\');' % x for x in
- sorted(missing_requires)])
- missing_requires_msg += '\n'
-
- missing_requires_msg += '\nFirst line where required: \n'
- missing_requires_msg += '\n'.join(
- [' %s : line %d' % (x, missing_requires[x]) for x in
- sorted(missing_requires)])
- missing_requires_msg += '\n'
-
- self._HandleError(
- errors.MISSING_GOOG_REQUIRE,
- missing_requires_msg,
- token, position=Position.AtBeginning(),
- fix_data=(missing_requires.keys(), need_blank_line))
-
- def _ReportIllegalAliasStatement(self, illegal_alias_statements):
- """Reports alias statements that would need a goog.require."""
- for namespace, token in illegal_alias_statements.iteritems():
- self._HandleError(
- errors.ALIAS_STMT_NEEDS_GOOG_REQUIRE,
- 'The alias definition would need the namespace \'%s\' which is not '
- 'required through any other symbol.' % namespace,
- token, position=Position.AtBeginning())
-
- def Finalize(self, state):
- """Perform all checks that need to occur after all lines are processed."""
- # Call the base class's Finalize function.
- super(JavaScriptLintRules, self).Finalize(state)
-
- if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
- # Report an error for any declared private member that was never used.
- unused_private_members = (self._declared_private_members -
- self._used_private_members)
-
- for variable in unused_private_members:
- token = self._declared_private_member_tokens[variable]
- self._HandleError(errors.UNUSED_PRIVATE_MEMBER,
- 'Unused private member: %s.' % token.string,
- token)
-
- # Clear state to prepare for the next file.
- self._declared_private_member_tokens = {}
- self._declared_private_members = set()
- self._used_private_members = set()
-
- namespaces_info = self._namespaces_info
- if namespaces_info is not None:
- # If there are no provide or require statements, missing provides and
- # requires should be reported on line 1.
- if (not namespaces_info.GetProvidedNamespaces() and
- not namespaces_info.GetRequiredNamespaces()):
- missing_provides = namespaces_info.GetMissingProvides()
- if missing_provides:
- self._ReportMissingProvides(
- missing_provides, state.GetFirstToken(), None)
-
- missing_requires, illegal_alias = namespaces_info.GetMissingRequires()
- if missing_requires:
- self._ReportMissingRequires(
- missing_requires, state.GetFirstToken(), None)
- if illegal_alias:
- self._ReportIllegalAliasStatement(illegal_alias)
-
- self._CheckSortedRequiresProvides(state.GetFirstToken())
-
- def _CheckSortedRequiresProvides(self, token):
- """Checks that all goog.require and goog.provide statements are sorted.
-
- Note that this method needs to be run after missing statements are added to
- preserve alphabetical order.
-
- Args:
- token: The first token in the token stream.
- """
- sorter = requireprovidesorter.RequireProvideSorter()
- first_provide_token = sorter.CheckProvides(token)
- if first_provide_token:
- new_order = sorter.GetFixedProvideString(first_provide_token)
- self._HandleError(
- errors.GOOG_PROVIDES_NOT_ALPHABETIZED,
- 'goog.provide classes must be alphabetized. The correct code is:\n' +
- new_order,
- first_provide_token,
- position=Position.AtBeginning(),
- fix_data=first_provide_token)
-
- first_require_token = sorter.CheckRequires(token)
- if first_require_token:
- new_order = sorter.GetFixedRequireString(first_require_token)
- self._HandleError(
- errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
- 'goog.require classes must be alphabetized. The correct code is:\n' +
- new_order,
- first_require_token,
- position=Position.AtBeginning(),
- fix_data=first_require_token)
-
- def GetLongLineExceptions(self):
- """Gets a list of regexps for lines which can be longer than the limit.
-
- Returns:
- A list of regexps, used as matches (rather than searches).
- """
- return [
- re.compile(r'(var .+\s*=\s*)?goog\.require\(.+\);?\s*$'),
- re.compile(r'goog\.(provide|module|setTestOnly)\(.+\);?\s*$'),
- re.compile(r'[\s/*]*@visibility\s*{.*}[\s*/]*$'),
- ]
diff --git a/tools/closure_linter/build/lib/closure_linter/javascriptstatetracker.py b/tools/closure_linter/build/lib/closure_linter/javascriptstatetracker.py
deleted file mode 100644
index e0a42f66a8..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/javascriptstatetracker.py
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Parser for JavaScript files."""
-
-
-
-from closure_linter import javascripttokens
-from closure_linter import statetracker
-from closure_linter import tokenutil
-
-# Shorthand
-Type = javascripttokens.JavaScriptTokenType
-
-
-class JsDocFlag(statetracker.DocFlag):
- """Javascript doc flag object.
-
- Attribute:
- flag_type: param, return, define, type, etc.
- flag_token: The flag token.
- type_start_token: The first token specifying the flag JS type,
- including braces.
- type_end_token: The last token specifying the flag JS type,
- including braces.
- type: The type spec string.
- jstype: The type spec, a TypeAnnotation instance.
- name_token: The token specifying the flag name.
- name: The flag name
- description_start_token: The first token in the description.
- description_end_token: The end token in the description.
- description: The description.
- """
-
- # Please keep these lists alphabetized.
-
- # Some projects use the following extensions to JsDoc.
- # TODO(robbyw): determine which of these, if any, should be illegal.
- EXTENDED_DOC = frozenset([
- 'class', 'code', 'desc', 'final', 'hidden', 'inheritDoc', 'link',
- 'meaning', 'provideGoog', 'throws'])
-
- LEGAL_DOC = EXTENDED_DOC | statetracker.DocFlag.LEGAL_DOC
-
-
-class JavaScriptStateTracker(statetracker.StateTracker):
- """JavaScript state tracker.
-
- Inherits from the core EcmaScript StateTracker adding extra state tracking
- functionality needed for JavaScript.
- """
-
- def __init__(self):
- """Initializes a JavaScript token stream state tracker."""
- statetracker.StateTracker.__init__(self, JsDocFlag)
-
- def Reset(self):
- self._scope_depth = 0
- self._block_stack = []
- super(JavaScriptStateTracker, self).Reset()
-
- def InTopLevel(self):
- """Compute whether we are at the top level in the class.
-
- This function call is language specific. In some languages like
- JavaScript, a function is top level if it is not inside any parenthesis.
- In languages such as ActionScript, a function is top level if it is directly
- within a class.
-
- Returns:
- Whether we are at the top level in the class.
- """
- return self._scope_depth == self.ParenthesesDepth()
-
- def InFunction(self):
- """Returns true if the current token is within a function.
-
- This js-specific override ignores goog.scope functions.
-
- Returns:
- True if the current token is within a function.
- """
- return self._scope_depth != self.FunctionDepth()
-
- def InNonScopeBlock(self):
- """Compute whether we are nested within a non-goog.scope block.
-
- Returns:
- True if the token is not enclosed in a block that does not originate from
- a goog.scope statement. False otherwise.
- """
- return self._scope_depth != self.BlockDepth()
-
- def GetBlockType(self, token):
- """Determine the block type given a START_BLOCK token.
-
- Code blocks come after parameters, keywords like else, and closing parens.
-
- Args:
- token: The current token. Can be assumed to be type START_BLOCK
- Returns:
- Code block type for current token.
- """
- last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, reverse=True)
- if last_code.type in (Type.END_PARAMETERS, Type.END_PAREN,
- Type.KEYWORD) and not last_code.IsKeyword('return'):
- return self.CODE
- else:
- return self.OBJECT_LITERAL
-
- def GetCurrentBlockStart(self):
- """Gets the start token of current block.
-
- Returns:
- Starting token of current block. None if not in block.
- """
- if self._block_stack:
- return self._block_stack[-1]
- else:
- return None
-
- def HandleToken(self, token, last_non_space_token):
- """Handles the given token and updates state.
-
- Args:
- token: The token to handle.
- last_non_space_token: The last non space token encountered
- """
- if token.type == Type.START_BLOCK:
- self._block_stack.append(token)
- if token.type == Type.IDENTIFIER and token.string == 'goog.scope':
- self._scope_depth += 1
- if token.type == Type.END_BLOCK:
- start_token = self._block_stack.pop()
- if tokenutil.GoogScopeOrNoneFromStartBlock(start_token):
- self._scope_depth -= 1
- super(JavaScriptStateTracker, self).HandleToken(token,
- last_non_space_token)
diff --git a/tools/closure_linter/build/lib/closure_linter/javascriptstatetracker_test.py b/tools/closure_linter/build/lib/closure_linter/javascriptstatetracker_test.py
deleted file mode 100644
index 76dabd2c70..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/javascriptstatetracker_test.py
+++ /dev/null
@@ -1,278 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unit tests for the javascriptstatetracker module."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = ('nnaze@google.com (Nathan Naze)')
-
-
-import unittest as googletest
-
-from closure_linter import javascripttokens
-from closure_linter import testutil
-from closure_linter import tokenutil
-
-
-_FUNCTION_SCRIPT = """\
-var a = 3;
-
-function foo(aaa, bbb, ccc) {
- var b = 4;
-}
-
-
-/**
- * JSDoc comment.
- */
-var bar = function(ddd, eee, fff) {
-
-};
-
-
-/**
- * Verify that nested functions get their proper parameters recorded.
- */
-var baz = function(ggg, hhh, iii) {
- var qux = function(jjj, kkk, lll) {
- };
- // make sure that entering a new block does not change baz' parameters.
- {};
-};
-
-"""
-
-
-class FunctionTest(googletest.TestCase):
-
- def testFunctionParse(self):
- functions, _ = testutil.ParseFunctionsAndComments(_FUNCTION_SCRIPT)
- self.assertEquals(4, len(functions))
-
- # First function
- function = functions[0]
- self.assertEquals(['aaa', 'bbb', 'ccc'], function.parameters)
-
- start_token = function.start_token
- end_token = function.end_token
-
- self.assertEquals(
- javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
- function.start_token.type)
-
- self.assertEquals('function', start_token.string)
- self.assertEquals(3, start_token.line_number)
- self.assertEquals(0, start_token.start_index)
-
- self.assertEquals('}', end_token.string)
- self.assertEquals(5, end_token.line_number)
- self.assertEquals(0, end_token.start_index)
-
- self.assertEquals('foo', function.name)
-
- self.assertIsNone(function.doc)
-
- # Second function
- function = functions[1]
- self.assertEquals(['ddd', 'eee', 'fff'], function.parameters)
-
- start_token = function.start_token
- end_token = function.end_token
-
- self.assertEquals(
- javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
- function.start_token.type)
-
- self.assertEquals('function', start_token.string)
- self.assertEquals(11, start_token.line_number)
- self.assertEquals(10, start_token.start_index)
-
- self.assertEquals('}', end_token.string)
- self.assertEquals(13, end_token.line_number)
- self.assertEquals(0, end_token.start_index)
-
- self.assertEquals('bar', function.name)
-
- self.assertIsNotNone(function.doc)
-
- # Check function JSDoc
- doc = function.doc
- doc_tokens = tokenutil.GetTokenRange(doc.start_token, doc.end_token)
-
- comment_type = javascripttokens.JavaScriptTokenType.COMMENT
- comment_tokens = filter(lambda t: t.type is comment_type, doc_tokens)
-
- self.assertEquals('JSDoc comment.',
- tokenutil.TokensToString(comment_tokens).strip())
-
- # Third function
- function = functions[2]
- self.assertEquals(['ggg', 'hhh', 'iii'], function.parameters)
-
- start_token = function.start_token
- end_token = function.end_token
-
- self.assertEquals(
- javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
- function.start_token.type)
-
- self.assertEquals('function', start_token.string)
- self.assertEquals(19, start_token.line_number)
- self.assertEquals(10, start_token.start_index)
-
- self.assertEquals('}', end_token.string)
- self.assertEquals(24, end_token.line_number)
- self.assertEquals(0, end_token.start_index)
-
- self.assertEquals('baz', function.name)
- self.assertIsNotNone(function.doc)
-
- # Fourth function (inside third function)
- function = functions[3]
- self.assertEquals(['jjj', 'kkk', 'lll'], function.parameters)
-
- start_token = function.start_token
- end_token = function.end_token
-
- self.assertEquals(
- javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
- function.start_token.type)
-
- self.assertEquals('function', start_token.string)
- self.assertEquals(20, start_token.line_number)
- self.assertEquals(12, start_token.start_index)
-
- self.assertEquals('}', end_token.string)
- self.assertEquals(21, end_token.line_number)
- self.assertEquals(2, end_token.start_index)
-
- self.assertEquals('qux', function.name)
- self.assertIsNone(function.doc)
-
-
-
-class CommentTest(googletest.TestCase):
-
- def testGetDescription(self):
- comment = self._ParseComment("""
- /**
- * Comment targeting goog.foo.
- *
- * This is the second line.
- * @param {number} foo The count of foo.
- */
- target;""")
-
- self.assertEqual(
- 'Comment targeting goog.foo.\n\nThis is the second line.',
- comment.description)
-
- def testCommentGetTarget(self):
- self.assertCommentTarget('goog.foo', """
- /**
- * Comment targeting goog.foo.
- */
- goog.foo = 6;
- """)
-
- self.assertCommentTarget('bar', """
- /**
- * Comment targeting bar.
- */
- var bar = "Karate!";
- """)
-
- self.assertCommentTarget('doThing', """
- /**
- * Comment targeting doThing.
- */
- function doThing() {};
- """)
-
- self.assertCommentTarget('this.targetProperty', """
- goog.bar.Baz = function() {
- /**
- * Comment targeting targetProperty.
- */
- this.targetProperty = 3;
- };
- """)
-
- self.assertCommentTarget('goog.bar.prop', """
- /**
- * Comment targeting goog.bar.prop.
- */
- goog.bar.prop;
- """)
-
- self.assertCommentTarget('goog.aaa.bbb', """
- /**
- * Comment targeting goog.aaa.bbb.
- */
- (goog.aaa.bbb)
- """)
-
- self.assertCommentTarget('theTarget', """
- /**
- * Comment targeting symbol preceded by newlines, whitespace,
- * and parens -- things we ignore.
- */
- (theTarget)
- """)
-
- self.assertCommentTarget(None, """
- /**
- * @fileoverview File overview.
- */
- (notATarget)
- """)
-
- self.assertCommentTarget(None, """
- /**
- * Comment that doesn't find a target.
- */
- """)
-
- self.assertCommentTarget('theTarget.is.split.across.lines', """
- /**
- * Comment that addresses a symbol split across lines.
- */
- (theTarget.is.split
- .across.lines)
- """)
-
- self.assertCommentTarget('theTarget.is.split.across.lines', """
- /**
- * Comment that addresses a symbol split across lines.
- */
- (theTarget.is.split.
- across.lines)
- """)
-
- def _ParseComment(self, script):
- """Parse a script that contains one comment and return it."""
- _, comments = testutil.ParseFunctionsAndComments(script)
- self.assertEquals(1, len(comments))
- return comments[0]
-
- def assertCommentTarget(self, target, script):
- comment = self._ParseComment(script)
- self.assertEquals(target, comment.GetTargetIdentifier())
-
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/javascripttokenizer.py b/tools/closure_linter/build/lib/closure_linter/javascripttokenizer.py
deleted file mode 100644
index 2ee5b81ee1..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/javascripttokenizer.py
+++ /dev/null
@@ -1,463 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Regular expression based JavaScript parsing classes."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-import copy
-import re
-
-from closure_linter import javascripttokens
-from closure_linter.common import matcher
-from closure_linter.common import tokenizer
-
-# Shorthand
-Type = javascripttokens.JavaScriptTokenType
-Matcher = matcher.Matcher
-
-
-class JavaScriptModes(object):
- """Enumeration of the different matcher modes used for JavaScript."""
- TEXT_MODE = 'text'
- SINGLE_QUOTE_STRING_MODE = 'single_quote_string'
- DOUBLE_QUOTE_STRING_MODE = 'double_quote_string'
- BLOCK_COMMENT_MODE = 'block_comment'
- DOC_COMMENT_MODE = 'doc_comment'
- DOC_COMMENT_LEX_SPACES_MODE = 'doc_comment_spaces'
- LINE_COMMENT_MODE = 'line_comment'
- PARAMETER_MODE = 'parameter'
- FUNCTION_MODE = 'function'
-
-
-class JavaScriptTokenizer(tokenizer.Tokenizer):
- """JavaScript tokenizer.
-
- Convert JavaScript code in to an array of tokens.
- """
-
- # Useful patterns for JavaScript parsing.
- IDENTIFIER_CHAR = r'A-Za-z0-9_$'
-
- # Number patterns based on:
- # http://www.mozilla.org/js/language/js20-2000-07/formal/lexer-grammar.html
- MANTISSA = r"""
- (\d+(?!\.)) | # Matches '10'
- (\d+\.(?!\d)) | # Matches '10.'
- (\d*\.\d+) # Matches '.5' or '10.5'
- """
- DECIMAL_LITERAL = r'(%s)([eE][-+]?\d+)?' % MANTISSA
- HEX_LITERAL = r'0[xX][0-9a-fA-F]+'
- NUMBER = re.compile(r"""
- ((%s)|(%s))
- """ % (HEX_LITERAL, DECIMAL_LITERAL), re.VERBOSE)
-
- # Strings come in three parts - first we match the start of the string, then
- # the contents, then the end. The contents consist of any character except a
- # backslash or end of string, or a backslash followed by any character, or a
- # backslash followed by end of line to support correct parsing of multi-line
- # strings.
- SINGLE_QUOTE = re.compile(r"'")
- SINGLE_QUOTE_TEXT = re.compile(r"([^'\\]|\\(.|$))+")
- DOUBLE_QUOTE = re.compile(r'"')
- DOUBLE_QUOTE_TEXT = re.compile(r'([^"\\]|\\(.|$))+')
-
- START_SINGLE_LINE_COMMENT = re.compile(r'//')
- END_OF_LINE_SINGLE_LINE_COMMENT = re.compile(r'//$')
-
- START_DOC_COMMENT = re.compile(r'/\*\*')
- START_BLOCK_COMMENT = re.compile(r'/\*')
- END_BLOCK_COMMENT = re.compile(r'\*/')
- BLOCK_COMMENT_TEXT = re.compile(r'([^*]|\*(?!/))+')
-
- # Comment text is anything that we are not going to parse into another special
- # token like (inline) flags or end comments. Complicated regex to match
- # most normal characters, and '*', '{', '}', and '@' when we are sure that
- # it is safe. Expression [^*{\s]@ must come first, or the other options will
- # match everything before @, and we won't match @'s that aren't part of flags
- # like in email addresses in the @author tag.
- DOC_COMMENT_TEXT = re.compile(r'([^*{}\s]@|[^*{}@]|\*(?!/))+')
- DOC_COMMENT_NO_SPACES_TEXT = re.compile(r'([^*{}\s]@|[^*{}@\s]|\*(?!/))+')
- # Match anything that is allowed in a type definition, except for tokens
- # needed to parse it (and the lookahead assertion for "*/").
- DOC_COMMENT_TYPE_TEXT = re.compile(r'([^*|!?=<>(){}:,\s]|\*(?!/))+')
-
- # Match the prefix ' * ' that starts every line of jsdoc. Want to include
- # spaces after the '*', but nothing else that occurs after a '*', and don't
- # want to match the '*' in '*/'.
- DOC_PREFIX = re.compile(r'\s*\*(\s+|(?!/))')
-
- START_BLOCK = re.compile('{')
- END_BLOCK = re.compile('}')
-
- REGEX_CHARACTER_CLASS = r"""
- \[ # Opening bracket
- ([^\]\\]|\\.)* # Anything but a ] or \,
- # or a backslash followed by anything
- \] # Closing bracket
- """
- # We ensure the regex is followed by one of the above tokens to avoid
- # incorrectly parsing something like x / y / z as x REGEX(/ y /) z
- POST_REGEX_LIST = [
- ';', ',', r'\.', r'\)', r'\]', '$', r'\/\/', r'\/\*', ':', '}']
-
- REGEX = re.compile(r"""
- / # opening slash
- (?!\*) # not the start of a comment
- (\\.|[^\[\/\\]|(%s))* # a backslash followed by anything,
- # or anything but a / or [ or \,
- # or a character class
- / # closing slash
- [gimsx]* # optional modifiers
- (?=\s*(%s))
- """ % (REGEX_CHARACTER_CLASS, '|'.join(POST_REGEX_LIST)),
- re.VERBOSE)
-
- ANYTHING = re.compile(r'.*')
- PARAMETERS = re.compile(r'[^\)]+')
- CLOSING_PAREN_WITH_SPACE = re.compile(r'\)\s*')
-
- FUNCTION_DECLARATION = re.compile(r'\bfunction\b')
-
- OPENING_PAREN = re.compile(r'\(')
- CLOSING_PAREN = re.compile(r'\)')
-
- OPENING_BRACKET = re.compile(r'\[')
- CLOSING_BRACKET = re.compile(r'\]')
-
- # We omit these JS keywords from the list:
- # function - covered by FUNCTION_DECLARATION.
- # delete, in, instanceof, new, typeof - included as operators.
- # this - included in identifiers.
- # null, undefined - not included, should go in some "special constant" list.
- KEYWORD_LIST = [
- 'break',
- 'case',
- 'catch',
- 'continue',
- 'default',
- 'do',
- 'else',
- 'finally',
- 'for',
- 'if',
- 'return',
- 'switch',
- 'throw',
- 'try',
- 'var',
- 'while',
- 'with',
- ]
-
- # List of regular expressions to match as operators. Some notes: for our
- # purposes, the comma behaves similarly enough to a normal operator that we
- # include it here. r'\bin\b' actually matches 'in' surrounded by boundary
- # characters - this may not match some very esoteric uses of the in operator.
- # Operators that are subsets of larger operators must come later in this list
- # for proper matching, e.g., '>>' must come AFTER '>>>'.
- OPERATOR_LIST = [
- ',',
- r'\+\+',
- '===',
- '!==',
- '>>>=',
- '>>>',
- '==',
- '>=',
- '<=',
- '!=',
- '<<=',
- '>>=',
- '<<',
- '>>',
- '=>',
- '>',
- '<',
- r'\+=',
- r'\+',
- '--',
- r'\^=',
- '-=',
- '-',
- '/=',
- '/',
- r'\*=',
- r'\*',
- '%=',
- '%',
- '&&',
- r'\|\|',
- '&=',
- '&',
- r'\|=',
- r'\|',
- '=',
- '!',
- ':',
- r'\?',
- r'\^',
- r'\bdelete\b',
- r'\bin\b',
- r'\binstanceof\b',
- r'\bnew\b',
- r'\btypeof\b',
- r'\bvoid\b',
- r'\.',
- ]
- OPERATOR = re.compile('|'.join(OPERATOR_LIST))
-
- WHITESPACE = re.compile(r'\s+')
- SEMICOLON = re.compile(r';')
- # Technically JavaScript identifiers can't contain '.', but we treat a set of
- # nested identifiers as a single identifier, except for trailing dots.
- NESTED_IDENTIFIER = r'[a-zA-Z_$]([%s]|\.[a-zA-Z_$])*' % IDENTIFIER_CHAR
- IDENTIFIER = re.compile(NESTED_IDENTIFIER)
-
- SIMPLE_LVALUE = re.compile(r"""
- (?P<identifier>%s) # a valid identifier
- (?=\s* # optional whitespace
- \= # look ahead to equal sign
- (?!=)) # not follwed by equal
- """ % NESTED_IDENTIFIER, re.VERBOSE)
-
- # A doc flag is a @ sign followed by non-space characters that appears at the
- # beginning of the line, after whitespace, or after a '{'. The look-behind
- # check is necessary to not match someone@google.com as a flag.
- DOC_FLAG = re.compile(r'(^|(?<=\s))@(?P<name>[a-zA-Z]+)')
- # To properly parse parameter names and complex doctypes containing
- # whitespace, we need to tokenize whitespace into a token after certain
- # doctags. All statetracker.HAS_TYPE that are not listed here must not contain
- # any whitespace in their types.
- DOC_FLAG_LEX_SPACES = re.compile(
- r'(^|(?<=\s))@(?P<name>%s)\b' %
- '|'.join([
- 'const',
- 'enum',
- 'extends',
- 'final',
- 'implements',
- 'param',
- 'private',
- 'protected',
- 'public',
- 'return',
- 'type',
- 'typedef'
- ]))
-
- DOC_INLINE_FLAG = re.compile(r'(?<={)@(?P<name>[a-zA-Z]+)')
-
- DOC_TYPE_BLOCK_START = re.compile(r'[<(]')
- DOC_TYPE_BLOCK_END = re.compile(r'[>)]')
- DOC_TYPE_MODIFIERS = re.compile(r'[!?|,:=]')
-
- # Star followed by non-slash, i.e a star that does not end a comment.
- # This is used for TYPE_GROUP below.
- SAFE_STAR = r'(\*(?!/))'
-
- COMMON_DOC_MATCHERS = [
- # Find the end of the comment.
- Matcher(END_BLOCK_COMMENT, Type.END_DOC_COMMENT,
- JavaScriptModes.TEXT_MODE),
-
- # Tokenize documented flags like @private.
- Matcher(DOC_INLINE_FLAG, Type.DOC_INLINE_FLAG),
- Matcher(DOC_FLAG_LEX_SPACES, Type.DOC_FLAG,
- JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE),
-
- # Encountering a doc flag should leave lex spaces mode.
- Matcher(DOC_FLAG, Type.DOC_FLAG, JavaScriptModes.DOC_COMMENT_MODE),
-
- # Tokenize braces so we can find types.
- Matcher(START_BLOCK, Type.DOC_START_BRACE),
- Matcher(END_BLOCK, Type.DOC_END_BRACE),
-
- # And some more to parse types.
- Matcher(DOC_TYPE_BLOCK_START, Type.DOC_TYPE_START_BLOCK),
- Matcher(DOC_TYPE_BLOCK_END, Type.DOC_TYPE_END_BLOCK),
-
- Matcher(DOC_TYPE_MODIFIERS, Type.DOC_TYPE_MODIFIER),
- Matcher(DOC_COMMENT_TYPE_TEXT, Type.COMMENT),
-
- Matcher(DOC_PREFIX, Type.DOC_PREFIX, None, True)]
-
- # When text is not matched, it is given this default type based on mode.
- # If unspecified in this map, the default default is Type.NORMAL.
- JAVASCRIPT_DEFAULT_TYPES = {
- JavaScriptModes.DOC_COMMENT_MODE: Type.COMMENT,
- JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: Type.COMMENT
- }
-
- @classmethod
- def BuildMatchers(cls):
- """Builds the token matcher group.
-
- The token matcher groups work as follows: it is a list of Matcher objects.
- The matchers will be tried in this order, and the first to match will be
- returned. Hence the order is important because the matchers that come first
- overrule the matchers that come later.
-
- Returns:
- The completed token matcher group.
- """
- # Match a keyword string followed by a non-identifier character in order to
- # not match something like doSomething as do + Something.
- keyword = re.compile('(%s)((?=[^%s])|$)' % (
- '|'.join(cls.KEYWORD_LIST), cls.IDENTIFIER_CHAR))
- return {
-
- # Matchers for basic text mode.
- JavaScriptModes.TEXT_MODE: [
- # Check a big group - strings, starting comments, and regexes - all
- # of which could be intertwined. 'string with /regex/',
- # /regex with 'string'/, /* comment with /regex/ and string */ (and
- # so on)
- Matcher(cls.START_DOC_COMMENT, Type.START_DOC_COMMENT,
- JavaScriptModes.DOC_COMMENT_MODE),
- Matcher(cls.START_BLOCK_COMMENT, Type.START_BLOCK_COMMENT,
- JavaScriptModes.BLOCK_COMMENT_MODE),
- Matcher(cls.END_OF_LINE_SINGLE_LINE_COMMENT,
- Type.START_SINGLE_LINE_COMMENT),
- Matcher(cls.START_SINGLE_LINE_COMMENT,
- Type.START_SINGLE_LINE_COMMENT,
- JavaScriptModes.LINE_COMMENT_MODE),
- Matcher(cls.SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_START,
- JavaScriptModes.SINGLE_QUOTE_STRING_MODE),
- Matcher(cls.DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_START,
- JavaScriptModes.DOUBLE_QUOTE_STRING_MODE),
- Matcher(cls.REGEX, Type.REGEX),
-
- # Next we check for start blocks appearing outside any of the items
- # above.
- Matcher(cls.START_BLOCK, Type.START_BLOCK),
- Matcher(cls.END_BLOCK, Type.END_BLOCK),
-
- # Then we search for function declarations.
- Matcher(cls.FUNCTION_DECLARATION, Type.FUNCTION_DECLARATION,
- JavaScriptModes.FUNCTION_MODE),
-
- # Next, we convert non-function related parens to tokens.
- Matcher(cls.OPENING_PAREN, Type.START_PAREN),
- Matcher(cls.CLOSING_PAREN, Type.END_PAREN),
-
- # Next, we convert brackets to tokens.
- Matcher(cls.OPENING_BRACKET, Type.START_BRACKET),
- Matcher(cls.CLOSING_BRACKET, Type.END_BRACKET),
-
- # Find numbers. This has to happen before operators because
- # scientific notation numbers can have + and - in them.
- Matcher(cls.NUMBER, Type.NUMBER),
-
- # Find operators and simple assignments
- Matcher(cls.SIMPLE_LVALUE, Type.SIMPLE_LVALUE),
- Matcher(cls.OPERATOR, Type.OPERATOR),
-
- # Find key words and whitespace.
- Matcher(keyword, Type.KEYWORD),
- Matcher(cls.WHITESPACE, Type.WHITESPACE),
-
- # Find identifiers.
- Matcher(cls.IDENTIFIER, Type.IDENTIFIER),
-
- # Finally, we convert semicolons to tokens.
- Matcher(cls.SEMICOLON, Type.SEMICOLON)],
-
- # Matchers for single quote strings.
- JavaScriptModes.SINGLE_QUOTE_STRING_MODE: [
- Matcher(cls.SINGLE_QUOTE_TEXT, Type.STRING_TEXT),
- Matcher(cls.SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_END,
- JavaScriptModes.TEXT_MODE)],
-
- # Matchers for double quote strings.
- JavaScriptModes.DOUBLE_QUOTE_STRING_MODE: [
- Matcher(cls.DOUBLE_QUOTE_TEXT, Type.STRING_TEXT),
- Matcher(cls.DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_END,
- JavaScriptModes.TEXT_MODE)],
-
- # Matchers for block comments.
- JavaScriptModes.BLOCK_COMMENT_MODE: [
- # First we check for exiting a block comment.
- Matcher(cls.END_BLOCK_COMMENT, Type.END_BLOCK_COMMENT,
- JavaScriptModes.TEXT_MODE),
-
- # Match non-comment-ending text..
- Matcher(cls.BLOCK_COMMENT_TEXT, Type.COMMENT)],
-
- # Matchers for doc comments.
- JavaScriptModes.DOC_COMMENT_MODE: cls.COMMON_DOC_MATCHERS + [
- Matcher(cls.DOC_COMMENT_TEXT, Type.COMMENT)],
-
- JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: cls.COMMON_DOC_MATCHERS + [
- Matcher(cls.WHITESPACE, Type.COMMENT),
- Matcher(cls.DOC_COMMENT_NO_SPACES_TEXT, Type.COMMENT)],
-
- # Matchers for single line comments.
- JavaScriptModes.LINE_COMMENT_MODE: [
- # We greedy match until the end of the line in line comment mode.
- Matcher(cls.ANYTHING, Type.COMMENT, JavaScriptModes.TEXT_MODE)],
-
- # Matchers for code after the function keyword.
- JavaScriptModes.FUNCTION_MODE: [
- # Must match open paren before anything else and move into parameter
- # mode, otherwise everything inside the parameter list is parsed
- # incorrectly.
- Matcher(cls.OPENING_PAREN, Type.START_PARAMETERS,
- JavaScriptModes.PARAMETER_MODE),
- Matcher(cls.WHITESPACE, Type.WHITESPACE),
- Matcher(cls.IDENTIFIER, Type.FUNCTION_NAME)],
-
- # Matchers for function parameters
- JavaScriptModes.PARAMETER_MODE: [
- # When in function parameter mode, a closing paren is treated
- # specially. Everything else is treated as lines of parameters.
- Matcher(cls.CLOSING_PAREN_WITH_SPACE, Type.END_PARAMETERS,
- JavaScriptModes.TEXT_MODE),
- Matcher(cls.PARAMETERS, Type.PARAMETERS,
- JavaScriptModes.PARAMETER_MODE)]}
-
- def __init__(self, parse_js_doc=True):
- """Create a tokenizer object.
-
- Args:
- parse_js_doc: Whether to do detailed parsing of javascript doc comments,
- or simply treat them as normal comments. Defaults to parsing JsDoc.
- """
- matchers = self.BuildMatchers()
- if not parse_js_doc:
- # Make a copy so the original doesn't get modified.
- matchers = copy.deepcopy(matchers)
- matchers[JavaScriptModes.DOC_COMMENT_MODE] = matchers[
- JavaScriptModes.BLOCK_COMMENT_MODE]
-
- tokenizer.Tokenizer.__init__(self, JavaScriptModes.TEXT_MODE, matchers,
- self.JAVASCRIPT_DEFAULT_TYPES)
-
- def _CreateToken(self, string, token_type, line, line_number, values=None):
- """Creates a new JavaScriptToken object.
-
- Args:
- string: The string of input the token contains.
- token_type: The type of token.
- line: The text of the line this token is in.
- line_number: The line number of the token.
- values: A dict of named values within the token. For instance, a
- function declaration may have a value called 'name' which captures the
- name of the function.
- """
- return javascripttokens.JavaScriptToken(string, token_type, line,
- line_number, values, line_number)
diff --git a/tools/closure_linter/build/lib/closure_linter/javascripttokens.py b/tools/closure_linter/build/lib/closure_linter/javascripttokens.py
deleted file mode 100644
index f5815d2bf8..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/javascripttokens.py
+++ /dev/null
@@ -1,153 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Classes to represent JavaScript tokens."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-from closure_linter.common import tokens
-
-class JavaScriptTokenType(tokens.TokenType):
- """Enumeration of JavaScript token types, and useful sets of token types."""
- NUMBER = 'number'
- START_SINGLE_LINE_COMMENT = '//'
- START_BLOCK_COMMENT = '/*'
- START_DOC_COMMENT = '/**'
- END_BLOCK_COMMENT = '*/'
- END_DOC_COMMENT = 'doc */'
- COMMENT = 'comment'
- SINGLE_QUOTE_STRING_START = "'string"
- SINGLE_QUOTE_STRING_END = "string'"
- DOUBLE_QUOTE_STRING_START = '"string'
- DOUBLE_QUOTE_STRING_END = 'string"'
- STRING_TEXT = 'string'
- START_BLOCK = '{'
- END_BLOCK = '}'
- START_PAREN = '('
- END_PAREN = ')'
- START_BRACKET = '['
- END_BRACKET = ']'
- REGEX = '/regex/'
- FUNCTION_DECLARATION = 'function(...)'
- FUNCTION_NAME = 'function functionName(...)'
- START_PARAMETERS = 'startparams('
- PARAMETERS = 'pa,ra,ms'
- END_PARAMETERS = ')endparams'
- SEMICOLON = ';'
- DOC_FLAG = '@flag'
- DOC_INLINE_FLAG = '{@flag ...}'
- DOC_START_BRACE = 'doc {'
- DOC_END_BRACE = 'doc }'
- DOC_PREFIX = 'comment prefix: * '
- DOC_TYPE_START_BLOCK = 'Type <'
- DOC_TYPE_END_BLOCK = 'Type >'
- DOC_TYPE_MODIFIER = 'modifier'
- SIMPLE_LVALUE = 'lvalue='
- KEYWORD = 'keyword'
- OPERATOR = 'operator'
- IDENTIFIER = 'identifier'
-
- STRING_TYPES = frozenset([
- SINGLE_QUOTE_STRING_START, SINGLE_QUOTE_STRING_END,
- DOUBLE_QUOTE_STRING_START, DOUBLE_QUOTE_STRING_END, STRING_TEXT])
-
- COMMENT_TYPES = frozenset([
- START_SINGLE_LINE_COMMENT, COMMENT,
- START_BLOCK_COMMENT, START_DOC_COMMENT,
- END_BLOCK_COMMENT, END_DOC_COMMENT,
- DOC_START_BRACE, DOC_END_BRACE,
- DOC_FLAG, DOC_INLINE_FLAG, DOC_PREFIX,
- DOC_TYPE_START_BLOCK, DOC_TYPE_END_BLOCK, DOC_TYPE_MODIFIER])
-
- FLAG_DESCRIPTION_TYPES = frozenset([
- DOC_INLINE_FLAG, COMMENT, DOC_START_BRACE, DOC_END_BRACE,
- DOC_TYPE_START_BLOCK, DOC_TYPE_END_BLOCK, DOC_TYPE_MODIFIER])
-
- FLAG_ENDING_TYPES = frozenset([DOC_FLAG, END_DOC_COMMENT])
-
- NON_CODE_TYPES = COMMENT_TYPES | frozenset([
- tokens.TokenType.WHITESPACE, tokens.TokenType.BLANK_LINE])
-
- UNARY_OPERATORS = ['!', 'new', 'delete', 'typeof', 'void']
-
- UNARY_OK_OPERATORS = ['--', '++', '-', '+'] + UNARY_OPERATORS
-
- UNARY_POST_OPERATORS = ['--', '++']
-
- # An expression ender is any token that can end an object - i.e. we could have
- # x.y or [1, 2], or (10 + 9) or {a: 10}.
- EXPRESSION_ENDER_TYPES = [tokens.TokenType.NORMAL, IDENTIFIER, NUMBER,
- SIMPLE_LVALUE, END_BRACKET, END_PAREN, END_BLOCK,
- SINGLE_QUOTE_STRING_END, DOUBLE_QUOTE_STRING_END]
-
-
-class JavaScriptToken(tokens.Token):
- """JavaScript token subclass of Token, provides extra instance checks.
-
- The following token types have data in attached_object:
- - All JsDoc flags: a parser.JsDocFlag object.
- """
-
- def IsKeyword(self, keyword):
- """Tests if this token is the given keyword.
-
- Args:
- keyword: The keyword to compare to.
-
- Returns:
- True if this token is a keyword token with the given name.
- """
- return self.type == JavaScriptTokenType.KEYWORD and self.string == keyword
-
- def IsOperator(self, operator):
- """Tests if this token is the given operator.
-
- Args:
- operator: The operator to compare to.
-
- Returns:
- True if this token is a operator token with the given name.
- """
- return self.type == JavaScriptTokenType.OPERATOR and self.string == operator
-
- def IsAssignment(self):
- """Tests if this token is an assignment operator.
-
- Returns:
- True if this token is an assignment operator.
- """
- return (self.type == JavaScriptTokenType.OPERATOR and
- self.string.endswith('=') and
- self.string not in ('==', '!=', '>=', '<=', '===', '!=='))
-
- def IsComment(self):
- """Tests if this token is any part of a comment.
-
- Returns:
- True if this token is any part of a comment.
- """
- return self.type in JavaScriptTokenType.COMMENT_TYPES
-
- def IsCode(self):
- """Tests if this token is code, as opposed to a comment or whitespace."""
- return self.type not in JavaScriptTokenType.NON_CODE_TYPES
-
- def __repr__(self):
- return '<JavaScriptToken: %d, %s, "%s", %r, %r>' % (self.line_number,
- self.type, self.string,
- self.values,
- self.metadata)
diff --git a/tools/closure_linter/build/lib/closure_linter/not_strict_test.py b/tools/closure_linter/build/lib/closure_linter/not_strict_test.py
deleted file mode 100644
index c92c13ee03..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/not_strict_test.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tests for gjslint --nostrict.
-
-Tests errors that can be thrown by gjslint when not in strict mode.
-"""
-
-
-
-import os
-import sys
-import unittest
-
-import gflags as flags
-import unittest as googletest
-
-from closure_linter import errors
-from closure_linter import runner
-from closure_linter.common import filetestcase
-
-_RESOURCE_PREFIX = 'closure_linter/testdata'
-
-flags.FLAGS.strict = False
-flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
-flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
-flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
- 'limited_doc_checks.js')
-
-
-# List of files under testdata to test.
-# We need to list files explicitly since pyglib can't list directories.
-_TEST_FILES = [
- 'not_strict.js'
- ]
-
-
-class GJsLintTestSuite(unittest.TestSuite):
- """Test suite to run a GJsLintTest for each of several files.
-
- If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
- testdata to test. Otherwise, _TEST_FILES is used.
- """
-
- def __init__(self, tests=()):
- unittest.TestSuite.__init__(self, tests)
-
- argv = sys.argv and sys.argv[1:] or []
- if argv:
- test_files = argv
- else:
- test_files = _TEST_FILES
- for test_file in test_files:
- resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
- self.addTest(filetestcase.AnnotatedFileTestCase(resource_path,
- runner.Run,
- errors.ByName))
-
-if __name__ == '__main__':
- # Don't let main parse args; it happens in the TestSuite.
- googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')
diff --git a/tools/closure_linter/build/lib/closure_linter/requireprovidesorter.py b/tools/closure_linter/build/lib/closure_linter/requireprovidesorter.py
deleted file mode 100644
index e7e08a13c2..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/requireprovidesorter.py
+++ /dev/null
@@ -1,329 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Contains logic for sorting goog.provide and goog.require statements.
-
-Closurized JavaScript files use goog.provide and goog.require statements at the
-top of the file to manage dependencies. These statements should be sorted
-alphabetically, however, it is common for them to be accompanied by inline
-comments or suppression annotations. In order to sort these statements without
-disrupting their comments and annotations, the association between statements
-and comments/annotations must be maintained while sorting.
-
- RequireProvideSorter: Handles checking/fixing of provide/require statements.
-"""
-
-
-
-from closure_linter import javascripttokens
-from closure_linter import tokenutil
-
-# Shorthand
-Type = javascripttokens.JavaScriptTokenType
-
-
-class RequireProvideSorter(object):
- """Checks for and fixes alphabetization of provide and require statements.
-
- When alphabetizing, comments on the same line or comments directly above a
- goog.provide or goog.require statement are associated with that statement and
- stay with the statement as it gets sorted.
- """
-
- def CheckProvides(self, token):
- """Checks alphabetization of goog.provide statements.
-
- Iterates over tokens in given token stream, identifies goog.provide tokens,
- and checks that they occur in alphabetical order by the object being
- provided.
-
- Args:
- token: A token in the token stream before any goog.provide tokens.
-
- Returns:
- The first provide token in the token stream.
-
- None is returned if all goog.provide statements are already sorted.
- """
- provide_tokens = self._GetRequireOrProvideTokens(token, 'goog.provide')
- provide_strings = self._GetRequireOrProvideTokenStrings(provide_tokens)
- sorted_provide_strings = sorted(provide_strings)
- if provide_strings != sorted_provide_strings:
- return provide_tokens[0]
- return None
-
- def CheckRequires(self, token):
- """Checks alphabetization of goog.require statements.
-
- Iterates over tokens in given token stream, identifies goog.require tokens,
- and checks that they occur in alphabetical order by the dependency being
- required.
-
- Args:
- token: A token in the token stream before any goog.require tokens.
-
- Returns:
- The first require token in the token stream.
-
- None is returned if all goog.require statements are already sorted.
- """
- require_tokens = self._GetRequireOrProvideTokens(token, 'goog.require')
- require_strings = self._GetRequireOrProvideTokenStrings(require_tokens)
- sorted_require_strings = sorted(require_strings)
- if require_strings != sorted_require_strings:
- return require_tokens[0]
- return None
-
- def FixProvides(self, token):
- """Sorts goog.provide statements in the given token stream alphabetically.
-
- Args:
- token: The first token in the token stream.
- """
- self._FixProvidesOrRequires(
- self._GetRequireOrProvideTokens(token, 'goog.provide'))
-
- def FixRequires(self, token):
- """Sorts goog.require statements in the given token stream alphabetically.
-
- Args:
- token: The first token in the token stream.
- """
- self._FixProvidesOrRequires(
- self._GetRequireOrProvideTokens(token, 'goog.require'))
-
- def _FixProvidesOrRequires(self, tokens):
- """Sorts goog.provide or goog.require statements.
-
- Args:
- tokens: A list of goog.provide or goog.require tokens in the order they
- appear in the token stream. i.e. the first token in this list must
- be the first goog.provide or goog.require token.
- """
- strings = self._GetRequireOrProvideTokenStrings(tokens)
- sorted_strings = sorted(strings)
-
- # Make a separate pass to remove any blank lines between goog.require/
- # goog.provide tokens.
- first_token = tokens[0]
- last_token = tokens[-1]
- i = last_token
- while i != first_token and i is not None:
- if i.type is Type.BLANK_LINE:
- tokenutil.DeleteToken(i)
- i = i.previous
-
- # A map from required/provided object name to tokens that make up the line
- # it was on, including any comments immediately before it or after it on the
- # same line.
- tokens_map = self._GetTokensMap(tokens)
-
- # Iterate over the map removing all tokens.
- for name in tokens_map:
- tokens_to_delete = tokens_map[name]
- for i in tokens_to_delete:
- tokenutil.DeleteToken(i)
-
- # Save token to rest of file. Sorted token will be inserted before this.
- rest_of_file = tokens_map[strings[-1]][-1].next
-
- # Re-add all tokens in the map in alphabetical order.
- insert_after = tokens[0].previous
- for string in sorted_strings:
- for i in tokens_map[string]:
- if rest_of_file:
- tokenutil.InsertTokenBefore(i, rest_of_file)
- else:
- tokenutil.InsertTokenAfter(i, insert_after)
- insert_after = i
-
- def _GetRequireOrProvideTokens(self, token, token_string):
- """Gets all goog.provide or goog.require tokens in the given token stream.
-
- Args:
- token: The first token in the token stream.
- token_string: One of 'goog.provide' or 'goog.require' to indicate which
- tokens to find.
-
- Returns:
- A list of goog.provide or goog.require tokens in the order they appear in
- the token stream.
- """
- tokens = []
- while token:
- if token.type == Type.IDENTIFIER:
- if token.string == token_string:
- tokens.append(token)
- elif token.string not in [
- 'goog.provide', 'goog.require', 'goog.setTestOnly']:
- # These 3 identifiers are at the top of the file. So if any other
- # identifier is encountered, return.
- # TODO(user): Once it's decided what ordering goog.require
- # should use, add 'goog.module' to the list above and implement the
- # decision.
- break
- token = token.next
-
- return tokens
-
- def _GetRequireOrProvideTokenStrings(self, tokens):
- """Gets a list of strings corresponding to the given list of tokens.
-
- The string will be the next string in the token stream after each token in
- tokens. This is used to find the object being provided/required by a given
- goog.provide or goog.require token.
-
- Args:
- tokens: A list of goog.provide or goog.require tokens.
-
- Returns:
- A list of object names that are being provided or required by the given
- list of tokens. For example:
-
- ['object.a', 'object.c', 'object.b']
- """
- token_strings = []
- for token in tokens:
- if not token.is_deleted:
- name = tokenutil.GetStringAfterToken(token)
- token_strings.append(name)
- return token_strings
-
- def _GetTokensMap(self, tokens):
- """Gets a map from object name to tokens associated with that object.
-
- Starting from the goog.provide/goog.require token, searches backwards in the
- token stream for any lines that start with a comment. These lines are
- associated with the goog.provide/goog.require token. Also associates any
- tokens on the same line as the goog.provide/goog.require token with that
- token.
-
- Args:
- tokens: A list of goog.provide or goog.require tokens.
-
- Returns:
- A dictionary that maps object names to the tokens associated with the
- goog.provide or goog.require of that object name. For example:
-
- {
- 'object.a': [JavaScriptToken, JavaScriptToken, ...],
- 'object.b': [...]
- }
-
- The list of tokens includes any comment lines above the goog.provide or
- goog.require statement and everything after the statement on the same
- line. For example, all of the following would be associated with
- 'object.a':
-
- /** @suppress {extraRequire} */
- goog.require('object.a'); // Some comment.
- """
- tokens_map = {}
- for token in tokens:
- object_name = tokenutil.GetStringAfterToken(token)
- # If the previous line starts with a comment, presume that the comment
- # relates to the goog.require or goog.provide and keep them together when
- # sorting.
- first_token = token
- previous_first_token = tokenutil.GetFirstTokenInPreviousLine(first_token)
- while (previous_first_token and
- previous_first_token.IsAnyType(Type.COMMENT_TYPES)):
- first_token = previous_first_token
- previous_first_token = tokenutil.GetFirstTokenInPreviousLine(
- first_token)
-
- # Find the last token on the line.
- last_token = tokenutil.GetLastTokenInSameLine(token)
-
- all_tokens = self._GetTokenList(first_token, last_token)
- tokens_map[object_name] = all_tokens
- return tokens_map
-
- def _GetTokenList(self, first_token, last_token):
- """Gets a list of all tokens from first_token to last_token, inclusive.
-
- Args:
- first_token: The first token to get.
- last_token: The last token to get.
-
- Returns:
- A list of all tokens between first_token and last_token, including both
- first_token and last_token.
-
- Raises:
- Exception: If the token stream ends before last_token is reached.
- """
- token_list = []
- token = first_token
- while token != last_token:
- if not token:
- raise Exception('ran out of tokens')
- token_list.append(token)
- token = token.next
- token_list.append(last_token)
-
- return token_list
-
- def GetFixedRequireString(self, token):
- """Get fixed/sorted order of goog.require statements.
-
- Args:
- token: The first token in the token stream.
-
- Returns:
- A string for correct sorted order of goog.require.
- """
- return self._GetFixedRequireOrProvideString(
- self._GetRequireOrProvideTokens(token, 'goog.require'))
-
- def GetFixedProvideString(self, token):
- """Get fixed/sorted order of goog.provide statements.
-
- Args:
- token: The first token in the token stream.
-
- Returns:
- A string for correct sorted order of goog.provide.
- """
- return self._GetFixedRequireOrProvideString(
- self._GetRequireOrProvideTokens(token, 'goog.provide'))
-
- def _GetFixedRequireOrProvideString(self, tokens):
- """Sorts goog.provide or goog.require statements.
-
- Args:
- tokens: A list of goog.provide or goog.require tokens in the order they
- appear in the token stream. i.e. the first token in this list must
- be the first goog.provide or goog.require token.
-
- Returns:
- A string for sorted goog.require or goog.provide statements
- """
-
- # A map from required/provided object name to tokens that make up the line
- # it was on, including any comments immediately before it or after it on the
- # same line.
- tokens_map = self._GetTokensMap(tokens)
- sorted_strings = sorted(tokens_map.keys())
-
- new_order = ''
- for string in sorted_strings:
- for i in tokens_map[string]:
- new_order += i.string
- if i.IsLastInLine():
- new_order += '\n'
-
- return new_order
diff --git a/tools/closure_linter/build/lib/closure_linter/requireprovidesorter_test.py b/tools/closure_linter/build/lib/closure_linter/requireprovidesorter_test.py
deleted file mode 100644
index fecb6d04da..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/requireprovidesorter_test.py
+++ /dev/null
@@ -1,155 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unit tests for RequireProvideSorter."""
-
-
-
-import unittest as googletest
-from closure_linter import javascripttokens
-from closure_linter import requireprovidesorter
-from closure_linter import testutil
-
-# pylint: disable=g-bad-name
-TokenType = javascripttokens.JavaScriptTokenType
-
-
-class RequireProvideSorterTest(googletest.TestCase):
- """Tests for RequireProvideSorter."""
-
- def testGetFixedProvideString(self):
- """Tests that fixed string constains proper comments also."""
- input_lines = [
- 'goog.provide(\'package.xyz\');',
- '/** @suppress {extraprovide} **/',
- 'goog.provide(\'package.abcd\');'
- ]
-
- expected_lines = [
- '/** @suppress {extraprovide} **/',
- 'goog.provide(\'package.abcd\');',
- 'goog.provide(\'package.xyz\');'
- ]
-
- token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)
-
- sorter = requireprovidesorter.RequireProvideSorter()
- fixed_provide_string = sorter.GetFixedProvideString(token)
-
- self.assertEquals(expected_lines, fixed_provide_string.splitlines())
-
- def testGetFixedRequireString(self):
- """Tests that fixed string constains proper comments also."""
- input_lines = [
- 'goog.require(\'package.xyz\');',
- '/** This is needed for scope. **/',
- 'goog.require(\'package.abcd\');'
- ]
-
- expected_lines = [
- '/** This is needed for scope. **/',
- 'goog.require(\'package.abcd\');',
- 'goog.require(\'package.xyz\');'
- ]
-
- token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)
-
- sorter = requireprovidesorter.RequireProvideSorter()
- fixed_require_string = sorter.GetFixedRequireString(token)
-
- self.assertEquals(expected_lines, fixed_require_string.splitlines())
-
- def testFixRequires_removeBlankLines(self):
- """Tests that blank lines are omitted in sorted goog.require statements."""
- input_lines = [
- 'goog.provide(\'package.subpackage.Whatever\');',
- '',
- 'goog.require(\'package.subpackage.ClassB\');',
- '',
- 'goog.require(\'package.subpackage.ClassA\');'
- ]
- expected_lines = [
- 'goog.provide(\'package.subpackage.Whatever\');',
- '',
- 'goog.require(\'package.subpackage.ClassA\');',
- 'goog.require(\'package.subpackage.ClassB\');'
- ]
- token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)
-
- sorter = requireprovidesorter.RequireProvideSorter()
- sorter.FixRequires(token)
-
- self.assertEquals(expected_lines, self._GetLines(token))
-
- def fixRequiresTest_withTestOnly(self, position):
- """Regression-tests sorting even with a goog.setTestOnly statement.
-
- Args:
- position: The position in the list where to insert the goog.setTestOnly
- statement. Will be used to test all possible combinations for
- this test.
- """
- input_lines = [
- 'goog.provide(\'package.subpackage.Whatever\');',
- '',
- 'goog.require(\'package.subpackage.ClassB\');',
- 'goog.require(\'package.subpackage.ClassA\');'
- ]
- expected_lines = [
- 'goog.provide(\'package.subpackage.Whatever\');',
- '',
- 'goog.require(\'package.subpackage.ClassA\');',
- 'goog.require(\'package.subpackage.ClassB\');'
- ]
- input_lines.insert(position, 'goog.setTestOnly();')
- expected_lines.insert(position, 'goog.setTestOnly();')
-
- token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)
-
- sorter = requireprovidesorter.RequireProvideSorter()
- sorter.FixRequires(token)
-
- self.assertEquals(expected_lines, self._GetLines(token))
-
- def testFixRequires_withTestOnly(self):
- """Regression-tests sorting even after a goog.setTestOnly statement."""
-
- # goog.setTestOnly at first line.
- self.fixRequiresTest_withTestOnly(position=0)
-
- # goog.setTestOnly after goog.provide.
- self.fixRequiresTest_withTestOnly(position=1)
-
- # goog.setTestOnly before goog.require.
- self.fixRequiresTest_withTestOnly(position=2)
-
- # goog.setTestOnly after goog.require.
- self.fixRequiresTest_withTestOnly(position=4)
-
- def _GetLines(self, token):
- """Returns an array of lines based on the specified token stream."""
- lines = []
- line = ''
- while token:
- line += token.string
- if token.IsLastInLine():
- lines.append(line)
- line = ''
- token = token.next
- return lines
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/runner.py b/tools/closure_linter/build/lib/closure_linter/runner.py
deleted file mode 100644
index 04e7fa4ac8..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/runner.py
+++ /dev/null
@@ -1,198 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Main lint function. Tokenizes file, runs passes, and feeds to checker."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = 'nnaze@google.com (Nathan Naze)'
-
-import traceback
-
-import gflags as flags
-
-from closure_linter import checker
-from closure_linter import ecmalintrules
-from closure_linter import ecmametadatapass
-from closure_linter import error_check
-from closure_linter import errors
-from closure_linter import javascriptstatetracker
-from closure_linter import javascripttokenizer
-
-from closure_linter.common import error
-from closure_linter.common import htmlutil
-from closure_linter.common import tokens
-
-flags.DEFINE_list('limited_doc_files', ['dummy.js', 'externs.js'],
- 'List of files with relaxed documentation checks. Will not '
- 'report errors for missing documentation, some missing '
- 'descriptions, or methods whose @return tags don\'t have a '
- 'matching return statement.')
-flags.DEFINE_boolean('error_trace', False,
- 'Whether to show error exceptions.')
-flags.ADOPT_module_key_flags(checker)
-flags.ADOPT_module_key_flags(ecmalintrules)
-flags.ADOPT_module_key_flags(error_check)
-
-
-def _GetLastNonWhiteSpaceToken(start_token):
- """Get the last non-whitespace token in a token stream."""
- ret_token = None
-
- whitespace_tokens = frozenset([
- tokens.TokenType.WHITESPACE, tokens.TokenType.BLANK_LINE])
- for t in start_token:
- if t.type not in whitespace_tokens:
- ret_token = t
-
- return ret_token
-
-
-def _IsHtml(filename):
- return filename.endswith('.html') or filename.endswith('.htm')
-
-
-def _Tokenize(fileobj):
- """Tokenize a file.
-
- Args:
- fileobj: file-like object (or iterable lines) with the source.
-
- Returns:
- The first token in the token stream and the ending mode of the tokenizer.
- """
- tokenizer = javascripttokenizer.JavaScriptTokenizer()
- start_token = tokenizer.TokenizeFile(fileobj)
- return start_token, tokenizer.mode
-
-
-def _IsLimitedDocCheck(filename, limited_doc_files):
- """Whether this this a limited-doc file.
-
- Args:
- filename: The filename.
- limited_doc_files: Iterable of strings. Suffixes of filenames that should
- be limited doc check.
-
- Returns:
- Whether the file should be limited check.
- """
- for limited_doc_filename in limited_doc_files:
- if filename.endswith(limited_doc_filename):
- return True
- return False
-
-
-def Run(filename, error_handler, source=None):
- """Tokenize, run passes, and check the given file.
-
- Args:
- filename: The path of the file to check
- error_handler: The error handler to report errors to.
- source: A file-like object with the file source. If omitted, the file will
- be read from the filename path.
- """
- if not source:
- try:
- source = open(filename)
- except IOError:
- error_handler.HandleFile(filename, None)
- error_handler.HandleError(
- error.Error(errors.FILE_NOT_FOUND, 'File not found'))
- error_handler.FinishFile()
- return
-
- if _IsHtml(filename):
- source_file = htmlutil.GetScriptLines(source)
- else:
- source_file = source
-
- token, tokenizer_mode = _Tokenize(source_file)
-
- error_handler.HandleFile(filename, token)
-
- # If we did not end in the basic mode, this a failed parse.
- if tokenizer_mode is not javascripttokenizer.JavaScriptModes.TEXT_MODE:
- error_handler.HandleError(
- error.Error(errors.FILE_IN_BLOCK,
- 'File ended in mode "%s".' % tokenizer_mode,
- _GetLastNonWhiteSpaceToken(token)))
-
- # Run the ECMA pass
- error_token = None
-
- ecma_pass = ecmametadatapass.EcmaMetaDataPass()
- error_token = RunMetaDataPass(token, ecma_pass, error_handler, filename)
-
- is_limited_doc_check = (
- _IsLimitedDocCheck(filename, flags.FLAGS.limited_doc_files))
-
- _RunChecker(token, error_handler,
- is_limited_doc_check,
- is_html=_IsHtml(filename),
- stop_token=error_token)
-
- error_handler.FinishFile()
-
-
-def RunMetaDataPass(start_token, metadata_pass, error_handler, filename=''):
- """Run a metadata pass over a token stream.
-
- Args:
- start_token: The first token in a token stream.
- metadata_pass: Metadata pass to run.
- error_handler: The error handler to report errors to.
- filename: Filename of the source.
-
- Returns:
- The token where the error occurred (if any).
- """
-
- try:
- metadata_pass.Process(start_token)
- except ecmametadatapass.ParseError, parse_err:
- if flags.FLAGS.error_trace:
- traceback.print_exc()
- error_token = parse_err.token
- error_msg = str(parse_err)
- error_handler.HandleError(
- error.Error(errors.FILE_DOES_NOT_PARSE,
- ('Error parsing file at token "%s". Unable to '
- 'check the rest of file.'
- '\nError "%s"' % (error_token, error_msg)), error_token))
- return error_token
- except Exception: # pylint: disable=broad-except
- traceback.print_exc()
- error_handler.HandleError(
- error.Error(
- errors.FILE_DOES_NOT_PARSE,
- 'Internal error in %s' % filename))
-
-
-def _RunChecker(start_token, error_handler,
- limited_doc_checks, is_html,
- stop_token=None):
-
- state_tracker = javascriptstatetracker.JavaScriptStateTracker()
-
- style_checker = checker.JavaScriptStyleChecker(
- state_tracker=state_tracker,
- error_handler=error_handler)
-
- style_checker.Check(start_token,
- is_html=is_html,
- limited_doc_checks=limited_doc_checks,
- stop_token=stop_token)
diff --git a/tools/closure_linter/build/lib/closure_linter/runner_test.py b/tools/closure_linter/build/lib/closure_linter/runner_test.py
deleted file mode 100644
index da5857d309..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/runner_test.py
+++ /dev/null
@@ -1,101 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unit tests for the runner module."""
-
-__author__ = ('nnaze@google.com (Nathan Naze)')
-
-import StringIO
-
-
-import mox
-
-
-import unittest as googletest
-
-from closure_linter import errors
-from closure_linter import runner
-from closure_linter.common import error
-from closure_linter.common import errorhandler
-from closure_linter.common import tokens
-
-
-class LimitedDocTest(googletest.TestCase):
-
- def testIsLimitedDocCheck(self):
- self.assertTrue(runner._IsLimitedDocCheck('foo_test.js', ['_test.js']))
- self.assertFalse(runner._IsLimitedDocCheck('foo_bar.js', ['_test.js']))
-
- self.assertTrue(runner._IsLimitedDocCheck(
- 'foo_moo.js', ['moo.js', 'quack.js']))
- self.assertFalse(runner._IsLimitedDocCheck(
- 'foo_moo.js', ['woof.js', 'quack.js']))
-
-
-class RunnerTest(googletest.TestCase):
-
- def setUp(self):
- self.mox = mox.Mox()
-
- def testRunOnMissingFile(self):
- mock_error_handler = self.mox.CreateMock(errorhandler.ErrorHandler)
-
- def ValidateError(err):
- return (isinstance(err, error.Error) and
- err.code is errors.FILE_NOT_FOUND and
- err.token is None)
-
- mock_error_handler.HandleFile('does_not_exist.js', None)
- mock_error_handler.HandleError(mox.Func(ValidateError))
- mock_error_handler.FinishFile()
-
- self.mox.ReplayAll()
-
- runner.Run('does_not_exist.js', mock_error_handler)
-
- self.mox.VerifyAll()
-
- def testBadTokenization(self):
- mock_error_handler = self.mox.CreateMock(errorhandler.ErrorHandler)
-
- def ValidateError(err):
- return (isinstance(err, error.Error) and
- err.code is errors.FILE_IN_BLOCK and
- err.token.string == '}')
-
- mock_error_handler.HandleFile('foo.js', mox.IsA(tokens.Token))
- mock_error_handler.HandleError(mox.Func(ValidateError))
- mock_error_handler.HandleError(mox.IsA(error.Error))
- mock_error_handler.FinishFile()
-
- self.mox.ReplayAll()
-
- source = StringIO.StringIO(_BAD_TOKENIZATION_SCRIPT)
- runner.Run('foo.js', mock_error_handler, source)
-
- self.mox.VerifyAll()
-
-
-_BAD_TOKENIZATION_SCRIPT = """
-function foo () {
- var a = 3;
- var b = 2;
- return b + a; /* Comment not closed
-}
-"""
-
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/scopeutil.py b/tools/closure_linter/build/lib/closure_linter/scopeutil.py
deleted file mode 100644
index a7ca9b630a..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/scopeutil.py
+++ /dev/null
@@ -1,206 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tools to match goog.scope alias statements."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = ('nnaze@google.com (Nathan Naze)')
-
-import itertools
-
-from closure_linter import ecmametadatapass
-from closure_linter import tokenutil
-from closure_linter.javascripttokens import JavaScriptTokenType
-
-
-
-def IsGoogScopeBlock(context):
- """Whether the given context is a goog.scope block.
-
- This function only checks that the block is a function block inside
- a goog.scope() call.
-
- TODO(nnaze): Implement goog.scope checks that verify the call is
- in the root context and contains only a single function literal.
-
- Args:
- context: An EcmaContext of type block.
-
- Returns:
- Whether the context is a goog.scope block.
- """
-
- if context.type != ecmametadatapass.EcmaContext.BLOCK:
- return False
-
- if not _IsFunctionLiteralBlock(context):
- return False
-
- # Check that this function is contained by a group
- # of form "goog.scope(...)".
- parent = context.parent
- if parent and parent.type is ecmametadatapass.EcmaContext.GROUP:
-
- last_code_token = parent.start_token.metadata.last_code
-
- if (last_code_token and
- last_code_token.type is JavaScriptTokenType.IDENTIFIER and
- last_code_token.string == 'goog.scope'):
- return True
-
- return False
-
-
-def _IsFunctionLiteralBlock(block_context):
- """Check if a context is a function literal block (without parameters).
-
- Example function literal block: 'function() {}'
-
- Args:
- block_context: An EcmaContext of type block.
-
- Returns:
- Whether this context is a function literal block.
- """
-
- previous_code_tokens_iter = itertools.ifilter(
- lambda token: token not in JavaScriptTokenType.NON_CODE_TYPES,
- reversed(block_context.start_token))
-
- # Ignore the current token
- next(previous_code_tokens_iter, None)
-
- # Grab the previous three tokens and put them in correct order.
- previous_code_tokens = list(itertools.islice(previous_code_tokens_iter, 3))
- previous_code_tokens.reverse()
-
- # There aren't three previous tokens.
- if len(previous_code_tokens) is not 3:
- return False
-
- # Check that the previous three code tokens are "function ()"
- previous_code_token_types = [token.type for token in previous_code_tokens]
- if (previous_code_token_types == [
- JavaScriptTokenType.FUNCTION_DECLARATION,
- JavaScriptTokenType.START_PARAMETERS,
- JavaScriptTokenType.END_PARAMETERS]):
- return True
-
- return False
-
-
-def IsInClosurizedNamespace(symbol, closurized_namespaces):
- """Match a goog.scope alias.
-
- Args:
- symbol: An identifier like 'goog.events.Event'.
- closurized_namespaces: Iterable of valid Closurized namespaces (strings).
-
- Returns:
- True if symbol is an identifier in a Closurized namespace, otherwise False.
- """
- for ns in closurized_namespaces:
- if symbol.startswith(ns + '.'):
- return True
-
- return False
-
-
-def _GetVarAssignmentTokens(context):
- """Returns the tokens from context if it is a var assignment.
-
- Args:
- context: An EcmaContext.
-
- Returns:
- If a var assignment, the tokens contained within it w/o the trailing
- semicolon.
- """
- if context.type != ecmametadatapass.EcmaContext.VAR:
- return
-
- # Get the tokens in this statement.
- if context.start_token and context.end_token:
- statement_tokens = tokenutil.GetTokenRange(context.start_token,
- context.end_token)
- else:
- return
-
- # And now just those tokens that are actually code.
- is_non_code_type = lambda t: t.type not in JavaScriptTokenType.NON_CODE_TYPES
- code_tokens = filter(is_non_code_type, statement_tokens)
-
- # Pop off the semicolon if present.
- if code_tokens and code_tokens[-1].IsType(JavaScriptTokenType.SEMICOLON):
- code_tokens.pop()
-
- if len(code_tokens) < 4:
- return
-
- if (code_tokens[0].IsKeyword('var') and
- code_tokens[1].IsType(JavaScriptTokenType.SIMPLE_LVALUE) and
- code_tokens[2].IsOperator('=')):
- return code_tokens
-
-
-def MatchAlias(context):
- """Match an alias statement (some identifier assigned to a variable).
-
- Example alias: var MyClass = proj.longNamespace.MyClass.
-
- Args:
- context: An EcmaContext of type EcmaContext.VAR.
-
- Returns:
- If a valid alias, returns a tuple of alias and symbol, otherwise None.
- """
- code_tokens = _GetVarAssignmentTokens(context)
- if code_tokens is None:
- return
-
- if all(tokenutil.IsIdentifierOrDot(t) for t in code_tokens[3:]):
- # var Foo = bar.Foo;
- alias, symbol = code_tokens[1], code_tokens[3]
- # Mark both tokens as an alias definition to not count them as usages.
- alias.metadata.is_alias_definition = True
- symbol.metadata.is_alias_definition = True
- return alias.string, tokenutil.GetIdentifierForToken(symbol)
-
-
-def MatchModuleAlias(context):
- """Match an alias statement in a goog.module style import.
-
- Example alias: var MyClass = goog.require('proj.longNamespace.MyClass').
-
- Args:
- context: An EcmaContext.
-
- Returns:
- If a valid alias, returns a tuple of alias and symbol, otherwise None.
- """
- code_tokens = _GetVarAssignmentTokens(context)
- if code_tokens is None:
- return
-
- if(code_tokens[3].IsType(JavaScriptTokenType.IDENTIFIER) and
- code_tokens[3].string == 'goog.require'):
- # var Foo = goog.require('bar.Foo');
- alias = code_tokens[1]
- symbol = tokenutil.GetStringAfterToken(code_tokens[3])
- if symbol:
- alias.metadata.is_alias_definition = True
- return alias.string, symbol
diff --git a/tools/closure_linter/build/lib/closure_linter/scopeutil_test.py b/tools/closure_linter/build/lib/closure_linter/scopeutil_test.py
deleted file mode 100644
index 722a953900..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/scopeutil_test.py
+++ /dev/null
@@ -1,222 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unit tests for the scopeutil module."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = ('nnaze@google.com (Nathan Naze)')
-
-
-import unittest as googletest
-
-from closure_linter import ecmametadatapass
-from closure_linter import scopeutil
-from closure_linter import testutil
-
-
-def _FindContexts(start_token):
- """Depth first search of all contexts referenced by a token stream.
-
- Includes contexts' parents, which might not be directly referenced
- by any token in the stream.
-
- Args:
- start_token: First token in the token stream.
-
- Yields:
- All contexts referenced by this token stream.
- """
-
- seen_contexts = set()
-
- # For each token, yield the context if we haven't seen it before.
- for token in start_token:
-
- token_context = token.metadata.context
- contexts = [token_context]
-
- # Also grab all the context's ancestors.
- parent = token_context.parent
- while parent:
- contexts.append(parent)
- parent = parent.parent
-
- # Yield each of these contexts if we've not seen them.
- for context in contexts:
- if context not in seen_contexts:
- yield context
-
- seen_contexts.add(context)
-
-
-def _FindFirstContextOfType(token, context_type):
- """Returns the first statement context."""
- for context in _FindContexts(token):
- if context.type == context_type:
- return context
-
-
-def _ParseAssignment(script):
- start_token = testutil.TokenizeSourceAndRunEcmaPass(script)
- statement = _FindFirstContextOfType(
- start_token, ecmametadatapass.EcmaContext.VAR)
- return statement
-
-
-class StatementTest(googletest.TestCase):
-
- def assertAlias(self, expected_match, script):
- statement = _ParseAssignment(script)
- match = scopeutil.MatchAlias(statement)
- self.assertEquals(expected_match, match)
-
- def assertModuleAlias(self, expected_match, script):
- statement = _ParseAssignment(script)
- match = scopeutil.MatchModuleAlias(statement)
- self.assertEquals(expected_match, match)
-
- def testSimpleAliases(self):
- self.assertAlias(
- ('foo', 'goog.foo'),
- 'var foo = goog.foo;')
-
- self.assertAlias(
- ('foo', 'goog.foo'),
- 'var foo = goog.foo') # No semicolon
-
- def testAliasWithComment(self):
- self.assertAlias(
- ('Component', 'goog.ui.Component'),
- 'var Component = /* comment */ goog.ui.Component;')
-
- def testMultilineAlias(self):
- self.assertAlias(
- ('Component', 'goog.ui.Component'),
- 'var Component = \n goog.ui.\n Component;')
-
- def testNonSymbolAliasVarStatements(self):
- self.assertAlias(None, 'var foo = 3;')
- self.assertAlias(None, 'var foo = function() {};')
- self.assertAlias(None, 'var foo = bar ? baz : qux;')
-
- def testModuleAlias(self):
- self.assertModuleAlias(
- ('foo', 'goog.foo'),
- 'var foo = goog.require("goog.foo");')
- self.assertModuleAlias(
- None,
- 'var foo = goog.require(notastring);')
-
-
-class ScopeBlockTest(googletest.TestCase):
-
- @staticmethod
- def _GetBlocks(source):
- start_token = testutil.TokenizeSourceAndRunEcmaPass(source)
- for context in _FindContexts(start_token):
- if context.type is ecmametadatapass.EcmaContext.BLOCK:
- yield context
-
- def assertNoBlocks(self, script):
- blocks = list(self._GetBlocks(script))
- self.assertEquals([], blocks)
-
- def testNotBlocks(self):
- # Ensure these are not considered blocks.
- self.assertNoBlocks('goog.scope(if{});')
- self.assertNoBlocks('goog.scope(for{});')
- self.assertNoBlocks('goog.scope(switch{});')
- self.assertNoBlocks('goog.scope(function foo{});')
-
- def testNonScopeBlocks(self):
-
- blocks = list(self._GetBlocks('goog.scope(try{});'))
- self.assertEquals(1, len(blocks))
- self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
-
- blocks = list(self._GetBlocks('goog.scope(function(a,b){});'))
- self.assertEquals(1, len(blocks))
- self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
-
- blocks = list(self._GetBlocks('goog.scope(try{} catch(){});'))
- # Two blocks: try and catch.
- self.assertEquals(2, len(blocks))
- self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
- self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
-
- blocks = list(self._GetBlocks('goog.scope(try{} catch(){} finally {});'))
- self.assertEquals(3, len(blocks))
- self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
- self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
- self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
-
-
-class AliasTest(googletest.TestCase):
-
- def setUp(self):
- self.start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
-
- def testMatchAliasStatement(self):
- matches = set()
- for context in _FindContexts(self.start_token):
- match = scopeutil.MatchAlias(context)
- if match:
- matches.add(match)
-
- self.assertEquals(
- set([('bar', 'baz'),
- ('foo', 'this.foo_'),
- ('Component', 'goog.ui.Component'),
- ('MyClass', 'myproject.foo.MyClass'),
- ('NonClosurizedClass', 'aaa.bbb.NonClosurizedClass')]),
- matches)
-
- def testMatchAliasStatement_withClosurizedNamespaces(self):
-
- closurized_namepaces = frozenset(['goog', 'myproject'])
-
- matches = set()
- for context in _FindContexts(self.start_token):
- match = scopeutil.MatchAlias(context)
- if match:
- unused_alias, symbol = match
- if scopeutil.IsInClosurizedNamespace(symbol, closurized_namepaces):
- matches.add(match)
-
- self.assertEquals(
- set([('MyClass', 'myproject.foo.MyClass'),
- ('Component', 'goog.ui.Component')]),
- matches)
-
-_TEST_SCRIPT = """
-goog.scope(function() {
- var Component = goog.ui.Component; // scope alias
- var MyClass = myproject.foo.MyClass; // scope alias
-
- // Scope alias of non-Closurized namespace.
- var NonClosurizedClass = aaa.bbb.NonClosurizedClass;
-
- var foo = this.foo_; // non-scope object property alias
- var bar = baz; // variable alias
-
- var component = new Component();
-});
-
-"""
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/statetracker.py b/tools/closure_linter/build/lib/closure_linter/statetracker.py
deleted file mode 100644
index 52e363972f..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/statetracker.py
+++ /dev/null
@@ -1,1294 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Light weight EcmaScript state tracker that reads tokens and tracks state."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-import re
-
-from closure_linter import javascripttokenizer
-from closure_linter import javascripttokens
-from closure_linter import tokenutil
-from closure_linter import typeannotation
-
-# Shorthand
-Type = javascripttokens.JavaScriptTokenType
-
-
-class DocFlag(object):
- """Generic doc flag object.
-
- Attribute:
- flag_type: param, return, define, type, etc.
- flag_token: The flag token.
- type_start_token: The first token specifying the flag type,
- including braces.
- type_end_token: The last token specifying the flag type,
- including braces.
- type: The type spec string.
- jstype: The type spec, a TypeAnnotation instance.
- name_token: The token specifying the flag name.
- name: The flag name
- description_start_token: The first token in the description.
- description_end_token: The end token in the description.
- description: The description.
- """
-
- # Please keep these lists alphabetized.
-
- # The list of standard jsdoc tags is from
- STANDARD_DOC = frozenset([
- 'author',
- 'bug',
- 'classTemplate',
- 'consistentIdGenerator',
- 'const',
- 'constructor',
- 'define',
- 'deprecated',
- 'dict',
- 'enum',
- 'export',
- 'expose',
- 'extends',
- 'externs',
- 'fileoverview',
- 'idGenerator',
- 'implements',
- 'implicitCast',
- 'interface',
- 'lends',
- 'license',
- 'ngInject', # This annotation is specific to AngularJS.
- 'noalias',
- 'nocompile',
- 'nosideeffects',
- 'override',
- 'owner',
- 'package',
- 'param',
- 'preserve',
- 'private',
- 'protected',
- 'public',
- 'return',
- 'see',
- 'stableIdGenerator',
- 'struct',
- 'supported',
- 'template',
- 'this',
- 'type',
- 'typedef',
- 'unrestricted',
- ])
-
- ANNOTATION = frozenset(['preserveTry', 'suppress'])
-
- LEGAL_DOC = STANDARD_DOC | ANNOTATION
-
- # Includes all Closure Compiler @suppress types.
- # Not all of these annotations are interpreted by Closure Linter.
- #
- # Specific cases:
- # - accessControls is supported by the compiler at the expression
- # and method level to suppress warnings about private/protected
- # access (method level applies to all references in the method).
- # The linter mimics the compiler behavior.
- SUPPRESS_TYPES = frozenset([
- 'accessControls',
- 'ambiguousFunctionDecl',
- 'checkDebuggerStatement',
- 'checkRegExp',
- 'checkStructDictInheritance',
- 'checkTypes',
- 'checkVars',
- 'const',
- 'constantProperty',
- 'deprecated',
- 'duplicate',
- 'es5Strict',
- 'externsValidation',
- 'extraProvide',
- 'extraRequire',
- 'fileoverviewTags',
- 'globalThis',
- 'internetExplorerChecks',
- 'invalidCasts',
- 'missingProperties',
- 'missingProvide',
- 'missingRequire',
- 'missingReturn',
- 'nonStandardJsDocs',
- 'strictModuleDepCheck',
- 'suspiciousCode',
- 'tweakValidation',
- 'typeInvalidation',
- 'undefinedNames',
- 'undefinedVars',
- 'underscore',
- 'unknownDefines',
- 'unnecessaryCasts',
- 'unusedPrivateMembers',
- 'uselessCode',
- 'visibility',
- 'with',
- ])
-
- HAS_DESCRIPTION = frozenset([
- 'define',
- 'deprecated',
- 'desc',
- 'fileoverview',
- 'license',
- 'param',
- 'preserve',
- 'return',
- 'supported',
- ])
-
- # Docflags whose argument should be parsed using the typeannotation parser.
- HAS_TYPE = frozenset([
- 'const',
- 'define',
- 'enum',
- 'extends',
- 'final',
- 'implements',
- 'mods',
- 'package',
- 'param',
- 'private',
- 'protected',
- 'public',
- 'return',
- 'suppress',
- 'type',
- 'typedef',
- ])
-
- # Docflags for which it's ok to omit the type (flag without an argument).
- CAN_OMIT_TYPE = frozenset([
- 'const',
- 'enum',
- 'final',
- 'package',
- 'private',
- 'protected',
- 'public',
- 'suppress', # We'll raise a separate INCORRECT_SUPPRESS_SYNTAX instead.
- ])
-
- # Docflags that only take a type as an argument and should not parse a
- # following description.
- TYPE_ONLY = frozenset([
- 'const',
- 'enum',
- 'extends',
- 'implements',
- 'package',
- 'suppress',
- 'type',
- ])
-
- HAS_NAME = frozenset(['param'])
-
- EMPTY_COMMENT_LINE = re.compile(r'^\s*\*?\s*$')
- EMPTY_STRING = re.compile(r'^\s*$')
-
- def __init__(self, flag_token, error_handler=None):
- """Creates the DocFlag object and attaches it to the given start token.
-
- Args:
- flag_token: The starting token of the flag.
- error_handler: An optional error handler for errors occurring while
- parsing the doctype.
- """
- self.flag_token = flag_token
- self.flag_type = flag_token.string.strip().lstrip('@')
-
- # Extract type, if applicable.
- self.type = None
- self.jstype = None
- self.type_start_token = None
- self.type_end_token = None
- if self.flag_type in self.HAS_TYPE:
- brace = tokenutil.SearchUntil(flag_token, [Type.DOC_START_BRACE],
- Type.FLAG_ENDING_TYPES)
- if brace:
- end_token, contents = _GetMatchingEndBraceAndContents(brace)
- self.type = contents
- self.jstype = typeannotation.Parse(brace, end_token,
- error_handler)
- self.type_start_token = brace
- self.type_end_token = end_token
- elif (self.flag_type in self.TYPE_ONLY and
- flag_token.next.type not in Type.FLAG_ENDING_TYPES and
- flag_token.line_number == flag_token.next.line_number):
- # b/10407058. If the flag is expected to be followed by a type then
- # search for type in same line only. If no token after flag in same
- # line then conclude that no type is specified.
- self.type_start_token = flag_token.next
- self.type_end_token, self.type = _GetEndTokenAndContents(
- self.type_start_token)
- if self.type is not None:
- self.type = self.type.strip()
- self.jstype = typeannotation.Parse(flag_token, self.type_end_token,
- error_handler)
-
- # Extract name, if applicable.
- self.name_token = None
- self.name = None
- if self.flag_type in self.HAS_NAME:
- # Handle bad case, name could be immediately after flag token.
- self.name_token = _GetNextPartialIdentifierToken(flag_token)
-
- # Handle good case, if found token is after type start, look for
- # a identifier (substring to cover cases like [cnt] b/4197272) after
- # type end, since types contain identifiers.
- if (self.type and self.name_token and
- tokenutil.Compare(self.name_token, self.type_start_token) > 0):
- self.name_token = _GetNextPartialIdentifierToken(self.type_end_token)
-
- if self.name_token:
- self.name = self.name_token.string
-
- # Extract description, if applicable.
- self.description_start_token = None
- self.description_end_token = None
- self.description = None
- if self.flag_type in self.HAS_DESCRIPTION:
- search_start_token = flag_token
- if self.name_token and self.type_end_token:
- if tokenutil.Compare(self.type_end_token, self.name_token) > 0:
- search_start_token = self.type_end_token
- else:
- search_start_token = self.name_token
- elif self.name_token:
- search_start_token = self.name_token
- elif self.type:
- search_start_token = self.type_end_token
-
- interesting_token = tokenutil.Search(search_start_token,
- Type.FLAG_DESCRIPTION_TYPES | Type.FLAG_ENDING_TYPES)
- if interesting_token.type in Type.FLAG_DESCRIPTION_TYPES:
- self.description_start_token = interesting_token
- self.description_end_token, self.description = (
- _GetEndTokenAndContents(interesting_token))
-
- def HasType(self):
- """Returns whether this flag should have a type annotation."""
- return self.flag_type in self.HAS_TYPE
-
- def __repr__(self):
- return '<Flag: %s, type:%s>' % (self.flag_type, repr(self.jstype))
-
-
-class DocComment(object):
- """JavaScript doc comment object.
-
- Attributes:
- ordered_params: Ordered list of parameters documented.
- start_token: The token that starts the doc comment.
- end_token: The token that ends the doc comment.
- suppressions: Map of suppression type to the token that added it.
- """
- def __init__(self, start_token):
- """Create the doc comment object.
-
- Args:
- start_token: The first token in the doc comment.
- """
- self.__flags = []
- self.start_token = start_token
- self.end_token = None
- self.suppressions = {}
- self.invalidated = False
-
- @property
- def ordered_params(self):
- """Gives the list of parameter names as a list of strings."""
- params = []
- for flag in self.__flags:
- if flag.flag_type == 'param' and flag.name:
- params.append(flag.name)
- return params
-
- def Invalidate(self):
- """Indicate that the JSDoc is well-formed but we had problems parsing it.
-
- This is a short-circuiting mechanism so that we don't emit false
- positives about well-formed doc comments just because we don't support
- hot new syntaxes.
- """
- self.invalidated = True
-
- def IsInvalidated(self):
- """Test whether Invalidate() has been called."""
- return self.invalidated
-
- def AddSuppression(self, token):
- """Add a new error suppression flag.
-
- Args:
- token: The suppression flag token.
- """
- flag = token and token.attached_object
- if flag and flag.jstype:
- for suppression in flag.jstype.IterIdentifiers():
- self.suppressions[suppression] = token
-
- def SuppressionOnly(self):
- """Returns whether this comment contains only suppression flags."""
- if not self.__flags:
- return False
-
- for flag in self.__flags:
- if flag.flag_type != 'suppress':
- return False
-
- return True
-
- def AddFlag(self, flag):
- """Add a new document flag.
-
- Args:
- flag: DocFlag object.
- """
- self.__flags.append(flag)
-
- def InheritsDocumentation(self):
- """Test if the jsdoc implies documentation inheritance.
-
- Returns:
- True if documentation may be pulled off the superclass.
- """
- return self.HasFlag('inheritDoc') or self.HasFlag('override')
-
- def HasFlag(self, flag_type):
- """Test if the given flag has been set.
-
- Args:
- flag_type: The type of the flag to check.
-
- Returns:
- True if the flag is set.
- """
- for flag in self.__flags:
- if flag.flag_type == flag_type:
- return True
- return False
-
- def GetFlag(self, flag_type):
- """Gets the last flag of the given type.
-
- Args:
- flag_type: The type of the flag to get.
-
- Returns:
- The last instance of the given flag type in this doc comment.
- """
- for flag in reversed(self.__flags):
- if flag.flag_type == flag_type:
- return flag
-
- def GetDocFlags(self):
- """Return the doc flags for this comment."""
- return list(self.__flags)
-
- def _YieldDescriptionTokens(self):
- for token in self.start_token:
-
- if (token is self.end_token or
- token.type is javascripttokens.JavaScriptTokenType.DOC_FLAG or
- token.type not in javascripttokens.JavaScriptTokenType.COMMENT_TYPES):
- return
-
- if token.type not in [
- javascripttokens.JavaScriptTokenType.START_DOC_COMMENT,
- javascripttokens.JavaScriptTokenType.END_DOC_COMMENT,
- javascripttokens.JavaScriptTokenType.DOC_PREFIX]:
- yield token
-
- @property
- def description(self):
- return tokenutil.TokensToString(
- self._YieldDescriptionTokens())
-
- def GetTargetIdentifier(self):
- """Returns the identifier (as a string) that this is a comment for.
-
- Note that this uses method uses GetIdentifierForToken to get the full
- identifier, even if broken up by whitespace, newlines, or comments,
- and thus could be longer than GetTargetToken().string.
-
- Returns:
- The identifier for the token this comment is for.
- """
- token = self.GetTargetToken()
- if token:
- return tokenutil.GetIdentifierForToken(token)
-
- def GetTargetToken(self):
- """Get this comment's target token.
-
- Returns:
- The token that is the target of this comment, or None if there isn't one.
- """
-
- # File overviews describe the file, not a token.
- if self.HasFlag('fileoverview'):
- return
-
- skip_types = frozenset([
- Type.WHITESPACE,
- Type.BLANK_LINE,
- Type.START_PAREN])
-
- target_types = frozenset([
- Type.FUNCTION_NAME,
- Type.IDENTIFIER,
- Type.SIMPLE_LVALUE])
-
- token = self.end_token.next
- while token:
- if token.type in target_types:
- return token
-
- # Handles the case of a comment on "var foo = ...'
- if token.IsKeyword('var'):
- next_code_token = tokenutil.CustomSearch(
- token,
- lambda t: t.type not in Type.NON_CODE_TYPES)
-
- if (next_code_token and
- next_code_token.IsType(Type.SIMPLE_LVALUE)):
- return next_code_token
-
- return
-
- # Handles the case of a comment on "function foo () {}"
- if token.type is Type.FUNCTION_DECLARATION:
- next_code_token = tokenutil.CustomSearch(
- token,
- lambda t: t.type not in Type.NON_CODE_TYPES)
-
- if next_code_token.IsType(Type.FUNCTION_NAME):
- return next_code_token
-
- return
-
- # Skip types will end the search.
- if token.type not in skip_types:
- return
-
- token = token.next
-
- def CompareParameters(self, params):
- """Computes the edit distance and list from the function params to the docs.
-
- Uses the Levenshtein edit distance algorithm, with code modified from
- http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#Python
-
- Args:
- params: The parameter list for the function declaration.
-
- Returns:
- The edit distance, the edit list.
- """
- source_len, target_len = len(self.ordered_params), len(params)
- edit_lists = [[]]
- distance = [[]]
- for i in range(target_len+1):
- edit_lists[0].append(['I'] * i)
- distance[0].append(i)
-
- for j in range(1, source_len+1):
- edit_lists.append([['D'] * j])
- distance.append([j])
-
- for i in range(source_len):
- for j in range(target_len):
- cost = 1
- if self.ordered_params[i] == params[j]:
- cost = 0
-
- deletion = distance[i][j+1] + 1
- insertion = distance[i+1][j] + 1
- substitution = distance[i][j] + cost
-
- edit_list = None
- best = None
- if deletion <= insertion and deletion <= substitution:
- # Deletion is best.
- best = deletion
- edit_list = list(edit_lists[i][j+1])
- edit_list.append('D')
-
- elif insertion <= substitution:
- # Insertion is best.
- best = insertion
- edit_list = list(edit_lists[i+1][j])
- edit_list.append('I')
- edit_lists[i+1].append(edit_list)
-
- else:
- # Substitution is best.
- best = substitution
- edit_list = list(edit_lists[i][j])
- if cost:
- edit_list.append('S')
- else:
- edit_list.append('=')
-
- edit_lists[i+1].append(edit_list)
- distance[i+1].append(best)
-
- return distance[source_len][target_len], edit_lists[source_len][target_len]
-
- def __repr__(self):
- """Returns a string representation of this object.
-
- Returns:
- A string representation of this object.
- """
- return '<DocComment: %s, %s>' % (
- str(self.ordered_params), str(self.__flags))
-
-
-#
-# Helper methods used by DocFlag and DocComment to parse out flag information.
-#
-
-
-def _GetMatchingEndBraceAndContents(start_brace):
- """Returns the matching end brace and contents between the two braces.
-
- If any FLAG_ENDING_TYPE token is encountered before a matching end brace, then
- that token is used as the matching ending token. Contents will have all
- comment prefixes stripped out of them, and all comment prefixes in between the
- start and end tokens will be split out into separate DOC_PREFIX tokens.
-
- Args:
- start_brace: The DOC_START_BRACE token immediately before desired contents.
-
- Returns:
- The matching ending token (DOC_END_BRACE or FLAG_ENDING_TYPE) and a string
- of the contents between the matching tokens, minus any comment prefixes.
- """
- open_count = 1
- close_count = 0
- contents = []
-
- # We don't consider the start brace part of the type string.
- token = start_brace.next
- while open_count != close_count:
- if token.type == Type.DOC_START_BRACE:
- open_count += 1
- elif token.type == Type.DOC_END_BRACE:
- close_count += 1
-
- if token.type != Type.DOC_PREFIX:
- contents.append(token.string)
-
- if token.type in Type.FLAG_ENDING_TYPES:
- break
- token = token.next
-
- #Don't include the end token (end brace, end doc comment, etc.) in type.
- token = token.previous
- contents = contents[:-1]
-
- return token, ''.join(contents)
-
-
-def _GetNextPartialIdentifierToken(start_token):
- """Returns the first token having identifier as substring after a token.
-
- Searches each token after the start to see if it contains an identifier.
- If found, token is returned. If no identifier is found returns None.
- Search is abandoned when a FLAG_ENDING_TYPE token is found.
-
- Args:
- start_token: The token to start searching after.
-
- Returns:
- The token found containing identifier, None otherwise.
- """
- token = start_token.next
-
- while token and token.type not in Type.FLAG_ENDING_TYPES:
- match = javascripttokenizer.JavaScriptTokenizer.IDENTIFIER.search(
- token.string)
- if match is not None and token.type == Type.COMMENT:
- return token
-
- token = token.next
-
- return None
-
-
-def _GetEndTokenAndContents(start_token):
- """Returns last content token and all contents before FLAG_ENDING_TYPE token.
-
- Comment prefixes are split into DOC_PREFIX tokens and stripped from the
- returned contents.
-
- Args:
- start_token: The token immediately before the first content token.
-
- Returns:
- The last content token and a string of all contents including start and
- end tokens, with comment prefixes stripped.
- """
- iterator = start_token
- last_line = iterator.line_number
- last_token = None
- contents = ''
- doc_depth = 0
- while not iterator.type in Type.FLAG_ENDING_TYPES or doc_depth > 0:
- if (iterator.IsFirstInLine() and
- DocFlag.EMPTY_COMMENT_LINE.match(iterator.line)):
- # If we have a blank comment line, consider that an implicit
- # ending of the description. This handles a case like:
- #
- # * @return {boolean} True
- # *
- # * Note: This is a sentence.
- #
- # The note is not part of the @return description, but there was
- # no definitive ending token. Rather there was a line containing
- # only a doc comment prefix or whitespace.
- break
-
- # b/2983692
- # don't prematurely match against a @flag if inside a doc flag
- # need to think about what is the correct behavior for unterminated
- # inline doc flags
- if (iterator.type == Type.DOC_START_BRACE and
- iterator.next.type == Type.DOC_INLINE_FLAG):
- doc_depth += 1
- elif (iterator.type == Type.DOC_END_BRACE and
- doc_depth > 0):
- doc_depth -= 1
-
- if iterator.type in Type.FLAG_DESCRIPTION_TYPES:
- contents += iterator.string
- last_token = iterator
-
- iterator = iterator.next
- if iterator.line_number != last_line:
- contents += '\n'
- last_line = iterator.line_number
-
- end_token = last_token
- if DocFlag.EMPTY_STRING.match(contents):
- contents = None
- else:
- # Strip trailing newline.
- contents = contents[:-1]
-
- return end_token, contents
-
-
-class Function(object):
- """Data about a JavaScript function.
-
- Attributes:
- block_depth: Block depth the function began at.
- doc: The DocComment associated with the function.
- has_return: If the function has a return value.
- has_this: If the function references the 'this' object.
- is_assigned: If the function is part of an assignment.
- is_constructor: If the function is a constructor.
- name: The name of the function, whether given in the function keyword or
- as the lvalue the function is assigned to.
- start_token: First token of the function (the function' keyword token).
- end_token: Last token of the function (the closing '}' token).
- parameters: List of parameter names.
- """
-
- def __init__(self, block_depth, is_assigned, doc, name):
- self.block_depth = block_depth
- self.is_assigned = is_assigned
- self.is_constructor = doc and doc.HasFlag('constructor')
- self.is_interface = doc and doc.HasFlag('interface')
- self.has_return = False
- self.has_throw = False
- self.has_this = False
- self.name = name
- self.doc = doc
- self.start_token = None
- self.end_token = None
- self.parameters = None
-
-
-class StateTracker(object):
- """EcmaScript state tracker.
-
- Tracks block depth, function names, etc. within an EcmaScript token stream.
- """
-
- OBJECT_LITERAL = 'o'
- CODE = 'c'
-
- def __init__(self, doc_flag=DocFlag):
- """Initializes a JavaScript token stream state tracker.
-
- Args:
- doc_flag: An optional custom DocFlag used for validating
- documentation flags.
- """
- self._doc_flag = doc_flag
- self.Reset()
-
- def Reset(self):
- """Resets the state tracker to prepare for processing a new page."""
- self._block_depth = 0
- self._is_block_close = False
- self._paren_depth = 0
- self._function_stack = []
- self._functions_by_name = {}
- self._last_comment = None
- self._doc_comment = None
- self._cumulative_params = None
- self._block_types = []
- self._last_non_space_token = None
- self._last_line = None
- self._first_token = None
- self._documented_identifiers = set()
- self._variables_in_scope = []
-
- def DocFlagPass(self, start_token, error_handler):
- """Parses doc flags.
-
- This pass needs to be executed before the aliaspass and we don't want to do
- a full-blown statetracker dry run for these.
-
- Args:
- start_token: The token at which to start iterating
- error_handler: An error handler for error reporting.
- """
- if not start_token:
- return
- doc_flag_types = (Type.DOC_FLAG, Type.DOC_INLINE_FLAG)
- for token in start_token:
- if token.type in doc_flag_types:
- token.attached_object = self._doc_flag(token, error_handler)
-
- def InFunction(self):
- """Returns true if the current token is within a function.
-
- Returns:
- True if the current token is within a function.
- """
- return bool(self._function_stack)
-
- def InConstructor(self):
- """Returns true if the current token is within a constructor.
-
- Returns:
- True if the current token is within a constructor.
- """
- return self.InFunction() and self._function_stack[-1].is_constructor
-
- def InInterfaceMethod(self):
- """Returns true if the current token is within an interface method.
-
- Returns:
- True if the current token is within an interface method.
- """
- if self.InFunction():
- if self._function_stack[-1].is_interface:
- return True
- else:
- name = self._function_stack[-1].name
- prototype_index = name.find('.prototype.')
- if prototype_index != -1:
- class_function_name = name[0:prototype_index]
- if (class_function_name in self._functions_by_name and
- self._functions_by_name[class_function_name].is_interface):
- return True
-
- return False
-
- def InTopLevelFunction(self):
- """Returns true if the current token is within a top level function.
-
- Returns:
- True if the current token is within a top level function.
- """
- return len(self._function_stack) == 1 and self.InTopLevel()
-
- def InAssignedFunction(self):
- """Returns true if the current token is within a function variable.
-
- Returns:
- True if if the current token is within a function variable
- """
- return self.InFunction() and self._function_stack[-1].is_assigned
-
- def IsFunctionOpen(self):
- """Returns true if the current token is a function block open.
-
- Returns:
- True if the current token is a function block open.
- """
- return (self._function_stack and
- self._function_stack[-1].block_depth == self._block_depth - 1)
-
- def IsFunctionClose(self):
- """Returns true if the current token is a function block close.
-
- Returns:
- True if the current token is a function block close.
- """
- return (self._function_stack and
- self._function_stack[-1].block_depth == self._block_depth)
-
- def InBlock(self):
- """Returns true if the current token is within a block.
-
- Returns:
- True if the current token is within a block.
- """
- return bool(self._block_depth)
-
- def IsBlockClose(self):
- """Returns true if the current token is a block close.
-
- Returns:
- True if the current token is a block close.
- """
- return self._is_block_close
-
- def InObjectLiteral(self):
- """Returns true if the current token is within an object literal.
-
- Returns:
- True if the current token is within an object literal.
- """
- return self._block_depth and self._block_types[-1] == self.OBJECT_LITERAL
-
- def InObjectLiteralDescendant(self):
- """Returns true if the current token has an object literal ancestor.
-
- Returns:
- True if the current token has an object literal ancestor.
- """
- return self.OBJECT_LITERAL in self._block_types
-
- def InParentheses(self):
- """Returns true if the current token is within parentheses.
-
- Returns:
- True if the current token is within parentheses.
- """
- return bool(self._paren_depth)
-
- def ParenthesesDepth(self):
- """Returns the number of parens surrounding the token.
-
- Returns:
- The number of parenthesis surrounding the token.
- """
- return self._paren_depth
-
- def BlockDepth(self):
- """Returns the number of blocks in which the token is nested.
-
- Returns:
- The number of blocks in which the token is nested.
- """
- return self._block_depth
-
- def FunctionDepth(self):
- """Returns the number of functions in which the token is nested.
-
- Returns:
- The number of functions in which the token is nested.
- """
- return len(self._function_stack)
-
- def InTopLevel(self):
- """Whether we are at the top level in the class.
-
- This function call is language specific. In some languages like
- JavaScript, a function is top level if it is not inside any parenthesis.
- In languages such as ActionScript, a function is top level if it is directly
- within a class.
- """
- raise TypeError('Abstract method InTopLevel not implemented')
-
- def GetBlockType(self, token):
- """Determine the block type given a START_BLOCK token.
-
- Code blocks come after parameters, keywords like else, and closing parens.
-
- Args:
- token: The current token. Can be assumed to be type START_BLOCK.
- Returns:
- Code block type for current token.
- """
- raise TypeError('Abstract method GetBlockType not implemented')
-
- def GetParams(self):
- """Returns the accumulated input params as an array.
-
- In some EcmasSript languages, input params are specified like
- (param:Type, param2:Type2, ...)
- in other they are specified just as
- (param, param2)
- We handle both formats for specifying parameters here and leave
- it to the compilers for each language to detect compile errors.
- This allows more code to be reused between lint checkers for various
- EcmaScript languages.
-
- Returns:
- The accumulated input params as an array.
- """
- params = []
- if self._cumulative_params:
- params = re.compile(r'\s+').sub('', self._cumulative_params).split(',')
- # Strip out the type from parameters of the form name:Type.
- params = map(lambda param: param.split(':')[0], params)
-
- return params
-
- def GetLastComment(self):
- """Return the last plain comment that could be used as documentation.
-
- Returns:
- The last plain comment that could be used as documentation.
- """
- return self._last_comment
-
- def GetDocComment(self):
- """Return the most recent applicable documentation comment.
-
- Returns:
- The last applicable documentation comment.
- """
- return self._doc_comment
-
- def HasDocComment(self, identifier):
- """Returns whether the identifier has been documented yet.
-
- Args:
- identifier: The identifier.
-
- Returns:
- Whether the identifier has been documented yet.
- """
- return identifier in self._documented_identifiers
-
- def InDocComment(self):
- """Returns whether the current token is in a doc comment.
-
- Returns:
- Whether the current token is in a doc comment.
- """
- return self._doc_comment and self._doc_comment.end_token is None
-
- def GetDocFlag(self):
- """Returns the current documentation flags.
-
- Returns:
- The current documentation flags.
- """
- return self._doc_flag
-
- def IsTypeToken(self, t):
- if self.InDocComment() and t.type not in (Type.START_DOC_COMMENT,
- Type.DOC_FLAG, Type.DOC_INLINE_FLAG, Type.DOC_PREFIX):
- f = tokenutil.SearchUntil(t, [Type.DOC_FLAG], [Type.START_DOC_COMMENT],
- None, True)
- if (f and f.attached_object.type_start_token is not None and
- f.attached_object.type_end_token is not None):
- return (tokenutil.Compare(t, f.attached_object.type_start_token) > 0 and
- tokenutil.Compare(t, f.attached_object.type_end_token) < 0)
- return False
-
- def GetFunction(self):
- """Return the function the current code block is a part of.
-
- Returns:
- The current Function object.
- """
- if self._function_stack:
- return self._function_stack[-1]
-
- def GetBlockDepth(self):
- """Return the block depth.
-
- Returns:
- The current block depth.
- """
- return self._block_depth
-
- def GetLastNonSpaceToken(self):
- """Return the last non whitespace token."""
- return self._last_non_space_token
-
- def GetLastLine(self):
- """Return the last line."""
- return self._last_line
-
- def GetFirstToken(self):
- """Return the very first token in the file."""
- return self._first_token
-
- def IsVariableInScope(self, token_string):
- """Checks if string is variable in current scope.
-
- For given string it checks whether the string is a defined variable
- (including function param) in current state.
-
- E.g. if variables defined (variables in current scope) is docs
- then docs, docs.length etc will be considered as variable in current
- scope. This will help in avoding extra goog.require for variables.
-
- Args:
- token_string: String to check if its is a variable in current scope.
-
- Returns:
- true if given string is a variable in current scope.
- """
- for variable in self._variables_in_scope:
- if (token_string == variable
- or token_string.startswith(variable + '.')):
- return True
-
- return False
-
- def HandleToken(self, token, last_non_space_token):
- """Handles the given token and updates state.
-
- Args:
- token: The token to handle.
- last_non_space_token:
- """
- self._is_block_close = False
-
- if not self._first_token:
- self._first_token = token
-
- # Track block depth.
- type = token.type
- if type == Type.START_BLOCK:
- self._block_depth += 1
-
- # Subclasses need to handle block start very differently because
- # whether a block is a CODE or OBJECT_LITERAL block varies significantly
- # by language.
- self._block_types.append(self.GetBlockType(token))
-
- # When entering a function body, record its parameters.
- if self.InFunction():
- function = self._function_stack[-1]
- if self._block_depth == function.block_depth + 1:
- function.parameters = self.GetParams()
-
- # Track block depth.
- elif type == Type.END_BLOCK:
- self._is_block_close = not self.InObjectLiteral()
- self._block_depth -= 1
- self._block_types.pop()
-
- # Track parentheses depth.
- elif type == Type.START_PAREN:
- self._paren_depth += 1
-
- # Track parentheses depth.
- elif type == Type.END_PAREN:
- self._paren_depth -= 1
-
- elif type == Type.COMMENT:
- self._last_comment = token.string
-
- elif type == Type.START_DOC_COMMENT:
- self._last_comment = None
- self._doc_comment = DocComment(token)
-
- elif type == Type.END_DOC_COMMENT:
- self._doc_comment.end_token = token
-
- elif type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
- # Don't overwrite flags if they were already parsed in a previous pass.
- if token.attached_object is None:
- flag = self._doc_flag(token)
- token.attached_object = flag
- else:
- flag = token.attached_object
- self._doc_comment.AddFlag(flag)
-
- if flag.flag_type == 'suppress':
- self._doc_comment.AddSuppression(token)
-
- elif type == Type.FUNCTION_DECLARATION:
- last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, None,
- True)
- doc = None
- # Only top-level functions are eligible for documentation.
- if self.InTopLevel():
- doc = self._doc_comment
-
- name = ''
- is_assigned = last_code and (last_code.IsOperator('=') or
- last_code.IsOperator('||') or last_code.IsOperator('&&') or
- (last_code.IsOperator(':') and not self.InObjectLiteral()))
- if is_assigned:
- # TODO(robbyw): This breaks for x[2] = ...
- # Must use loop to find full function name in the case of line-wrapped
- # declarations (bug 1220601) like:
- # my.function.foo.
- # bar = function() ...
- identifier = tokenutil.Search(last_code, Type.SIMPLE_LVALUE, None, True)
- while identifier and tokenutil.IsIdentifierOrDot(identifier):
- name = identifier.string + name
- # Traverse behind us, skipping whitespace and comments.
- while True:
- identifier = identifier.previous
- if not identifier or not identifier.type in Type.NON_CODE_TYPES:
- break
-
- else:
- next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
- while next_token and next_token.IsType(Type.FUNCTION_NAME):
- name += next_token.string
- next_token = tokenutil.Search(next_token, Type.FUNCTION_NAME, 2)
-
- function = Function(self._block_depth, is_assigned, doc, name)
- function.start_token = token
-
- self._function_stack.append(function)
- self._functions_by_name[name] = function
-
- # Add a delimiter in stack for scope variables to define start of
- # function. This helps in popping variables of this function when
- # function declaration ends.
- self._variables_in_scope.append('')
-
- elif type == Type.START_PARAMETERS:
- self._cumulative_params = ''
-
- elif type == Type.PARAMETERS:
- self._cumulative_params += token.string
- self._variables_in_scope.extend(self.GetParams())
-
- elif type == Type.KEYWORD and token.string == 'return':
- next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
- if not next_token.IsType(Type.SEMICOLON):
- function = self.GetFunction()
- if function:
- function.has_return = True
-
- elif type == Type.KEYWORD and token.string == 'throw':
- function = self.GetFunction()
- if function:
- function.has_throw = True
-
- elif type == Type.KEYWORD and token.string == 'var':
- function = self.GetFunction()
- next_token = tokenutil.Search(token, [Type.IDENTIFIER,
- Type.SIMPLE_LVALUE])
-
- if next_token:
- if next_token.type == Type.SIMPLE_LVALUE:
- self._variables_in_scope.append(next_token.values['identifier'])
- else:
- self._variables_in_scope.append(next_token.string)
-
- elif type == Type.SIMPLE_LVALUE:
- identifier = token.values['identifier']
- jsdoc = self.GetDocComment()
- if jsdoc:
- self._documented_identifiers.add(identifier)
-
- self._HandleIdentifier(identifier, True)
-
- elif type == Type.IDENTIFIER:
- self._HandleIdentifier(token.string, False)
-
- # Detect documented non-assignments.
- next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
- if next_token and next_token.IsType(Type.SEMICOLON):
- if (self._last_non_space_token and
- self._last_non_space_token.IsType(Type.END_DOC_COMMENT)):
- self._documented_identifiers.add(token.string)
-
- def _HandleIdentifier(self, identifier, is_assignment):
- """Process the given identifier.
-
- Currently checks if it references 'this' and annotates the function
- accordingly.
-
- Args:
- identifier: The identifer to process.
- is_assignment: Whether the identifer is being written to.
- """
- if identifier == 'this' or identifier.startswith('this.'):
- function = self.GetFunction()
- if function:
- function.has_this = True
-
- def HandleAfterToken(self, token):
- """Handle updating state after a token has been checked.
-
- This function should be used for destructive state changes such as
- deleting a tracked object.
-
- Args:
- token: The token to handle.
- """
- type = token.type
- if type == Type.SEMICOLON or type == Type.END_PAREN or (
- type == Type.END_BRACKET and
- self._last_non_space_token.type not in (
- Type.SINGLE_QUOTE_STRING_END, Type.DOUBLE_QUOTE_STRING_END)):
- # We end on any numeric array index, but keep going for string based
- # array indices so that we pick up manually exported identifiers.
- self._doc_comment = None
- self._last_comment = None
-
- elif type == Type.END_BLOCK:
- self._doc_comment = None
- self._last_comment = None
-
- if self.InFunction() and self.IsFunctionClose():
- # TODO(robbyw): Detect the function's name for better errors.
- function = self._function_stack.pop()
- function.end_token = token
-
- # Pop all variables till delimiter ('') those were defined in the
- # function being closed so make them out of scope.
- while self._variables_in_scope and self._variables_in_scope[-1]:
- self._variables_in_scope.pop()
-
- # Pop delimiter
- if self._variables_in_scope:
- self._variables_in_scope.pop()
-
- elif type == Type.END_PARAMETERS and self._doc_comment:
- self._doc_comment = None
- self._last_comment = None
-
- if not token.IsAnyType(Type.WHITESPACE, Type.BLANK_LINE):
- self._last_non_space_token = token
-
- self._last_line = token.line
diff --git a/tools/closure_linter/build/lib/closure_linter/statetracker_test.py b/tools/closure_linter/build/lib/closure_linter/statetracker_test.py
deleted file mode 100644
index 494dc642fc..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/statetracker_test.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unit tests for the statetracker module."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = ('nnaze@google.com (Nathan Naze)')
-
-
-
-import unittest as googletest
-
-from closure_linter import javascripttokens
-from closure_linter import statetracker
-from closure_linter import testutil
-
-
-class _FakeDocFlag(object):
-
- def __repr__(self):
- return '@%s %s' % (self.flag_type, self.name)
-
-
-class IdentifierTest(googletest.TestCase):
-
- def testJustIdentifier(self):
- a = javascripttokens.JavaScriptToken(
- 'abc', javascripttokens.JavaScriptTokenType.IDENTIFIER, 'abc', 1)
-
- st = statetracker.StateTracker()
- st.HandleToken(a, None)
-
-
-class DocCommentTest(googletest.TestCase):
-
- @staticmethod
- def _MakeDocFlagFake(flag_type, name=None):
- flag = _FakeDocFlag()
- flag.flag_type = flag_type
- flag.name = name
- return flag
-
- def testDocFlags(self):
- comment = statetracker.DocComment(None)
-
- a = self._MakeDocFlagFake('param', 'foo')
- comment.AddFlag(a)
-
- b = self._MakeDocFlagFake('param', '')
- comment.AddFlag(b)
-
- c = self._MakeDocFlagFake('param', 'bar')
- comment.AddFlag(c)
-
- self.assertEquals(
- ['foo', 'bar'],
- comment.ordered_params)
-
- self.assertEquals(
- [a, b, c],
- comment.GetDocFlags())
-
- def testInvalidate(self):
- comment = statetracker.DocComment(None)
-
- self.assertFalse(comment.invalidated)
- self.assertFalse(comment.IsInvalidated())
-
- comment.Invalidate()
-
- self.assertTrue(comment.invalidated)
- self.assertTrue(comment.IsInvalidated())
-
- def testSuppressionOnly(self):
- comment = statetracker.DocComment(None)
-
- self.assertFalse(comment.SuppressionOnly())
- comment.AddFlag(self._MakeDocFlagFake('suppress'))
- self.assertTrue(comment.SuppressionOnly())
- comment.AddFlag(self._MakeDocFlagFake('foo'))
- self.assertFalse(comment.SuppressionOnly())
-
- def testRepr(self):
- comment = statetracker.DocComment(None)
- comment.AddFlag(self._MakeDocFlagFake('param', 'foo'))
- comment.AddFlag(self._MakeDocFlagFake('param', 'bar'))
-
- self.assertEquals(
- '<DocComment: [\'foo\', \'bar\'], [@param foo, @param bar]>',
- repr(comment))
-
- def testDocFlagParam(self):
- comment = self._ParseComment("""
- /**
- * @param {string} [name] Name of customer.
- */""")
- flag = comment.GetFlag('param')
- self.assertEquals('string', flag.type)
- self.assertEquals('string', flag.jstype.ToString())
- self.assertEquals('[name]', flag.name)
-
- def _ParseComment(self, script):
- """Parse a script that contains one comment and return it."""
- _, comments = testutil.ParseFunctionsAndComments(script)
- self.assertEquals(1, len(comments))
- return comments[0]
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/strict_test.py b/tools/closure_linter/build/lib/closure_linter/strict_test.py
deleted file mode 100644
index 2634456874..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/strict_test.py
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2013 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tests for gjslint --strict.
-
-Tests errors that can be thrown by gjslint when in strict mode.
-"""
-
-
-
-import unittest
-
-import gflags as flags
-import unittest as googletest
-
-from closure_linter import errors
-from closure_linter import runner
-from closure_linter.common import erroraccumulator
-
-flags.FLAGS.strict = True
-
-
-class StrictTest(unittest.TestCase):
- """Tests scenarios where strict generates warnings."""
-
- def testUnclosedString(self):
- """Tests warnings are reported when nothing is disabled.
-
- b/11450054.
- """
- original = [
- 'bug = function() {',
- ' (\'foo\'\');',
- '};',
- '',
- ]
-
- expected = [errors.FILE_DOES_NOT_PARSE, errors.MULTI_LINE_STRING,
- errors.FILE_IN_BLOCK]
- self._AssertErrors(original, expected)
-
- def _AssertErrors(self, original, expected_errors):
- """Asserts that the error fixer corrects original to expected."""
-
- # Trap gjslint's output parse it to get messages added.
- error_accumulator = erroraccumulator.ErrorAccumulator()
- runner.Run('testing.js', error_accumulator, source=original)
- error_nums = [e.code for e in error_accumulator.GetErrors()]
-
- error_nums.sort()
- expected_errors.sort()
- self.assertListEqual(error_nums, expected_errors)
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/testutil.py b/tools/closure_linter/build/lib/closure_linter/testutil.py
deleted file mode 100644
index f7084ee37b..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/testutil.py
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Utility functions for testing gjslint components."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = ('nnaze@google.com (Nathan Naze)')
-
-import StringIO
-
-from closure_linter import ecmametadatapass
-from closure_linter import javascriptstatetracker
-from closure_linter import javascripttokenizer
-
-
-def TokenizeSource(source):
- """Convert a source into a string of tokens.
-
- Args:
- source: A source file as a string or file-like object (iterates lines).
-
- Returns:
- The first token of the resulting token stream.
- """
-
- if isinstance(source, basestring):
- source = StringIO.StringIO(source)
-
- tokenizer = javascripttokenizer.JavaScriptTokenizer()
- return tokenizer.TokenizeFile(source)
-
-
-def TokenizeSourceAndRunEcmaPass(source):
- """Tokenize a source and run the EcmaMetaDataPass on it.
-
- Args:
- source: A source file as a string or file-like object (iterates lines).
-
- Returns:
- The first token of the resulting token stream.
- """
- start_token = TokenizeSource(source)
- ecma_pass = ecmametadatapass.EcmaMetaDataPass()
- ecma_pass.Process(start_token)
- return start_token
-
-
-def ParseFunctionsAndComments(source, error_handler=None):
- """Run the tokenizer and tracker and return comments and functions found.
-
- Args:
- source: A source file as a string or file-like object (iterates lines).
- error_handler: An error handler.
-
- Returns:
- The functions and comments as a tuple.
- """
- start_token = TokenizeSourceAndRunEcmaPass(source)
-
- tracker = javascriptstatetracker.JavaScriptStateTracker()
- if error_handler is not None:
- tracker.DocFlagPass(start_token, error_handler)
-
- functions = []
- comments = []
- for token in start_token:
- tracker.HandleToken(token, tracker.GetLastNonSpaceToken())
-
- function = tracker.GetFunction()
- if function and function not in functions:
- functions.append(function)
-
- comment = tracker.GetDocComment()
- if comment and comment not in comments:
- comments.append(comment)
-
- tracker.HandleAfterToken(token)
-
- return functions, comments
diff --git a/tools/closure_linter/build/lib/closure_linter/tokenutil.py b/tools/closure_linter/build/lib/closure_linter/tokenutil.py
deleted file mode 100644
index 11e3ccc68b..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/tokenutil.py
+++ /dev/null
@@ -1,697 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Token utility functions."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-import copy
-import StringIO
-
-from closure_linter.common import tokens
-from closure_linter.javascripttokens import JavaScriptToken
-from closure_linter.javascripttokens import JavaScriptTokenType
-
-# Shorthand
-Type = tokens.TokenType
-
-
-def GetFirstTokenInSameLine(token):
- """Returns the first token in the same line as token.
-
- Args:
- token: Any token in the line.
-
- Returns:
- The first token in the same line as token.
- """
- while not token.IsFirstInLine():
- token = token.previous
- return token
-
-
-def GetFirstTokenInPreviousLine(token):
- """Returns the first token in the previous line as token.
-
- Args:
- token: Any token in the line.
-
- Returns:
- The first token in the previous line as token, or None if token is on the
- first line.
- """
- first_in_line = GetFirstTokenInSameLine(token)
- if first_in_line.previous:
- return GetFirstTokenInSameLine(first_in_line.previous)
-
- return None
-
-
-def GetLastTokenInSameLine(token):
- """Returns the last token in the same line as token.
-
- Args:
- token: Any token in the line.
-
- Returns:
- The last token in the same line as token.
- """
- while not token.IsLastInLine():
- token = token.next
- return token
-
-
-def GetAllTokensInSameLine(token):
- """Returns all tokens in the same line as the given token.
-
- Args:
- token: Any token in the line.
-
- Returns:
- All tokens on the same line as the given token.
- """
- first_token = GetFirstTokenInSameLine(token)
- last_token = GetLastTokenInSameLine(token)
-
- tokens_in_line = []
- while first_token != last_token:
- tokens_in_line.append(first_token)
- first_token = first_token.next
- tokens_in_line.append(last_token)
-
- return tokens_in_line
-
-
-def CustomSearch(start_token, func, end_func=None, distance=None,
- reverse=False):
- """Returns the first token where func is True within distance of this token.
-
- Args:
- start_token: The token to start searching from
- func: The function to call to test a token for applicability
- end_func: The function to call to test a token to determine whether to abort
- the search.
- distance: The number of tokens to look through before failing search. Must
- be positive. If unspecified, will search until the end of the token
- chain
- reverse: When true, search the tokens before this one instead of the tokens
- after it
-
- Returns:
- The first token matching func within distance of this token, or None if no
- such token is found.
- """
- token = start_token
- if reverse:
- while token and (distance is None or distance > 0):
- previous = token.previous
- if previous:
- if func(previous):
- return previous
- if end_func and end_func(previous):
- return None
-
- token = previous
- if distance is not None:
- distance -= 1
-
- else:
- while token and (distance is None or distance > 0):
- next_token = token.next
- if next_token:
- if func(next_token):
- return next_token
- if end_func and end_func(next_token):
- return None
-
- token = next_token
- if distance is not None:
- distance -= 1
-
- return None
-
-
-def Search(start_token, token_types, distance=None, reverse=False):
- """Returns the first token of type in token_types within distance.
-
- Args:
- start_token: The token to start searching from
- token_types: The allowable types of the token being searched for
- distance: The number of tokens to look through before failing search. Must
- be positive. If unspecified, will search until the end of the token
- chain
- reverse: When true, search the tokens before this one instead of the tokens
- after it
-
- Returns:
- The first token of any type in token_types within distance of this token, or
- None if no such token is found.
- """
- return CustomSearch(start_token, lambda token: token.IsAnyType(token_types),
- None, distance, reverse)
-
-
-def SearchExcept(start_token, token_types, distance=None, reverse=False):
- """Returns the first token not of any type in token_types within distance.
-
- Args:
- start_token: The token to start searching from
- token_types: The unallowable types of the token being searched for
- distance: The number of tokens to look through before failing search. Must
- be positive. If unspecified, will search until the end of the token
- chain
- reverse: When true, search the tokens before this one instead of the tokens
- after it
-
- Returns:
- The first token of any type in token_types within distance of this token, or
- None if no such token is found.
- """
- return CustomSearch(start_token,
- lambda token: not token.IsAnyType(token_types),
- None, distance, reverse)
-
-
-def SearchUntil(start_token, token_types, end_types, distance=None,
- reverse=False):
- """Returns the first token of type in token_types before a token of end_type.
-
- Args:
- start_token: The token to start searching from.
- token_types: The allowable types of the token being searched for.
- end_types: Types of tokens to abort search if we find.
- distance: The number of tokens to look through before failing search. Must
- be positive. If unspecified, will search until the end of the token
- chain
- reverse: When true, search the tokens before this one instead of the tokens
- after it
-
- Returns:
- The first token of any type in token_types within distance of this token
- before any tokens of type in end_type, or None if no such token is found.
- """
- return CustomSearch(start_token, lambda token: token.IsAnyType(token_types),
- lambda token: token.IsAnyType(end_types),
- distance, reverse)
-
-
-def DeleteToken(token):
- """Deletes the given token from the linked list.
-
- Args:
- token: The token to delete
- """
- # When deleting a token, we do not update the deleted token itself to make
- # sure the previous and next pointers are still pointing to tokens which are
- # not deleted. Also it is very hard to keep track of all previously deleted
- # tokens to update them when their pointers become invalid. So we add this
- # flag that any token linked list iteration logic can skip deleted node safely
- # when its current token is deleted.
- token.is_deleted = True
- if token.previous:
- token.previous.next = token.next
-
- if token.next:
- token.next.previous = token.previous
-
- following_token = token.next
- while following_token and following_token.metadata.last_code == token:
- following_token.metadata.last_code = token.metadata.last_code
- following_token = following_token.next
-
-
-def DeleteTokens(token, token_count):
- """Deletes the given number of tokens starting with the given token.
-
- Args:
- token: The token to start deleting at.
- token_count: The total number of tokens to delete.
- """
- for i in xrange(1, token_count):
- DeleteToken(token.next)
- DeleteToken(token)
-
-
-def InsertTokenBefore(new_token, token):
- """Insert new_token before token.
-
- Args:
- new_token: A token to be added to the stream
- token: A token already in the stream
- """
- new_token.next = token
- new_token.previous = token.previous
-
- new_token.metadata = copy.copy(token.metadata)
-
- if new_token.IsCode():
- old_last_code = token.metadata.last_code
- following_token = token
- while (following_token and
- following_token.metadata.last_code == old_last_code):
- following_token.metadata.last_code = new_token
- following_token = following_token.next
-
- token.previous = new_token
- if new_token.previous:
- new_token.previous.next = new_token
-
- if new_token.start_index is None:
- if new_token.line_number == token.line_number:
- new_token.start_index = token.start_index
- else:
- previous_token = new_token.previous
- if previous_token:
- new_token.start_index = (previous_token.start_index +
- len(previous_token.string))
- else:
- new_token.start_index = 0
-
- iterator = new_token.next
- while iterator and iterator.line_number == new_token.line_number:
- iterator.start_index += len(new_token.string)
- iterator = iterator.next
-
-
-def InsertTokenAfter(new_token, token):
- """Insert new_token after token.
-
- Args:
- new_token: A token to be added to the stream
- token: A token already in the stream
- """
- new_token.previous = token
- new_token.next = token.next
-
- new_token.metadata = copy.copy(token.metadata)
-
- if token.IsCode():
- new_token.metadata.last_code = token
-
- if new_token.IsCode():
- following_token = token.next
- while following_token and following_token.metadata.last_code == token:
- following_token.metadata.last_code = new_token
- following_token = following_token.next
-
- token.next = new_token
- if new_token.next:
- new_token.next.previous = new_token
-
- if new_token.start_index is None:
- if new_token.line_number == token.line_number:
- new_token.start_index = token.start_index + len(token.string)
- else:
- new_token.start_index = 0
-
- iterator = new_token.next
- while iterator and iterator.line_number == new_token.line_number:
- iterator.start_index += len(new_token.string)
- iterator = iterator.next
-
-
-def InsertTokensAfter(new_tokens, token):
- """Insert multiple tokens after token.
-
- Args:
- new_tokens: An array of tokens to be added to the stream
- token: A token already in the stream
- """
- # TODO(user): It would be nicer to have InsertTokenAfter defer to here
- # instead of vice-versa.
- current_token = token
- for new_token in new_tokens:
- InsertTokenAfter(new_token, current_token)
- current_token = new_token
-
-
-def InsertSpaceTokenAfter(token):
- """Inserts a space token after the given token.
-
- Args:
- token: The token to insert a space token after
-
- Returns:
- A single space token
- """
- space_token = JavaScriptToken(' ', Type.WHITESPACE, token.line,
- token.line_number)
- InsertTokenAfter(space_token, token)
-
-
-def InsertBlankLineAfter(token):
- """Inserts a blank line after the given token.
-
- Args:
- token: The token to insert a blank line after
-
- Returns:
- A single space token
- """
- blank_token = JavaScriptToken('', Type.BLANK_LINE, '',
- token.line_number + 1)
- InsertLineAfter(token, [blank_token])
-
-
-def InsertLineAfter(token, new_tokens):
- """Inserts a new line consisting of new_tokens after the given token.
-
- Args:
- token: The token to insert after.
- new_tokens: The tokens that will make up the new line.
- """
- insert_location = token
- for new_token in new_tokens:
- InsertTokenAfter(new_token, insert_location)
- insert_location = new_token
-
- # Update all subsequent line numbers.
- next_token = new_tokens[-1].next
- while next_token:
- next_token.line_number += 1
- next_token = next_token.next
-
-
def SplitToken(token, position):
  """Splits the token into two tokens at position.

  Args:
    token: The token to split
    position: The position to split at. Will be the beginning of second token.

  Returns:
    The new second token.
  """
  # Keep the head in the existing token; the tail becomes a new token
  # inserted directly after it.
  head = token.string[:position]
  tail = token.string[position:]
  token.string = head

  second = JavaScriptToken(tail, token.type, token.line, token.line_number)
  InsertTokenAfter(second, token)
  return second
-
-
def Compare(token1, token2):
  """Compares two tokens and determines their relative order.

  Args:
    token1: The first token to compare.
    token2: The second token to compare.

  Returns:
    A negative integer, zero, or a positive integer as the first token is
    before, equal, or after the second in the token stream.
  """
  # Order primarily by line; break ties by the column within the line.
  line_delta = token1.line_number - token2.line_number
  if line_delta != 0:
    return line_delta
  return token1.start_index - token2.start_index
-
-
def GoogScopeOrNoneFromStartBlock(token):
  """Determines if the given START_BLOCK is part of a goog.scope statement.

  Args:
    token: A token of type START_BLOCK.

  Returns:
    The goog.scope function call token, or None if such call doesn't exist.
  """
  if token.type != JavaScriptTokenType.START_BLOCK:
    return None

  # Search for a goog.scope statement, which will be 5 tokens before the
  # block. Illustration of the tokens found prior to the start block:
  # goog.scope(function() {
  # 5     4    3         21 ^
  maybe_goog_scope = token
  # Fix: use range() instead of the Python-2-only xrange(), which raises
  # NameError under Python 3.
  for _ in range(5):
    if maybe_goog_scope is None:
      return None
    maybe_goog_scope = maybe_goog_scope.previous

  if maybe_goog_scope and maybe_goog_scope.string == 'goog.scope':
    return maybe_goog_scope
  return None
-
-
def GetTokenRange(start_token, end_token):
  """Returns a list of tokens between the two given, inclusive.

  Args:
    start_token: Start token in the range.
    end_token: End token in the range.

  Returns:
    A list of tokens, in order, from start_token to end_token (including start
    and end). Returns None if the tokens do not describe a valid range.
  """
  token_range = []
  current = start_token

  while current:
    token_range.append(current)
    if current == end_token:
      return token_range
    current = current.next

  # Walked off the end of the stream without meeting end_token.
  return None
-
-
def TokensToString(token_iterable):
  """Convert a number of tokens into a string.

  Newlines will be inserted whenever the line_number of two neighboring
  strings differ.

  Args:
    token_iterable: The tokens to turn to a string.

  Returns:
    A string representation of the given tokens.
  """
  # Collect fragments in a list and join once at the end. This removes the
  # dependency on the Python-2-only StringIO module (absent in Python 3)
  # and avoids quadratic string building.
  pieces = []
  token_list = list(token_iterable)
  if not token_list:
    return ''

  line_number = token_list[0].line_number

  for token in token_list:

    # Token lies on a later line: emit one newline per skipped line.
    while line_number < token.line_number:
      line_number += 1
      pieces.append('\n')

    # Token lies on an earlier line (tokens not in stream order): separate
    # with a single newline and resynchronize the line counter.
    if line_number > token.line_number:
      line_number = token.line_number
      pieces.append('\n')

    pieces.append(token.string)

  return ''.join(pieces)
-
-
def GetPreviousCodeToken(token):
  """Returns the code token before the specified token.

  Args:
    token: A token.

  Returns:
    The code token before the specified token or None if no such token
    exists.
  """
  # Walk backwards, skipping whitespace/comment ("non-code") tokens.
  def _IsCode(t):
    return t and t.type not in JavaScriptTokenType.NON_CODE_TYPES

  return CustomSearch(token, _IsCode, reverse=True)
-
-
def GetNextCodeToken(token):
  """Returns the next code token after the specified token.

  Args:
    token: A token.

  Returns:
    The next code token after the specified token or None if no such token
    exists.
  """
  # Scan forward until something other than whitespace/comments is found.
  is_code = lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES
  return CustomSearch(token, is_code, reverse=False)
-
-
def GetIdentifierStart(token):
  """Returns the first token in an identifier.

  Given a token which is part of an identifier, returns the token at the start
  of the identifier.

  Args:
    token: A token which is part of an identifier.

  Returns:
    The token at the start of the identifier or None if the identifier was not
    of the form 'a.b.c' (e.g. "['a']['b'].c").
  """
  start_token = token
  # Keep stepping back while the preceding code token is still part of a
  # dotted identifier chain.
  while True:
    prev_code = GetPreviousCodeToken(start_token)
    if not (prev_code and
            (prev_code.IsType(JavaScriptTokenType.IDENTIFIER) or
             IsDot(prev_code))):
      break
    start_token = prev_code

  # A well-formed 'a.b.c' identifier cannot begin with a dot.
  if IsDot(start_token):
    return None
  return start_token
-
-
def GetIdentifierForToken(token):
  """Get the symbol specified by a token.

  Given a token, this function additionally concatenates any parts of an
  identifying symbol being identified that are split by whitespace or a
  newline.

  The function will return None if the token is not the first token of an
  identifier.

  Args:
    token: The first token of a symbol.

  Returns:
    The whole symbol, as a string.
  """

  # Search backward to determine if this token is the first token of the
  # identifier. If it is not the first token, return None to signal that this
  # token should be ignored.
  prev_token = token.previous
  while prev_token:
    if (prev_token.IsType(JavaScriptTokenType.IDENTIFIER) or
        IsDot(prev_token)):
      return None

    if (prev_token.IsType(tokens.TokenType.WHITESPACE) or
        prev_token.IsAnyType(JavaScriptTokenType.COMMENT_TYPES)):
      # Non-code material between parts of a symbol is skipped, not a
      # terminator.
      prev_token = prev_token.previous
    else:
      break

  # A "function foo()" declaration.
  if token.type is JavaScriptTokenType.FUNCTION_NAME:
    return token.string

  # A "var foo" declaration (if the previous token is 'var')
  previous_code_token = GetPreviousCodeToken(token)

  if previous_code_token and previous_code_token.IsKeyword('var'):
    return token.string

  # Otherwise, this is potentially a namespaced (goog.foo.bar) identifier that
  # could span multiple lines or be broken up by whitespace.  We need
  # to concatenate.
  identifier_types = set([
      JavaScriptTokenType.IDENTIFIER,
      JavaScriptTokenType.SIMPLE_LVALUE
  ])

  assert token.type in identifier_types

  # Start with the first token
  symbol_tokens = [token]

  if token.next:
    # NOTE(review): iterating a token appears to walk forward through the
    # remaining token stream (see common/tokens) — confirm against Token's
    # __iter__ implementation.
    for t in token.next:
      last_symbol_token = symbol_tokens[-1]

      # A dot is part of the previous symbol.
      if IsDot(t):
        symbol_tokens.append(t)
        continue

      # An identifier is part of the previous symbol if the previous one was a
      # dot.
      if t.type in identifier_types:
        if IsDot(last_symbol_token):
          symbol_tokens.append(t)
          continue
        else:
          break

      # Skip any whitespace
      if t.type in JavaScriptTokenType.NON_CODE_TYPES:
        continue

      # This is the end of the identifier. Stop iterating.
      break

  if symbol_tokens:
    return ''.join([t.string for t in symbol_tokens])
-
-
def GetStringAfterToken(token):
  """Get string after token.

  Args:
    token: Search will be done after this token.

  Returns:
    String if found after token else None (empty string will also
    return None).

  The search is bounded by the end-of-string tokens because an empty string
  contains no Type.STRING_TEXT token at all.
  E.g.
  a = '';
  b = 'test';
  When searching for the string after 'a', an unbounded search would return
  'test', which is not desirable since there is an empty string before it.

  Consequently this returns None both when the string is empty and when no
  string is found, as neither case contains a Type.STRING_TEXT token.
  """
  end_types = [JavaScriptTokenType.SINGLE_QUOTE_STRING_END,
               JavaScriptTokenType.DOUBLE_QUOTE_STRING_END]
  match = SearchUntil(token, JavaScriptTokenType.STRING_TEXT, end_types)
  return match.string if match else None
-
-
def IsDot(token):
  """Whether the token represents a "dot" operator (foo.bar)."""
  # Cheap string check first, then the type check.
  return (token.string == '.' and
          token.type is JavaScriptTokenType.OPERATOR)
-
-
def IsIdentifierOrDot(token):
  """Whether the token is either an identifier or a '.'."""
  if IsDot(token):
    return True
  return token.type in [JavaScriptTokenType.IDENTIFIER,
                        JavaScriptTokenType.SIMPLE_LVALUE]
diff --git a/tools/closure_linter/build/lib/closure_linter/tokenutil_test.py b/tools/closure_linter/build/lib/closure_linter/tokenutil_test.py
deleted file mode 100644
index c7d3854776..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/tokenutil_test.py
+++ /dev/null
@@ -1,297 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unit tests for the scopeutil module."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = ('nnaze@google.com (Nathan Naze)')
-
-import unittest as googletest
-
-from closure_linter import ecmametadatapass
-from closure_linter import javascripttokens
-from closure_linter import testutil
-from closure_linter import tokenutil
-
-
class FakeToken(object):
  """Minimal token stand-in; tests attach attributes (next, string, ...) ad hoc."""
  pass
-
-
class TokenUtilTest(googletest.TestCase):
  """Unit tests for the token-stream helpers in tokenutil."""

  def testGetTokenRange(self):

    a = FakeToken()
    b = FakeToken()
    c = FakeToken()
    d = FakeToken()
    e = FakeToken()

    a.next = b
    b.next = c
    c.next = d

    self.assertEquals([a, b, c, d], tokenutil.GetTokenRange(a, d))

    # This is an error as e does not come after a in the token chain.
    self.assertRaises(Exception, lambda: tokenutil.GetTokenRange(a, e))

  def testTokensToString(self):

    a = FakeToken()
    b = FakeToken()
    c = FakeToken()
    d = FakeToken()
    e = FakeToken()

    a.string = 'aaa'
    b.string = 'bbb'
    c.string = 'ccc'
    d.string = 'ddd'
    e.string = 'eee'

    a.line_number = 5
    b.line_number = 6
    c.line_number = 6
    d.line_number = 10
    e.line_number = 11

    # Gaps in line numbers become runs of newlines in the output.
    self.assertEquals(
        'aaa\nbbbccc\n\n\n\nddd\neee',
        tokenutil.TokensToString([a, b, c, d, e]))

    self.assertEquals(
        'ddd\neee\naaa\nbbbccc',
        tokenutil.TokensToString([d, e, a, b, c]),
        'Neighboring tokens not in line_number order should have a newline '
        'between them.')

  def testGetPreviousCodeToken(self):

    tokens = testutil.TokenizeSource("""
start1. // comment
    /* another comment */
    end1
""")

    # Finds the first token whose text starts with the given prefix.
    def _GetTokenStartingWith(token_starts_with):
      for t in tokens:
        if t.string.startswith(token_starts_with):
          return t

    self.assertEquals(
        None,
        tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('start1')))

    self.assertEquals(
        '.',
        tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('end1')).string)

    self.assertEquals(
        'start1',
        tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('.')).string)

  def testGetNextCodeToken(self):

    tokens = testutil.TokenizeSource("""
start1. // comment
    /* another comment */
    end1
""")

    def _GetTokenStartingWith(token_starts_with):
      for t in tokens:
        if t.string.startswith(token_starts_with):
          return t

    self.assertEquals(
        '.',
        tokenutil.GetNextCodeToken(_GetTokenStartingWith('start1')).string)

    self.assertEquals(
        'end1',
        tokenutil.GetNextCodeToken(_GetTokenStartingWith('.')).string)

    self.assertEquals(
        None,
        tokenutil.GetNextCodeToken(_GetTokenStartingWith('end1')))

  def testGetIdentifierStart(self):

    tokens = testutil.TokenizeSource("""
start1 . // comment
    prototype. /* another comment */
    end1

['edge'][case].prototype.
    end2 = function() {}
""")

    def _GetTokenStartingWith(token_starts_with):
      for t in tokens:
        if t.string.startswith(token_starts_with):
          return t

    self.assertEquals(
        'start1',
        tokenutil.GetIdentifierStart(_GetTokenStartingWith('end1')).string)

    self.assertEquals(
        'start1',
        tokenutil.GetIdentifierStart(_GetTokenStartingWith('start1')).string)

    # Bracketed access is not an 'a.b.c'-form identifier, so no start exists.
    self.assertEquals(
        None,
        tokenutil.GetIdentifierStart(_GetTokenStartingWith('end2')))

  def testInsertTokenBefore(self):

    self.AssertInsertTokenAfterBefore(False)

  def testInsertTokenAfter(self):

    self.AssertInsertTokenAfterBefore(True)

  def AssertInsertTokenAfterBefore(self, after):
    # Shared body for the two insertion tests; 'after' selects which API is
    # exercised — both must produce the identical stream.

    new_token = javascripttokens.JavaScriptToken(
        'a', javascripttokens.JavaScriptTokenType.IDENTIFIER, 1, 1)

    existing_token1 = javascripttokens.JavaScriptToken(
        'var', javascripttokens.JavaScriptTokenType.KEYWORD, 1, 1)
    existing_token1.start_index = 0
    existing_token1.metadata = ecmametadatapass.EcmaMetaData()

    existing_token2 = javascripttokens.JavaScriptToken(
        ' ', javascripttokens.JavaScriptTokenType.WHITESPACE, 1, 1)
    existing_token2.start_index = 3
    existing_token2.metadata = ecmametadatapass.EcmaMetaData()
    existing_token2.metadata.last_code = existing_token1

    existing_token1.next = existing_token2
    existing_token2.previous = existing_token1

    if after:
      tokenutil.InsertTokenAfter(new_token, existing_token1)
    else:
      tokenutil.InsertTokenBefore(new_token, existing_token2)

    self.assertEquals(existing_token1, new_token.previous)
    self.assertEquals(existing_token2, new_token.next)

    self.assertEquals(new_token, existing_token1.next)
    self.assertEquals(new_token, existing_token2.previous)

    self.assertEquals(existing_token1, new_token.metadata.last_code)
    self.assertEquals(new_token, existing_token2.metadata.last_code)

    # Insertion must shift start indices of everything after the new token.
    self.assertEquals(0, existing_token1.start_index)
    self.assertEquals(3, new_token.start_index)
    self.assertEquals(4, existing_token2.start_index)

  def testGetIdentifierForToken(self):

    tokens = testutil.TokenizeSource("""
start1.abc.def.prototype.
  onContinuedLine

(start2.abc.def
    .hij.klm
    .nop)

start3.abc.def
    .hij = function() {};

// An absurd multi-liner.
start4.abc.def.
    hij.
        klm = function() {};

start5 . aaa . bbb . ccc
  shouldntBePartOfThePreviousSymbol

start6.abc.def ghi.shouldntBePartOfThePreviousSymbol

var start7 = 42;

function start8() {

}

start9.abc. // why is there a comment here?
    def /* another comment */
    shouldntBePart

start10.abc // why is there a comment here?
    .def /* another comment */
    shouldntBePart

start11.abc. middle1.shouldNotBeIdentifier
""")

    def _GetTokenStartingWith(token_starts_with):
      for t in tokens:
        if t.string.startswith(token_starts_with):
          return t

    self.assertEquals(
        'start1.abc.def.prototype.onContinuedLine',
        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start1')))

    self.assertEquals(
        'start2.abc.def.hij.klm.nop',
        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start2')))

    self.assertEquals(
        'start3.abc.def.hij',
        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start3')))

    self.assertEquals(
        'start4.abc.def.hij.klm',
        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start4')))

    self.assertEquals(
        'start5.aaa.bbb.ccc',
        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start5')))

    self.assertEquals(
        'start6.abc.def',
        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start6')))

    self.assertEquals(
        'start7',
        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start7')))

    self.assertEquals(
        'start8',
        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start8')))

    self.assertEquals(
        'start9.abc.def',
        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start9')))

    self.assertEquals(
        'start10.abc.def',
        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start10')))

    # middle1 is not the first token of its identifier, so no symbol.
    self.assertIsNone(
        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('middle1')))
-
-
# Allow running this test module directly.
if __name__ == '__main__':
  googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/typeannotation.py b/tools/closure_linter/build/lib/closure_linter/typeannotation.py
deleted file mode 100644
index 00604c13a7..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/typeannotation.py
+++ /dev/null
@@ -1,401 +0,0 @@
-#!/usr/bin/env python
-#*-* coding: utf-8
-"""Closure typeannotation parsing and utilities."""
-
-
-
-from closure_linter import errors
-from closure_linter import javascripttokens
-from closure_linter.common import error
-
# Shorthand for the token-type enum used throughout this module.
TYPE = javascripttokens.JavaScriptTokenType
-
-
class TypeAnnotation(object):
  """Represents a structured view of a closure type annotation.

  Attribute:
    identifier: The name of the type.
    key_type: The name part before a colon.
    sub_types: The list of sub_types used e.g. for Array.<…>
    or_null: The '?' annotation
    not_null: The '!' annotation
    type_group: If this is a grouping (a|b), but does not include function(a).
    return_type: The return type of a function definition.
    alias: The actual type set by closurizednamespaceinfo if the identifier
        uses an alias to shorten the name.
    tokens: An ordered list of tokens used for this type. May contain
        TypeAnnotation instances for sub_types, key_type or return_type.
  """

  # Sentinel stored in type_group (distinct from True/False) marking a group
  # that was opened implicitly by '|' rather than by explicit parentheses.
  IMPLICIT_TYPE_GROUP = 2

  # Sentinel returned by GetNullability when nullability cannot be decided.
  NULLABILITY_UNKNOWN = 2

  # Frequently used known non-nullable types.
  NON_NULLABLE = frozenset([
      'boolean', 'function', 'number', 'string', 'undefined'])
  # Frequently used known nullable types.
  NULLABLE_TYPE_WHITELIST = frozenset([
      'Array', 'Document', 'Element', 'Function', 'Node', 'NodeList',
      'Object'])

  def __init__(self):
    self.identifier = ''
    self.sub_types = []
    self.or_null = False
    self.not_null = False
    self.type_group = False
    self.alias = None
    self.key_type = None
    self.record_type = False
    self.opt_arg = False
    self.return_type = None
    self.tokens = []

  def IsFunction(self):
    """Determines whether this is a function definition."""
    return self.identifier == 'function'

  def IsConstructor(self):
    """Determines whether this is a function definition for a constructor."""
    key_type = self.sub_types and self.sub_types[0].key_type
    return self.IsFunction() and key_type.identifier == 'new'

  def IsRecordType(self):
    """Returns True if this type is a record type."""
    return (self.record_type or
            bool([t for t in self.sub_types if t.IsRecordType()]))

  def IsVarArgsType(self):
    """Determines if the type is a var_args type, i.e. starts with '...'."""
    return self.identifier.startswith('...') or (
        self.type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP and
        self.sub_types[0].identifier.startswith('...'))

  def IsEmpty(self):
    """Returns True if the type is empty."""
    return not self.tokens

  def IsUnknownType(self):
    """Returns True if this is the unknown type {?}."""
    return (self.or_null
            and not self.identifier
            and not self.sub_types
            and not self.return_type)

  def Append(self, item):
    """Adds a sub_type to this type and finalizes it.

    Args:
      item: The TypeAnnotation item to append.
    """
    # item is a TypeAnnotation instance, so pylint: disable=protected-access
    self.sub_types.append(item._Finalize(self))

  def __repr__(self):
    """Reconstructs the type definition."""
    append = ''
    if self.sub_types:
      # Groups are '|'-separated; argument/record lists use ','.
      separator = (',' if not self.type_group else '|')
      if self.identifier == 'function':
        surround = '(%s)'
      else:
        # The surround depends on what kind of container this is: record
        # type, template args, explicit group, or implicit (no delimiters).
        surround = {False: '{%s}' if self.record_type else '<%s>',
                    True: '(%s)',
                    self.IMPLICIT_TYPE_GROUP: '%s'}[self.type_group]
      append = surround % separator.join([repr(t) for t in self.sub_types])
    if self.return_type:
      append += ':%s' % repr(self.return_type)
    append += '=' if self.opt_arg else ''
    prefix = '' + ('?' if self.or_null else '') + ('!' if self.not_null else '')
    keyword = '%s:' % repr(self.key_type) if self.key_type else ''
    return keyword + prefix + '%s' % (self.alias or self.identifier) + append

  def ToString(self):
    """Concats the type's tokens to form a string again."""
    ret = []
    for token in self.tokens:
      if not isinstance(token, TypeAnnotation):
        ret.append(token.string)
      else:
        ret.append(token.ToString())
    return ''.join(ret)

  def Dump(self, indent=''):
    """Dumps this type's structure for debugging purposes."""
    result = []
    for t in self.tokens:
      if isinstance(t, TypeAnnotation):
        result.append(indent + str(t) + ' =>\n' + t.Dump(indent + '  '))
      else:
        result.append(indent + str(t))
    return '\n'.join(result)

  def IterIdentifiers(self):
    """Iterates over all identifiers in this type and its subtypes."""
    if self.identifier:
      yield self.identifier
    for subtype in self.IterTypes():
      for identifier in subtype.IterIdentifiers():
        yield identifier

  def IterTypeGroup(self):
    """Iterates over all types in the type group including self.

    Yields:
      If this is a implicit or manual type-group: all sub_types.
      Otherwise: self
      E.g. for @type {Foo.<Bar>} this will yield only Foo.<Bar>,
      for @type {Foo|(Bar|Sample)} this will yield Foo, Bar and Sample.

    """
    if self.type_group:
      # Recurse so nested groups are flattened into a single stream.
      for sub_type in self.sub_types:
        for sub_type in sub_type.IterTypeGroup():
          yield sub_type
    else:
      yield self

  def IterTypes(self):
    """Iterates over each subtype as well as return and key types."""
    if self.return_type:
      yield self.return_type

    if self.key_type:
      yield self.key_type

    for sub_type in self.sub_types:
      yield sub_type

  def GetNullability(self, modifiers=True):
    """Computes whether the type may be null.

    Args:
      modifiers: Whether the modifiers ? and ! should be considered in the
        evaluation.
    Returns:
      True if the type allows null, False if the type is strictly non nullable
      and NULLABILITY_UNKNOWN if the nullability cannot be determined.
    """

    # Explicitly marked nullable types or 'null' are nullable.
    if (modifiers and self.or_null) or self.identifier == 'null':
      return True

    # Explicitly marked non-nullable types or non-nullable base types:
    if ((modifiers and self.not_null) or self.record_type
        or self.identifier in self.NON_NULLABLE):
      return False

    # A type group is nullable if any of its elements are nullable.
    if self.type_group:
      maybe_nullable = False
      for sub_type in self.sub_types:
        nullability = sub_type.GetNullability()
        if nullability == self.NULLABILITY_UNKNOWN:
          maybe_nullable = nullability
        elif nullability:
          return True
      return maybe_nullable

    # Whitelisted types are nullable.
    if self.identifier.rstrip('.') in self.NULLABLE_TYPE_WHITELIST:
      return True

    # All other types are unknown (most should be nullable, but
    # enums are not and typedefs might not be).
    return self.NULLABILITY_UNKNOWN

  def WillAlwaysBeNullable(self):
    """Computes whether the ! flag is illegal for this type.

    This is the case if this type or any of the subtypes is marked as
    explicitly nullable.

    Returns:
      True if the ! flag would be illegal.
    """
    if self.or_null or self.identifier == 'null':
      return True

    if self.type_group:
      return bool([t for t in self.sub_types if t.WillAlwaysBeNullable()])

    return False

  def _Finalize(self, parent):
    """Fixes some parsing issues once the TypeAnnotation is complete."""

    # Normalize functions whose definition ended up in the key type because
    # they defined a return type after a colon.
    if self.key_type and self.key_type.identifier == 'function':
      current = self.key_type
      current.return_type = self
      self.key_type = None
      # opt_arg never refers to the return type but to the function itself.
      current.opt_arg = self.opt_arg
      self.opt_arg = False
      return current

    # If a typedef just specified the key, it will not end up in the key type.
    if parent.record_type and not self.key_type:
      current = TypeAnnotation()
      current.key_type = self
      current.tokens.append(self)
      return current
    return self

  def FirstToken(self):
    """Returns the first token used in this type or any of its subtypes."""
    first = self.tokens[0]
    return first.FirstToken() if isinstance(first, TypeAnnotation) else first
-
-
def Parse(token, token_end, error_handler):
  """Parses a type annotation and returns a TypeAnnotation object."""
  # The annotation proper starts on the token after the opening brace.
  parser = TypeAnnotationParser(error_handler)
  return parser.Parse(token.next, token_end)
-
-
class TypeAnnotationParser(object):
  """A parser for type annotations constructing the TypeAnnotation object."""

  def __init__(self, error_handler):
    # Stack of TypeAnnotation scopes that are currently open.
    self._stack = []
    self._error_handler = error_handler
    # Ensures the 'too many closing items' error is reported only once.
    self._closing_error = False

  def Parse(self, token, token_end):
    """Parses a type annotation and returns a TypeAnnotation object."""
    root = TypeAnnotation()
    self._stack.append(root)
    current = TypeAnnotation()
    root.tokens.append(current)

    while token and token != token_end:
      if token.type in (TYPE.DOC_TYPE_START_BLOCK, TYPE.DOC_START_BRACE):
        # Opening '(' or '{': push the current annotation and descend.
        if token.string == '(':
          if (current.identifier and
              current.identifier not in ['function', '...']):
            self.Error(token,
                       'Invalid identifier for (): "%s"' % current.identifier)
          current.type_group = current.identifier != 'function'
        elif token.string == '{':
          current.record_type = True
        current.tokens.append(token)
        self._stack.append(current)
        current = TypeAnnotation()
        self._stack[-1].tokens.append(current)

      elif token.type in (TYPE.DOC_TYPE_END_BLOCK, TYPE.DOC_END_BRACE):
        # Closing ')' or '}': pop back to the enclosing annotation.
        prev = self._stack.pop()
        prev.Append(current)
        current = prev

        # If an implicit type group was created, close it as well.
        if prev.type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP:
          prev = self._stack.pop()
          prev.Append(current)
          current = prev
        current.tokens.append(token)

      elif token.type == TYPE.DOC_TYPE_MODIFIER:
        if token.string == '!':
          current.tokens.append(token)
          current.not_null = True
        elif token.string == '?':
          current.tokens.append(token)
          current.or_null = True
        elif token.string == ':':
          # Everything seen so far becomes the key of the next annotation.
          current.tokens.append(token)
          prev = current
          current = TypeAnnotation()
          prev.tokens.append(current)
          current.key_type = prev
        elif token.string == '=':
          # For implicit type groups the '=' refers to the parent.
          try:
            if self._stack[-1].type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP:
              self._stack[-1].tokens.append(token)
              self._stack[-1].opt_arg = True
            else:
              current.tokens.append(token)
              current.opt_arg = True
          except IndexError:
            self.ClosingError(token)
        elif token.string == '|':
          # If a type group has explicitly been opened do a normal append.
          # Otherwise we have to open the type group and move the current
          # type into it, before appending
          if not self._stack[-1].type_group:
            type_group = TypeAnnotation()
            if current.key_type and current.key_type.identifier != 'function':
              type_group.key_type = current.key_type
              current.key_type = None
            type_group.type_group = TypeAnnotation.IMPLICIT_TYPE_GROUP
            # Fix the token order
            prev = self._stack[-1].tokens.pop()
            self._stack[-1].tokens.append(type_group)
            type_group.tokens.append(prev)
            self._stack.append(type_group)
          self._stack[-1].tokens.append(token)
          self.Append(current, error_token=token)
          current = TypeAnnotation()
          self._stack[-1].tokens.append(current)
        elif token.string == ',':
          # Separator between sub types of the enclosing annotation.
          self.Append(current, error_token=token)
          current = TypeAnnotation()
          self._stack[-1].tokens.append(token)
          self._stack[-1].tokens.append(current)
        else:
          current.tokens.append(token)
          self.Error(token, 'Invalid token')

      elif token.type == TYPE.COMMENT:
        # Comment text inside a type contributes to the identifier (types can
        # span comment-interrupted lines in a doc block).
        current.tokens.append(token)
        current.identifier += token.string.strip()

      elif token.type in [TYPE.DOC_PREFIX, TYPE.WHITESPACE]:
        current.tokens.append(token)

      else:
        current.tokens.append(token)
        self.Error(token, 'Unexpected token')

      token = token.next

    self.Append(current, error_token=token)
    try:
      ret = self._stack.pop()
    except IndexError:
      self.ClosingError(token)
      # The type is screwed up, but let's return something.
      return current

    if self._stack and (len(self._stack) != 1 or
                        ret.type_group != TypeAnnotation.IMPLICIT_TYPE_GROUP):
      self.Error(token, 'Too many opening items.')

    return ret if len(ret.sub_types) > 1 else ret.sub_types[0]

  def Append(self, type_obj, error_token):
    """Appends a new TypeAnnotation object to the current parent."""
    if self._stack:
      self._stack[-1].Append(type_obj)
    else:
      self.ClosingError(error_token)

  def ClosingError(self, token):
    """Reports an error about too many closing items, but only once."""
    if not self._closing_error:
      self._closing_error = True
      self.Error(token, 'Too many closing items.')

  def Error(self, token, message):
    """Calls the error_handler to post an error message."""
    if self._error_handler:
      self._error_handler.HandleError(error.Error(
          errors.JSDOC_DOES_NOT_PARSE,
          'Error parsing jsdoc type at token "%s" (column: %d): %s' %
          (token.string, token.start_index, message), token))
diff --git a/tools/closure_linter/build/lib/closure_linter/typeannotation_test.py b/tools/closure_linter/build/lib/closure_linter/typeannotation_test.py
deleted file mode 100644
index da9dfa369f..0000000000
--- a/tools/closure_linter/build/lib/closure_linter/typeannotation_test.py
+++ /dev/null
@@ -1,232 +0,0 @@
-#!/usr/bin/env python
-"""Unit tests for the typeannotation module."""
-
-
-
-
-import unittest as googletest
-
-from closure_linter import testutil
-from closure_linter.common import erroraccumulator
-
-CRAZY_TYPE = ('Array.<!function(new:X,{a:null},...(c|d)):'
- 'function(...(Object.<string>))>')
-
-
-class TypeErrorException(Exception):
- """Exception for TypeErrors."""
-
- def __init__(self, errors):
- super(TypeErrorException, self).__init__()
- self.errors = errors
-
-
-class TypeParserTest(googletest.TestCase):
- """Tests for typeannotation parsing."""
-
- def _ParseComment(self, script):
- """Parse a script that contains one comment and return it."""
- accumulator = erroraccumulator.ErrorAccumulator()
- _, comments = testutil.ParseFunctionsAndComments(script, accumulator)
- if accumulator.GetErrors():
- raise TypeErrorException(accumulator.GetErrors())
- self.assertEquals(1, len(comments))
- return comments[0]
-
- def _ParseType(self, type_str):
- """Creates a comment to parse and returns the parsed type."""
- comment = self._ParseComment('/** @type {%s} **/' % type_str)
- return comment.GetDocFlags()[0].jstype
-
- def assertProperReconstruction(self, type_str, matching_str=None):
- """Parses the type and asserts the its repr matches the type.
-
- If matching_str is specified, it will assert that the repr matches this
- string instead.
-
- Args:
- type_str: The type string to parse.
- matching_str: A string the __repr__ of the parsed type should match.
- Returns:
- The parsed js_type.
- """
- parsed_type = self._ParseType(type_str)
- # Use listEqual assertion to more easily identify the difference
- self.assertListEqual(list(matching_str or type_str),
- list(repr(parsed_type)))
- self.assertEquals(matching_str or type_str, repr(parsed_type))
-
- # Newlines will be inserted by the file writer.
- self.assertEquals(type_str.replace('\n', ''), parsed_type.ToString())
- return parsed_type
-
- def assertNullable(self, type_str, nullable=True):
- parsed_type = self.assertProperReconstruction(type_str)
- self.assertEquals(nullable, parsed_type.GetNullability(),
- '"%s" should %sbe nullable' %
- (type_str, 'not ' if nullable else ''))
-
- def assertNotNullable(self, type_str):
- return self.assertNullable(type_str, nullable=False)
-
- def testReconstruction(self):
- self.assertProperReconstruction('*')
- self.assertProperReconstruction('number')
- self.assertProperReconstruction('(((number)))')
- self.assertProperReconstruction('!number')
- self.assertProperReconstruction('?!number')
- self.assertProperReconstruction('number=')
- self.assertProperReconstruction('number=!?', '?!number=')
- self.assertProperReconstruction('number|?string')
- self.assertProperReconstruction('(number|string)')
- self.assertProperReconstruction('?(number|string)')
- self.assertProperReconstruction('Object.<number,string>')
- self.assertProperReconstruction('function(new:Object)')
- self.assertProperReconstruction('function(new:Object):number')
- self.assertProperReconstruction('function(new:Object,Element):number')
- self.assertProperReconstruction('function(this:T,...)')
- self.assertProperReconstruction('{a:?number}')
- self.assertProperReconstruction('{a:?number,b:(number|string)}')
- self.assertProperReconstruction('{c:{nested_element:*}|undefined}')
- self.assertProperReconstruction('{handleEvent:function(?):?}')
- self.assertProperReconstruction('function():?|null')
- self.assertProperReconstruction('null|function():?|bar')
-
- def testOptargs(self):
- self.assertProperReconstruction('number=')
- self.assertProperReconstruction('number|string=')
- self.assertProperReconstruction('(number|string)=')
- self.assertProperReconstruction('(number|string=)')
- self.assertProperReconstruction('(number=|string)')
- self.assertProperReconstruction('function(...):number=')
-
- def testIndepth(self):
- # Do an deeper check of the crazy identifier
- crazy = self.assertProperReconstruction(CRAZY_TYPE)
- self.assertEquals('Array.', crazy.identifier)
- self.assertEquals(1, len(crazy.sub_types))
- func1 = crazy.sub_types[0]
- func2 = func1.return_type
- self.assertEquals('function', func1.identifier)
- self.assertEquals('function', func2.identifier)
- self.assertEquals(3, len(func1.sub_types))
- self.assertEquals(1, len(func2.sub_types))
- self.assertEquals('Object.', func2.sub_types[0].sub_types[0].identifier)
-
- def testIterIdentifiers(self):
- nested_identifiers = self._ParseType('(a|{b:(c|function(new:d):e)})')
- for identifier in ('a', 'b', 'c', 'd', 'e'):
- self.assertIn(identifier, nested_identifiers.IterIdentifiers())
-
- def testIsEmpty(self):
- self.assertTrue(self._ParseType('').IsEmpty())
- self.assertFalse(self._ParseType('?').IsEmpty())
- self.assertFalse(self._ParseType('!').IsEmpty())
- self.assertFalse(self._ParseType('<?>').IsEmpty())
-
- def testIsConstructor(self):
- self.assertFalse(self._ParseType('').IsConstructor())
- self.assertFalse(self._ParseType('Array.<number>').IsConstructor())
- self.assertTrue(self._ParseType('function(new:T)').IsConstructor())
-
- def testIsVarArgsType(self):
- self.assertTrue(self._ParseType('...number').IsVarArgsType())
- self.assertTrue(self._ParseType('...Object|Array').IsVarArgsType())
- self.assertTrue(self._ParseType('...(Object|Array)').IsVarArgsType())
- self.assertFalse(self._ParseType('Object|...Array').IsVarArgsType())
- self.assertFalse(self._ParseType('(...Object|Array)').IsVarArgsType())
-
- def testIsUnknownType(self):
- self.assertTrue(self._ParseType('?').IsUnknownType())
- self.assertTrue(self._ParseType('Foo.<?>').sub_types[0].IsUnknownType())
- self.assertFalse(self._ParseType('?|!').IsUnknownType())
- self.assertTrue(self._ParseType('?|!').sub_types[0].IsUnknownType())
- self.assertFalse(self._ParseType('!').IsUnknownType())
-
- long_type = 'function():?|{handleEvent:function(?=):?,sample:?}|?='
- record = self._ParseType(long_type)
- # First check that there's not just one type with 3 return types, but three
- # top-level types.
- self.assertEquals(3, len(record.sub_types))
-
- # Now extract all unknown type instances and verify that they really are.
- handle_event, sample = record.sub_types[1].sub_types
- for i, sub_type in enumerate([
- record.sub_types[0].return_type,
- handle_event.return_type,
- handle_event.sub_types[0],
- sample,
- record.sub_types[2]]):
- self.assertTrue(sub_type.IsUnknownType(),
- 'Type %d should be the unknown type: %s\n%s' % (
- i, sub_type.tokens, record.Dump()))
-
- def testTypedefNames(self):
- easy = self._ParseType('{a}')
- self.assertTrue(easy.record_type)
-
- easy = self.assertProperReconstruction('{a}', '{a:}').sub_types[0]
- self.assertEquals('a', easy.key_type.identifier)
- self.assertEquals('', easy.identifier)
-
- easy = self.assertProperReconstruction('{a:b}').sub_types[0]
- self.assertEquals('a', easy.key_type.identifier)
- self.assertEquals('b', easy.identifier)
-
- def assertTypeError(self, type_str):
- """Asserts that parsing the given type raises a linter error."""
- self.assertRaises(TypeErrorException, self._ParseType, type_str)
-
- def testParseBadTypes(self):
- """Tests that several errors in types don't break the parser."""
- self.assertTypeError('<')
- self.assertTypeError('>')
- self.assertTypeError('Foo.<Bar')
- self.assertTypeError('Foo.Bar>=')
- self.assertTypeError('Foo.<Bar>>=')
- self.assertTypeError('(')
- self.assertTypeError(')')
- self.assertTypeError('Foo.<Bar)>')
- self._ParseType(':')
- self._ParseType(':foo')
- self.assertTypeError(':)foo')
- self.assertTypeError('(a|{b:(c|function(new:d):e')
-
- def testNullable(self):
- self.assertNullable('null')
- self.assertNullable('Object')
- self.assertNullable('?string')
- self.assertNullable('?number')
-
- self.assertNotNullable('string')
- self.assertNotNullable('number')
- self.assertNotNullable('boolean')
- self.assertNotNullable('function(Object)')
- self.assertNotNullable('function(Object):Object')
- self.assertNotNullable('function(?Object):?Object')
- self.assertNotNullable('!Object')
-
- self.assertNotNullable('boolean|string')
- self.assertNotNullable('(boolean|string)')
-
- self.assertNullable('(boolean|string|null)')
- self.assertNullable('(?boolean)')
- self.assertNullable('?(boolean)')
-
- self.assertNullable('(boolean|Object)')
- self.assertNotNullable('(boolean|(string|{a:}))')
-
- def testSpaces(self):
- """Tests that spaces don't change the outcome."""
- type_str = (' A < b | ( c | ? ! d e f ) > | '
- 'function ( x : . . . ) : { y : z = } ')
- two_spaces = type_str.replace(' ', ' ')
- no_spaces = type_str.replace(' ', '')
- newlines = type_str.replace(' ', '\n * ')
- self.assertProperReconstruction(no_spaces)
- self.assertProperReconstruction(type_str, no_spaces)
- self.assertProperReconstruction(two_spaces, no_spaces)
- self.assertProperReconstruction(newlines, no_spaces)
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/closure_linter.egg-info/PKG-INFO b/tools/closure_linter/closure_linter.egg-info/PKG-INFO
deleted file mode 100644
index 8055c15c21..0000000000
--- a/tools/closure_linter/closure_linter.egg-info/PKG-INFO
+++ /dev/null
@@ -1,10 +0,0 @@
-Metadata-Version: 1.0
-Name: closure-linter
-Version: 2.3.17
-Summary: Closure Linter
-Home-page: http://code.google.com/p/closure-linter
-Author: The Closure Linter Authors
-Author-email: opensource@google.com
-License: Apache
-Description: UNKNOWN
-Platform: UNKNOWN
diff --git a/tools/closure_linter/closure_linter.egg-info/SOURCES.txt b/tools/closure_linter/closure_linter.egg-info/SOURCES.txt
deleted file mode 100644
index a193cdfeb9..0000000000
--- a/tools/closure_linter/closure_linter.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-README
-setup.py
-closure_linter/__init__.py
-closure_linter/aliaspass.py
-closure_linter/aliaspass_test.py
-closure_linter/checker.py
-closure_linter/checkerbase.py
-closure_linter/closurizednamespacesinfo.py
-closure_linter/closurizednamespacesinfo_test.py
-closure_linter/ecmalintrules.py
-closure_linter/ecmametadatapass.py
-closure_linter/error_check.py
-closure_linter/error_fixer.py
-closure_linter/error_fixer_test.py
-closure_linter/errorrecord.py
-closure_linter/errorrules.py
-closure_linter/errorrules_test.py
-closure_linter/errors.py
-closure_linter/fixjsstyle.py
-closure_linter/fixjsstyle_test.py
-closure_linter/full_test.py
-closure_linter/gjslint.py
-closure_linter/indentation.py
-closure_linter/javascriptlintrules.py
-closure_linter/javascriptstatetracker.py
-closure_linter/javascriptstatetracker_test.py
-closure_linter/javascripttokenizer.py
-closure_linter/javascripttokens.py
-closure_linter/not_strict_test.py
-closure_linter/requireprovidesorter.py
-closure_linter/requireprovidesorter_test.py
-closure_linter/runner.py
-closure_linter/runner_test.py
-closure_linter/scopeutil.py
-closure_linter/scopeutil_test.py
-closure_linter/statetracker.py
-closure_linter/statetracker_test.py
-closure_linter/strict_test.py
-closure_linter/testutil.py
-closure_linter/tokenutil.py
-closure_linter/tokenutil_test.py
-closure_linter/typeannotation.py
-closure_linter/typeannotation_test.py
-closure_linter.egg-info/PKG-INFO
-closure_linter.egg-info/SOURCES.txt
-closure_linter.egg-info/dependency_links.txt
-closure_linter.egg-info/entry_points.txt
-closure_linter.egg-info/requires.txt
-closure_linter.egg-info/top_level.txt
-closure_linter/common/__init__.py
-closure_linter/common/error.py
-closure_linter/common/erroraccumulator.py
-closure_linter/common/errorhandler.py
-closure_linter/common/erroroutput.py
-closure_linter/common/filetestcase.py
-closure_linter/common/htmlutil.py
-closure_linter/common/lintrunner.py
-closure_linter/common/matcher.py
-closure_linter/common/position.py
-closure_linter/common/simplefileflags.py
-closure_linter/common/tokenizer.py
-closure_linter/common/tokens.py
-closure_linter/common/tokens_test.py \ No newline at end of file
diff --git a/tools/closure_linter/closure_linter.egg-info/dependency_links.txt b/tools/closure_linter/closure_linter.egg-info/dependency_links.txt
deleted file mode 100644
index 8b13789179..0000000000
--- a/tools/closure_linter/closure_linter.egg-info/dependency_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/tools/closure_linter/closure_linter.egg-info/entry_points.txt b/tools/closure_linter/closure_linter.egg-info/entry_points.txt
deleted file mode 100644
index 459b9d08b8..0000000000
--- a/tools/closure_linter/closure_linter.egg-info/entry_points.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-[console_scripts]
-fixjsstyle = closure_linter.fixjsstyle:main
-gjslint = closure_linter.gjslint:main
-
diff --git a/tools/closure_linter/closure_linter.egg-info/requires.txt b/tools/closure_linter/closure_linter.egg-info/requires.txt
deleted file mode 100644
index 71b67f110c..0000000000
--- a/tools/closure_linter/closure_linter.egg-info/requires.txt
+++ /dev/null
@@ -1 +0,0 @@
-python-gflags \ No newline at end of file
diff --git a/tools/closure_linter/closure_linter.egg-info/top_level.txt b/tools/closure_linter/closure_linter.egg-info/top_level.txt
deleted file mode 100644
index 7ece71f1c9..0000000000
--- a/tools/closure_linter/closure_linter.egg-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-closure_linter
diff --git a/tools/closure_linter/closure_linter/__init__.py b/tools/closure_linter/closure_linter/__init__.py
deleted file mode 100755
index 1798c8cfff..0000000000
--- a/tools/closure_linter/closure_linter/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Package indicator for gjslint."""
diff --git a/tools/closure_linter/closure_linter/aliaspass.py b/tools/closure_linter/closure_linter/aliaspass.py
deleted file mode 100644
index bb37bfa07b..0000000000
--- a/tools/closure_linter/closure_linter/aliaspass.py
+++ /dev/null
@@ -1,248 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Pass that scans for goog.scope aliases and lint/usage errors."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = ('nnaze@google.com (Nathan Naze)')
-
-from closure_linter import ecmametadatapass
-from closure_linter import errors
-from closure_linter import javascripttokens
-from closure_linter import scopeutil
-from closure_linter import tokenutil
-from closure_linter.common import error
-
-
-# TODO(nnaze): Create a Pass interface and move this class, EcmaMetaDataPass,
-# and related classes onto it.
-
-
-def _GetAliasForIdentifier(identifier, alias_map):
- """Returns the aliased_symbol name for an identifier.
-
- Example usage:
- >>> alias_map = {'MyClass': 'goog.foo.MyClass'}
- >>> _GetAliasForIdentifier('MyClass.prototype.action', alias_map)
- 'goog.foo.MyClass.prototype.action'
-
- >>> _GetAliasForIdentifier('MyClass.prototype.action', {})
- None
-
- Args:
- identifier: The identifier.
- alias_map: A dictionary mapping a symbol to an alias.
-
- Returns:
- The aliased symbol name or None if not found.
- """
- ns = identifier.split('.', 1)[0]
- aliased_symbol = alias_map.get(ns)
- if aliased_symbol:
- return aliased_symbol + identifier[len(ns):]
-
-
-def _SetTypeAlias(js_type, alias_map):
- """Updates the alias for identifiers in a type.
-
- Args:
- js_type: A typeannotation.TypeAnnotation instance.
- alias_map: A dictionary mapping a symbol to an alias.
- """
- aliased_symbol = _GetAliasForIdentifier(js_type.identifier, alias_map)
- if aliased_symbol:
- js_type.alias = aliased_symbol
- for sub_type in js_type.IterTypes():
- _SetTypeAlias(sub_type, alias_map)
-
-
-class AliasPass(object):
- """Pass to identify goog.scope() usages.
-
- Identifies goog.scope() usages and finds lint/usage errors. Notes any
- aliases of symbols in Closurized namespaces (that is, reassignments
- such as "var MyClass = goog.foo.MyClass;") and annotates identifiers
- when they're using an alias (so they may be expanded to the full symbol
- later -- that "MyClass.prototype.action" refers to
- "goog.foo.MyClass.prototype.action" when expanded.).
- """
-
- def __init__(self, closurized_namespaces=None, error_handler=None):
- """Creates a new pass.
-
- Args:
- closurized_namespaces: A set of Closurized namespaces (e.g. 'goog').
- error_handler: An error handler to report lint errors to.
- """
-
- self._error_handler = error_handler
-
- # If we have namespaces, freeze the set.
- if closurized_namespaces:
- closurized_namespaces = frozenset(closurized_namespaces)
-
- self._closurized_namespaces = closurized_namespaces
-
- def Process(self, start_token):
- """Runs the pass on a token stream.
-
- Args:
- start_token: The first token in the stream.
- """
-
- if start_token is None:
- return
-
- # TODO(nnaze): Add more goog.scope usage checks.
- self._CheckGoogScopeCalls(start_token)
-
- # If we have closurized namespaces, identify aliased identifiers.
- if self._closurized_namespaces:
- context = start_token.metadata.context
- root_context = context.GetRoot()
- self._ProcessRootContext(root_context)
-
- def _CheckGoogScopeCalls(self, start_token):
- """Check goog.scope calls for lint/usage errors."""
-
- def IsScopeToken(token):
- return (token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER and
- token.string == 'goog.scope')
-
- # Find all the goog.scope tokens in the file
- scope_tokens = [t for t in start_token if IsScopeToken(t)]
-
- for token in scope_tokens:
- scope_context = token.metadata.context
-
- if not (scope_context.type == ecmametadatapass.EcmaContext.STATEMENT and
- scope_context.parent.type == ecmametadatapass.EcmaContext.ROOT):
- self._MaybeReportError(
- error.Error(errors.INVALID_USE_OF_GOOG_SCOPE,
- 'goog.scope call not in global scope', token))
-
- # There should be only one goog.scope reference. Register errors for
- # every instance after the first.
- for token in scope_tokens[1:]:
- self._MaybeReportError(
- error.Error(errors.EXTRA_GOOG_SCOPE_USAGE,
- 'More than one goog.scope call in file.', token))
-
- def _MaybeReportError(self, err):
- """Report an error to the handler (if registered)."""
- if self._error_handler:
- self._error_handler.HandleError(err)
-
- @classmethod
- def _YieldAllContexts(cls, context):
- """Yields all contexts that are contained by the given context."""
- yield context
- for child_context in context.children:
- for descendent_child in cls._YieldAllContexts(child_context):
- yield descendent_child
-
- @staticmethod
- def _IsTokenInParentBlock(token, parent_block):
- """Determines whether the given token is contained by the given block.
-
- Args:
- token: A token
- parent_block: An EcmaContext.
-
- Returns:
- Whether the token is in a context that is or is a child of the given
- parent_block context.
- """
- context = token.metadata.context
-
- while context:
- if context is parent_block:
- return True
- context = context.parent
-
- return False
-
- def _ProcessRootContext(self, root_context):
- """Processes all goog.scope blocks under the root context."""
-
- assert root_context.type is ecmametadatapass.EcmaContext.ROOT
-
- # Process aliases in statements in the root scope for goog.module-style
- # aliases.
- global_alias_map = {}
- for context in root_context.children:
- if context.type == ecmametadatapass.EcmaContext.STATEMENT:
- for statement_child in context.children:
- if statement_child.type == ecmametadatapass.EcmaContext.VAR:
- match = scopeutil.MatchModuleAlias(statement_child)
- if match:
- # goog.require aliases cannot use further aliases, the symbol is
- # the second part of match, directly.
- symbol = match[1]
- if scopeutil.IsInClosurizedNamespace(symbol,
- self._closurized_namespaces):
- global_alias_map[match[0]] = symbol
-
- # Process each block to find aliases.
- for context in root_context.children:
- self._ProcessBlock(context, global_alias_map)
-
- def _ProcessBlock(self, context, global_alias_map):
- """Scans a goog.scope block to find aliases and mark alias tokens."""
- alias_map = global_alias_map.copy()
-
- # Iterate over every token in the context. Each token points to one
- # context, but multiple tokens may point to the same context. We only want
- # to check each context once, so keep track of those we've seen.
- seen_contexts = set()
- token = context.start_token
- while token and self._IsTokenInParentBlock(token, context):
- token_context = token.metadata.context if token.metadata else None
-
- # Check to see if this token is an alias.
- if token_context and token_context not in seen_contexts:
- seen_contexts.add(token_context)
-
- # If this is a alias statement in the goog.scope block.
- if (token_context.type == ecmametadatapass.EcmaContext.VAR and
- scopeutil.IsGoogScopeBlock(token_context.parent.parent)):
- match = scopeutil.MatchAlias(token_context)
-
- # If this is an alias, remember it in the map.
- if match:
- alias, symbol = match
- symbol = _GetAliasForIdentifier(symbol, alias_map) or symbol
- if scopeutil.IsInClosurizedNamespace(symbol,
- self._closurized_namespaces):
- alias_map[alias] = symbol
-
- # If this token is an identifier that matches an alias,
- # mark the token as an alias to the original symbol.
- if (token.type is javascripttokens.JavaScriptTokenType.SIMPLE_LVALUE or
- token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER):
- identifier = tokenutil.GetIdentifierForToken(token)
- if identifier:
- aliased_symbol = _GetAliasForIdentifier(identifier, alias_map)
- if aliased_symbol:
- token.metadata.aliased_symbol = aliased_symbol
-
- elif token.type == javascripttokens.JavaScriptTokenType.DOC_FLAG:
- flag = token.attached_object
- if flag and flag.HasType() and flag.jstype:
- _SetTypeAlias(flag.jstype, alias_map)
-
- token = token.next # Get next token
diff --git a/tools/closure_linter/closure_linter/aliaspass_test.py b/tools/closure_linter/closure_linter/aliaspass_test.py
deleted file mode 100755
index 7042e53487..0000000000
--- a/tools/closure_linter/closure_linter/aliaspass_test.py
+++ /dev/null
@@ -1,191 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unit tests for the aliaspass module."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = ('nnaze@google.com (Nathan Naze)')
-
-import unittest as googletest
-
-from closure_linter import aliaspass
-from closure_linter import errors
-from closure_linter import javascriptstatetracker
-from closure_linter import testutil
-from closure_linter.common import erroraccumulator
-
-
-def _GetTokenByLineAndString(start_token, string, line_number):
- for token in start_token:
- if token.line_number == line_number and token.string == string:
- return token
-
-
-class AliasPassTest(googletest.TestCase):
-
- def testInvalidGoogScopeCall(self):
- start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCOPE_SCRIPT)
-
- error_accumulator = erroraccumulator.ErrorAccumulator()
- alias_pass = aliaspass.AliasPass(
- error_handler=error_accumulator)
- alias_pass.Process(start_token)
-
- alias_errors = error_accumulator.GetErrors()
- self.assertEquals(1, len(alias_errors))
-
- alias_error = alias_errors[0]
-
- self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, alias_error.code)
- self.assertEquals('goog.scope', alias_error.token.string)
-
- def testAliasedIdentifiers(self):
- start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_ALIAS_SCRIPT)
- alias_pass = aliaspass.AliasPass(set(['goog', 'myproject']))
- alias_pass.Process(start_token)
-
- alias_token = _GetTokenByLineAndString(start_token, 'Event', 4)
- self.assertTrue(alias_token.metadata.is_alias_definition)
-
- my_class_token = _GetTokenByLineAndString(start_token, 'myClass', 9)
- self.assertIsNone(my_class_token.metadata.aliased_symbol)
-
- component_token = _GetTokenByLineAndString(start_token, 'Component', 17)
- self.assertEquals('goog.ui.Component',
- component_token.metadata.aliased_symbol)
-
- event_token = _GetTokenByLineAndString(start_token, 'Event.Something', 17)
- self.assertEquals('goog.events.Event.Something',
- event_token.metadata.aliased_symbol)
-
- non_closurized_token = _GetTokenByLineAndString(
- start_token, 'NonClosurizedClass', 18)
- self.assertIsNone(non_closurized_token.metadata.aliased_symbol)
-
- long_start_token = _GetTokenByLineAndString(start_token, 'Event', 24)
- self.assertEquals('goog.events.Event.MultilineIdentifier.someMethod',
- long_start_token.metadata.aliased_symbol)
-
- def testAliasedDoctypes(self):
- """Tests that aliases are correctly expanded within type annotations."""
- start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_ALIAS_SCRIPT)
- tracker = javascriptstatetracker.JavaScriptStateTracker()
- tracker.DocFlagPass(start_token, error_handler=None)
-
- alias_pass = aliaspass.AliasPass(set(['goog', 'myproject']))
- alias_pass.Process(start_token)
-
- flag_token = _GetTokenByLineAndString(start_token, '@type', 22)
- self.assertEquals(
- 'goog.events.Event.<goog.ui.Component,Array<myproject.foo.MyClass>>',
- repr(flag_token.attached_object.jstype))
-
- def testModuleAlias(self):
- start_token = testutil.TokenizeSourceAndRunEcmaPass("""
-goog.module('goog.test');
-var Alias = goog.require('goog.Alias');
-Alias.use();
-""")
- alias_pass = aliaspass.AliasPass(set(['goog']))
- alias_pass.Process(start_token)
- alias_token = _GetTokenByLineAndString(start_token, 'Alias', 3)
- self.assertTrue(alias_token.metadata.is_alias_definition)
-
- def testMultipleGoogScopeCalls(self):
- start_token = testutil.TokenizeSourceAndRunEcmaPass(
- _TEST_MULTIPLE_SCOPE_SCRIPT)
-
- error_accumulator = erroraccumulator.ErrorAccumulator()
-
- alias_pass = aliaspass.AliasPass(
- set(['goog', 'myproject']),
- error_handler=error_accumulator)
- alias_pass.Process(start_token)
-
- alias_errors = error_accumulator.GetErrors()
-
- self.assertEquals(3, len(alias_errors))
-
- error = alias_errors[0]
- self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, error.code)
- self.assertEquals(7, error.token.line_number)
-
- error = alias_errors[1]
- self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code)
- self.assertEquals(7, error.token.line_number)
-
- error = alias_errors[2]
- self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code)
- self.assertEquals(11, error.token.line_number)
-
-
-_TEST_ALIAS_SCRIPT = """
-goog.scope(function() {
-var events = goog.events; // scope alias
-var Event = events.
- Event; // nested multiline scope alias
-
-// This should not be registered as an aliased identifier because
-// it appears before the alias.
-var myClass = new MyClass();
-
-var Component = goog.ui.Component; // scope alias
-var MyClass = myproject.foo.MyClass; // scope alias
-
-// Scope alias of non-Closurized namespace.
-var NonClosurizedClass = aaa.bbb.NonClosurizedClass;
-
-var component = new Component(Event.Something);
-var nonClosurized = NonClosurizedClass();
-
-/**
- * A created namespace with a really long identifier.
- * @type {events.Event.<Component,Array<MyClass>}
- */
-Event.
- MultilineIdentifier.
- someMethod = function() {};
-});
-"""
-
-_TEST_SCOPE_SCRIPT = """
-function foo () {
- // This goog.scope call is invalid.
- goog.scope(function() {
-
- });
-}
-"""
-
-_TEST_MULTIPLE_SCOPE_SCRIPT = """
-goog.scope(function() {
- // do nothing
-});
-
-function foo() {
- var test = goog.scope; // We should not see goog.scope mentioned.
-}
-
-// This goog.scope invalid. There can be only one.
-goog.scope(function() {
-
-});
-"""
-
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/closure_linter/checker.py b/tools/closure_linter/closure_linter/checker.py
deleted file mode 100755
index 1c984173b0..0000000000
--- a/tools/closure_linter/closure_linter/checker.py
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Core methods for checking JS files for common style guide violations."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-import gflags as flags
-
-from closure_linter import aliaspass
-from closure_linter import checkerbase
-from closure_linter import closurizednamespacesinfo
-from closure_linter import javascriptlintrules
-
-
-flags.DEFINE_list('closurized_namespaces', '',
- 'Namespace prefixes, used for testing of'
- 'goog.provide/require')
-flags.DEFINE_list('ignored_extra_namespaces', '',
- 'Fully qualified namespaces that should be not be reported '
- 'as extra by the linter.')
-
-
-class JavaScriptStyleChecker(checkerbase.CheckerBase):
- """Checker that applies JavaScriptLintRules."""
-
- def __init__(self, state_tracker, error_handler):
- """Initialize an JavaScriptStyleChecker object.
-
- Args:
- state_tracker: State tracker.
- error_handler: Error handler to pass all errors to.
- """
- self._namespaces_info = None
- self._alias_pass = None
- if flags.FLAGS.closurized_namespaces:
- self._namespaces_info = (
- closurizednamespacesinfo.ClosurizedNamespacesInfo(
- flags.FLAGS.closurized_namespaces,
- flags.FLAGS.ignored_extra_namespaces))
-
- self._alias_pass = aliaspass.AliasPass(
- flags.FLAGS.closurized_namespaces, error_handler)
-
- checkerbase.CheckerBase.__init__(
- self,
- error_handler=error_handler,
- lint_rules=javascriptlintrules.JavaScriptLintRules(
- self._namespaces_info),
- state_tracker=state_tracker)
-
- def Check(self, start_token, limited_doc_checks=False, is_html=False,
- stop_token=None):
- """Checks a token stream for lint warnings/errors.
-
- Adds a separate pass for computing dependency information based on
- goog.require and goog.provide statements prior to the main linting pass.
-
- Args:
- start_token: The first token in the token stream.
- limited_doc_checks: Whether to perform limited checks.
- is_html: Whether this token stream is HTML.
- stop_token: If given, checks should stop at this token.
- """
- self._lint_rules.Initialize(self, limited_doc_checks, is_html)
-
- self._state_tracker.DocFlagPass(start_token, self._error_handler)
-
- if self._alias_pass:
- self._alias_pass.Process(start_token)
-
- # To maximize the amount of errors that get reported before a parse error
- # is displayed, don't run the dependency pass if a parse error exists.
- if self._namespaces_info:
- self._namespaces_info.Reset()
- self._ExecutePass(start_token, self._DependencyPass, stop_token)
-
- self._ExecutePass(start_token, self._LintPass, stop_token)
-
- # If we have a stop_token, we didn't end up reading the whole file and,
- # thus, don't call Finalize to do end-of-file checks.
- if not stop_token:
- self._lint_rules.Finalize(self._state_tracker)
-
- def _DependencyPass(self, token):
- """Processes an individual token for dependency information.
-
- Used to encapsulate the logic needed to process an individual token so that
- it can be passed to _ExecutePass.
-
- Args:
- token: The token to process.
- """
- self._namespaces_info.ProcessToken(token, self._state_tracker)
diff --git a/tools/closure_linter/closure_linter/checkerbase.py b/tools/closure_linter/closure_linter/checkerbase.py
deleted file mode 100755
index 6679ded05b..0000000000
--- a/tools/closure_linter/closure_linter/checkerbase.py
+++ /dev/null
@@ -1,192 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Base classes for writing checkers that operate on tokens."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)',
- 'jacobr@google.com (Jacob Richman)')
-
-from closure_linter import errorrules
-from closure_linter.common import error
-
-
-class LintRulesBase(object):
- """Base class for all classes defining the lint rules for a language."""
-
- def __init__(self):
- self.__checker = None
-
- def Initialize(self, checker, limited_doc_checks, is_html):
- """Initializes to prepare to check a file.
-
- Args:
- checker: Class to report errors to.
- limited_doc_checks: Whether doc checking is relaxed for this file.
- is_html: Whether the file is an HTML file with extracted contents.
- """
- self.__checker = checker
- self._limited_doc_checks = limited_doc_checks
- self._is_html = is_html
-
- def _HandleError(self, code, message, token, position=None,
- fix_data=None):
- """Call the HandleError function for the checker we are associated with."""
- if errorrules.ShouldReportError(code):
- self.__checker.HandleError(code, message, token, position, fix_data)
-
- def _SetLimitedDocChecks(self, limited_doc_checks):
- """Sets whether doc checking is relaxed for this file.
-
- Args:
- limited_doc_checks: Whether doc checking is relaxed for this file.
- """
- self._limited_doc_checks = limited_doc_checks
-
- def CheckToken(self, token, parser_state):
- """Checks a token, given the current parser_state, for warnings and errors.
-
- Args:
- token: The current token under consideration.
- parser_state: Object that indicates the parser state in the page.
-
- Raises:
- TypeError: If not overridden.
- """
- raise TypeError('Abstract method CheckToken not implemented')
-
- def Finalize(self, parser_state):
- """Perform all checks that need to occur after all lines are processed.
-
- Args:
- parser_state: State of the parser after parsing all tokens
-
- Raises:
- TypeError: If not overridden.
- """
- raise TypeError('Abstract method Finalize not implemented')
-
-
-class CheckerBase(object):
- """This class handles checking a LintRules object against a file."""
-
- def __init__(self, error_handler, lint_rules, state_tracker):
- """Initialize a checker object.
-
- Args:
- error_handler: Object that handles errors.
- lint_rules: LintRules object defining lint errors given a token
- and state_tracker object.
- state_tracker: Object that tracks the current state in the token stream.
-
- """
- self._error_handler = error_handler
- self._lint_rules = lint_rules
- self._state_tracker = state_tracker
-
- self._has_errors = False
-
- def HandleError(self, code, message, token, position=None,
- fix_data=None):
- """Prints out the given error message including a line number.
-
- Args:
- code: The error code.
- message: The error to print.
- token: The token where the error occurred, or None if it was a file-wide
- issue.
- position: The position of the error, defaults to None.
- fix_data: Metadata used for fixing the error.
- """
- self._has_errors = True
- self._error_handler.HandleError(
- error.Error(code, message, token, position, fix_data))
-
- def HasErrors(self):
- """Returns true if the style checker has found any errors.
-
- Returns:
- True if the style checker has found any errors.
- """
- return self._has_errors
-
- def Check(self, start_token, limited_doc_checks=False, is_html=False,
- stop_token=None):
- """Checks a token stream, reporting errors to the error reporter.
-
- Args:
- start_token: First token in token stream.
- limited_doc_checks: Whether doc checking is relaxed for this file.
- is_html: Whether the file being checked is an HTML file with extracted
- contents.
- stop_token: If given, check should stop at this token.
- """
-
- self._lint_rules.Initialize(self, limited_doc_checks, is_html)
- self._ExecutePass(start_token, self._LintPass, stop_token=stop_token)
- self._lint_rules.Finalize(self._state_tracker)
-
- def _LintPass(self, token):
- """Checks an individual token for lint warnings/errors.
-
- Used to encapsulate the logic needed to check an individual token so that it
- can be passed to _ExecutePass.
-
- Args:
- token: The token to check.
- """
- self._lint_rules.CheckToken(token, self._state_tracker)
-
- def _ExecutePass(self, token, pass_function, stop_token=None):
- """Calls the given function for every token in the given token stream.
-
- As each token is passed to the given function, state is kept up to date and,
- depending on the error_trace flag, errors are either caught and reported, or
- allowed to bubble up so developers can see the full stack trace. If a parse
- error is specified, the pass will proceed as normal until the token causing
- the parse error is reached.
-
- Args:
- token: The first token in the token stream.
- pass_function: The function to call for each token in the token stream.
- stop_token: The last token to check (if given).
-
- Raises:
- Exception: If any error occurred while calling the given function.
- """
-
- self._state_tracker.Reset()
- while token:
- # When we are looking at a token and decided to delete the whole line, we
- # will delete all of them in the "HandleToken()" below. So the current
- # token and subsequent ones may already be deleted here. The way we
- # delete a token does not wipe out the previous and next pointers of the
- # deleted token. So we need to check the token itself to make sure it is
- # not deleted.
- if not token.is_deleted:
- # End the pass at the stop token
- if stop_token and token is stop_token:
- return
-
- self._state_tracker.HandleToken(
- token, self._state_tracker.GetLastNonSpaceToken())
- pass_function(token)
- self._state_tracker.HandleAfterToken(token)
-
- token = token.next
diff --git a/tools/closure_linter/closure_linter/closurizednamespacesinfo.py b/tools/closure_linter/closure_linter/closurizednamespacesinfo.py
deleted file mode 100755
index e7cbfd3318..0000000000
--- a/tools/closure_linter/closure_linter/closurizednamespacesinfo.py
+++ /dev/null
@@ -1,578 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Logic for computing dependency information for closurized JavaScript files.
-
-Closurized JavaScript files express dependencies using goog.require and
-goog.provide statements. In order for the linter to detect when a statement is
-missing or unnecessary, all identifiers in the JavaScript file must first be
-processed to determine if they constitute the creation or usage of a dependency.
-"""
-
-
-
-import re
-
-from closure_linter import javascripttokens
-from closure_linter import tokenutil
-
-# pylint: disable=g-bad-name
-TokenType = javascripttokens.JavaScriptTokenType
-
-DEFAULT_EXTRA_NAMESPACES = [
- 'goog.testing.asserts',
- 'goog.testing.jsunit',
-]
-
-
-class UsedNamespace(object):
- """A type for information about a used namespace."""
-
- def __init__(self, namespace, identifier, token, alias_definition):
- """Initializes the instance.
-
- Args:
- namespace: the namespace of an identifier used in the file
- identifier: the complete identifier
- token: the token that uses the namespace
- alias_definition: a boolean stating whether the namespace is only to used
- for an alias definition and should not be required.
- """
- self.namespace = namespace
- self.identifier = identifier
- self.token = token
- self.alias_definition = alias_definition
-
- def GetLine(self):
- return self.token.line_number
-
- def __repr__(self):
- return 'UsedNamespace(%s)' % ', '.join(
- ['%s=%s' % (k, repr(v)) for k, v in self.__dict__.iteritems()])
-
-
-class ClosurizedNamespacesInfo(object):
- """Dependency information for closurized JavaScript files.
-
- Processes token streams for dependency creation or usage and provides logic
- for determining if a given require or provide statement is unnecessary or if
- there are missing require or provide statements.
- """
-
- def __init__(self, closurized_namespaces, ignored_extra_namespaces):
- """Initializes an instance the ClosurizedNamespacesInfo class.
-
- Args:
- closurized_namespaces: A list of namespace prefixes that should be
- processed for dependency information. Non-matching namespaces are
- ignored.
- ignored_extra_namespaces: A list of namespaces that should not be reported
- as extra regardless of whether they are actually used.
- """
- self._closurized_namespaces = closurized_namespaces
- self._ignored_extra_namespaces = (ignored_extra_namespaces +
- DEFAULT_EXTRA_NAMESPACES)
- self.Reset()
-
- def Reset(self):
- """Resets the internal state to prepare for processing a new file."""
-
- # A list of goog.provide tokens in the order they appeared in the file.
- self._provide_tokens = []
-
- # A list of goog.require tokens in the order they appeared in the file.
- self._require_tokens = []
-
- # Namespaces that are already goog.provided.
- self._provided_namespaces = []
-
- # Namespaces that are already goog.required.
- self._required_namespaces = []
-
- # Note that created_namespaces and used_namespaces contain both namespaces
- # and identifiers because there are many existing cases where a method or
- # constant is provided directly instead of its namespace. Ideally, these
- # two lists would only have to contain namespaces.
-
- # A list of tuples where the first element is the namespace of an identifier
- # created in the file, the second is the identifier itself and the third is
- # the line number where it's created.
- self._created_namespaces = []
-
- # A list of UsedNamespace instances.
- self._used_namespaces = []
-
- # A list of seemingly-unnecessary namespaces that are goog.required() and
- # annotated with @suppress {extraRequire}.
- self._suppressed_requires = []
-
- # A list of goog.provide tokens which are duplicates.
- self._duplicate_provide_tokens = []
-
- # A list of goog.require tokens which are duplicates.
- self._duplicate_require_tokens = []
-
- # Whether this file is in a goog.scope. Someday, we may add support
- # for checking scopified namespaces, but for now let's just fail
- # in a more reasonable way.
- self._scopified_file = False
-
- # TODO(user): Handle the case where there are 2 different requires
- # that can satisfy the same dependency, but only one is necessary.
-
- def GetProvidedNamespaces(self):
- """Returns the namespaces which are already provided by this file.
-
- Returns:
- A list of strings where each string is a 'namespace' corresponding to an
- existing goog.provide statement in the file being checked.
- """
- return set(self._provided_namespaces)
-
- def GetRequiredNamespaces(self):
- """Returns the namespaces which are already required by this file.
-
- Returns:
- A list of strings where each string is a 'namespace' corresponding to an
- existing goog.require statement in the file being checked.
- """
- return set(self._required_namespaces)
-
- def IsExtraProvide(self, token):
- """Returns whether the given goog.provide token is unnecessary.
-
- Args:
- token: A goog.provide token.
-
- Returns:
- True if the given token corresponds to an unnecessary goog.provide
- statement, otherwise False.
- """
- namespace = tokenutil.GetStringAfterToken(token)
-
- if self.GetClosurizedNamespace(namespace) is None:
- return False
-
- if token in self._duplicate_provide_tokens:
- return True
-
- # TODO(user): There's probably a faster way to compute this.
- for created_namespace, created_identifier, _ in self._created_namespaces:
- if namespace == created_namespace or namespace == created_identifier:
- return False
-
- return True
-
- def IsExtraRequire(self, token):
- """Returns whether the given goog.require token is unnecessary.
-
- Args:
- token: A goog.require token.
-
- Returns:
- True if the given token corresponds to an unnecessary goog.require
- statement, otherwise False.
- """
- namespace = tokenutil.GetStringAfterToken(token)
-
- if self.GetClosurizedNamespace(namespace) is None:
- return False
-
- if namespace in self._ignored_extra_namespaces:
- return False
-
- if token in self._duplicate_require_tokens:
- return True
-
- if namespace in self._suppressed_requires:
- return False
-
- # If the namespace contains a component that is initial caps, then that
- # must be the last component of the namespace.
- parts = namespace.split('.')
- if len(parts) > 1 and parts[-2][0].isupper():
- return True
-
- # TODO(user): There's probably a faster way to compute this.
- for ns in self._used_namespaces:
- if (not ns.alias_definition and (
- namespace == ns.namespace or namespace == ns.identifier)):
- return False
-
- return True
-
- def GetMissingProvides(self):
- """Returns the dict of missing provided namespaces for the current file.
-
- Returns:
- Returns a dictionary of key as string and value as integer where each
- string(key) is a namespace that should be provided by this file, but is
- not and integer(value) is first line number where it's defined.
- """
- missing_provides = dict()
- for namespace, identifier, line_number in self._created_namespaces:
- if (not self._IsPrivateIdentifier(identifier) and
- namespace not in self._provided_namespaces and
- identifier not in self._provided_namespaces and
- namespace not in self._required_namespaces and
- namespace not in missing_provides):
- missing_provides[namespace] = line_number
-
- return missing_provides
-
- def GetMissingRequires(self):
- """Returns the dict of missing required namespaces for the current file.
-
- For each non-private identifier used in the file, find either a
- goog.require, goog.provide or a created identifier that satisfies it.
- goog.require statements can satisfy the identifier by requiring either the
- namespace of the identifier or the identifier itself. goog.provide
- statements can satisfy the identifier by providing the namespace of the
- identifier. A created identifier can only satisfy the used identifier if
- it matches it exactly (necessary since things can be defined on a
- namespace in more than one file). Note that provided namespaces should be
- a subset of created namespaces, but we check both because in some cases we
- can't always detect the creation of the namespace.
-
- Returns:
- Returns a dictionary of key as string and value integer where each
- string(key) is a namespace that should be required by this file, but is
- not and integer(value) is first line number where it's used.
- """
- external_dependencies = set(self._required_namespaces)
-
- # Assume goog namespace is always available.
- external_dependencies.add('goog')
- # goog.module is treated as a builtin, too (for goog.module.get).
- external_dependencies.add('goog.module')
-
- created_identifiers = set()
- for unused_namespace, identifier, unused_line_number in (
- self._created_namespaces):
- created_identifiers.add(identifier)
-
- missing_requires = dict()
- illegal_alias_statements = dict()
-
- def ShouldRequireNamespace(namespace, identifier):
- """Checks if a namespace would normally be required."""
- return (
- not self._IsPrivateIdentifier(identifier) and
- namespace not in external_dependencies and
- namespace not in self._provided_namespaces and
- identifier not in external_dependencies and
- identifier not in created_identifiers and
- namespace not in missing_requires)
-
- # First check all the used identifiers where we know that their namespace
- # needs to be provided (unless they are optional).
- for ns in self._used_namespaces:
- namespace = ns.namespace
- identifier = ns.identifier
- if (not ns.alias_definition and
- ShouldRequireNamespace(namespace, identifier)):
- missing_requires[namespace] = ns.GetLine()
-
- # Now that all required namespaces are known, we can check if the alias
- # definitions (that are likely being used for typeannotations that don't
- # need explicit goog.require statements) are already covered. If not
- # the user shouldn't use the alias.
- for ns in self._used_namespaces:
- if (not ns.alias_definition or
- not ShouldRequireNamespace(ns.namespace, ns.identifier)):
- continue
- if self._FindNamespace(ns.identifier, self._provided_namespaces,
- created_identifiers, external_dependencies,
- missing_requires):
- continue
- namespace = ns.identifier.rsplit('.', 1)[0]
- illegal_alias_statements[namespace] = ns.token
-
- return missing_requires, illegal_alias_statements
-
- def _FindNamespace(self, identifier, *namespaces_list):
- """Finds the namespace of an identifier given a list of other namespaces.
-
- Args:
- identifier: An identifier whose parent needs to be defined.
- e.g. for goog.bar.foo we search something that provides
- goog.bar.
- *namespaces_list: var args of iterables of namespace identifiers
- Returns:
- The namespace that the given identifier is part of or None.
- """
- identifier = identifier.rsplit('.', 1)[0]
- identifier_prefix = identifier + '.'
- for namespaces in namespaces_list:
- for namespace in namespaces:
- if namespace == identifier or namespace.startswith(identifier_prefix):
- return namespace
- return None
-
- def _IsPrivateIdentifier(self, identifier):
- """Returns whether the given identifier is private."""
- pieces = identifier.split('.')
- for piece in pieces:
- if piece.endswith('_'):
- return True
- return False
-
- def IsFirstProvide(self, token):
- """Returns whether token is the first provide token."""
- return self._provide_tokens and token == self._provide_tokens[0]
-
- def IsFirstRequire(self, token):
- """Returns whether token is the first require token."""
- return self._require_tokens and token == self._require_tokens[0]
-
- def IsLastProvide(self, token):
- """Returns whether token is the last provide token."""
- return self._provide_tokens and token == self._provide_tokens[-1]
-
- def IsLastRequire(self, token):
- """Returns whether token is the last require token."""
- return self._require_tokens and token == self._require_tokens[-1]
-
- def ProcessToken(self, token, state_tracker):
- """Processes the given token for dependency information.
-
- Args:
- token: The token to process.
- state_tracker: The JavaScript state tracker.
- """
-
- # Note that this method is in the critical path for the linter and has been
- # optimized for performance in the following ways:
- # - Tokens are checked by type first to minimize the number of function
- # calls necessary to determine if action needs to be taken for the token.
- # - The most common tokens types are checked for first.
- # - The number of function calls has been minimized (thus the length of this
- # function.
-
- if token.type == TokenType.IDENTIFIER:
- # TODO(user): Consider saving the whole identifier in metadata.
- whole_identifier_string = tokenutil.GetIdentifierForToken(token)
- if whole_identifier_string is None:
- # We only want to process the identifier one time. If the whole string
- # identifier is None, that means this token was part of a multi-token
- # identifier, but it was not the first token of the identifier.
- return
-
- # In the odd case that a goog.require is encountered inside a function,
- # just ignore it (e.g. dynamic loading in test runners).
- if token.string == 'goog.require' and not state_tracker.InFunction():
- self._require_tokens.append(token)
- namespace = tokenutil.GetStringAfterToken(token)
- if namespace in self._required_namespaces:
- self._duplicate_require_tokens.append(token)
- else:
- self._required_namespaces.append(namespace)
-
- # If there is a suppression for the require, add a usage for it so it
- # gets treated as a regular goog.require (i.e. still gets sorted).
- if self._HasSuppression(state_tracker, 'extraRequire'):
- self._suppressed_requires.append(namespace)
- self._AddUsedNamespace(state_tracker, namespace, token)
-
- elif token.string == 'goog.provide':
- self._provide_tokens.append(token)
- namespace = tokenutil.GetStringAfterToken(token)
- if namespace in self._provided_namespaces:
- self._duplicate_provide_tokens.append(token)
- else:
- self._provided_namespaces.append(namespace)
-
- # If there is a suppression for the provide, add a creation for it so it
- # gets treated as a regular goog.provide (i.e. still gets sorted).
- if self._HasSuppression(state_tracker, 'extraProvide'):
- self._AddCreatedNamespace(state_tracker, namespace, token.line_number)
-
- elif token.string == 'goog.scope':
- self._scopified_file = True
-
- elif token.string == 'goog.setTestOnly':
-
- # Since the message is optional, we don't want to scan to later lines.
- for t in tokenutil.GetAllTokensInSameLine(token):
- if t.type == TokenType.STRING_TEXT:
- message = t.string
-
- if re.match(r'^\w+(\.\w+)+$', message):
- # This looks like a namespace. If it's a Closurized namespace,
- # consider it created.
- base_namespace = message.split('.', 1)[0]
- if base_namespace in self._closurized_namespaces:
- self._AddCreatedNamespace(state_tracker, message,
- token.line_number)
-
- break
- else:
- jsdoc = state_tracker.GetDocComment()
- if token.metadata and token.metadata.aliased_symbol:
- whole_identifier_string = token.metadata.aliased_symbol
- elif (token.string == 'goog.module.get' and
- not self._HasSuppression(state_tracker, 'extraRequire')):
- # Cannot use _AddUsedNamespace as this is not an identifier, but
- # already the entire namespace that's required.
- namespace = tokenutil.GetStringAfterToken(token)
- namespace = UsedNamespace(namespace, namespace, token,
- alias_definition=False)
- self._used_namespaces.append(namespace)
- if jsdoc and jsdoc.HasFlag('typedef'):
- self._AddCreatedNamespace(state_tracker, whole_identifier_string,
- token.line_number,
- namespace=self.GetClosurizedNamespace(
- whole_identifier_string))
- else:
- is_alias_definition = (token.metadata and
- token.metadata.is_alias_definition)
- self._AddUsedNamespace(state_tracker, whole_identifier_string,
- token, is_alias_definition)
-
- elif token.type == TokenType.SIMPLE_LVALUE:
- identifier = token.values['identifier']
- start_token = tokenutil.GetIdentifierStart(token)
- if start_token and start_token != token:
- # Multi-line identifier being assigned. Get the whole identifier.
- identifier = tokenutil.GetIdentifierForToken(start_token)
- else:
- start_token = token
- # If an alias is defined on the start_token, use it instead.
- if (start_token and
- start_token.metadata and
- start_token.metadata.aliased_symbol and
- not start_token.metadata.is_alias_definition):
- identifier = start_token.metadata.aliased_symbol
-
- if identifier:
- namespace = self.GetClosurizedNamespace(identifier)
- if state_tracker.InFunction():
- self._AddUsedNamespace(state_tracker, identifier, token)
- elif namespace and namespace != 'goog':
- self._AddCreatedNamespace(state_tracker, identifier,
- token.line_number, namespace=namespace)
-
- elif token.type == TokenType.DOC_FLAG:
- flag = token.attached_object
- flag_type = flag.flag_type
- if flag and flag.HasType() and flag.jstype:
- is_interface = state_tracker.GetDocComment().HasFlag('interface')
- if flag_type == 'implements' or (flag_type == 'extends'
- and is_interface):
- identifier = flag.jstype.alias or flag.jstype.identifier
- self._AddUsedNamespace(state_tracker, identifier, token)
- # Since we process doctypes only for implements and extends, the
- # type is a simple one and we don't need any iteration for subtypes.
-
- def _AddCreatedNamespace(self, state_tracker, identifier, line_number,
- namespace=None):
- """Adds the namespace of an identifier to the list of created namespaces.
-
- If the identifier is annotated with a 'missingProvide' suppression, it is
- not added.
-
- Args:
- state_tracker: The JavaScriptStateTracker instance.
- identifier: The identifier to add.
- line_number: Line number where namespace is created.
- namespace: The namespace of the identifier or None if the identifier is
- also the namespace.
- """
- if not namespace:
- namespace = identifier
-
- if self._HasSuppression(state_tracker, 'missingProvide'):
- return
-
- self._created_namespaces.append([namespace, identifier, line_number])
-
- def _AddUsedNamespace(self, state_tracker, identifier, token,
- is_alias_definition=False):
- """Adds the namespace of an identifier to the list of used namespaces.
-
- If the identifier is annotated with a 'missingRequire' suppression, it is
- not added.
-
- Args:
- state_tracker: The JavaScriptStateTracker instance.
- identifier: An identifier which has been used.
- token: The token in which the namespace is used.
- is_alias_definition: If the used namespace is part of an alias_definition.
- Aliased symbols need their parent namespace to be available, if it is
- not yet required through another symbol, an error will be thrown.
- """
- if self._HasSuppression(state_tracker, 'missingRequire'):
- return
-
- namespace = self.GetClosurizedNamespace(identifier)
- # b/5362203 If its a variable in scope then its not a required namespace.
- if namespace and not state_tracker.IsVariableInScope(namespace):
- namespace = UsedNamespace(namespace, identifier, token,
- is_alias_definition)
- self._used_namespaces.append(namespace)
-
- def _HasSuppression(self, state_tracker, suppression):
- jsdoc = state_tracker.GetDocComment()
- return jsdoc and suppression in jsdoc.suppressions
-
- def GetClosurizedNamespace(self, identifier):
- """Given an identifier, returns the namespace that identifier is from.
-
- Args:
- identifier: The identifier to extract a namespace from.
-
- Returns:
- The namespace the given identifier resides in, or None if one could not
- be found.
- """
- if identifier.startswith('goog.global'):
- # Ignore goog.global, since it is, by definition, global.
- return None
-
- parts = identifier.split('.')
- for namespace in self._closurized_namespaces:
- if not identifier.startswith(namespace + '.'):
- continue
-
- # The namespace for a class is the shortest prefix ending in a class
- # name, which starts with a capital letter but is not a capitalized word.
- #
- # We ultimately do not want to allow requiring or providing of inner
- # classes/enums. Instead, a file should provide only the top-level class
- # and users should require only that.
- namespace = []
- for part in parts:
- if part == 'prototype' or part.isupper():
- return '.'.join(namespace)
- namespace.append(part)
- if part[0].isupper():
- return '.'.join(namespace)
-
- # At this point, we know there's no class or enum, so the namespace is
- # just the identifier with the last part removed. With the exception of
- # apply, inherits, and call, which should also be stripped.
- if parts[-1] in ('apply', 'inherits', 'call'):
- parts.pop()
- parts.pop()
-
- # If the last part ends with an underscore, it is a private variable,
- # method, or enum. The namespace is whatever is before it.
- if parts and parts[-1].endswith('_'):
- parts.pop()
-
- return '.'.join(parts)
-
- return None
diff --git a/tools/closure_linter/closure_linter/closurizednamespacesinfo_test.py b/tools/closure_linter/closure_linter/closurizednamespacesinfo_test.py
deleted file mode 100755
index 7aeae21956..0000000000
--- a/tools/closure_linter/closure_linter/closurizednamespacesinfo_test.py
+++ /dev/null
@@ -1,873 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unit tests for ClosurizedNamespacesInfo."""
-
-
-
-import unittest as googletest
-from closure_linter import aliaspass
-from closure_linter import closurizednamespacesinfo
-from closure_linter import ecmametadatapass
-from closure_linter import javascriptstatetracker
-from closure_linter import javascripttokens
-from closure_linter import testutil
-from closure_linter import tokenutil
-
-# pylint: disable=g-bad-name
-TokenType = javascripttokens.JavaScriptTokenType
-
-
-def _ToLineDict(illegal_alias_stmts):
- """Replaces tokens with the respective line number."""
- return {k: v.line_number for k, v in illegal_alias_stmts.iteritems()}
-
-
-class ClosurizedNamespacesInfoTest(googletest.TestCase):
- """Tests for ClosurizedNamespacesInfo."""
-
- _test_cases = {
- 'goog.global.anything': None,
- 'package.CONSTANT': 'package',
- 'package.methodName': 'package',
- 'package.subpackage.methodName': 'package.subpackage',
- 'package.subpackage.methodName.apply': 'package.subpackage',
- 'package.ClassName.something': 'package.ClassName',
- 'package.ClassName.Enum.VALUE.methodName': 'package.ClassName',
- 'package.ClassName.CONSTANT': 'package.ClassName',
- 'package.namespace.CONSTANT.methodName': 'package.namespace',
- 'package.ClassName.inherits': 'package.ClassName',
- 'package.ClassName.apply': 'package.ClassName',
- 'package.ClassName.methodName.apply': 'package.ClassName',
- 'package.ClassName.methodName.call': 'package.ClassName',
- 'package.ClassName.prototype.methodName': 'package.ClassName',
- 'package.ClassName.privateMethod_': 'package.ClassName',
- 'package.className.privateProperty_': 'package.className',
- 'package.className.privateProperty_.methodName': 'package.className',
- 'package.ClassName.PrivateEnum_': 'package.ClassName',
- 'package.ClassName.prototype.methodName.apply': 'package.ClassName',
- 'package.ClassName.property.subProperty': 'package.ClassName',
- 'package.className.prototype.something.somethingElse': 'package.className'
- }
-
- def testGetClosurizedNamespace(self):
- """Tests that the correct namespace is returned for various identifiers."""
- namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
- closurized_namespaces=['package'], ignored_extra_namespaces=[])
- for identifier, expected_namespace in self._test_cases.items():
- actual_namespace = namespaces_info.GetClosurizedNamespace(identifier)
- self.assertEqual(
- expected_namespace,
- actual_namespace,
- 'expected namespace "' + str(expected_namespace) +
- '" for identifier "' + str(identifier) + '" but was "' +
- str(actual_namespace) + '"')
-
- def testIgnoredExtraNamespaces(self):
- """Tests that ignored_extra_namespaces are ignored."""
- token = self._GetRequireTokens('package.Something')
- namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
- closurized_namespaces=['package'],
- ignored_extra_namespaces=['package.Something'])
-
- self.assertFalse(namespaces_info.IsExtraRequire(token),
- 'Should be valid since it is in ignored namespaces.')
-
- namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
- ['package'], [])
-
- self.assertTrue(namespaces_info.IsExtraRequire(token),
- 'Should be invalid since it is not in ignored namespaces.')
-
- def testIsExtraProvide_created(self):
- """Tests that provides for created namespaces are not extra."""
- input_lines = [
- 'goog.provide(\'package.Foo\');',
- 'package.Foo = function() {};'
- ]
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertFalse(namespaces_info.IsExtraProvide(token),
- 'Should not be extra since it is created.')
-
- def testIsExtraProvide_createdIdentifier(self):
- """Tests that provides for created identifiers are not extra."""
- input_lines = [
- 'goog.provide(\'package.Foo.methodName\');',
- 'package.Foo.methodName = function() {};'
- ]
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertFalse(namespaces_info.IsExtraProvide(token),
- 'Should not be extra since it is created.')
-
- def testIsExtraProvide_notCreated(self):
- """Tests that provides for non-created namespaces are extra."""
- input_lines = ['goog.provide(\'package.Foo\');']
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertTrue(namespaces_info.IsExtraProvide(token),
- 'Should be extra since it is not created.')
-
- def testIsExtraProvide_notCreatedMultipartClosurizedNamespace(self):
- """Tests that provides for non-created namespaces are extra."""
- input_lines = ['goog.provide(\'multi.part.namespace.Foo\');']
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['multi.part'])
-
- self.assertTrue(namespaces_info.IsExtraProvide(token),
- 'Should be extra since it is not created.')
-
- def testIsExtraProvide_duplicate(self):
- """Tests that providing a namespace twice makes the second one extra."""
- input_lines = [
- 'goog.provide(\'package.Foo\');',
- 'goog.provide(\'package.Foo\');',
- 'package.Foo = function() {};'
- ]
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- # Advance to the second goog.provide token.
- token = tokenutil.Search(token.next, TokenType.IDENTIFIER)
-
- self.assertTrue(namespaces_info.IsExtraProvide(token),
- 'Should be extra since it is already provided.')
-
- def testIsExtraProvide_notClosurized(self):
- """Tests that provides of non-closurized namespaces are not extra."""
- input_lines = ['goog.provide(\'notclosurized.Foo\');']
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertFalse(namespaces_info.IsExtraProvide(token),
- 'Should not be extra since it is not closurized.')
-
- def testIsExtraRequire_used(self):
- """Tests that requires for used namespaces are not extra."""
- input_lines = [
- 'goog.require(\'package.Foo\');',
- 'var x = package.Foo.methodName();'
- ]
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertFalse(namespaces_info.IsExtraRequire(token),
- 'Should not be extra since it is used.')
-
- def testIsExtraRequire_usedIdentifier(self):
- """Tests that requires for used methods on classes are extra."""
- input_lines = [
- 'goog.require(\'package.Foo.methodName\');',
- 'var x = package.Foo.methodName();'
- ]
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertTrue(namespaces_info.IsExtraRequire(token),
- 'Should require the package, not the method specifically.')
-
- def testIsExtraRequire_notUsed(self):
- """Tests that requires for unused namespaces are extra."""
- input_lines = ['goog.require(\'package.Foo\');']
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertTrue(namespaces_info.IsExtraRequire(token),
- 'Should be extra since it is not used.')
-
- def testIsExtraRequire_notUsedMultiPartClosurizedNamespace(self):
- """Tests unused require with multi-part closurized namespaces."""
-
- input_lines = ['goog.require(\'multi.part.namespace.Foo\');']
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['multi.part'])
-
- self.assertTrue(namespaces_info.IsExtraRequire(token),
- 'Should be extra since it is not used.')
-
- def testIsExtraRequire_notClosurized(self):
- """Tests that requires of non-closurized namespaces are not extra."""
- input_lines = ['goog.require(\'notclosurized.Foo\');']
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertFalse(namespaces_info.IsExtraRequire(token),
- 'Should not be extra since it is not closurized.')
-
- def testIsExtraRequire_objectOnClass(self):
- """Tests that requiring an object on a class is extra."""
- input_lines = [
- 'goog.require(\'package.Foo.Enum\');',
- 'var x = package.Foo.Enum.VALUE1;',
- ]
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertTrue(namespaces_info.IsExtraRequire(token),
- 'The whole class, not the object, should be required.');
-
- def testIsExtraRequire_constantOnClass(self):
- """Tests that requiring a constant on a class is extra."""
- input_lines = [
- 'goog.require(\'package.Foo.CONSTANT\');',
- 'var x = package.Foo.CONSTANT',
- ]
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertTrue(namespaces_info.IsExtraRequire(token),
- 'The class, not the constant, should be required.');
-
- def testIsExtraRequire_constantNotOnClass(self):
- """Tests that requiring a constant not on a class is OK."""
- input_lines = [
- 'goog.require(\'package.subpackage.CONSTANT\');',
- 'var x = package.subpackage.CONSTANT',
- ]
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertFalse(namespaces_info.IsExtraRequire(token),
- 'Constants can be required except on classes.');
-
- def testIsExtraRequire_methodNotOnClass(self):
- """Tests that requiring a method not on a class is OK."""
- input_lines = [
- 'goog.require(\'package.subpackage.method\');',
- 'var x = package.subpackage.method()',
- ]
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertFalse(namespaces_info.IsExtraRequire(token),
- 'Methods can be required except on classes.');
-
- def testIsExtraRequire_defaults(self):
- """Tests that there are no warnings about extra requires for test utils"""
- input_lines = ['goog.require(\'goog.testing.jsunit\');']
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['goog'])
-
- self.assertFalse(namespaces_info.IsExtraRequire(token),
- 'Should not be extra since it is for testing.')
-
- def testGetMissingProvides_provided(self):
- """Tests that provided functions don't cause a missing provide."""
- input_lines = [
- 'goog.provide(\'package.Foo\');',
- 'package.Foo = function() {};'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(
- input_lines, ['package'])
-
- self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
-
- def testGetMissingProvides_providedIdentifier(self):
- """Tests that provided identifiers don't cause a missing provide."""
- input_lines = [
- 'goog.provide(\'package.Foo.methodName\');',
- 'package.Foo.methodName = function() {};'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
-
- def testGetMissingProvides_providedParentIdentifier(self):
- """Tests that provided identifiers on a class don't cause a missing provide
- on objects attached to that class."""
- input_lines = [
- 'goog.provide(\'package.foo.ClassName\');',
- 'package.foo.ClassName.methodName = function() {};',
- 'package.foo.ClassName.ObjectName = 1;',
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
-
- def testGetMissingProvides_unprovided(self):
- """Tests that unprovided functions cause a missing provide."""
- input_lines = ['package.Foo = function() {};']
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
-
- missing_provides = namespaces_info.GetMissingProvides()
- self.assertEquals(1, len(missing_provides))
- missing_provide = missing_provides.popitem()
- self.assertEquals('package.Foo', missing_provide[0])
- self.assertEquals(1, missing_provide[1])
-
- def testGetMissingProvides_privatefunction(self):
- """Tests that unprovided private functions don't cause a missing provide."""
- input_lines = ['package.Foo_ = function() {};']
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
-
- def testGetMissingProvides_required(self):
- """Tests that required namespaces don't cause a missing provide."""
- input_lines = [
- 'goog.require(\'package.Foo\');',
- 'package.Foo.methodName = function() {};'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
-
- def testGetMissingRequires_required(self):
- """Tests that required namespaces don't cause a missing require."""
- input_lines = [
- 'goog.require(\'package.Foo\');',
- 'package.Foo();'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals(0, len(missing_requires))
-
- def testGetMissingRequires_requiredIdentifier(self):
- """Tests that required namespaces satisfy identifiers on that namespace."""
- input_lines = [
- 'goog.require(\'package.Foo\');',
- 'package.Foo.methodName();'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals(0, len(missing_requires))
-
- def testGetMissingRequires_requiredNamespace(self):
- """Tests that required namespaces satisfy the namespace."""
- input_lines = [
- 'goog.require(\'package.soy.fooTemplate\');',
- 'render(package.soy.fooTemplate);'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals(0, len(missing_requires))
-
- def testGetMissingRequires_requiredParentClass(self):
- """Tests that requiring a parent class of an object is sufficient to prevent
- a missing require on that object."""
- input_lines = [
- 'goog.require(\'package.Foo\');',
- 'package.Foo.methodName();',
- 'package.Foo.methodName(package.Foo.ObjectName);'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals(0, len(missing_requires))
-
- def testGetMissingRequires_unrequired(self):
- """Tests that unrequired namespaces cause a missing require."""
- input_lines = ['package.Foo();']
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
-
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals(1, len(missing_requires))
- missing_req = missing_requires.popitem()
- self.assertEquals('package.Foo', missing_req[0])
- self.assertEquals(1, missing_req[1])
-
- def testGetMissingRequires_provided(self):
- """Tests that provided namespaces satisfy identifiers on that namespace."""
- input_lines = [
- 'goog.provide(\'package.Foo\');',
- 'package.Foo.methodName();'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals(0, len(missing_requires))
-
- def testGetMissingRequires_created(self):
- """Tests that created namespaces do not satisfy usage of an identifier."""
- input_lines = [
- 'package.Foo = function();',
- 'package.Foo.methodName();',
- 'package.Foo.anotherMethodName1();',
- 'package.Foo.anotherMethodName2();'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
-
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals(1, len(missing_requires))
- missing_require = missing_requires.popitem()
- self.assertEquals('package.Foo', missing_require[0])
- # Make sure line number of first occurrence is reported
- self.assertEquals(2, missing_require[1])
-
- def testGetMissingRequires_createdIdentifier(self):
- """Tests that created identifiers satisfy usage of the identifier."""
- input_lines = [
- 'package.Foo.methodName = function();',
- 'package.Foo.methodName();'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals(0, len(missing_requires))
-
- def testGetMissingRequires_implements(self):
- """Tests that a parametrized type requires the correct identifier."""
- input_lines = [
- '/** @constructor @implements {package.Bar<T>} */',
- 'package.Foo = function();',
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertItemsEqual({'package.Bar': 1}, missing_requires)
-
- def testGetMissingRequires_objectOnClass(self):
- """Tests that we should require a class, not the object on the class."""
- input_lines = [
- 'goog.require(\'package.Foo.Enum\');',
- 'var x = package.Foo.Enum.VALUE1;',
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals(1, len(missing_requires),
- 'The whole class, not the object, should be required.')
-
- def testGetMissingRequires_variableWithSameName(self):
- """Tests that we should not goog.require variables and parameters.
-
- b/5362203 Variables in scope are not missing namespaces.
- """
- input_lines = [
- 'goog.provide(\'Foo\');',
- 'Foo.A = function();',
- 'Foo.A.prototype.method = function(ab) {',
- ' if (ab) {',
- ' var docs;',
- ' var lvalue = new Obj();',
- ' // Variable in scope hence not goog.require here.',
- ' docs.foo.abc = 1;',
- ' lvalue.next();',
- ' }',
- ' // Since js is function scope this should also not goog.require.',
- ' docs.foo.func();',
- ' // Its not a variable in scope hence goog.require.',
- ' dummy.xyz.reset();',
- ' return this.method2();',
- '};',
- 'Foo.A.prototype.method1 = function(docs, abcd, xyz) {',
- ' // Parameter hence not goog.require.',
- ' docs.nodes.length = 2;',
- ' lvalue.abc.reset();',
- '};'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['Foo',
- 'docs',
- 'lvalue',
- 'dummy'])
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals(2, len(missing_requires))
- self.assertItemsEqual(
- {'dummy.xyz': 14,
- 'lvalue.abc': 20}, missing_requires)
-
- def testIsFirstProvide(self):
- """Tests operation of the isFirstProvide method."""
- input_lines = [
- 'goog.provide(\'package.Foo\');',
- 'package.Foo.methodName();'
- ]
-
- token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- input_lines, ['package'])
- self.assertTrue(namespaces_info.IsFirstProvide(token))
-
- def testGetWholeIdentifierString(self):
- """Tests that created identifiers satisfy usage of the identifier."""
- input_lines = [
- 'package.Foo.',
- ' veryLong.',
- ' identifier;'
- ]
-
- token = testutil.TokenizeSource(input_lines)
-
- self.assertEquals('package.Foo.veryLong.identifier',
- tokenutil.GetIdentifierForToken(token))
-
- self.assertEquals(None,
- tokenutil.GetIdentifierForToken(token.next))
-
- def testScopified(self):
- """Tests that a goog.scope call is noticed."""
- input_lines = [
- 'goog.scope(function() {',
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- self.assertTrue(namespaces_info._scopified_file)
-
- def testScope_unusedAlias(self):
- """Tests that an unused alias symbol is illegal."""
- input_lines = [
- 'goog.scope(function() {',
- 'var Event = goog.events.Event;',
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({}, missing_requires)
- self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
-
- def testScope_usedMultilevelAlias(self):
- """Tests that an used alias symbol in a deep namespace is ok."""
- input_lines = [
- 'goog.require(\'goog.Events\');',
- 'goog.scope(function() {',
- 'var Event = goog.Events.DeepNamespace.Event;',
- 'Event();',
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({}, missing_requires)
- self.assertEquals({}, illegal_alias_stmts)
-
- def testScope_usedAlias(self):
- """Tests that aliased symbols result in correct requires."""
- input_lines = [
- 'goog.scope(function() {',
- 'var Event = goog.events.Event;',
- 'var dom = goog.dom;',
- 'Event(dom.classes.get);',
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({}, illegal_alias_stmts)
- self.assertEquals({'goog.dom.classes': 4, 'goog.events.Event': 4},
- missing_requires)
-
- def testModule_alias(self):
- """Tests that goog.module style aliases are supported."""
- input_lines = [
- 'goog.module(\'test.module\');',
- 'var Unused = goog.require(\'goog.Unused\');',
- 'var AliasedClass = goog.require(\'goog.AliasedClass\');',
- 'var x = new AliasedClass();',
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- namespaceToken = self._GetRequireTokens('goog.AliasedClass')
- self.assertFalse(namespaces_info.IsExtraRequire(namespaceToken),
- 'AliasedClass should be marked as used')
- unusedToken = self._GetRequireTokens('goog.Unused')
- self.assertTrue(namespaces_info.IsExtraRequire(unusedToken),
- 'Unused should be marked as not used')
-
- def testModule_aliasInScope(self):
- """Tests that goog.module style aliases are supported."""
- input_lines = [
- 'goog.module(\'test.module\');',
- 'var AliasedClass = goog.require(\'goog.AliasedClass\');',
- 'goog.scope(function() {',
- 'var x = new AliasedClass();',
- '});',
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- namespaceToken = self._GetRequireTokens('goog.AliasedClass')
- self.assertFalse(namespaces_info.IsExtraRequire(namespaceToken),
- 'AliasedClass should be marked as used')
-
- def testModule_getAlwaysProvided(self):
- """Tests that goog.module.get is recognized as a built-in."""
- input_lines = [
- 'goog.provide(\'test.MyClass\');',
- 'goog.require(\'goog.someModule\');',
- 'goog.scope(function() {',
- 'var someModule = goog.module.get(\'goog.someModule\');',
- 'test.MyClass = function() {};',
- '});',
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- self.assertEquals({}, namespaces_info.GetMissingRequires()[0])
-
- def testModule_requireForGet(self):
- """Tests that goog.module.get needs a goog.require call."""
- input_lines = [
- 'goog.provide(\'test.MyClass\');',
- 'function foo() {',
- ' var someModule = goog.module.get(\'goog.someModule\');',
- ' someModule.doSth();',
- '}',
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- self.assertEquals({'goog.someModule': 3},
- namespaces_info.GetMissingRequires()[0])
-
- def testScope_usedTypeAlias(self):
- """Tests aliased symbols in type annotations."""
- input_lines = [
- 'goog.scope(function() {',
- 'var Event = goog.events.Event;',
- '/** @type {Event} */;',
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({}, missing_requires)
- self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
-
- def testScope_partialAlias_typeOnly(self):
- """Tests a partial alias only used in type annotations.
-
- In this example, some goog.events namespace would need to be required
- so that evaluating goog.events.bar doesn't throw an error.
- """
- input_lines = [
- 'goog.scope(function() {',
- 'var bar = goog.events.bar;',
- '/** @type {bar.Foo} */;',
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({}, missing_requires)
- self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
-
- def testScope_partialAlias(self):
- """Tests a partial alias in conjunction with a type annotation.
-
- In this example, the partial alias is already defined by another type,
- therefore the doc-only type doesn't need to be required.
- """
- input_lines = [
- 'goog.scope(function() {',
- 'var bar = goog.events.bar;',
- '/** @type {bar.Event} */;',
- 'bar.EventType();'
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({'goog.events.bar.EventType': 4}, missing_requires)
- self.assertEquals({}, illegal_alias_stmts)
-
- def testScope_partialAliasRequires(self):
- """Tests partial aliases with correct requires."""
- input_lines = [
- 'goog.require(\'goog.events.bar.EventType\');',
- 'goog.scope(function() {',
- 'var bar = goog.events.bar;',
- '/** @type {bar.Event} */;',
- 'bar.EventType();'
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({}, missing_requires)
- self.assertEquals({}, illegal_alias_stmts)
-
- def testScope_partialAliasRequiresBoth(self):
- """Tests partial aliases with correct requires."""
- input_lines = [
- 'goog.require(\'goog.events.bar.Event\');',
- 'goog.require(\'goog.events.bar.EventType\');',
- 'goog.scope(function() {',
- 'var bar = goog.events.bar;',
- '/** @type {bar.Event} */;',
- 'bar.EventType();'
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({}, missing_requires)
- self.assertEquals({}, illegal_alias_stmts)
- event_token = self._GetRequireTokens('goog.events.bar.Event')
- self.assertTrue(namespaces_info.IsExtraRequire(event_token))
-
- def testScope_partialAliasNoSubtypeRequires(self):
- """Tests that partial aliases don't yield subtype requires (regression)."""
- input_lines = [
- 'goog.provide(\'goog.events.Foo\');',
- 'goog.scope(function() {',
- 'goog.events.Foo = {};',
- 'var Foo = goog.events.Foo;'
- 'Foo.CssName_ = {};'
- 'var CssName_ = Foo.CssName_;'
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, _ = namespaces_info.GetMissingRequires()
- self.assertEquals({}, missing_requires)
-
- def testScope_aliasNamespace(self):
- """Tests that an unused alias namespace is not required when available.
-
- In the example goog.events.Bar is not required, because the namespace
- goog.events is already defined because goog.events.Foo is required.
- """
- input_lines = [
- 'goog.require(\'goog.events.Foo\');',
- 'goog.scope(function() {',
- 'var Bar = goog.events.Bar;',
- '/** @type {Bar} */;',
- 'goog.events.Foo;',
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({}, missing_requires)
- self.assertEquals({}, illegal_alias_stmts)
-
- def testScope_aliasNamespaceIllegal(self):
- """Tests that an unused alias namespace is not required when available."""
- input_lines = [
- 'goog.scope(function() {',
- 'var Bar = goog.events.Bar;',
- '/** @type {Bar} */;',
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({}, missing_requires)
- self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
-
- def testScope_provides(self):
- """Tests that aliased symbols result in correct provides."""
- input_lines = [
- 'goog.scope(function() {',
- 'goog.bar = {};',
- 'var bar = goog.bar;',
- 'bar.Foo = {};',
- '});'
- ]
-
- namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
- missing_provides = namespaces_info.GetMissingProvides()
- self.assertEquals({'goog.bar.Foo': 4}, missing_provides)
- _, illegal_alias_stmts = namespaces_info.GetMissingRequires()
- self.assertEquals({}, illegal_alias_stmts)
-
- def testSetTestOnlyNamespaces(self):
- """Tests that a namespace in setTestOnly makes it a valid provide."""
- namespaces_info = self._GetNamespacesInfoForScript([
- 'goog.setTestOnly(\'goog.foo.barTest\');'
- ], ['goog'])
-
- token = self._GetProvideTokens('goog.foo.barTest')
- self.assertFalse(namespaces_info.IsExtraProvide(token))
-
- token = self._GetProvideTokens('goog.foo.bazTest')
- self.assertTrue(namespaces_info.IsExtraProvide(token))
-
- def testSetTestOnlyComment(self):
- """Ensure a comment in setTestOnly does not cause a created namespace."""
- namespaces_info = self._GetNamespacesInfoForScript([
- 'goog.setTestOnly(\'this is a comment\');'
- ], ['goog'])
-
- self.assertEquals(
- [], namespaces_info._created_namespaces,
- 'A comment in setTestOnly should not modify created namespaces.')
-
- def _GetNamespacesInfoForScript(self, script, closurized_namespaces=None):
- _, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
- script, closurized_namespaces)
-
- return namespaces_info
-
- def _GetStartTokenAndNamespacesInfoForScript(
- self, script, closurized_namespaces):
-
- token = testutil.TokenizeSource(script)
- return token, self._GetInitializedNamespacesInfo(
- token, closurized_namespaces, [])
-
- def _GetInitializedNamespacesInfo(self, token, closurized_namespaces,
- ignored_extra_namespaces):
- """Returns a namespaces info initialized with the given token stream."""
- namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
- closurized_namespaces=closurized_namespaces,
- ignored_extra_namespaces=ignored_extra_namespaces)
- state_tracker = javascriptstatetracker.JavaScriptStateTracker()
-
- ecma_pass = ecmametadatapass.EcmaMetaDataPass()
- ecma_pass.Process(token)
-
- state_tracker.DocFlagPass(token, error_handler=None)
-
- alias_pass = aliaspass.AliasPass(closurized_namespaces)
- alias_pass.Process(token)
-
- while token:
- state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken())
- namespaces_info.ProcessToken(token, state_tracker)
- state_tracker.HandleAfterToken(token)
- token = token.next
-
- return namespaces_info
-
- def _GetProvideTokens(self, namespace):
- """Returns a list of tokens for a goog.require of the given namespace."""
- line_text = 'goog.require(\'' + namespace + '\');\n'
- return testutil.TokenizeSource([line_text])
-
- def _GetRequireTokens(self, namespace):
- """Returns a list of tokens for a goog.require of the given namespace."""
- line_text = 'goog.require(\'' + namespace + '\');\n'
- return testutil.TokenizeSource([line_text])
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/closure_linter/common/__init__.py b/tools/closure_linter/closure_linter/common/__init__.py
deleted file mode 100755
index 57930436ce..0000000000
--- a/tools/closure_linter/closure_linter/common/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Package indicator for gjslint.common."""
diff --git a/tools/closure_linter/closure_linter/common/error.py b/tools/closure_linter/closure_linter/common/error.py
deleted file mode 100755
index 4209c235b8..0000000000
--- a/tools/closure_linter/closure_linter/common/error.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Error object commonly used in linters."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-
-class Error(object):
- """Object representing a style error."""
-
- def __init__(self, code, message, token=None, position=None, fix_data=None):
- """Initialize the error object.
-
- Args:
- code: The numeric error code.
- message: The error message string.
- token: The tokens.Token where the error occurred.
- position: The position of the error within the token.
- fix_data: Data to be used in autofixing. Codes with fix_data are:
- GOOG_REQUIRES_NOT_ALPHABETIZED - List of string value tokens that are
- class names in goog.requires calls.
- """
- self.code = code
- self.message = message
- self.token = token
- self.position = position
- if token:
- self.start_index = token.start_index
- else:
- self.start_index = 0
- self.fix_data = fix_data
- if self.position:
- self.start_index += self.position.start
-
- def Compare(a, b):
- """Compare two error objects, by source code order.
-
- Args:
- a: First error object.
- b: Second error object.
-
- Returns:
- A Negative/0/Positive number when a is before/the same as/after b.
- """
- line_diff = a.token.line_number - b.token.line_number
- if line_diff:
- return line_diff
-
- return a.start_index - b.start_index
- Compare = staticmethod(Compare)
diff --git a/tools/closure_linter/closure_linter/common/erroraccumulator.py b/tools/closure_linter/closure_linter/common/erroraccumulator.py
deleted file mode 100755
index 55844ba603..0000000000
--- a/tools/closure_linter/closure_linter/common/erroraccumulator.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Linter error handler class that accumulates an array of errors."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-
-from closure_linter.common import errorhandler
-
-
-class ErrorAccumulator(errorhandler.ErrorHandler):
- """Error handler object that accumulates errors in a list."""
-
- def __init__(self):
- self._errors = []
-
- def HandleError(self, error):
- """Append the error to the list.
-
- Args:
- error: The error object
- """
- self._errors.append(error)
-
- def GetErrors(self):
- """Returns the accumulated errors.
-
- Returns:
- A sequence of errors.
- """
- return self._errors
diff --git a/tools/closure_linter/closure_linter/common/errorhandler.py b/tools/closure_linter/closure_linter/common/errorhandler.py
deleted file mode 100755
index 764d54d84c..0000000000
--- a/tools/closure_linter/closure_linter/common/errorhandler.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Interface for a linter error handler.
-
-Error handlers aggregate a set of errors from multiple files and can optionally
-perform some action based on the reported errors, for example, logging the error
-or automatically fixing it.
-"""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-
-class ErrorHandler(object):
- """Error handler interface."""
-
- def __init__(self):
- if self.__class__ == ErrorHandler:
- raise NotImplementedError('class ErrorHandler is abstract')
-
- def HandleFile(self, filename, first_token):
- """Notifies this ErrorHandler that subsequent errors are in filename.
-
- Args:
- filename: The file being linted.
- first_token: The first token of the file.
- """
-
- def HandleError(self, error):
- """Append the error to the list.
-
- Args:
- error: The error object
- """
-
- def FinishFile(self):
- """Finishes handling the current file.
-
- Should be called after all errors in a file have been handled.
- """
-
- def GetErrors(self):
- """Returns the accumulated errors.
-
- Returns:
- A sequence of errors.
- """
diff --git a/tools/closure_linter/closure_linter/common/erroroutput.py b/tools/closure_linter/closure_linter/common/erroroutput.py
deleted file mode 100644
index 149738b5d4..0000000000
--- a/tools/closure_linter/closure_linter/common/erroroutput.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Utility functions to format errors."""
-
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)',
- 'nnaze@google.com (Nathan Naze)')
-
-
-def GetUnixErrorOutput(filename, error, new_error=False):
- """Get a output line for an error in UNIX format."""
-
- line = ''
-
- if error.token:
- line = '%d' % error.token.line_number
-
- error_code = '%04d' % error.code
- if new_error:
- error_code = 'New Error ' + error_code
- return '%s:%s:(%s) %s' % (filename, line, error_code, error.message)
-
-
-def GetErrorOutput(error, new_error=False):
- """Get a output line for an error in regular format."""
-
- line = ''
- if error.token:
- line = 'Line %d, ' % error.token.line_number
-
- code = 'E:%04d' % error.code
-
- error_message = error.message
- if new_error:
- error_message = 'New Error ' + error_message
-
- return '%s%s: %s' % (line, code, error.message)
diff --git a/tools/closure_linter/closure_linter/common/filetestcase.py b/tools/closure_linter/closure_linter/common/filetestcase.py
deleted file mode 100755
index 7cd83cd1dc..0000000000
--- a/tools/closure_linter/closure_linter/common/filetestcase.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Test case that runs a checker on a file, matching errors against annotations.
-
-Runs the given checker on the given file, accumulating all errors. The list
-of errors is then matched against those annotated in the file. Based heavily
-on devtools/javascript/gpylint/full_test.py.
-"""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-import re
-
-import gflags as flags
-import unittest as googletest
-from closure_linter.common import erroraccumulator
-
-
-class AnnotatedFileTestCase(googletest.TestCase):
- """Test case to run a linter against a single file."""
-
- # Matches an all caps letters + underscores error identifer
- _MESSAGE = {'msg': '[A-Z][A-Z_]+'}
- # Matches a //, followed by an optional line number with a +/-, followed by a
- # list of message IDs. Used to extract expected messages from testdata files.
- # TODO(robbyw): Generalize to use different commenting patterns.
- _EXPECTED_RE = re.compile(r'\s*//\s*(?:(?P<line>[+-]?[0-9]+):)?'
- r'\s*(?P<msgs>%(msg)s(?:,\s*%(msg)s)*)' % _MESSAGE)
-
- def __init__(self, filename, lint_callable, converter):
- """Create a single file lint test case.
-
- Args:
- filename: Filename to test.
- lint_callable: Callable that lints a file. This is usually runner.Run().
- converter: Function taking an error string and returning an error code.
- """
-
- googletest.TestCase.__init__(self, 'runTest')
- self._filename = filename
- self._messages = []
- self._lint_callable = lint_callable
- self._converter = converter
-
- def setUp(self):
- flags.FLAGS.dot_on_next_line = True
-
- def tearDown(self):
- flags.FLAGS.dot_on_next_line = False
-
- def shortDescription(self):
- """Provides a description for the test."""
- return 'Run linter on %s' % self._filename
-
- def runTest(self):
- """Runs the test."""
- try:
- filename = self._filename
- stream = open(filename)
- except IOError as ex:
- raise IOError('Could not find testdata resource for %s: %s' %
- (self._filename, ex))
-
- expected = self._GetExpectedMessages(stream)
- got = self._ProcessFileAndGetMessages(filename)
- self.assertEqual(expected, got)
-
- def _GetExpectedMessages(self, stream):
- """Parse a file and get a sorted list of expected messages."""
- messages = []
- for i, line in enumerate(stream):
- match = self._EXPECTED_RE.search(line)
- if match:
- line = match.group('line')
- msg_ids = match.group('msgs')
- if line is None:
- line = i + 1
- elif line.startswith('+') or line.startswith('-'):
- line = i + 1 + int(line)
- else:
- line = int(line)
- for msg_id in msg_ids.split(','):
- # Ignore a spurious message from the license preamble.
- if msg_id != 'WITHOUT':
- messages.append((line, self._converter(msg_id.strip())))
- stream.seek(0)
- messages.sort()
- return messages
-
- def _ProcessFileAndGetMessages(self, filename):
- """Trap gjslint's output parse it to get messages added."""
- error_accumulator = erroraccumulator.ErrorAccumulator()
- self._lint_callable(filename, error_accumulator)
-
- errors = error_accumulator.GetErrors()
-
- # Convert to expected tuple format.
-
- error_msgs = [(error.token.line_number, error.code) for error in errors]
- error_msgs.sort()
- return error_msgs
diff --git a/tools/closure_linter/closure_linter/common/htmlutil.py b/tools/closure_linter/closure_linter/common/htmlutil.py
deleted file mode 100755
index 26d44c5908..0000000000
--- a/tools/closure_linter/closure_linter/common/htmlutil.py
+++ /dev/null
@@ -1,170 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Utilities for dealing with HTML."""
-
-__author__ = ('robbyw@google.com (Robert Walker)')
-
-import cStringIO
-import formatter
-import htmllib
-import HTMLParser
-import re
-
-
-class ScriptExtractor(htmllib.HTMLParser):
- """Subclass of HTMLParser that extracts script contents from an HTML file.
-
- Also inserts appropriate blank lines so that line numbers in the extracted
- code match the line numbers in the original HTML.
- """
-
- def __init__(self):
- """Initialize a ScriptExtractor."""
- htmllib.HTMLParser.__init__(self, formatter.NullFormatter())
- self._in_script = False
- self._text = ''
-
- def start_script(self, attrs):
- """Internal handler for the start of a script tag.
-
- Args:
- attrs: The attributes of the script tag, as a list of tuples.
- """
- for attribute in attrs:
- if attribute[0].lower() == 'src':
- # Skip script tags with a src specified.
- return
- self._in_script = True
-
- def end_script(self):
- """Internal handler for the end of a script tag."""
- self._in_script = False
-
- def handle_data(self, data):
- """Internal handler for character data.
-
- Args:
- data: The character data from the HTML file.
- """
- if self._in_script:
- # If the last line contains whitespace only, i.e. is just there to
- # properly align a </script> tag, strip the whitespace.
- if data.rstrip(' \t') != data.rstrip(' \t\n\r\f'):
- data = data.rstrip(' \t')
- self._text += data
- else:
- self._AppendNewlines(data)
-
- def handle_comment(self, data):
- """Internal handler for HTML comments.
-
- Args:
- data: The text of the comment.
- """
- self._AppendNewlines(data)
-
- def _AppendNewlines(self, data):
- """Count the number of newlines in the given string and append them.
-
- This ensures line numbers are correct for reported errors.
-
- Args:
- data: The data to count newlines in.
- """
- # We append 'x' to both sides of the string to ensure that splitlines
- # gives us an accurate count.
- for i in xrange(len(('x' + data + 'x').splitlines()) - 1):
- self._text += '\n'
-
- def GetScriptLines(self):
- """Return the extracted script lines.
-
- Returns:
- The extracted script lines as a list of strings.
- """
- return self._text.splitlines()
-
-
-def GetScriptLines(f):
- """Extract script tag contents from the given HTML file.
-
- Args:
- f: The HTML file.
-
- Returns:
- Lines in the HTML file that are from script tags.
- """
- extractor = ScriptExtractor()
-
- # The HTML parser chokes on text like Array.<!string>, so we patch
- # that bug by replacing the < with &lt; - escaping all text inside script
- # tags would be better but it's a bit of a catch 22.
- contents = f.read()
- contents = re.sub(r'<([^\s\w/])',
- lambda x: '&lt;%s' % x.group(1),
- contents)
-
- extractor.feed(contents)
- extractor.close()
- return extractor.GetScriptLines()
-
-
-def StripTags(str):
- """Returns the string with HTML tags stripped.
-
- Args:
- str: An html string.
-
- Returns:
- The html string with all tags stripped. If there was a parse error, returns
- the text successfully parsed so far.
- """
- # Brute force approach to stripping as much HTML as possible. If there is a
- # parsing error, don't strip text before parse error position, and continue
- # trying from there.
- final_text = ''
- finished = False
- while not finished:
- try:
- strip = _HtmlStripper()
- strip.feed(str)
- strip.close()
- str = strip.get_output()
- final_text += str
- finished = True
- except HTMLParser.HTMLParseError, e:
- final_text += str[:e.offset]
- str = str[e.offset + 1:]
-
- return final_text
-
-
-class _HtmlStripper(HTMLParser.HTMLParser):
- """Simple class to strip tags from HTML.
-
- Does so by doing nothing when encountering tags, and appending character data
- to a buffer when that is encountered.
- """
- def __init__(self):
- self.reset()
- self.__output = cStringIO.StringIO()
-
- def handle_data(self, d):
- self.__output.write(d)
-
- def get_output(self):
- return self.__output.getvalue()
diff --git a/tools/closure_linter/closure_linter/common/lintrunner.py b/tools/closure_linter/closure_linter/common/lintrunner.py
deleted file mode 100755
index 07842c7bfe..0000000000
--- a/tools/closure_linter/closure_linter/common/lintrunner.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Interface for a lint running wrapper."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-
-class LintRunner(object):
- """Interface for a lint running wrapper."""
-
- def __init__(self):
- if self.__class__ == LintRunner:
- raise NotImplementedError('class LintRunner is abstract')
-
- def Run(self, filenames, error_handler):
- """Run a linter on the given filenames.
-
- Args:
- filenames: The filenames to check
- error_handler: An ErrorHandler object
-
- Returns:
- The error handler, which may have been used to collect error info.
- """
diff --git a/tools/closure_linter/closure_linter/common/matcher.py b/tools/closure_linter/closure_linter/common/matcher.py
deleted file mode 100755
index 9b4402c671..0000000000
--- a/tools/closure_linter/closure_linter/common/matcher.py
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Regular expression based JavaScript matcher classes."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-from closure_linter.common import position
-from closure_linter.common import tokens
-
-# Shorthand
-Token = tokens.Token
-Position = position.Position
-
-
-class Matcher(object):
- """A token matcher.
-
- Specifies a pattern to match, the type of token it represents, what mode the
- token changes to, and what mode the token applies to.
-
- Modes allow more advanced grammars to be incorporated, and are also necessary
- to tokenize line by line. We can have different patterns apply to different
- modes - i.e. looking for documentation while in comment mode.
-
- Attributes:
- regex: The regular expression representing this matcher.
- type: The type of token indicated by a successful match.
- result_mode: The mode to move to after a successful match.
- """
-
- def __init__(self, regex, token_type, result_mode=None, line_start=False):
- """Create a new matcher template.
-
- Args:
- regex: The regular expression to match.
- token_type: The type of token a successful match indicates.
- result_mode: What mode to change to after a successful match. Defaults to
- None, which means to not change the current mode.
- line_start: Whether this matcher should only match string at the start
- of a line.
- """
- self.regex = regex
- self.type = token_type
- self.result_mode = result_mode
- self.line_start = line_start
diff --git a/tools/closure_linter/closure_linter/common/position.py b/tools/closure_linter/closure_linter/common/position.py
deleted file mode 100755
index cebf17ef36..0000000000
--- a/tools/closure_linter/closure_linter/common/position.py
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Classes to represent positions within strings."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-
-class Position(object):
- """Object representing a segment of a string.
-
- Attributes:
- start: The index in to the string where the segment starts.
- length: The length of the string segment.
- """
-
- def __init__(self, start, length):
- """Initialize the position object.
-
- Args:
- start: The start index.
- length: The number of characters to include.
- """
- self.start = start
- self.length = length
-
- def Get(self, string):
- """Returns this range of the given string.
-
- Args:
- string: The string to slice.
-
- Returns:
- The string within the range specified by this object.
- """
- return string[self.start:self.start + self.length]
-
- def Set(self, target, source):
- """Sets this range within the target string to the source string.
-
- Args:
- target: The target string.
- source: The source string.
-
- Returns:
- The resulting string
- """
- return target[:self.start] + source + target[self.start + self.length:]
-
- def AtEnd(string):
- """Create a Position representing the end of the given string.
-
- Args:
- string: The string to represent the end of.
-
- Returns:
- The created Position object.
- """
- return Position(len(string), 0)
- AtEnd = staticmethod(AtEnd)
-
- def IsAtEnd(self, string):
- """Returns whether this position is at the end of the given string.
-
- Args:
- string: The string to test for the end of.
-
- Returns:
- Whether this position is at the end of the given string.
- """
- return self.start == len(string) and self.length == 0
-
- def AtBeginning():
- """Create a Position representing the beginning of any string.
-
- Returns:
- The created Position object.
- """
- return Position(0, 0)
- AtBeginning = staticmethod(AtBeginning)
-
- def IsAtBeginning(self):
- """Returns whether this position is at the beginning of any string.
-
- Returns:
- Whether this position is at the beginning of any string.
- """
- return self.start == 0 and self.length == 0
-
- def All(string):
- """Create a Position representing the entire string.
-
- Args:
- string: The string to represent the entirety of.
-
- Returns:
- The created Position object.
- """
- return Position(0, len(string))
- All = staticmethod(All)
-
- def Index(index):
- """Returns a Position object for the specified index.
-
- Args:
- index: The index to select, inclusively.
-
- Returns:
- The created Position object.
- """
- return Position(index, 1)
- Index = staticmethod(Index)
diff --git a/tools/closure_linter/closure_linter/common/simplefileflags.py b/tools/closure_linter/closure_linter/common/simplefileflags.py
deleted file mode 100755
index 3402bef3a1..0000000000
--- a/tools/closure_linter/closure_linter/common/simplefileflags.py
+++ /dev/null
@@ -1,190 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Determines the list of files to be checked from command line arguments."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-import glob
-import os
-import re
-
-import gflags as flags
-
-
-FLAGS = flags.FLAGS
-
-flags.DEFINE_multistring(
- 'recurse',
- None,
- 'Recurse in to the subdirectories of the given path',
- short_name='r')
-flags.DEFINE_list(
- 'exclude_directories',
- ('_demos'),
- 'Exclude the specified directories (only applicable along with -r or '
- '--presubmit)',
- short_name='e')
-flags.DEFINE_list(
- 'exclude_files',
- ('deps.js'),
- 'Exclude the specified files',
- short_name='x')
-
-
-def MatchesSuffixes(filename, suffixes):
- """Returns whether the given filename matches one of the given suffixes.
-
- Args:
- filename: Filename to check.
- suffixes: Sequence of suffixes to check.
-
- Returns:
- Whether the given filename matches one of the given suffixes.
- """
- suffix = filename[filename.rfind('.'):]
- return suffix in suffixes
-
-
-def _GetUserSpecifiedFiles(argv, suffixes):
- """Returns files to be linted, specified directly on the command line.
-
- Can handle the '*' wildcard in filenames, but no other wildcards.
-
- Args:
- argv: Sequence of command line arguments. The second and following arguments
- are assumed to be files that should be linted.
- suffixes: Expected suffixes for the file type being checked.
-
- Returns:
- A sequence of files to be linted.
- """
- files = argv[1:] or []
- all_files = []
- lint_files = []
-
- # Perform any necessary globs.
- for f in files:
- if f.find('*') != -1:
- for result in glob.glob(f):
- all_files.append(result)
- else:
- all_files.append(f)
-
- for f in all_files:
- if MatchesSuffixes(f, suffixes):
- lint_files.append(f)
- return lint_files
-
-
-def _GetRecursiveFiles(suffixes):
- """Returns files to be checked specified by the --recurse flag.
-
- Args:
- suffixes: Expected suffixes for the file type being checked.
-
- Returns:
- A list of files to be checked.
- """
- lint_files = []
- # Perform any request recursion
- if FLAGS.recurse:
- for start in FLAGS.recurse:
- for root, subdirs, files in os.walk(start):
- for f in files:
- if MatchesSuffixes(f, suffixes):
- lint_files.append(os.path.join(root, f))
- return lint_files
-
-
-def GetAllSpecifiedFiles(argv, suffixes):
- """Returns all files specified by the user on the commandline.
-
- Args:
- argv: Sequence of command line arguments. The second and following arguments
- are assumed to be files that should be linted.
- suffixes: Expected suffixes for the file type
-
- Returns:
- A list of all files specified directly or indirectly (via flags) on the
- command line by the user.
- """
- files = _GetUserSpecifiedFiles(argv, suffixes)
-
- if FLAGS.recurse:
- files += _GetRecursiveFiles(suffixes)
-
- return FilterFiles(files)
-
-
-def FilterFiles(files):
- """Filters the list of files to be linted be removing any excluded files.
-
- Filters out files excluded using --exclude_files and --exclude_directories.
-
- Args:
- files: Sequence of files that needs filtering.
-
- Returns:
- Filtered list of files to be linted.
- """
- num_files = len(files)
-
- ignore_dirs_regexs = []
- for ignore in FLAGS.exclude_directories:
- ignore_dirs_regexs.append(re.compile(r'(^|[\\/])%s[\\/]' % ignore))
-
- result_files = []
- for f in files:
- add_file = True
- for exclude in FLAGS.exclude_files:
- if f.endswith('/' + exclude) or f == exclude:
- add_file = False
- break
- for ignore in ignore_dirs_regexs:
- if ignore.search(f):
- # Break out of ignore loop so we don't add to
- # filtered files.
- add_file = False
- break
- if add_file:
- # Convert everything to absolute paths so we can easily remove duplicates
- # using a set.
- result_files.append(os.path.abspath(f))
-
- skipped = num_files - len(result_files)
- if skipped:
- print 'Skipping %d file(s).' % skipped
-
- return set(result_files)
-
-
-def GetFileList(argv, file_type, suffixes):
- """Parse the flags and return the list of files to check.
-
- Args:
- argv: Sequence of command line arguments.
- suffixes: Sequence of acceptable suffixes for the file type.
-
- Returns:
- The list of files to check.
- """
- return sorted(GetAllSpecifiedFiles(argv, suffixes))
-
-
-def IsEmptyArgumentList(argv):
- return not (len(argv[1:]) or FLAGS.recurse)
diff --git a/tools/closure_linter/closure_linter/common/tokenizer.py b/tools/closure_linter/closure_linter/common/tokenizer.py
deleted file mode 100755
index 9420ea3267..0000000000
--- a/tools/closure_linter/closure_linter/common/tokenizer.py
+++ /dev/null
@@ -1,185 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Regular expression based lexer."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-from closure_linter.common import tokens
-
-# Shorthand
-Type = tokens.TokenType
-
-
-class Tokenizer(object):
- """General purpose tokenizer.
-
- Attributes:
- mode: The latest mode of the tokenizer. This allows patterns to distinguish
- if they are mid-comment, mid-parameter list, etc.
- matchers: Dictionary of modes to sequences of matchers that define the
- patterns to check at any given time.
- default_types: Dictionary of modes to types, defining what type to give
- non-matched text when in the given mode. Defaults to Type.NORMAL.
- """
-
- def __init__(self, starting_mode, matchers, default_types):
- """Initialize the tokenizer.
-
- Args:
- starting_mode: Mode to start in.
- matchers: Dictionary of modes to sequences of matchers that defines the
- patterns to check at any given time.
- default_types: Dictionary of modes to types, defining what type to give
- non-matched text when in the given mode. Defaults to Type.NORMAL.
- """
- self.__starting_mode = starting_mode
- self.matchers = matchers
- self.default_types = default_types
-
- def TokenizeFile(self, file):
- """Tokenizes the given file.
-
- Args:
- file: An iterable that yields one line of the file at a time.
-
- Returns:
- The first token in the file
- """
- # The current mode.
- self.mode = self.__starting_mode
- # The first token in the stream.
- self.__first_token = None
- # The last token added to the token stream.
- self.__last_token = None
- # The current line number.
- self.__line_number = 0
-
- for line in file:
- self.__line_number += 1
- self.__TokenizeLine(line)
-
- return self.__first_token
-
- def _CreateToken(self, string, token_type, line, line_number, values=None):
- """Creates a new Token object (or subclass).
-
- Args:
- string: The string of input the token represents.
- token_type: The type of token.
- line: The text of the line this token is in.
- line_number: The line number of the token.
- values: A dict of named values within the token. For instance, a
- function declaration may have a value called 'name' which captures the
- name of the function.
-
- Returns:
- The newly created Token object.
- """
- return tokens.Token(string, token_type, line, line_number, values,
- line_number)
-
- def __TokenizeLine(self, line):
- """Tokenizes the given line.
-
- Args:
- line: The contents of the line.
- """
- string = line.rstrip('\n\r\f')
- line_number = self.__line_number
- self.__start_index = 0
-
- if not string:
- self.__AddToken(self._CreateToken('', Type.BLANK_LINE, line, line_number))
- return
-
- normal_token = ''
- index = 0
- while index < len(string):
- for matcher in self.matchers[self.mode]:
- if matcher.line_start and index > 0:
- continue
-
- match = matcher.regex.match(string, index)
-
- if match:
- if normal_token:
- self.__AddToken(
- self.__CreateNormalToken(self.mode, normal_token, line,
- line_number))
- normal_token = ''
-
- # Add the match.
- self.__AddToken(self._CreateToken(match.group(), matcher.type, line,
- line_number, match.groupdict()))
-
- # Change the mode to the correct one for after this match.
- self.mode = matcher.result_mode or self.mode
-
- # Shorten the string to be matched.
- index = match.end()
-
- break
-
- else:
- # If the for loop finishes naturally (i.e. no matches) we just add the
- # first character to the string of consecutive non match characters.
- # These will constitute a NORMAL token.
- if string:
- normal_token += string[index:index + 1]
- index += 1
-
- if normal_token:
- self.__AddToken(
- self.__CreateNormalToken(self.mode, normal_token, line, line_number))
-
- def __CreateNormalToken(self, mode, string, line, line_number):
- """Creates a normal token.
-
- Args:
- mode: The current mode.
- string: The string to tokenize.
- line: The line of text.
- line_number: The line number within the file.
-
- Returns:
- A Token object, of the default type for the current mode.
- """
- type = Type.NORMAL
- if mode in self.default_types:
- type = self.default_types[mode]
- return self._CreateToken(string, type, line, line_number)
-
- def __AddToken(self, token):
- """Add the given token to the token stream.
-
- Args:
- token: The token to add.
- """
- # Store the first token, or point the previous token to this one.
- if not self.__first_token:
- self.__first_token = token
- else:
- self.__last_token.next = token
-
- # Establish the doubly linked list
- token.previous = self.__last_token
- self.__last_token = token
-
- # Compute the character indices
- token.start_index = self.__start_index
- self.__start_index += token.length
diff --git a/tools/closure_linter/closure_linter/common/tokens.py b/tools/closure_linter/closure_linter/common/tokens.py
deleted file mode 100755
index 4703998752..0000000000
--- a/tools/closure_linter/closure_linter/common/tokens.py
+++ /dev/null
@@ -1,145 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Classes to represent tokens and positions within them."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-
-class TokenType(object):
- """Token types common to all languages."""
- NORMAL = 'normal'
- WHITESPACE = 'whitespace'
- BLANK_LINE = 'blank line'
-
-
-class Token(object):
- """Token class for intelligent text splitting.
-
- The token class represents a string of characters and an identifying type.
-
- Attributes:
- type: The type of token.
- string: The characters the token comprises.
- length: The length of the token.
- line: The text of the line the token is found in.
- line_number: The number of the line the token is found in.
- values: Dictionary of values returned from the tokens regex match.
- previous: The token before this one.
- next: The token after this one.
- start_index: The character index in the line where this token starts.
- attached_object: Object containing more information about this token.
- metadata: Object containing metadata about this token. Must be added by
- a separate metadata pass.
- """
-
- def __init__(self, string, token_type, line, line_number, values=None,
- orig_line_number=None):
- """Creates a new Token object.
-
- Args:
- string: The string of input the token contains.
- token_type: The type of token.
- line: The text of the line this token is in.
- line_number: The line number of the token.
- values: A dict of named values within the token. For instance, a
- function declaration may have a value called 'name' which captures the
- name of the function.
- orig_line_number: The line number of the original file this token comes
- from. This should be only set during the tokenization process. For newly
- created error fix tokens after that, it should be None.
- """
- self.type = token_type
- self.string = string
- self.length = len(string)
- self.line = line
- self.line_number = line_number
- self.orig_line_number = orig_line_number
- self.values = values
- self.is_deleted = False
-
- # These parts can only be computed when the file is fully tokenized
- self.previous = None
- self.next = None
- self.start_index = None
-
- # This part is set in statetracker.py
- # TODO(robbyw): Wrap this in to metadata
- self.attached_object = None
-
- # This part is set in *metadatapass.py
- self.metadata = None
-
- def IsFirstInLine(self):
- """Tests if this token is the first token in its line.
-
- Returns:
- Whether the token is the first token in its line.
- """
- return not self.previous or self.previous.line_number != self.line_number
-
- def IsLastInLine(self):
- """Tests if this token is the last token in its line.
-
- Returns:
- Whether the token is the last token in its line.
- """
- return not self.next or self.next.line_number != self.line_number
-
- def IsType(self, token_type):
- """Tests if this token is of the given type.
-
- Args:
- token_type: The type to test for.
-
- Returns:
- True if the type of this token matches the type passed in.
- """
- return self.type == token_type
-
- def IsAnyType(self, *token_types):
- """Tests if this token is any of the given types.
-
- Args:
- token_types: The types to check. Also accepts a single array.
-
- Returns:
- True if the type of this token is any of the types passed in.
- """
- if not isinstance(token_types[0], basestring):
- return self.type in token_types[0]
- else:
- return self.type in token_types
-
- def __repr__(self):
- return '<Token: %s, "%s", %r, %d, %r>' % (self.type, self.string,
- self.values, self.line_number,
- self.metadata)
-
- def __iter__(self):
- """Returns a token iterator."""
- node = self
- while node:
- yield node
- node = node.next
-
- def __reversed__(self):
- """Returns a reverse-direction token iterator."""
- node = self
- while node:
- yield node
- node = node.previous
diff --git a/tools/closure_linter/closure_linter/common/tokens_test.py b/tools/closure_linter/closure_linter/common/tokens_test.py
deleted file mode 100644
index 01ec89d01b..0000000000
--- a/tools/closure_linter/closure_linter/common/tokens_test.py
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-__author__ = 'nnaze@google.com (Nathan Naze)'
-
-import unittest as googletest
-from closure_linter.common import tokens
-
-
-def _CreateDummyToken():
- return tokens.Token('foo', None, 1, 1)
-
-
-def _CreateDummyTokens(count):
- dummy_tokens = []
- for _ in xrange(count):
- dummy_tokens.append(_CreateDummyToken())
- return dummy_tokens
-
-
-def _SetTokensAsNeighbors(neighbor_tokens):
- for i in xrange(len(neighbor_tokens)):
- prev_index = i - 1
- next_index = i + 1
-
- if prev_index >= 0:
- neighbor_tokens[i].previous = neighbor_tokens[prev_index]
-
- if next_index < len(neighbor_tokens):
- neighbor_tokens[i].next = neighbor_tokens[next_index]
-
-
-class TokensTest(googletest.TestCase):
-
- def testIsFirstInLine(self):
-
- # First token in file (has no previous).
- self.assertTrue(_CreateDummyToken().IsFirstInLine())
-
- a, b = _CreateDummyTokens(2)
- _SetTokensAsNeighbors([a, b])
-
- # Tokens on same line
- a.line_number = 30
- b.line_number = 30
-
- self.assertFalse(b.IsFirstInLine())
-
- # Tokens on different lines
- b.line_number = 31
- self.assertTrue(b.IsFirstInLine())
-
- def testIsLastInLine(self):
- # Last token in file (has no next).
- self.assertTrue(_CreateDummyToken().IsLastInLine())
-
- a, b = _CreateDummyTokens(2)
- _SetTokensAsNeighbors([a, b])
-
- # Tokens on same line
- a.line_number = 30
- b.line_number = 30
- self.assertFalse(a.IsLastInLine())
-
- b.line_number = 31
- self.assertTrue(a.IsLastInLine())
-
- def testIsType(self):
- a = tokens.Token('foo', 'fakeType1', 1, 1)
- self.assertTrue(a.IsType('fakeType1'))
- self.assertFalse(a.IsType('fakeType2'))
-
- def testIsAnyType(self):
- a = tokens.Token('foo', 'fakeType1', 1, 1)
- self.assertTrue(a.IsAnyType(['fakeType1', 'fakeType2']))
- self.assertFalse(a.IsAnyType(['fakeType3', 'fakeType4']))
-
- def testRepr(self):
- a = tokens.Token('foo', 'fakeType1', 1, 1)
- self.assertEquals('<Token: fakeType1, "foo", None, 1, None>', str(a))
-
- def testIter(self):
- dummy_tokens = _CreateDummyTokens(5)
- _SetTokensAsNeighbors(dummy_tokens)
- a, b, c, d, e = dummy_tokens
-
- i = iter(a)
- self.assertListEqual([a, b, c, d, e], list(i))
-
- def testReverseIter(self):
- dummy_tokens = _CreateDummyTokens(5)
- _SetTokensAsNeighbors(dummy_tokens)
- a, b, c, d, e = dummy_tokens
-
- ri = reversed(e)
- self.assertListEqual([e, d, c, b, a], list(ri))
-
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/closure_linter/ecmalintrules.py b/tools/closure_linter/closure_linter/ecmalintrules.py
deleted file mode 100755
index c07dffc86e..0000000000
--- a/tools/closure_linter/closure_linter/ecmalintrules.py
+++ /dev/null
@@ -1,844 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Core methods for checking EcmaScript files for common style guide violations.
-"""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)',
- 'jacobr@google.com (Jacob Richman)')
-
-import re
-
-import gflags as flags
-
-from closure_linter import checkerbase
-from closure_linter import ecmametadatapass
-from closure_linter import error_check
-from closure_linter import errorrules
-from closure_linter import errors
-from closure_linter import indentation
-from closure_linter import javascripttokenizer
-from closure_linter import javascripttokens
-from closure_linter import statetracker
-from closure_linter import tokenutil
-from closure_linter.common import error
-from closure_linter.common import position
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_list('custom_jsdoc_tags', '', 'Extra jsdoc tags to allow')
-# TODO(user): When flipping this to True, remove logic from unit tests
-# that overrides this flag.
-flags.DEFINE_boolean('dot_on_next_line', False, 'Require dots to be'
- 'placed on the next line for wrapped expressions')
-
-# TODO(robbyw): Check for extra parens on return statements
-# TODO(robbyw): Check for 0px in strings
-# TODO(robbyw): Ensure inline jsDoc is in {}
-# TODO(robbyw): Check for valid JS types in parameter docs
-
-# Shorthand
-Context = ecmametadatapass.EcmaContext
-Error = error.Error
-Modes = javascripttokenizer.JavaScriptModes
-Position = position.Position
-Rule = error_check.Rule
-Type = javascripttokens.JavaScriptTokenType
-
-
-class EcmaScriptLintRules(checkerbase.LintRulesBase):
- """EmcaScript lint style checking rules.
-
- Can be used to find common style errors in JavaScript, ActionScript and other
- Ecma like scripting languages. Style checkers for Ecma scripting languages
- should inherit from this style checker.
- Please do not add any state to EcmaScriptLintRules or to any subclasses.
-
- All state should be added to the StateTracker subclass used for a particular
- language.
- """
-
- # It will be initialized in constructor so the flags are initialized.
- max_line_length = -1
-
- # Static constants.
- MISSING_PARAMETER_SPACE = re.compile(r',\S')
-
- EXTRA_SPACE = re.compile(r'(\(\s|\s\))')
-
- ENDS_WITH_SPACE = re.compile(r'\s$')
-
- ILLEGAL_TAB = re.compile(r'\t')
-
- # Regex used to split up complex types to check for invalid use of ? and |.
- TYPE_SPLIT = re.compile(r'[,<>()]')
-
- # Regex for form of author lines after the @author tag.
- AUTHOR_SPEC = re.compile(r'(\s*)[^\s]+@[^(\s]+(\s*)\(.+\)')
-
- # Acceptable tokens to remove for line too long testing.
- LONG_LINE_IGNORE = frozenset(
- ['*', '//', '@see'] +
- ['@%s' % tag for tag in statetracker.DocFlag.HAS_TYPE])
-
- JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED = frozenset([
- '@fileoverview', '@param', '@return', '@returns'])
-
- def __init__(self):
- """Initialize this lint rule object."""
- checkerbase.LintRulesBase.__init__(self)
- if EcmaScriptLintRules.max_line_length == -1:
- EcmaScriptLintRules.max_line_length = errorrules.GetMaxLineLength()
-
- def Initialize(self, checker, limited_doc_checks, is_html):
- """Initialize this lint rule object before parsing a new file."""
- checkerbase.LintRulesBase.Initialize(self, checker, limited_doc_checks,
- is_html)
- self._indentation = indentation.IndentationRules()
-
- def HandleMissingParameterDoc(self, token, param_name):
- """Handle errors associated with a parameter missing a @param tag."""
- raise TypeError('Abstract method HandleMissingParameterDoc not implemented')
-
- def _CheckLineLength(self, last_token, state):
- """Checks whether the line is too long.
-
- Args:
- last_token: The last token in the line.
- state: parser_state object that indicates the current state in the page
- """
- # Start from the last token so that we have the flag object attached to
- # and DOC_FLAG tokens.
- line_number = last_token.line_number
- token = last_token
-
- # Build a representation of the string where spaces indicate potential
- # line-break locations.
- line = []
- while token and token.line_number == line_number:
- if state.IsTypeToken(token):
- line.insert(0, 'x' * len(token.string))
- elif token.type in (Type.IDENTIFIER, Type.OPERATOR):
- # Dots are acceptable places to wrap (may be tokenized as identifiers).
- line.insert(0, token.string.replace('.', ' '))
- else:
- line.insert(0, token.string)
- token = token.previous
-
- line = ''.join(line)
- line = line.rstrip('\n\r\f')
- try:
- length = len(unicode(line, 'utf-8'))
- except (LookupError, UnicodeDecodeError):
- # Unknown encoding. The line length may be wrong, as was originally the
- # case for utf-8 (see bug 1735846). For now just accept the default
- # length, but as we find problems we can either add test for other
- # possible encodings or return without an error to protect against
- # false positives at the cost of more false negatives.
- length = len(line)
-
- if length > EcmaScriptLintRules.max_line_length:
-
- # If the line matches one of the exceptions, then it's ok.
- for long_line_regexp in self.GetLongLineExceptions():
- if long_line_regexp.match(last_token.line):
- return
-
- # If the line consists of only one "word", or multiple words but all
- # except one are ignoreable, then it's ok.
- parts = set(line.split())
-
- # We allow two "words" (type and name) when the line contains @param
- max_parts = 1
- if '@param' in parts:
- max_parts = 2
-
- # Custom tags like @requires may have url like descriptions, so ignore
- # the tag, similar to how we handle @see.
- custom_tags = set(['@%s' % f for f in FLAGS.custom_jsdoc_tags])
- if (len(parts.difference(self.LONG_LINE_IGNORE | custom_tags))
- > max_parts):
- self._HandleError(
- errors.LINE_TOO_LONG,
- 'Line too long (%d characters).' % len(line), last_token)
-
- def _CheckJsDocType(self, token, js_type):
- """Checks the given type for style errors.
-
- Args:
- token: The DOC_FLAG token for the flag whose type to check.
- js_type: The flag's typeannotation.TypeAnnotation instance.
- """
- if not js_type: return
-
- if js_type.type_group and len(js_type.sub_types) == 2:
- identifiers = [t.identifier for t in js_type.sub_types]
- if 'null' in identifiers:
- # Don't warn if the identifier is a template type (e.g. {TYPE|null}.
- if not identifiers[0].isupper() and not identifiers[1].isupper():
- self._HandleError(
- errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL,
- 'Prefer "?Type" to "Type|null": "%s"' % js_type, token)
-
- # TODO(user): We should report an error for wrong usage of '?' and '|'
- # e.g. {?number|string|null} etc.
-
- for sub_type in js_type.IterTypes():
- self._CheckJsDocType(token, sub_type)
-
- def _CheckForMissingSpaceBeforeToken(self, token):
- """Checks for a missing space at the beginning of a token.
-
- Reports a MISSING_SPACE error if the token does not begin with a space or
- the previous token doesn't end with a space and the previous token is on the
- same line as the token.
-
- Args:
- token: The token being checked
- """
- # TODO(user): Check if too many spaces?
- if (len(token.string) == len(token.string.lstrip()) and
- token.previous and token.line_number == token.previous.line_number and
- len(token.previous.string) - len(token.previous.string.rstrip()) == 0):
- self._HandleError(
- errors.MISSING_SPACE,
- 'Missing space before "%s"' % token.string,
- token,
- position=Position.AtBeginning())
-
- def _CheckOperator(self, token):
- """Checks an operator for spacing and line style.
-
- Args:
- token: The operator token.
- """
- last_code = token.metadata.last_code
-
- if not self._ExpectSpaceBeforeOperator(token):
- if (token.previous and token.previous.type == Type.WHITESPACE and
- last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER) and
- last_code.line_number == token.line_number):
- self._HandleError(
- errors.EXTRA_SPACE, 'Extra space before "%s"' % token.string,
- token.previous, position=Position.All(token.previous.string))
-
- elif (token.previous and
- not token.previous.IsComment() and
- not tokenutil.IsDot(token) and
- token.previous.type in Type.EXPRESSION_ENDER_TYPES):
- self._HandleError(errors.MISSING_SPACE,
- 'Missing space before "%s"' % token.string, token,
- position=Position.AtBeginning())
-
- # Check wrapping of operators.
- next_code = tokenutil.GetNextCodeToken(token)
-
- is_dot = tokenutil.IsDot(token)
- wrapped_before = last_code and last_code.line_number != token.line_number
- wrapped_after = next_code and next_code.line_number != token.line_number
-
- if FLAGS.dot_on_next_line and is_dot and wrapped_after:
- self._HandleError(
- errors.LINE_ENDS_WITH_DOT,
- '"." must go on the following line',
- token)
- if (not is_dot and wrapped_before and
- not token.metadata.IsUnaryOperator()):
- self._HandleError(
- errors.LINE_STARTS_WITH_OPERATOR,
- 'Binary operator must go on previous line "%s"' % token.string,
- token)
-
- def _IsLabel(self, token):
- # A ':' token is considered part of a label if it occurs in a case
- # statement, a plain label, or an object literal, i.e. is not part of a
- # ternary.
-
- return (token.string == ':' and
- token.metadata.context.type in (Context.LITERAL_ELEMENT,
- Context.CASE_BLOCK,
- Context.STATEMENT))
-
- def _ExpectSpaceBeforeOperator(self, token):
- """Returns whether a space should appear before the given operator token.
-
- Args:
- token: The operator token.
-
- Returns:
- Whether there should be a space before the token.
- """
- if token.string == ',' or token.metadata.IsUnaryPostOperator():
- return False
-
- if tokenutil.IsDot(token):
- return False
-
- # Colons should appear in labels, object literals, the case of a switch
- # statement, and ternary operator. Only want a space in the case of the
- # ternary operator.
- if self._IsLabel(token):
- return False
-
- if token.metadata.IsUnaryOperator() and token.IsFirstInLine():
- return False
-
- return True
-
- def CheckToken(self, token, state):
- """Checks a token, given the current parser_state, for warnings and errors.
-
- Args:
- token: The current token under consideration
- state: parser_state object that indicates the current state in the page
- """
- # Store some convenience variables
- first_in_line = token.IsFirstInLine()
- last_in_line = token.IsLastInLine()
- last_non_space_token = state.GetLastNonSpaceToken()
-
- token_type = token.type
-
- # Process the line change.
- if not self._is_html and error_check.ShouldCheck(Rule.INDENTATION):
- # TODO(robbyw): Support checking indentation in HTML files.
- indentation_errors = self._indentation.CheckToken(token, state)
- for indentation_error in indentation_errors:
- self._HandleError(*indentation_error)
-
- if last_in_line:
- self._CheckLineLength(token, state)
-
- if token_type == Type.PARAMETERS:
- # Find missing spaces in parameter lists.
- if self.MISSING_PARAMETER_SPACE.search(token.string):
- fix_data = ', '.join([s.strip() for s in token.string.split(',')])
- self._HandleError(errors.MISSING_SPACE, 'Missing space after ","',
- token, position=None, fix_data=fix_data.strip())
-
- # Find extra spaces at the beginning of parameter lists. Make sure
- # we aren't at the beginning of a continuing multi-line list.
- if not first_in_line:
- space_count = len(token.string) - len(token.string.lstrip())
- if space_count:
- self._HandleError(errors.EXTRA_SPACE, 'Extra space after "("',
- token, position=Position(0, space_count))
-
- elif (token_type == Type.START_BLOCK and
- token.metadata.context.type == Context.BLOCK):
- self._CheckForMissingSpaceBeforeToken(token)
-
- elif token_type == Type.END_BLOCK:
- last_code = token.metadata.last_code
- if state.InFunction() and state.IsFunctionClose():
- if state.InTopLevelFunction():
- # A semicolons should not be included at the end of a function
- # declaration.
- if not state.InAssignedFunction():
- if not last_in_line and token.next.type == Type.SEMICOLON:
- self._HandleError(
- errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
- 'Illegal semicolon after function declaration',
- token.next, position=Position.All(token.next.string))
-
- # A semicolon should be included at the end of a function expression
- # that is not immediately called or used by a dot operator.
- if (state.InAssignedFunction() and token.next
- and token.next.type != Type.SEMICOLON):
- next_token = tokenutil.GetNextCodeToken(token)
- is_immediately_used = (next_token.type == Type.START_PAREN or
- tokenutil.IsDot(next_token))
- if not is_immediately_used:
- self._HandleError(
- errors.MISSING_SEMICOLON_AFTER_FUNCTION,
- 'Missing semicolon after function assigned to a variable',
- token, position=Position.AtEnd(token.string))
-
- if state.InInterfaceMethod() and last_code.type != Type.START_BLOCK:
- self._HandleError(errors.INTERFACE_METHOD_CANNOT_HAVE_CODE,
- 'Interface methods cannot contain code', last_code)
-
- elif (state.IsBlockClose() and
- token.next and token.next.type == Type.SEMICOLON):
- if (last_code.metadata.context.parent.type != Context.OBJECT_LITERAL
- and last_code.metadata.context.type != Context.OBJECT_LITERAL):
- self._HandleError(
- errors.REDUNDANT_SEMICOLON,
- 'No semicolon is required to end a code block',
- token.next, position=Position.All(token.next.string))
-
- elif token_type == Type.SEMICOLON:
- if token.previous and token.previous.type == Type.WHITESPACE:
- self._HandleError(
- errors.EXTRA_SPACE, 'Extra space before ";"',
- token.previous, position=Position.All(token.previous.string))
-
- if token.next and token.next.line_number == token.line_number:
- if token.metadata.context.type != Context.FOR_GROUP_BLOCK:
- # TODO(robbyw): Error about no multi-statement lines.
- pass
-
- elif token.next.type not in (
- Type.WHITESPACE, Type.SEMICOLON, Type.END_PAREN):
- self._HandleError(
- errors.MISSING_SPACE,
- 'Missing space after ";" in for statement',
- token.next,
- position=Position.AtBeginning())
-
- last_code = token.metadata.last_code
- if last_code and last_code.type == Type.SEMICOLON:
- # Allow a single double semi colon in for loops for cases like:
- # for (;;) { }.
- # NOTE(user): This is not a perfect check, and will not throw an error
- # for cases like: for (var i = 0;; i < n; i++) {}, but then your code
- # probably won't work either.
- for_token = tokenutil.CustomSearch(
- last_code,
- lambda token: token.type == Type.KEYWORD and token.string == 'for',
- end_func=lambda token: token.type == Type.SEMICOLON,
- distance=None,
- reverse=True)
-
- if not for_token:
- self._HandleError(errors.REDUNDANT_SEMICOLON, 'Redundant semicolon',
- token, position=Position.All(token.string))
-
- elif token_type == Type.START_PAREN:
- # Ensure that opening parentheses have a space before any keyword
- # that is not being invoked like a member function.
- if (token.previous and token.previous.type == Type.KEYWORD and
- (not token.previous.metadata or
- not token.previous.metadata.last_code or
- not token.previous.metadata.last_code.string or
- token.previous.metadata.last_code.string[-1:] != '.')):
- self._HandleError(errors.MISSING_SPACE, 'Missing space before "("',
- token, position=Position.AtBeginning())
- elif token.previous and token.previous.type == Type.WHITESPACE:
- before_space = token.previous.previous
- # Ensure that there is no extra space before a function invocation,
- # even if the function being invoked happens to be a keyword.
- if (before_space and before_space.line_number == token.line_number and
- before_space.type == Type.IDENTIFIER or
- (before_space.type == Type.KEYWORD and before_space.metadata and
- before_space.metadata.last_code and
- before_space.metadata.last_code.string and
- before_space.metadata.last_code.string[-1:] == '.')):
- self._HandleError(
- errors.EXTRA_SPACE, 'Extra space before "("',
- token.previous, position=Position.All(token.previous.string))
-
- elif token_type == Type.START_BRACKET:
- self._HandleStartBracket(token, last_non_space_token)
- elif token_type in (Type.END_PAREN, Type.END_BRACKET):
- # Ensure there is no space before closing parentheses, except when
- # it's in a for statement with an omitted section, or when it's at the
- # beginning of a line.
- if (token.previous and token.previous.type == Type.WHITESPACE and
- not token.previous.IsFirstInLine() and
- not (last_non_space_token and last_non_space_token.line_number ==
- token.line_number and
- last_non_space_token.type == Type.SEMICOLON)):
- self._HandleError(
- errors.EXTRA_SPACE, 'Extra space before "%s"' %
- token.string, token.previous,
- position=Position.All(token.previous.string))
-
- elif token_type == Type.WHITESPACE:
- if self.ILLEGAL_TAB.search(token.string):
- if token.IsFirstInLine():
- if token.next:
- self._HandleError(
- errors.ILLEGAL_TAB,
- 'Illegal tab in whitespace before "%s"' % token.next.string,
- token, position=Position.All(token.string))
- else:
- self._HandleError(
- errors.ILLEGAL_TAB,
- 'Illegal tab in whitespace',
- token, position=Position.All(token.string))
- else:
- self._HandleError(
- errors.ILLEGAL_TAB,
- 'Illegal tab in whitespace after "%s"' % token.previous.string,
- token, position=Position.All(token.string))
-
- # Check whitespace length if it's not the first token of the line and
- # if it's not immediately before a comment.
- if last_in_line:
- # Check for extra whitespace at the end of a line.
- self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',
- token, position=Position.All(token.string))
- elif not first_in_line and not token.next.IsComment():
- if token.length > 1:
- self._HandleError(
- errors.EXTRA_SPACE, 'Extra space after "%s"' %
- token.previous.string, token,
- position=Position(1, len(token.string) - 1))
-
- elif token_type == Type.OPERATOR:
- self._CheckOperator(token)
- elif token_type == Type.DOC_FLAG:
- flag = token.attached_object
-
- if flag.flag_type == 'bug':
- # TODO(robbyw): Check for exactly 1 space on the left.
- string = token.next.string.lstrip()
- string = string.split(' ', 1)[0]
-
- if not string.isdigit():
- self._HandleError(errors.NO_BUG_NUMBER_AFTER_BUG_TAG,
- '@bug should be followed by a bug number', token)
-
- elif flag.flag_type == 'suppress':
- if flag.type is None:
- # A syntactically invalid suppress tag will get tokenized as a normal
- # flag, indicating an error.
- self._HandleError(
- errors.INCORRECT_SUPPRESS_SYNTAX,
- 'Invalid suppress syntax: should be @suppress {errortype}. '
- 'Spaces matter.', token)
- else:
- for suppress_type in flag.jstype.IterIdentifiers():
- if suppress_type not in state.GetDocFlag().SUPPRESS_TYPES:
- self._HandleError(
- errors.INVALID_SUPPRESS_TYPE,
- 'Invalid suppression type: %s' % suppress_type, token)
-
- elif (error_check.ShouldCheck(Rule.WELL_FORMED_AUTHOR) and
- flag.flag_type == 'author'):
- # TODO(user): In non strict mode check the author tag for as much as
- # it exists, though the full form checked below isn't required.
- string = token.next.string
- result = self.AUTHOR_SPEC.match(string)
- if not result:
- self._HandleError(errors.INVALID_AUTHOR_TAG_DESCRIPTION,
- 'Author tag line should be of the form: '
- '@author foo@somewhere.com (Your Name)',
- token.next)
- else:
- # Check spacing between email address and name. Do this before
- # checking earlier spacing so positions are easier to calculate for
- # autofixing.
- num_spaces = len(result.group(2))
- if num_spaces < 1:
- self._HandleError(errors.MISSING_SPACE,
- 'Missing space after email address',
- token.next, position=Position(result.start(2), 0))
- elif num_spaces > 1:
- self._HandleError(
- errors.EXTRA_SPACE, 'Extra space after email address',
- token.next,
- position=Position(result.start(2) + 1, num_spaces - 1))
-
- # Check for extra spaces before email address. Can't be too few, if
- # not at least one we wouldn't match @author tag.
- num_spaces = len(result.group(1))
- if num_spaces > 1:
- self._HandleError(errors.EXTRA_SPACE,
- 'Extra space before email address',
- token.next, position=Position(1, num_spaces - 1))
-
- elif (flag.flag_type in state.GetDocFlag().HAS_DESCRIPTION and
- not self._limited_doc_checks):
- if flag.flag_type == 'param':
- if flag.name is None:
- self._HandleError(errors.MISSING_JSDOC_PARAM_NAME,
- 'Missing name in @param tag', token)
-
- if not flag.description or flag.description is None:
- flag_name = token.type
- if 'name' in token.values:
- flag_name = '@' + token.values['name']
-
- if flag_name not in self.JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED:
- self._HandleError(
- errors.MISSING_JSDOC_TAG_DESCRIPTION,
- 'Missing description in %s tag' % flag_name, token)
- else:
- self._CheckForMissingSpaceBeforeToken(flag.description_start_token)
-
- if flag.HasType():
- if flag.type_start_token is not None:
- self._CheckForMissingSpaceBeforeToken(
- token.attached_object.type_start_token)
-
- if flag.jstype and not flag.jstype.IsEmpty():
- self._CheckJsDocType(token, flag.jstype)
-
- if error_check.ShouldCheck(Rule.BRACES_AROUND_TYPE) and (
- flag.type_start_token.type != Type.DOC_START_BRACE or
- flag.type_end_token.type != Type.DOC_END_BRACE):
- self._HandleError(
- errors.MISSING_BRACES_AROUND_TYPE,
- 'Type must always be surrounded by curly braces.', token)
-
- if token_type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
- if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and
- token.values['name'] not in FLAGS.custom_jsdoc_tags):
- self._HandleError(
- errors.INVALID_JSDOC_TAG,
- 'Invalid JsDoc tag: %s' % token.values['name'], token)
-
- if (error_check.ShouldCheck(Rule.NO_BRACES_AROUND_INHERIT_DOC) and
- token.values['name'] == 'inheritDoc' and
- token_type == Type.DOC_INLINE_FLAG):
- self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,
- 'Unnecessary braces around @inheritDoc',
- token)
-
- elif token_type == Type.SIMPLE_LVALUE:
- identifier = token.values['identifier']
-
- if ((not state.InFunction() or state.InConstructor()) and
- state.InTopLevel() and not state.InObjectLiteralDescendant()):
- jsdoc = state.GetDocComment()
- if not state.HasDocComment(identifier):
- # Only test for documentation on identifiers with .s in them to
- # avoid checking things like simple variables. We don't require
- # documenting assignments to .prototype itself (bug 1880803).
- if (not state.InConstructor() and
- identifier.find('.') != -1 and not
- identifier.endswith('.prototype') and not
- self._limited_doc_checks):
- comment = state.GetLastComment()
- if not (comment and comment.lower().count('jsdoc inherited')):
- self._HandleError(
- errors.MISSING_MEMBER_DOCUMENTATION,
- "No docs found for member '%s'" % identifier,
- token)
- elif jsdoc and (not state.InConstructor() or
- identifier.startswith('this.')):
- # We are at the top level and the function/member is documented.
- if identifier.endswith('_') and not identifier.endswith('__'):
- # Can have a private class which inherits documentation from a
- # public superclass.
- #
- # @inheritDoc is deprecated in favor of using @override, and they
- if (jsdoc.HasFlag('override') and not jsdoc.HasFlag('constructor')
- and ('accessControls' not in jsdoc.suppressions)):
- self._HandleError(
- errors.INVALID_OVERRIDE_PRIVATE,
- '%s should not override a private member.' % identifier,
- jsdoc.GetFlag('override').flag_token)
- if (jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor')
- and ('accessControls' not in jsdoc.suppressions)):
- self._HandleError(
- errors.INVALID_INHERIT_DOC_PRIVATE,
- '%s should not inherit from a private member.' % identifier,
- jsdoc.GetFlag('inheritDoc').flag_token)
- if (not jsdoc.HasFlag('private') and
- ('underscore' not in jsdoc.suppressions) and not
- ((jsdoc.HasFlag('inheritDoc') or jsdoc.HasFlag('override')) and
- ('accessControls' in jsdoc.suppressions))):
- self._HandleError(
- errors.MISSING_PRIVATE,
- 'Member "%s" must have @private JsDoc.' %
- identifier, token)
- if jsdoc.HasFlag('private') and 'underscore' in jsdoc.suppressions:
- self._HandleError(
- errors.UNNECESSARY_SUPPRESS,
- '@suppress {underscore} is not necessary with @private',
- jsdoc.suppressions['underscore'])
- elif (jsdoc.HasFlag('private') and
- not self.InExplicitlyTypedLanguage()):
- # It is convention to hide public fields in some ECMA
- # implementations from documentation using the @private tag.
- self._HandleError(
- errors.EXTRA_PRIVATE,
- 'Member "%s" must not have @private JsDoc' %
- identifier, token)
-
- # These flags are only legal on localizable message definitions;
- # such variables always begin with the prefix MSG_.
- for f in ('desc', 'hidden', 'meaning'):
- if (jsdoc.HasFlag(f)
- and not identifier.startswith('MSG_')
- and identifier.find('.MSG_') == -1):
- self._HandleError(
- errors.INVALID_USE_OF_DESC_TAG,
- 'Member "%s" should not have @%s JsDoc' % (identifier, f),
- token)
-
- # Check for illegaly assigning live objects as prototype property values.
- index = identifier.find('.prototype.')
- # Ignore anything with additional .s after the prototype.
- if index != -1 and identifier.find('.', index + 11) == -1:
- equal_operator = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
- next_code = tokenutil.SearchExcept(equal_operator, Type.NON_CODE_TYPES)
- if next_code and (
- next_code.type in (Type.START_BRACKET, Type.START_BLOCK) or
- next_code.IsOperator('new')):
- self._HandleError(
- errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE,
- 'Member %s cannot have a non-primitive value' % identifier,
- token)
-
- elif token_type == Type.END_PARAMETERS:
- # Find extra space at the end of parameter lists. We check the token
- # prior to the current one when it is a closing paren.
- if (token.previous and token.previous.type == Type.PARAMETERS
- and self.ENDS_WITH_SPACE.search(token.previous.string)):
- self._HandleError(errors.EXTRA_SPACE, 'Extra space before ")"',
- token.previous)
-
- jsdoc = state.GetDocComment()
- if state.GetFunction().is_interface:
- if token.previous and token.previous.type == Type.PARAMETERS:
- self._HandleError(
- errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS,
- 'Interface constructor cannot have parameters',
- token.previous)
- elif (state.InTopLevel() and jsdoc and not jsdoc.HasFlag('see')
- and not jsdoc.InheritsDocumentation()
- and not state.InObjectLiteralDescendant() and not
- jsdoc.IsInvalidated()):
- distance, edit = jsdoc.CompareParameters(state.GetParams())
- if distance:
- params_iter = iter(state.GetParams())
- docs_iter = iter(jsdoc.ordered_params)
-
- for op in edit:
- if op == 'I':
- # Insertion.
- # Parsing doc comments is the same for all languages
- # but some languages care about parameters that don't have
- # doc comments and some languages don't care.
- # Languages that don't allow variables to by typed such as
- # JavaScript care but languages such as ActionScript or Java
- # that allow variables to be typed don't care.
- if not self._limited_doc_checks:
- self.HandleMissingParameterDoc(token, params_iter.next())
-
- elif op == 'D':
- # Deletion
- self._HandleError(errors.EXTRA_PARAMETER_DOCUMENTATION,
- 'Found docs for non-existing parameter: "%s"' %
- docs_iter.next(), token)
- elif op == 'S':
- # Substitution
- if not self._limited_doc_checks:
- self._HandleError(
- errors.WRONG_PARAMETER_DOCUMENTATION,
- 'Parameter mismatch: got "%s", expected "%s"' %
- (params_iter.next(), docs_iter.next()), token)
-
- else:
- # Equality - just advance the iterators
- params_iter.next()
- docs_iter.next()
-
- elif token_type == Type.STRING_TEXT:
- # If this is the first token after the start of the string, but it's at
- # the end of a line, we know we have a multi-line string.
- if token.previous.type in (
- Type.SINGLE_QUOTE_STRING_START,
- Type.DOUBLE_QUOTE_STRING_START) and last_in_line:
- self._HandleError(errors.MULTI_LINE_STRING,
- 'Multi-line strings are not allowed', token)
-
- # This check is orthogonal to the ones above, and repeats some types, so
- # it is a plain if and not an elif.
- if token.type in Type.COMMENT_TYPES:
- if self.ILLEGAL_TAB.search(token.string):
- self._HandleError(errors.ILLEGAL_TAB,
- 'Illegal tab in comment "%s"' % token.string, token)
-
- trimmed = token.string.rstrip()
- if last_in_line and token.string != trimmed:
- # Check for extra whitespace at the end of a line.
- self._HandleError(
- errors.EXTRA_SPACE, 'Extra space at end of line', token,
- position=Position(len(trimmed), len(token.string) - len(trimmed)))
-
- # This check is also orthogonal since it is based on metadata.
- if token.metadata.is_implied_semicolon:
- self._HandleError(errors.MISSING_SEMICOLON,
- 'Missing semicolon at end of line', token)
-
- def _HandleStartBracket(self, token, last_non_space_token):
- """Handles a token that is an open bracket.
-
- Args:
- token: The token to handle.
- last_non_space_token: The last token that was not a space.
- """
- if (not token.IsFirstInLine() and token.previous.type == Type.WHITESPACE and
- last_non_space_token and
- last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):
- self._HandleError(
- errors.EXTRA_SPACE, 'Extra space before "["',
- token.previous, position=Position.All(token.previous.string))
- # If the [ token is the first token in a line we shouldn't complain
- # about a missing space before [. This is because some Ecma script
- # languages allow syntax like:
- # [Annotation]
- # class MyClass {...}
- # So we don't want to blindly warn about missing spaces before [.
- # In the the future, when rules for computing exactly how many spaces
- # lines should be indented are added, then we can return errors for
- # [ tokens that are improperly indented.
- # For example:
- # var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =
- # [a,b,c];
- # should trigger a proper indentation warning message as [ is not indented
- # by four spaces.
- elif (not token.IsFirstInLine() and token.previous and
- token.previous.type not in (
- [Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +
- Type.EXPRESSION_ENDER_TYPES)):
- self._HandleError(errors.MISSING_SPACE, 'Missing space before "["',
- token, position=Position.AtBeginning())
-
- def Finalize(self, state):
- """Perform all checks that need to occur after all lines are processed.
-
- Args:
- state: State of the parser after parsing all tokens
-
- Raises:
- TypeError: If not overridden.
- """
- last_non_space_token = state.GetLastNonSpaceToken()
- # Check last line for ending with newline.
- if state.GetLastLine() and not (
- state.GetLastLine().isspace() or
- state.GetLastLine().rstrip('\n\r\f') != state.GetLastLine()):
- self._HandleError(
- errors.FILE_MISSING_NEWLINE,
- 'File does not end with new line. (%s)' % state.GetLastLine(),
- last_non_space_token)
-
- try:
- self._indentation.Finalize()
- except Exception, e:
- self._HandleError(
- errors.FILE_DOES_NOT_PARSE,
- str(e),
- last_non_space_token)
-
- def GetLongLineExceptions(self):
- """Gets a list of regexps for lines which can be longer than the limit.
-
- Returns:
- A list of regexps, used as matches (rather than searches).
- """
- return []
-
- def InExplicitlyTypedLanguage(self):
- """Returns whether this ecma implementation is explicitly typed."""
- return False
diff --git a/tools/closure_linter/closure_linter/ecmametadatapass.py b/tools/closure_linter/closure_linter/ecmametadatapass.py
deleted file mode 100755
index 50621610ef..0000000000
--- a/tools/closure_linter/closure_linter/ecmametadatapass.py
+++ /dev/null
@@ -1,574 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Metadata pass for annotating tokens in EcmaScript files."""
-
-__author__ = ('robbyw@google.com (Robert Walker)')
-
-from closure_linter import javascripttokens
-from closure_linter import tokenutil
-
-
-TokenType = javascripttokens.JavaScriptTokenType
-
-
-class ParseError(Exception):
- """Exception indicating a parse error at the given token.
-
- Attributes:
- token: The token where the parse error occurred.
- """
-
- def __init__(self, token, message=None):
- """Initialize a parse error at the given token with an optional message.
-
- Args:
- token: The token where the parse error occurred.
- message: A message describing the parse error.
- """
- Exception.__init__(self, message)
- self.token = token
-
-
-class EcmaContext(object):
- """Context object for EcmaScript languages.
-
- Attributes:
- type: The context type.
- start_token: The token where this context starts.
- end_token: The token where this context ends.
- parent: The parent context.
- """
-
- # The root context.
- ROOT = 'root'
-
- # A block of code.
- BLOCK = 'block'
-
- # A pseudo-block of code for a given case or default section.
- CASE_BLOCK = 'case_block'
-
- # Block of statements in a for loop's parentheses.
- FOR_GROUP_BLOCK = 'for_block'
-
- # An implied block of code for 1 line if, while, and for statements
- IMPLIED_BLOCK = 'implied_block'
-
- # An index in to an array or object.
- INDEX = 'index'
-
- # An array literal in [].
- ARRAY_LITERAL = 'array_literal'
-
- # An object literal in {}.
- OBJECT_LITERAL = 'object_literal'
-
- # An individual element in an array or object literal.
- LITERAL_ELEMENT = 'literal_element'
-
- # The portion of a ternary statement between ? and :
- TERNARY_TRUE = 'ternary_true'
-
- # The portion of a ternary statment after :
- TERNARY_FALSE = 'ternary_false'
-
- # The entire switch statment. This will contain a GROUP with the variable
- # and a BLOCK with the code.
-
- # Since that BLOCK is not a normal block, it can not contain statements except
- # for case and default.
- SWITCH = 'switch'
-
- # A normal comment.
- COMMENT = 'comment'
-
- # A JsDoc comment.
- DOC = 'doc'
-
- # An individual statement.
- STATEMENT = 'statement'
-
- # Code within parentheses.
- GROUP = 'group'
-
- # Parameter names in a function declaration.
- PARAMETERS = 'parameters'
-
- # A set of variable declarations appearing after the 'var' keyword.
- VAR = 'var'
-
- # Context types that are blocks.
- BLOCK_TYPES = frozenset([
- ROOT, BLOCK, CASE_BLOCK, FOR_GROUP_BLOCK, IMPLIED_BLOCK])
-
- def __init__(self, context_type, start_token, parent=None):
- """Initializes the context object.
-
- Args:
- context_type: The context type.
- start_token: The token where this context starts.
- parent: The parent context.
-
- Attributes:
- type: The context type.
- start_token: The token where this context starts.
- end_token: The token where this context ends.
- parent: The parent context.
- children: The child contexts of this context, in order.
- """
- self.type = context_type
- self.start_token = start_token
- self.end_token = None
-
- self.parent = None
- self.children = []
-
- if parent:
- parent.AddChild(self)
-
- def __repr__(self):
- """Returns a string representation of the context object."""
- stack = []
- context = self
- while context:
- stack.append(context.type)
- context = context.parent
- return 'Context(%s)' % ' > '.join(stack)
-
- def AddChild(self, child):
- """Adds a child to this context and sets child's parent to this context.
-
- Args:
- child: A child EcmaContext. The child's parent will be set to this
- context.
- """
-
- child.parent = self
-
- self.children.append(child)
- self.children.sort(EcmaContext._CompareContexts)
-
- def GetRoot(self):
- """Get the root context that contains this context, if any."""
- context = self
- while context:
- if context.type is EcmaContext.ROOT:
- return context
- context = context.parent
-
- @staticmethod
- def _CompareContexts(context1, context2):
- """Sorts contexts 1 and 2 by start token document position."""
- return tokenutil.Compare(context1.start_token, context2.start_token)
-
-
-class EcmaMetaData(object):
- """Token metadata for EcmaScript languages.
-
- Attributes:
- last_code: The last code token to appear before this one.
- context: The context this token appears in.
- operator_type: The operator type, will be one of the *_OPERATOR constants
- defined below.
- aliased_symbol: The full symbol being identified, as a string (e.g. an
- 'XhrIo' alias for 'goog.net.XhrIo'). Only applicable to identifier
- tokens. This is set in aliaspass.py and is a best guess.
- is_alias_definition: True if the symbol is part of an alias definition.
- If so, these symbols won't be counted towards goog.requires/provides.
- """
-
- UNARY_OPERATOR = 'unary'
-
- UNARY_POST_OPERATOR = 'unary_post'
-
- BINARY_OPERATOR = 'binary'
-
- TERNARY_OPERATOR = 'ternary'
-
- def __init__(self):
- """Initializes a token metadata object."""
- self.last_code = None
- self.context = None
- self.operator_type = None
- self.is_implied_semicolon = False
- self.is_implied_block = False
- self.is_implied_block_close = False
- self.aliased_symbol = None
- self.is_alias_definition = False
-
- def __repr__(self):
- """Returns a string representation of the context object."""
- parts = ['%r' % self.context]
- if self.operator_type:
- parts.append('optype: %r' % self.operator_type)
- if self.is_implied_semicolon:
- parts.append('implied;')
- if self.aliased_symbol:
- parts.append('alias for: %s' % self.aliased_symbol)
- return 'MetaData(%s)' % ', '.join(parts)
-
- def IsUnaryOperator(self):
- return self.operator_type in (EcmaMetaData.UNARY_OPERATOR,
- EcmaMetaData.UNARY_POST_OPERATOR)
-
- def IsUnaryPostOperator(self):
- return self.operator_type == EcmaMetaData.UNARY_POST_OPERATOR
-
-
-class EcmaMetaDataPass(object):
- """A pass that iterates over all tokens and builds metadata about them."""
-
- def __init__(self):
- """Initialize the meta data pass object."""
- self.Reset()
-
- def Reset(self):
- """Resets the metadata pass to prepare for the next file."""
- self._token = None
- self._context = None
- self._AddContext(EcmaContext.ROOT)
- self._last_code = None
-
- def _CreateContext(self, context_type):
- """Overridable by subclasses to create the appropriate context type."""
- return EcmaContext(context_type, self._token, self._context)
-
- def _CreateMetaData(self):
- """Overridable by subclasses to create the appropriate metadata type."""
- return EcmaMetaData()
-
- def _AddContext(self, context_type):
- """Adds a context of the given type to the context stack.
-
- Args:
- context_type: The type of context to create
- """
- self._context = self._CreateContext(context_type)
-
- def _PopContext(self):
- """Moves up one level in the context stack.
-
- Returns:
- The former context.
-
- Raises:
- ParseError: If the root context is popped.
- """
- top_context = self._context
- top_context.end_token = self._token
- self._context = top_context.parent
- if self._context:
- return top_context
- else:
- raise ParseError(self._token)
-
- def _PopContextType(self, *stop_types):
- """Pops the context stack until a context of the given type is popped.
-
- Args:
- *stop_types: The types of context to pop to - stops at the first match.
-
- Returns:
- The context object of the given type that was popped.
- """
- last = None
- while not last or last.type not in stop_types:
- last = self._PopContext()
- return last
-
- def _EndStatement(self):
- """Process the end of a statement."""
- self._PopContextType(EcmaContext.STATEMENT)
- if self._context.type == EcmaContext.IMPLIED_BLOCK:
- self._token.metadata.is_implied_block_close = True
- self._PopContext()
-
- def _ProcessContext(self):
- """Process the context at the current token.
-
- Returns:
- The context that should be assigned to the current token, or None if
- the current context after this method should be used.
-
- Raises:
- ParseError: When the token appears in an invalid context.
- """
- token = self._token
- token_type = token.type
-
- if self._context.type in EcmaContext.BLOCK_TYPES:
- # Whenever we're in a block, we add a statement context. We make an
- # exception for switch statements since they can only contain case: and
- # default: and therefore don't directly contain statements.
- # The block we add here may be immediately removed in some cases, but
- # that causes no harm.
- parent = self._context.parent
- if not parent or parent.type != EcmaContext.SWITCH:
- self._AddContext(EcmaContext.STATEMENT)
-
- elif self._context.type == EcmaContext.ARRAY_LITERAL:
- self._AddContext(EcmaContext.LITERAL_ELEMENT)
-
- if token_type == TokenType.START_PAREN:
- if self._last_code and self._last_code.IsKeyword('for'):
- # for loops contain multiple statements in the group unlike while,
- # switch, if, etc.
- self._AddContext(EcmaContext.FOR_GROUP_BLOCK)
- else:
- self._AddContext(EcmaContext.GROUP)
-
- elif token_type == TokenType.END_PAREN:
- result = self._PopContextType(EcmaContext.GROUP,
- EcmaContext.FOR_GROUP_BLOCK)
- keyword_token = result.start_token.metadata.last_code
- # keyword_token will not exist if the open paren is the first line of the
- # file, for example if all code is wrapped in an immediately executed
- # annonymous function.
- if keyword_token and keyword_token.string in ('if', 'for', 'while'):
- next_code = tokenutil.SearchExcept(token, TokenType.NON_CODE_TYPES)
- if next_code.type != TokenType.START_BLOCK:
- # Check for do-while.
- is_do_while = False
- pre_keyword_token = keyword_token.metadata.last_code
- if (pre_keyword_token and
- pre_keyword_token.type == TokenType.END_BLOCK):
- start_block_token = pre_keyword_token.metadata.context.start_token
- is_do_while = start_block_token.metadata.last_code.string == 'do'
-
- # If it's not do-while, it's an implied block.
- if not is_do_while:
- self._AddContext(EcmaContext.IMPLIED_BLOCK)
- token.metadata.is_implied_block = True
-
- return result
-
- # else (not else if) with no open brace after it should be considered the
- # start of an implied block, similar to the case with if, for, and while
- # above.
- elif (token_type == TokenType.KEYWORD and
- token.string == 'else'):
- next_code = tokenutil.SearchExcept(token, TokenType.NON_CODE_TYPES)
- if (next_code.type != TokenType.START_BLOCK and
- (next_code.type != TokenType.KEYWORD or next_code.string != 'if')):
- self._AddContext(EcmaContext.IMPLIED_BLOCK)
- token.metadata.is_implied_block = True
-
- elif token_type == TokenType.START_PARAMETERS:
- self._AddContext(EcmaContext.PARAMETERS)
-
- elif token_type == TokenType.END_PARAMETERS:
- return self._PopContextType(EcmaContext.PARAMETERS)
-
- elif token_type == TokenType.START_BRACKET:
- if (self._last_code and
- self._last_code.type in TokenType.EXPRESSION_ENDER_TYPES):
- self._AddContext(EcmaContext.INDEX)
- else:
- self._AddContext(EcmaContext.ARRAY_LITERAL)
-
- elif token_type == TokenType.END_BRACKET:
- return self._PopContextType(EcmaContext.INDEX, EcmaContext.ARRAY_LITERAL)
-
- elif token_type == TokenType.START_BLOCK:
- if (self._last_code.type in (TokenType.END_PAREN,
- TokenType.END_PARAMETERS) or
- self._last_code.IsKeyword('else') or
- self._last_code.IsKeyword('do') or
- self._last_code.IsKeyword('try') or
- self._last_code.IsKeyword('finally') or
- (self._last_code.IsOperator(':') and
- self._last_code.metadata.context.type == EcmaContext.CASE_BLOCK)):
- # else, do, try, and finally all might have no () before {.
- # Also, handle the bizzare syntax case 10: {...}.
- self._AddContext(EcmaContext.BLOCK)
- else:
- self._AddContext(EcmaContext.OBJECT_LITERAL)
-
- elif token_type == TokenType.END_BLOCK:
- context = self._PopContextType(EcmaContext.BLOCK,
- EcmaContext.OBJECT_LITERAL)
- if self._context.type == EcmaContext.SWITCH:
- # The end of the block also means the end of the switch statement it
- # applies to.
- return self._PopContext()
- return context
-
- elif token.IsKeyword('switch'):
- self._AddContext(EcmaContext.SWITCH)
-
- elif (token_type == TokenType.KEYWORD and
- token.string in ('case', 'default') and
- self._context.type != EcmaContext.OBJECT_LITERAL):
- # Pop up to but not including the switch block.
- while self._context.parent.type != EcmaContext.SWITCH:
- self._PopContext()
- if self._context.parent is None:
- raise ParseError(token, 'Encountered case/default statement '
- 'without switch statement')
-
- elif token.IsOperator('?'):
- self._AddContext(EcmaContext.TERNARY_TRUE)
-
- elif token.IsOperator(':'):
- if self._context.type == EcmaContext.OBJECT_LITERAL:
- self._AddContext(EcmaContext.LITERAL_ELEMENT)
-
- elif self._context.type == EcmaContext.TERNARY_TRUE:
- self._PopContext()
- self._AddContext(EcmaContext.TERNARY_FALSE)
-
- # Handle nested ternary statements like:
- # foo = bar ? baz ? 1 : 2 : 3
- # When we encounter the second ":" the context is
- # ternary_false > ternary_true > statement > root
- elif (self._context.type == EcmaContext.TERNARY_FALSE and
- self._context.parent.type == EcmaContext.TERNARY_TRUE):
- self._PopContext() # Leave current ternary false context.
- self._PopContext() # Leave current parent ternary true
- self._AddContext(EcmaContext.TERNARY_FALSE)
-
- elif self._context.parent.type == EcmaContext.SWITCH:
- self._AddContext(EcmaContext.CASE_BLOCK)
-
- elif token.IsKeyword('var'):
- self._AddContext(EcmaContext.VAR)
-
- elif token.IsOperator(','):
- while self._context.type not in (EcmaContext.VAR,
- EcmaContext.ARRAY_LITERAL,
- EcmaContext.OBJECT_LITERAL,
- EcmaContext.STATEMENT,
- EcmaContext.PARAMETERS,
- EcmaContext.GROUP):
- self._PopContext()
-
- elif token_type == TokenType.SEMICOLON:
- self._EndStatement()
-
- def Process(self, first_token):
- """Processes the token stream starting with the given token."""
- self._token = first_token
- while self._token:
- self._ProcessToken()
-
- if self._token.IsCode():
- self._last_code = self._token
-
- self._token = self._token.next
-
- try:
- self._PopContextType(self, EcmaContext.ROOT)
- except ParseError:
- # Ignore the "popped to root" error.
- pass
-
- def _ProcessToken(self):
- """Process the given token."""
- token = self._token
- token.metadata = self._CreateMetaData()
- context = (self._ProcessContext() or self._context)
- token.metadata.context = context
- token.metadata.last_code = self._last_code
-
- # Determine the operator type of the token, if applicable.
- if token.type == TokenType.OPERATOR:
- token.metadata.operator_type = self._GetOperatorType(token)
-
- # Determine if there is an implied semicolon after the token.
- if token.type != TokenType.SEMICOLON:
- next_code = tokenutil.SearchExcept(token, TokenType.NON_CODE_TYPES)
- # A statement like if (x) does not need a semicolon after it
- is_implied_block = self._context == EcmaContext.IMPLIED_BLOCK
- is_last_code_in_line = token.IsCode() and (
- not next_code or next_code.line_number != token.line_number)
- is_continued_operator = (token.type == TokenType.OPERATOR and
- not token.metadata.IsUnaryPostOperator())
- is_continued_dot = token.string == '.'
- next_code_is_operator = next_code and next_code.type == TokenType.OPERATOR
- is_end_of_block = (
- token.type == TokenType.END_BLOCK and
- token.metadata.context.type != EcmaContext.OBJECT_LITERAL)
- is_multiline_string = token.type == TokenType.STRING_TEXT
- is_continued_var_decl = (token.IsKeyword('var') and
- next_code and
- (next_code.type in [TokenType.IDENTIFIER,
- TokenType.SIMPLE_LVALUE]) and
- token.line_number < next_code.line_number)
- next_code_is_block = next_code and next_code.type == TokenType.START_BLOCK
- if (is_last_code_in_line and
- self._StatementCouldEndInContext() and
- not is_multiline_string and
- not is_end_of_block and
- not is_continued_var_decl and
- not is_continued_operator and
- not is_continued_dot and
- not next_code_is_operator and
- not is_implied_block and
- not next_code_is_block):
- token.metadata.is_implied_semicolon = True
- self._EndStatement()
-
- def _StatementCouldEndInContext(self):
- """Returns if the current statement (if any) may end in this context."""
- # In the basic statement or variable declaration context, statement can
- # always end in this context.
- if self._context.type in (EcmaContext.STATEMENT, EcmaContext.VAR):
- return True
-
- # End of a ternary false branch inside a statement can also be the
- # end of the statement, for example:
- # var x = foo ? foo.bar() : null
- # In this case the statement ends after the null, when the context stack
- # looks like ternary_false > var > statement > root.
- if (self._context.type == EcmaContext.TERNARY_FALSE and
- self._context.parent.type in (EcmaContext.STATEMENT, EcmaContext.VAR)):
- return True
-
- # In all other contexts like object and array literals, ternary true, etc.
- # the statement can't yet end.
- return False
-
- def _GetOperatorType(self, token):
- """Returns the operator type of the given operator token.
-
- Args:
- token: The token to get arity for.
-
- Returns:
- The type of the operator. One of the *_OPERATOR constants defined in
- EcmaMetaData.
- """
- if token.string == '?':
- return EcmaMetaData.TERNARY_OPERATOR
-
- if token.string in TokenType.UNARY_OPERATORS:
- return EcmaMetaData.UNARY_OPERATOR
-
- last_code = token.metadata.last_code
- if not last_code or last_code.type == TokenType.END_BLOCK:
- return EcmaMetaData.UNARY_OPERATOR
-
- if (token.string in TokenType.UNARY_POST_OPERATORS and
- last_code.type in TokenType.EXPRESSION_ENDER_TYPES):
- return EcmaMetaData.UNARY_POST_OPERATOR
-
- if (token.string in TokenType.UNARY_OK_OPERATORS and
- last_code.type not in TokenType.EXPRESSION_ENDER_TYPES and
- last_code.string not in TokenType.UNARY_POST_OPERATORS):
- return EcmaMetaData.UNARY_OPERATOR
-
- return EcmaMetaData.BINARY_OPERATOR
diff --git a/tools/closure_linter/closure_linter/error_check.py b/tools/closure_linter/closure_linter/error_check.py
deleted file mode 100755
index 8d657fe917..0000000000
--- a/tools/closure_linter/closure_linter/error_check.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Specific JSLint errors checker."""
-
-
-
-import gflags as flags
-
-FLAGS = flags.FLAGS
-
-
-class Rule(object):
- """Different rules to check."""
-
- # Documentations for specific rules goes in flag definition.
- BLANK_LINES_AT_TOP_LEVEL = 'blank_lines_at_top_level'
- INDENTATION = 'indentation'
- WELL_FORMED_AUTHOR = 'well_formed_author'
- NO_BRACES_AROUND_INHERIT_DOC = 'no_braces_around_inherit_doc'
- BRACES_AROUND_TYPE = 'braces_around_type'
- OPTIONAL_TYPE_MARKER = 'optional_type_marker'
- VARIABLE_ARG_MARKER = 'variable_arg_marker'
- UNUSED_PRIVATE_MEMBERS = 'unused_private_members'
- UNUSED_LOCAL_VARIABLES = 'unused_local_variables'
-
- # Rule to raise all known errors.
- ALL = 'all'
-
- # All rules that are to be checked when using the strict flag. E.g. the rules
- # that are specific to the stricter Closure style.
- CLOSURE_RULES = frozenset([BLANK_LINES_AT_TOP_LEVEL,
- INDENTATION,
- WELL_FORMED_AUTHOR,
- NO_BRACES_AROUND_INHERIT_DOC,
- BRACES_AROUND_TYPE,
- OPTIONAL_TYPE_MARKER,
- VARIABLE_ARG_MARKER])
-
-
-flags.DEFINE_boolean('strict', False,
- 'Whether to validate against the stricter Closure style. '
- 'This includes ' + (', '.join(Rule.CLOSURE_RULES)) + '.')
-flags.DEFINE_multistring('jslint_error', [],
- 'List of specific lint errors to check. Here is a list'
- ' of accepted values:\n'
- ' - ' + Rule.ALL + ': enables all following errors.\n'
- ' - ' + Rule.BLANK_LINES_AT_TOP_LEVEL + ': validates'
- 'number of blank lines between blocks at top level.\n'
- ' - ' + Rule.INDENTATION + ': checks correct '
- 'indentation of code.\n'
- ' - ' + Rule.WELL_FORMED_AUTHOR + ': validates the '
- '@author JsDoc tags.\n'
- ' - ' + Rule.NO_BRACES_AROUND_INHERIT_DOC + ': '
- 'forbids braces around @inheritdoc JsDoc tags.\n'
- ' - ' + Rule.BRACES_AROUND_TYPE + ': enforces braces '
- 'around types in JsDoc tags.\n'
- ' - ' + Rule.OPTIONAL_TYPE_MARKER + ': checks correct '
- 'use of optional marker = in param types.\n'
- ' - ' + Rule.UNUSED_PRIVATE_MEMBERS + ': checks for '
- 'unused private variables.\n'
- ' - ' + Rule.UNUSED_LOCAL_VARIABLES + ': checks for '
- 'unused local variables.\n')
-
-
-def ShouldCheck(rule):
- """Returns whether the optional rule should be checked.
-
- Computes different flags (strict, jslint_error, jslint_noerror) to find out if
- this specific rule should be checked.
-
- Args:
- rule: Name of the rule (see Rule).
-
- Returns:
- True if the rule should be checked according to the flags, otherwise False.
- """
- if rule in FLAGS.jslint_error or Rule.ALL in FLAGS.jslint_error:
- return True
- # Checks strict rules.
- return FLAGS.strict and rule in Rule.CLOSURE_RULES
diff --git a/tools/closure_linter/closure_linter/error_fixer.py b/tools/closure_linter/closure_linter/error_fixer.py
deleted file mode 100755
index 88f9c720ab..0000000000
--- a/tools/closure_linter/closure_linter/error_fixer.py
+++ /dev/null
@@ -1,618 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Main class responsible for automatically fixing simple style violations."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = 'robbyw@google.com (Robert Walker)'
-
-import re
-
-import gflags as flags
-from closure_linter import errors
-from closure_linter import javascriptstatetracker
-from closure_linter import javascripttokens
-from closure_linter import requireprovidesorter
-from closure_linter import tokenutil
-from closure_linter.common import errorhandler
-
-# Shorthand
-Token = javascripttokens.JavaScriptToken
-Type = javascripttokens.JavaScriptTokenType
-
-END_OF_FLAG_TYPE = re.compile(r'(}?\s*)$')
-
-# Regex to represent common mistake inverting author name and email as
-# @author User Name (user@company)
-INVERTED_AUTHOR_SPEC = re.compile(r'(?P<leading_whitespace>\s*)'
- r'(?P<name>[^(]+)'
- r'(?P<whitespace_after_name>\s+)'
- r'\('
- r'(?P<email>[^\s]+@[^)\s]+)'
- r'\)'
- r'(?P<trailing_characters>.*)')
-
-FLAGS = flags.FLAGS
-flags.DEFINE_boolean('disable_indentation_fixing', False,
- 'Whether to disable automatic fixing of indentation.')
-flags.DEFINE_list('fix_error_codes', [], 'A list of specific error codes to '
- 'fix. Defaults to all supported error codes when empty. '
- 'See errors.py for a list of error codes.')
-
-
-class ErrorFixer(errorhandler.ErrorHandler):
- """Object that fixes simple style errors."""
-
- def __init__(self, external_file=None):
- """Initialize the error fixer.
-
- Args:
- external_file: If included, all output will be directed to this file
- instead of overwriting the files the errors are found in.
- """
- errorhandler.ErrorHandler.__init__(self)
-
- self._file_name = None
- self._file_token = None
- self._external_file = external_file
-
- try:
- self._fix_error_codes = set([errors.ByName(error.upper()) for error in
- FLAGS.fix_error_codes])
- except KeyError as ke:
- raise ValueError('Unknown error code ' + ke.args[0])
-
- def HandleFile(self, filename, first_token):
- """Notifies this ErrorPrinter that subsequent errors are in filename.
-
- Args:
- filename: The name of the file about to be checked.
- first_token: The first token in the file.
- """
- self._file_name = filename
- self._file_is_html = filename.endswith('.html') or filename.endswith('.htm')
- self._file_token = first_token
- self._file_fix_count = 0
- self._file_changed_lines = set()
-
- def _AddFix(self, tokens):
- """Adds the fix to the internal count.
-
- Args:
- tokens: The token or sequence of tokens changed to fix an error.
- """
- self._file_fix_count += 1
- if hasattr(tokens, 'line_number'):
- self._file_changed_lines.add(tokens.line_number)
- else:
- for token in tokens:
- self._file_changed_lines.add(token.line_number)
-
- def _FixJsDocPipeNull(self, js_type):
- """Change number|null or null|number to ?number.
-
- Args:
- js_type: The typeannotation.TypeAnnotation instance to fix.
- """
-
- # Recurse into all sub_types if the error was at a deeper level.
- map(self._FixJsDocPipeNull, js_type.IterTypes())
-
- if js_type.type_group and len(js_type.sub_types) == 2:
- # Find and remove the null sub_type:
- sub_type = None
- for sub_type in js_type.sub_types:
- if sub_type.identifier == 'null':
- map(tokenutil.DeleteToken, sub_type.tokens)
- self._AddFix(sub_type.tokens)
- break
- else:
- return
-
- first_token = js_type.FirstToken()
- question_mark = Token('?', Type.DOC_TYPE_MODIFIER, first_token.line,
- first_token.line_number)
- tokenutil.InsertTokenBefore(question_mark, first_token)
- js_type.tokens.insert(0, question_mark)
- js_type.tokens.remove(sub_type)
- js_type.or_null = True
-
- # Now also remove the separator, which is in the parent's token list,
- # either before or after the sub_type, there is exactly one. Scan for it.
- for token in js_type.tokens:
- if (token and isinstance(token, Token) and
- token.type == Type.DOC_TYPE_MODIFIER and token.string == '|'):
- tokenutil.DeleteToken(token)
- self._AddFix(token)
- break
-
- def HandleError(self, error):
- """Attempts to fix the error.
-
- Args:
- error: The error object
- """
- code = error.code
- token = error.token
-
- if self._fix_error_codes and code not in self._fix_error_codes:
- return
-
- if code == errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL:
- self._FixJsDocPipeNull(token.attached_object.jstype)
-
- elif code == errors.JSDOC_MISSING_OPTIONAL_TYPE:
- iterator = token.attached_object.type_end_token
- if iterator.type == Type.DOC_END_BRACE or iterator.string.isspace():
- iterator = iterator.previous
-
- ending_space = len(iterator.string) - len(iterator.string.rstrip())
- iterator.string = '%s=%s' % (iterator.string.rstrip(),
- ' ' * ending_space)
-
- # Create a new flag object with updated type info.
- token.attached_object = javascriptstatetracker.JsDocFlag(token)
- self._AddFix(token)
-
- elif code == errors.JSDOC_MISSING_VAR_ARGS_TYPE:
- iterator = token.attached_object.type_start_token
- if iterator.type == Type.DOC_START_BRACE or iterator.string.isspace():
- iterator = iterator.next
-
- starting_space = len(iterator.string) - len(iterator.string.lstrip())
- iterator.string = '%s...%s' % (' ' * starting_space,
- iterator.string.lstrip())
-
- # Create a new flag object with updated type info.
- token.attached_object = javascriptstatetracker.JsDocFlag(token)
- self._AddFix(token)
-
- elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION,
- errors.MISSING_SEMICOLON):
- semicolon_token = Token(';', Type.SEMICOLON, token.line,
- token.line_number)
- tokenutil.InsertTokenAfter(semicolon_token, token)
- token.metadata.is_implied_semicolon = False
- semicolon_token.metadata.is_implied_semicolon = False
- self._AddFix(token)
-
- elif code in (errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
- errors.REDUNDANT_SEMICOLON,
- errors.COMMA_AT_END_OF_LITERAL):
- self._DeleteToken(token)
- self._AddFix(token)
-
- elif code == errors.INVALID_JSDOC_TAG:
- if token.string == '@returns':
- token.string = '@return'
- self._AddFix(token)
-
- elif code == errors.FILE_MISSING_NEWLINE:
- # This error is fixed implicitly by the way we restore the file
- self._AddFix(token)
-
- elif code == errors.MISSING_SPACE:
- if error.fix_data:
- token.string = error.fix_data
- self._AddFix(token)
- elif error.position:
- if error.position.IsAtBeginning():
- tokenutil.InsertSpaceTokenAfter(token.previous)
- elif error.position.IsAtEnd(token.string):
- tokenutil.InsertSpaceTokenAfter(token)
- else:
- token.string = error.position.Set(token.string, ' ')
- self._AddFix(token)
-
- elif code == errors.EXTRA_SPACE:
- if error.position:
- token.string = error.position.Set(token.string, '')
- self._AddFix(token)
-
- elif code == errors.MISSING_LINE:
- if error.position.IsAtBeginning():
- tokenutil.InsertBlankLineAfter(token.previous)
- else:
- tokenutil.InsertBlankLineAfter(token)
- self._AddFix(token)
-
- elif code == errors.EXTRA_LINE:
- self._DeleteToken(token)
- self._AddFix(token)
-
- elif code == errors.WRONG_BLANK_LINE_COUNT:
- if not token.previous:
- # TODO(user): Add an insertBefore method to tokenutil.
- return
-
- num_lines = error.fix_data
- should_delete = False
-
- if num_lines < 0:
- num_lines *= -1
- should_delete = True
-
- for unused_i in xrange(1, num_lines + 1):
- if should_delete:
- # TODO(user): DeleteToken should update line numbers.
- self._DeleteToken(token.previous)
- else:
- tokenutil.InsertBlankLineAfter(token.previous)
- self._AddFix(token)
-
- elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING:
- end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END)
- if end_quote:
- single_quote_start = Token(
- "'", Type.SINGLE_QUOTE_STRING_START, token.line, token.line_number)
- single_quote_end = Token(
- "'", Type.SINGLE_QUOTE_STRING_START, end_quote.line,
- token.line_number)
-
- tokenutil.InsertTokenAfter(single_quote_start, token)
- tokenutil.InsertTokenAfter(single_quote_end, end_quote)
- self._DeleteToken(token)
- self._DeleteToken(end_quote)
- self._AddFix([token, end_quote])
-
- elif code == errors.MISSING_BRACES_AROUND_TYPE:
- fixed_tokens = []
- start_token = token.attached_object.type_start_token
-
- if start_token.type != Type.DOC_START_BRACE:
- leading_space = (
- len(start_token.string) - len(start_token.string.lstrip()))
- if leading_space:
- start_token = tokenutil.SplitToken(start_token, leading_space)
- # Fix case where start and end token were the same.
- if token.attached_object.type_end_token == start_token.previous:
- token.attached_object.type_end_token = start_token
-
- new_token = Token('{', Type.DOC_START_BRACE, start_token.line,
- start_token.line_number)
- tokenutil.InsertTokenAfter(new_token, start_token.previous)
- token.attached_object.type_start_token = new_token
- fixed_tokens.append(new_token)
-
- end_token = token.attached_object.type_end_token
- if end_token.type != Type.DOC_END_BRACE:
- # If the start token was a brace, the end token will be a
- # FLAG_ENDING_TYPE token, if there wasn't a starting brace then
- # the end token is the last token of the actual type.
- last_type = end_token
- if not fixed_tokens:
- last_type = end_token.previous
-
- while last_type.string.isspace():
- last_type = last_type.previous
-
- # If there was no starting brace then a lone end brace wouldn't have
- # been type end token. Now that we've added any missing start brace,
- # see if the last effective type token was an end brace.
- if last_type.type != Type.DOC_END_BRACE:
- trailing_space = (len(last_type.string) -
- len(last_type.string.rstrip()))
- if trailing_space:
- tokenutil.SplitToken(last_type,
- len(last_type.string) - trailing_space)
-
- new_token = Token('}', Type.DOC_END_BRACE, last_type.line,
- last_type.line_number)
- tokenutil.InsertTokenAfter(new_token, last_type)
- token.attached_object.type_end_token = new_token
- fixed_tokens.append(new_token)
-
- self._AddFix(fixed_tokens)
-
- elif code == errors.LINE_STARTS_WITH_OPERATOR:
- # Remove whitespace following the operator so the line starts clean.
- self._StripSpace(token, before=False)
-
- # Remove the operator.
- tokenutil.DeleteToken(token)
- self._AddFix(token)
-
- insertion_point = tokenutil.GetPreviousCodeToken(token)
-
- # Insert a space between the previous token and the new operator.
- space = Token(' ', Type.WHITESPACE, insertion_point.line,
- insertion_point.line_number)
- tokenutil.InsertTokenAfter(space, insertion_point)
-
- # Insert the operator on the end of the previous line.
- new_token = Token(token.string, token.type, insertion_point.line,
- insertion_point.line_number)
- tokenutil.InsertTokenAfter(new_token, space)
- self._AddFix(new_token)
-
- elif code == errors.LINE_ENDS_WITH_DOT:
- # Remove whitespace preceding the operator to remove trailing whitespace.
- self._StripSpace(token, before=True)
-
- # Remove the dot.
- tokenutil.DeleteToken(token)
- self._AddFix(token)
-
- insertion_point = tokenutil.GetNextCodeToken(token)
-
- # Insert the dot at the beginning of the next line of code.
- new_token = Token(token.string, token.type, insertion_point.line,
- insertion_point.line_number)
- tokenutil.InsertTokenBefore(new_token, insertion_point)
- self._AddFix(new_token)
-
- elif code == errors.GOOG_REQUIRES_NOT_ALPHABETIZED:
- require_start_token = error.fix_data
- sorter = requireprovidesorter.RequireProvideSorter()
- sorter.FixRequires(require_start_token)
-
- self._AddFix(require_start_token)
-
- elif code == errors.GOOG_PROVIDES_NOT_ALPHABETIZED:
- provide_start_token = error.fix_data
- sorter = requireprovidesorter.RequireProvideSorter()
- sorter.FixProvides(provide_start_token)
-
- self._AddFix(provide_start_token)
-
- elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC:
- if token.previous.string == '{' and token.next.string == '}':
- self._DeleteToken(token.previous)
- self._DeleteToken(token.next)
- self._AddFix([token])
-
- elif code == errors.INVALID_AUTHOR_TAG_DESCRIPTION:
- match = INVERTED_AUTHOR_SPEC.match(token.string)
- if match:
- token.string = '%s%s%s(%s)%s' % (match.group('leading_whitespace'),
- match.group('email'),
- match.group('whitespace_after_name'),
- match.group('name'),
- match.group('trailing_characters'))
- self._AddFix(token)
-
- elif (code == errors.WRONG_INDENTATION and
- not FLAGS.disable_indentation_fixing):
- token = tokenutil.GetFirstTokenInSameLine(token)
- actual = error.position.start
- expected = error.position.length
-
- # Cases where first token is param but with leading spaces.
- if (len(token.string.lstrip()) == len(token.string) - actual and
- token.string.lstrip()):
- token.string = token.string.lstrip()
- actual = 0
-
- if token.type in (Type.WHITESPACE, Type.PARAMETERS) and actual != 0:
- token.string = token.string.lstrip() + (' ' * expected)
- self._AddFix([token])
- else:
- # We need to add indentation.
- new_token = Token(' ' * expected, Type.WHITESPACE,
- token.line, token.line_number)
- # Note that we'll never need to add indentation at the first line,
- # since it will always not be indented. Therefore it's safe to assume
- # token.previous exists.
- tokenutil.InsertTokenAfter(new_token, token.previous)
- self._AddFix([token])
-
- elif code in [errors.MALFORMED_END_OF_SCOPE_COMMENT,
- errors.MISSING_END_OF_SCOPE_COMMENT]:
- # Only fix cases where }); is found with no trailing content on the line
- # other than a comment. Value of 'token' is set to } for this error.
- if (token.type == Type.END_BLOCK and
- token.next.type == Type.END_PAREN and
- token.next.next.type == Type.SEMICOLON):
- current_token = token.next.next.next
- removed_tokens = []
- while current_token and current_token.line_number == token.line_number:
- if current_token.IsAnyType(Type.WHITESPACE,
- Type.START_SINGLE_LINE_COMMENT,
- Type.COMMENT):
- removed_tokens.append(current_token)
- current_token = current_token.next
- else:
- return
-
- if removed_tokens:
- self._DeleteTokens(removed_tokens[0], len(removed_tokens))
-
- whitespace_token = Token(' ', Type.WHITESPACE, token.line,
- token.line_number)
- start_comment_token = Token('//', Type.START_SINGLE_LINE_COMMENT,
- token.line, token.line_number)
- comment_token = Token(' goog.scope', Type.COMMENT, token.line,
- token.line_number)
- insertion_tokens = [whitespace_token, start_comment_token,
- comment_token]
-
- tokenutil.InsertTokensAfter(insertion_tokens, token.next.next)
- self._AddFix(removed_tokens + insertion_tokens)
-
- elif code in [errors.EXTRA_GOOG_PROVIDE, errors.EXTRA_GOOG_REQUIRE]:
- tokens_in_line = tokenutil.GetAllTokensInSameLine(token)
- num_delete_tokens = len(tokens_in_line)
- # If line being deleted is preceded and succeed with blank lines then
- # delete one blank line also.
- if (tokens_in_line[0].previous and tokens_in_line[-1].next
- and tokens_in_line[0].previous.type == Type.BLANK_LINE
- and tokens_in_line[-1].next.type == Type.BLANK_LINE):
- num_delete_tokens += 1
- self._DeleteTokens(tokens_in_line[0], num_delete_tokens)
- self._AddFix(tokens_in_line)
-
- elif code in [errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_REQUIRE]:
- missing_namespaces = error.fix_data[0]
- need_blank_line = error.fix_data[1] or (not token.previous)
-
- insert_location = Token('', Type.NORMAL, '', token.line_number - 1)
- dummy_first_token = insert_location
- tokenutil.InsertTokenBefore(insert_location, token)
-
- # If inserting a blank line check blank line does not exist before
- # token to avoid extra blank lines.
- if (need_blank_line and insert_location.previous
- and insert_location.previous.type != Type.BLANK_LINE):
- tokenutil.InsertBlankLineAfter(insert_location)
- insert_location = insert_location.next
-
- for missing_namespace in missing_namespaces:
- new_tokens = self._GetNewRequireOrProvideTokens(
- code == errors.MISSING_GOOG_PROVIDE,
- missing_namespace, insert_location.line_number + 1)
- tokenutil.InsertLineAfter(insert_location, new_tokens)
- insert_location = new_tokens[-1]
- self._AddFix(new_tokens)
-
- # If inserting a blank line check blank line does not exist after
- # token to avoid extra blank lines.
- if (need_blank_line and insert_location.next
- and insert_location.next.type != Type.BLANK_LINE):
- tokenutil.InsertBlankLineAfter(insert_location)
-
- tokenutil.DeleteToken(dummy_first_token)
-
- def _StripSpace(self, token, before):
- """Strip whitespace tokens either preceding or following the given token.
-
- Args:
- token: The token.
- before: If true, strip space before the token, if false, after it.
- """
- token = token.previous if before else token.next
- while token and token.type == Type.WHITESPACE:
- tokenutil.DeleteToken(token)
- token = token.previous if before else token.next
-
- def _GetNewRequireOrProvideTokens(self, is_provide, namespace, line_number):
- """Returns a list of tokens to create a goog.require/provide statement.
-
- Args:
- is_provide: True if getting tokens for a provide, False for require.
- namespace: The required or provided namespaces to get tokens for.
- line_number: The line number the new require or provide statement will be
- on.
-
- Returns:
- Tokens to create a new goog.require or goog.provide statement.
- """
- string = 'goog.require'
- if is_provide:
- string = 'goog.provide'
- line_text = string + '(\'' + namespace + '\');\n'
- return [
- Token(string, Type.IDENTIFIER, line_text, line_number),
- Token('(', Type.START_PAREN, line_text, line_number),
- Token('\'', Type.SINGLE_QUOTE_STRING_START, line_text, line_number),
- Token(namespace, Type.STRING_TEXT, line_text, line_number),
- Token('\'', Type.SINGLE_QUOTE_STRING_END, line_text, line_number),
- Token(')', Type.END_PAREN, line_text, line_number),
- Token(';', Type.SEMICOLON, line_text, line_number)
- ]
-
- def _DeleteToken(self, token):
- """Deletes the specified token from the linked list of tokens.
-
- Updates instance variables pointing to tokens such as _file_token if
- they reference the deleted token.
-
- Args:
- token: The token to delete.
- """
- if token == self._file_token:
- self._file_token = token.next
-
- tokenutil.DeleteToken(token)
-
- def _DeleteTokens(self, token, token_count):
- """Deletes the given number of tokens starting with the given token.
-
- Updates instance variables pointing to tokens such as _file_token if
- they reference the deleted token.
-
- Args:
- token: The first token to delete.
- token_count: The total number of tokens to delete.
- """
- if token == self._file_token:
- for unused_i in xrange(token_count):
- self._file_token = self._file_token.next
-
- tokenutil.DeleteTokens(token, token_count)
-
- def FinishFile(self):
- """Called when the current file has finished style checking.
-
- Used to go back and fix any errors in the file. It currently supports both
- js and html files. For js files it does a simple dump of all tokens, but in
- order to support html file, we need to merge the original file with the new
- token set back together. This works because the tokenized html file is the
- original html file with all non js lines kept but blanked out with one blank
- line token per line of html.
- """
- if self._file_fix_count:
- # Get the original file content for html.
- if self._file_is_html:
- f = open(self._file_name, 'r')
- original_lines = f.readlines()
- f.close()
-
- f = self._external_file
- if not f:
- error_noun = 'error' if self._file_fix_count == 1 else 'errors'
- print 'Fixed %d %s in %s' % (
- self._file_fix_count, error_noun, self._file_name)
- f = open(self._file_name, 'w')
-
- token = self._file_token
- # Finding the first not deleted token.
- while token.is_deleted:
- token = token.next
- # If something got inserted before first token (e.g. due to sorting)
- # then move to start. Bug 8398202.
- while token.previous:
- token = token.previous
- char_count = 0
- line = ''
- while token:
- line += token.string
- char_count += len(token.string)
-
- if token.IsLastInLine():
- # We distinguish if a blank line in html was from stripped original
- # file or newly added error fix by looking at the "org_line_number"
- # field on the token. It is only set in the tokenizer, so for all
- # error fixes, the value should be None.
- if (line or not self._file_is_html or
- token.orig_line_number is None):
- f.write(line)
- f.write('\n')
- else:
- f.write(original_lines[token.orig_line_number - 1])
- line = ''
- if char_count > 80 and token.line_number in self._file_changed_lines:
- print 'WARNING: Line %d of %s is now longer than 80 characters.' % (
- token.line_number, self._file_name)
-
- char_count = 0
-
- token = token.next
-
- if not self._external_file:
- # Close the file if we created it
- f.close()
diff --git a/tools/closure_linter/closure_linter/error_fixer_test.py b/tools/closure_linter/closure_linter/error_fixer_test.py
deleted file mode 100644
index 49f449de42..0000000000
--- a/tools/closure_linter/closure_linter/error_fixer_test.py
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unit tests for the error_fixer module."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-
-
-import unittest as googletest
-from closure_linter import error_fixer
-from closure_linter import testutil
-
-
-class ErrorFixerTest(googletest.TestCase):
- """Unit tests for error_fixer."""
-
- def setUp(self):
- self.error_fixer = error_fixer.ErrorFixer()
-
- def testDeleteToken(self):
- start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
- second_token = start_token.next
- self.error_fixer.HandleFile('test_file', start_token)
-
- self.error_fixer._DeleteToken(start_token)
-
- self.assertEqual(second_token, self.error_fixer._file_token)
-
- def testDeleteTokens(self):
- start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
- fourth_token = start_token.next.next.next
- self.error_fixer.HandleFile('test_file', start_token)
-
- self.error_fixer._DeleteTokens(start_token, 3)
-
- self.assertEqual(fourth_token, self.error_fixer._file_token)
-
-_TEST_SCRIPT = """\
-var x = 3;
-"""
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/closure_linter/errorrecord.py b/tools/closure_linter/closure_linter/errorrecord.py
deleted file mode 100644
index ce9fb908c7..0000000000
--- a/tools/closure_linter/closure_linter/errorrecord.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""A simple, pickle-serializable class to represent a lint error."""
-
-__author__ = 'nnaze@google.com (Nathan Naze)'
-
-import gflags as flags
-
-from closure_linter import errors
-from closure_linter.common import erroroutput
-
-FLAGS = flags.FLAGS
-
-
-class ErrorRecord(object):
- """Record-keeping struct that can be serialized back from a process.
-
- Attributes:
- path: Path to the file.
- error_string: Error string for the user.
- new_error: Whether this is a "new error" (see errors.NEW_ERRORS).
- """
-
- def __init__(self, path, error_string, new_error):
- self.path = path
- self.error_string = error_string
- self.new_error = new_error
-
-
-def MakeErrorRecord(path, error):
- """Make an error record with correctly formatted error string.
-
- Errors are not able to be serialized (pickled) over processes because of
- their pointers to the complex token/context graph. We use an intermediary
- serializable class to pass back just the relevant information.
-
- Args:
- path: Path of file the error was found in.
- error: An error.Error instance.
-
- Returns:
- _ErrorRecord instance.
- """
- new_error = error.code in errors.NEW_ERRORS
-
- if FLAGS.unix_mode:
- error_string = erroroutput.GetUnixErrorOutput(
- path, error, new_error=new_error)
- else:
- error_string = erroroutput.GetErrorOutput(error, new_error=new_error)
-
- return ErrorRecord(path, error_string, new_error)
diff --git a/tools/closure_linter/closure_linter/errorrules.py b/tools/closure_linter/closure_linter/errorrules.py
deleted file mode 100755
index b1b72aab6d..0000000000
--- a/tools/closure_linter/closure_linter/errorrules.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Linter error rules class for Closure Linter."""
-
-__author__ = 'robbyw@google.com (Robert Walker)'
-
-import gflags as flags
-from closure_linter import errors
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_boolean('jsdoc', True,
- 'Whether to report errors for missing JsDoc.')
-flags.DEFINE_list('disable', None,
- 'Disable specific error. Usage Ex.: gjslint --disable 1,'
- '0011 foo.js.')
-flags.DEFINE_integer('max_line_length', 80, 'Maximum line length allowed '
- 'without warning.', lower_bound=1)
-
-disabled_error_nums = None
-
-
-def GetMaxLineLength():
- """Returns allowed maximum length of line.
-
- Returns:
- Length of line allowed without any warning.
- """
- return FLAGS.max_line_length
-
-
-def ShouldReportError(error):
- """Whether the given error should be reported.
-
- Returns:
- True for all errors except missing documentation errors and disabled
- errors. For missing documentation, it returns the value of the
- jsdoc flag.
- """
- global disabled_error_nums
- if disabled_error_nums is None:
- disabled_error_nums = []
- if FLAGS.disable:
- for error_str in FLAGS.disable:
- error_num = 0
- try:
- error_num = int(error_str)
- except ValueError:
- pass
- disabled_error_nums.append(error_num)
-
- return ((FLAGS.jsdoc or error not in (
- errors.MISSING_PARAMETER_DOCUMENTATION,
- errors.MISSING_RETURN_DOCUMENTATION,
- errors.MISSING_MEMBER_DOCUMENTATION,
- errors.MISSING_PRIVATE,
- errors.MISSING_JSDOC_TAG_THIS)) and
- (not FLAGS.disable or error not in disabled_error_nums))
diff --git a/tools/closure_linter/closure_linter/errorrules_test.py b/tools/closure_linter/closure_linter/errorrules_test.py
deleted file mode 100644
index cb903785e6..0000000000
--- a/tools/closure_linter/closure_linter/errorrules_test.py
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2013 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Medium tests for the gjslint errorrules.
-
-Currently its just verifying that warnings can't be disabled.
-"""
-
-
-
-import gflags as flags
-import unittest as googletest
-
-from closure_linter import errors
-from closure_linter import runner
-from closure_linter.common import erroraccumulator
-
-flags.FLAGS.strict = True
-flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js')
-flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
-
-
-class ErrorRulesTest(googletest.TestCase):
- """Test case to for gjslint errorrules."""
-
- def testNoMaxLineLengthFlagExists(self):
- """Tests that --max_line_length flag does not exists."""
- self.assertTrue('max_line_length' not in flags.FLAGS.FlagDict())
-
- def testGetMaxLineLength(self):
- """Tests warning are reported for line greater than 80.
- """
-
- # One line > 100 and one line > 80 and < 100. So should produce two
- # line too long error.
- original = [
- 'goog.require(\'dummy.aa\');',
- '',
- 'function a() {',
- ' dummy.aa.i = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13'
- ' + 14 + 15 + 16 + 17 + 18 + 19 + 20;',
- ' dummy.aa.j = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13'
- ' + 14 + 15 + 16 + 17 + 18;',
- '}',
- ''
- ]
-
- # Expect line too long.
- expected = [errors.LINE_TOO_LONG, errors.LINE_TOO_LONG]
-
- self._AssertErrors(original, expected)
-
- def testNoDisableFlagExists(self):
- """Tests that --disable flag does not exists."""
- self.assertTrue('disable' not in flags.FLAGS.FlagDict())
-
- def testWarningsNotDisabled(self):
- """Tests warnings are reported when nothing is disabled.
- """
- original = [
- 'goog.require(\'dummy.aa\');',
- 'goog.require(\'dummy.Cc\');',
- 'goog.require(\'dummy.Dd\');',
- '',
- 'function a() {',
- ' dummy.aa.i = 1;',
- ' dummy.Cc.i = 1;',
- ' dummy.Dd.i = 1;',
- '}',
- ]
-
- expected = [errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
- errors.FILE_MISSING_NEWLINE]
-
- self._AssertErrors(original, expected)
-
- def _AssertErrors(self, original, expected_errors, include_header=True):
- """Asserts that the error fixer corrects original to expected."""
- if include_header:
- original = self._GetHeader() + original
-
- # Trap gjslint's output parse it to get messages added.
- error_accumulator = erroraccumulator.ErrorAccumulator()
- runner.Run('testing.js', error_accumulator, source=original)
- error_nums = [e.code for e in error_accumulator.GetErrors()]
-
- error_nums.sort()
- expected_errors.sort()
- self.assertListEqual(error_nums, expected_errors)
-
- def _GetHeader(self):
- """Returns a fake header for a JavaScript file."""
- return [
- '// Copyright 2011 Google Inc. All Rights Reserved.',
- '',
- '/**',
- ' * @fileoverview Fake file overview.',
- ' * @author fake@google.com (Fake Person)',
- ' */',
- ''
- ]
-
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/closure_linter/errors.py b/tools/closure_linter/closure_linter/errors.py
deleted file mode 100755
index 356ee0c5a6..0000000000
--- a/tools/closure_linter/closure_linter/errors.py
+++ /dev/null
@@ -1,154 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Error codes for JavaScript style checker."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-
-def ByName(name):
- """Get the error code for the given error name.
-
- Args:
- name: The name of the error
-
- Returns:
- The error code
- """
- return globals()[name]
-
-
-# "File-fatal" errors - these errors stop further parsing of a single file
-FILE_NOT_FOUND = -1
-FILE_DOES_NOT_PARSE = -2
-
-# Spacing
-EXTRA_SPACE = 1
-MISSING_SPACE = 2
-EXTRA_LINE = 3
-MISSING_LINE = 4
-ILLEGAL_TAB = 5
-WRONG_INDENTATION = 6
-WRONG_BLANK_LINE_COUNT = 7
-
-# Semicolons
-MISSING_SEMICOLON = 10
-MISSING_SEMICOLON_AFTER_FUNCTION = 11
-ILLEGAL_SEMICOLON_AFTER_FUNCTION = 12
-REDUNDANT_SEMICOLON = 13
-
-# Miscellaneous
-ILLEGAL_PROTOTYPE_MEMBER_VALUE = 100
-LINE_TOO_LONG = 110
-LINE_STARTS_WITH_OPERATOR = 120
-COMMA_AT_END_OF_LITERAL = 121
-LINE_ENDS_WITH_DOT = 122
-MULTI_LINE_STRING = 130
-UNNECESSARY_DOUBLE_QUOTED_STRING = 131
-UNUSED_PRIVATE_MEMBER = 132
-UNUSED_LOCAL_VARIABLE = 133
-
-# Requires, provides
-GOOG_REQUIRES_NOT_ALPHABETIZED = 140
-GOOG_PROVIDES_NOT_ALPHABETIZED = 141
-MISSING_GOOG_REQUIRE = 142
-MISSING_GOOG_PROVIDE = 143
-EXTRA_GOOG_REQUIRE = 144
-EXTRA_GOOG_PROVIDE = 145
-ALIAS_STMT_NEEDS_GOOG_REQUIRE = 146
-
-# JsDoc
-INVALID_JSDOC_TAG = 200
-INVALID_USE_OF_DESC_TAG = 201
-NO_BUG_NUMBER_AFTER_BUG_TAG = 202
-MISSING_PARAMETER_DOCUMENTATION = 210
-EXTRA_PARAMETER_DOCUMENTATION = 211
-WRONG_PARAMETER_DOCUMENTATION = 212
-MISSING_JSDOC_TAG_TYPE = 213
-MISSING_JSDOC_TAG_DESCRIPTION = 214
-MISSING_JSDOC_PARAM_NAME = 215
-OUT_OF_ORDER_JSDOC_TAG_TYPE = 216
-MISSING_RETURN_DOCUMENTATION = 217
-UNNECESSARY_RETURN_DOCUMENTATION = 218
-MISSING_BRACES_AROUND_TYPE = 219
-MISSING_MEMBER_DOCUMENTATION = 220
-MISSING_PRIVATE = 221
-EXTRA_PRIVATE = 222
-INVALID_OVERRIDE_PRIVATE = 223
-INVALID_INHERIT_DOC_PRIVATE = 224
-MISSING_JSDOC_TAG_THIS = 225
-UNNECESSARY_BRACES_AROUND_INHERIT_DOC = 226
-INVALID_AUTHOR_TAG_DESCRIPTION = 227
-JSDOC_PREFER_QUESTION_TO_PIPE_NULL = 230
-JSDOC_ILLEGAL_QUESTION_WITH_PIPE = 231
-JSDOC_MISSING_OPTIONAL_TYPE = 232
-JSDOC_MISSING_OPTIONAL_PREFIX = 233
-JSDOC_MISSING_VAR_ARGS_TYPE = 234
-JSDOC_MISSING_VAR_ARGS_NAME = 235
-JSDOC_DOES_NOT_PARSE = 236
-# TODO(robbyw): Split this in to more specific syntax problems.
-INCORRECT_SUPPRESS_SYNTAX = 250
-INVALID_SUPPRESS_TYPE = 251
-UNNECESSARY_SUPPRESS = 252
-
-# File ending
-FILE_MISSING_NEWLINE = 300
-FILE_IN_BLOCK = 301
-
-# Interfaces
-INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS = 400
-INTERFACE_METHOD_CANNOT_HAVE_CODE = 401
-
-# Comments
-MISSING_END_OF_SCOPE_COMMENT = 500
-MALFORMED_END_OF_SCOPE_COMMENT = 501
-
-# goog.scope - Namespace aliasing
-# TODO(nnaze) Add additional errors here and in aliaspass.py
-INVALID_USE_OF_GOOG_SCOPE = 600
-EXTRA_GOOG_SCOPE_USAGE = 601
-
-# ActionScript specific errors:
-# TODO(user): move these errors to their own file and move all JavaScript
-# specific errors to their own file as well.
-# All ActionScript specific errors should have error number at least 1000.
-FUNCTION_MISSING_RETURN_TYPE = 1132
-PARAMETER_MISSING_TYPE = 1133
-VAR_MISSING_TYPE = 1134
-PARAMETER_MISSING_DEFAULT_VALUE = 1135
-IMPORTS_NOT_ALPHABETIZED = 1140
-IMPORT_CONTAINS_WILDCARD = 1141
-UNUSED_IMPORT = 1142
-INVALID_TRACE_SEVERITY_LEVEL = 1250
-MISSING_TRACE_SEVERITY_LEVEL = 1251
-MISSING_TRACE_MESSAGE = 1252
-REMOVE_TRACE_BEFORE_SUBMIT = 1253
-REMOVE_COMMENT_BEFORE_SUBMIT = 1254
-# End of list of ActionScript specific errors.
-
-NEW_ERRORS = frozenset([
- # Errors added after 2.0.2:
- WRONG_INDENTATION,
- MISSING_SEMICOLON,
- # Errors added after 2.3.9:
- JSDOC_MISSING_VAR_ARGS_TYPE,
- JSDOC_MISSING_VAR_ARGS_NAME,
- # Errors added after 2.3.15:
- ALIAS_STMT_NEEDS_GOOG_REQUIRE,
- JSDOC_DOES_NOT_PARSE,
- LINE_ENDS_WITH_DOT,
- # Errors added after 2.3.17:
- ])
diff --git a/tools/closure_linter/closure_linter/fixjsstyle.py b/tools/closure_linter/closure_linter/fixjsstyle.py
deleted file mode 100755
index 2d65e0398f..0000000000
--- a/tools/closure_linter/closure_linter/fixjsstyle.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Automatically fix simple style guide violations."""
-
-__author__ = 'robbyw@google.com (Robert Walker)'
-
-import StringIO
-import sys
-
-import gflags as flags
-
-from closure_linter import error_fixer
-from closure_linter import runner
-from closure_linter.common import simplefileflags as fileflags
-
-FLAGS = flags.FLAGS
-flags.DEFINE_list('additional_extensions', None, 'List of additional file '
- 'extensions (not js) that should be treated as '
- 'JavaScript files.')
-flags.DEFINE_boolean('dry_run', False, 'Do not modify the file, only print it.')
-
-
-def main(argv=None):
- """Main function.
-
- Args:
- argv: Sequence of command line arguments.
- """
- if argv is None:
- argv = flags.FLAGS(sys.argv)
-
- suffixes = ['.js']
- if FLAGS.additional_extensions:
- suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
-
- files = fileflags.GetFileList(argv, 'JavaScript', suffixes)
-
- output_buffer = None
- if FLAGS.dry_run:
- output_buffer = StringIO.StringIO()
-
- fixer = error_fixer.ErrorFixer(output_buffer)
-
- # Check the list of files.
- for filename in files:
- runner.Run(filename, fixer)
- if FLAGS.dry_run:
- print output_buffer.getvalue()
-
-
-if __name__ == '__main__':
- main()
diff --git a/tools/closure_linter/closure_linter/fixjsstyle_test.py b/tools/closure_linter/closure_linter/fixjsstyle_test.py
deleted file mode 100755
index 34de3f8488..0000000000
--- a/tools/closure_linter/closure_linter/fixjsstyle_test.py
+++ /dev/null
@@ -1,615 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Medium tests for the gpylint auto-fixer."""
-
-__author__ = 'robbyw@google.com (Robby Walker)'
-
-import StringIO
-
-import gflags as flags
-import unittest as googletest
-from closure_linter import error_fixer
-from closure_linter import runner
-
-
-_RESOURCE_PREFIX = 'closure_linter/testdata'
-
-flags.FLAGS.strict = True
-flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js')
-flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
-
-
-class FixJsStyleTest(googletest.TestCase):
- """Test case to for gjslint auto-fixing."""
-
- def setUp(self):
- flags.FLAGS.dot_on_next_line = True
-
- def tearDown(self):
- flags.FLAGS.dot_on_next_line = False
-
- def testFixJsStyle(self):
- test_cases = [
- ['fixjsstyle.in.js', 'fixjsstyle.out.js'],
- ['indentation.js', 'fixjsstyle.indentation.out.js'],
- ['fixjsstyle.html.in.html', 'fixjsstyle.html.out.html'],
- ['fixjsstyle.oplineend.in.js', 'fixjsstyle.oplineend.out.js']]
- for [running_input_file, running_output_file] in test_cases:
- print 'Checking %s vs %s' % (running_input_file, running_output_file)
- input_filename = None
- golden_filename = None
- current_filename = None
- try:
- input_filename = '%s/%s' % (_RESOURCE_PREFIX, running_input_file)
- current_filename = input_filename
-
- golden_filename = '%s/%s' % (_RESOURCE_PREFIX, running_output_file)
- current_filename = golden_filename
- except IOError as ex:
- raise IOError('Could not find testdata resource for %s: %s' %
- (current_filename, ex))
-
- if running_input_file == 'fixjsstyle.in.js':
- with open(input_filename) as f:
- for line in f:
- # Go to last line.
- pass
- self.assertTrue(line == line.rstrip(), '%s file should not end '
- 'with a new line.' % (input_filename))
-
- # Autofix the file, sending output to a fake file.
- actual = StringIO.StringIO()
- runner.Run(input_filename, error_fixer.ErrorFixer(actual))
-
- # Now compare the files.
- actual.seek(0)
- expected = open(golden_filename, 'r')
-
- # Uncomment to generate new golden files and run
- # open('/'.join(golden_filename.split('/')[4:]), 'w').write(actual.read())
- # actual.seek(0)
-
- self.assertEqual(actual.readlines(), expected.readlines())
-
- def testAddProvideFirstLine(self):
- """Tests handling of case where goog.provide is added."""
- original = [
- 'dummy.bb.cc = 1;',
- ]
-
- expected = [
- 'goog.provide(\'dummy.bb\');',
- '',
- 'dummy.bb.cc = 1;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- original = [
- '',
- 'dummy.bb.cc = 1;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testAddRequireFirstLine(self):
- """Tests handling of case where goog.require is added."""
- original = [
- 'a = dummy.bb.cc;',
- ]
-
- expected = [
- 'goog.require(\'dummy.bb\');',
- '',
- 'a = dummy.bb.cc;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- original = [
- '',
- 'a = dummy.bb.cc;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testDeleteProvideAndAddProvideFirstLine(self):
- """Tests handling of case where goog.provide is deleted and added.
-
- Bug 14832597.
- """
- original = [
- 'goog.provide(\'dummy.aa\');',
- '',
- 'dummy.bb.cc = 1;',
- ]
-
- expected = [
- 'goog.provide(\'dummy.bb\');',
- '',
- 'dummy.bb.cc = 1;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- original = [
- 'goog.provide(\'dummy.aa\');',
- 'dummy.bb.cc = 1;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testDeleteProvideAndAddRequireFirstLine(self):
- """Tests handling where goog.provide is deleted and goog.require added.
-
- Bug 14832597.
- """
- original = [
- 'goog.provide(\'dummy.aa\');',
- '',
- 'a = dummy.bb.cc;',
- ]
-
- expected = [
- 'goog.require(\'dummy.bb\');',
- '',
- 'a = dummy.bb.cc;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- original = [
- 'goog.provide(\'dummy.aa\');',
- 'a = dummy.bb.cc;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testDeleteRequireAndAddRequireFirstLine(self):
- """Tests handling of case where goog.require is deleted and added.
-
- Bug 14832597.
- """
- original = [
- 'goog.require(\'dummy.aa\');',
- '',
- 'a = dummy.bb.cc;',
- ]
-
- expected = [
- 'goog.require(\'dummy.bb\');',
- '',
- 'a = dummy.bb.cc;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- original = [
- 'goog.require(\'dummy.aa\');',
- 'a = dummy.bb.cc;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testDeleteRequireAndAddProvideFirstLine(self):
- """Tests handling where goog.require is deleted and goog.provide added.
-
- Bug 14832597.
- """
- original = [
- 'goog.require(\'dummy.aa\');',
- '',
- 'dummy.bb.cc = 1;',
- ]
-
- expected = [
- 'goog.provide(\'dummy.bb\');',
- '',
- 'dummy.bb.cc = 1;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- original = [
- 'goog.require(\'dummy.aa\');',
- 'dummy.bb.cc = 1;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testMultipleProvideInsert(self):
- original = [
- 'goog.provide(\'dummy.bb\');',
- 'goog.provide(\'dummy.dd\');',
- '',
- 'dummy.aa.ff = 1;',
- 'dummy.bb.ff = 1;',
- 'dummy.cc.ff = 1;',
- 'dummy.dd.ff = 1;',
- 'dummy.ee.ff = 1;',
- ]
-
- expected = [
- 'goog.provide(\'dummy.aa\');',
- 'goog.provide(\'dummy.bb\');',
- 'goog.provide(\'dummy.cc\');',
- 'goog.provide(\'dummy.dd\');',
- 'goog.provide(\'dummy.ee\');',
- '',
- 'dummy.aa.ff = 1;',
- 'dummy.bb.ff = 1;',
- 'dummy.cc.ff = 1;',
- 'dummy.dd.ff = 1;',
- 'dummy.ee.ff = 1;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testMultipleRequireInsert(self):
- original = [
- 'goog.require(\'dummy.bb\');',
- 'goog.require(\'dummy.dd\');',
- '',
- 'a = dummy.aa.ff;',
- 'b = dummy.bb.ff;',
- 'c = dummy.cc.ff;',
- 'd = dummy.dd.ff;',
- 'e = dummy.ee.ff;',
- ]
-
- expected = [
- 'goog.require(\'dummy.aa\');',
- 'goog.require(\'dummy.bb\');',
- 'goog.require(\'dummy.cc\');',
- 'goog.require(\'dummy.dd\');',
- 'goog.require(\'dummy.ee\');',
- '',
- 'a = dummy.aa.ff;',
- 'b = dummy.bb.ff;',
- 'c = dummy.cc.ff;',
- 'd = dummy.dd.ff;',
- 'e = dummy.ee.ff;',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testUnsortedRequires(self):
- """Tests handling of unsorted goog.require statements without header.
-
- Bug 8398202.
- """
- original = [
- 'goog.require(\'dummy.aa\');',
- 'goog.require(\'dummy.Cc\');',
- 'goog.require(\'dummy.Dd\');',
- '',
- 'function a() {',
- ' dummy.aa.i = 1;',
- ' dummy.Cc.i = 1;',
- ' dummy.Dd.i = 1;',
- '}',
- ]
-
- expected = [
- 'goog.require(\'dummy.Cc\');',
- 'goog.require(\'dummy.Dd\');',
- 'goog.require(\'dummy.aa\');',
- '',
- 'function a() {',
- ' dummy.aa.i = 1;',
- ' dummy.Cc.i = 1;',
- ' dummy.Dd.i = 1;',
- '}',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testMissingExtraAndUnsortedRequires(self):
- """Tests handling of missing extra and unsorted goog.require statements."""
- original = [
- 'goog.require(\'dummy.aa\');',
- 'goog.require(\'dummy.Cc\');',
- 'goog.require(\'dummy.Dd\');',
- '',
- 'var x = new dummy.Bb();',
- 'dummy.Cc.someMethod();',
- 'dummy.aa.someMethod();',
- ]
-
- expected = [
- 'goog.require(\'dummy.Bb\');',
- 'goog.require(\'dummy.Cc\');',
- 'goog.require(\'dummy.aa\');',
- '',
- 'var x = new dummy.Bb();',
- 'dummy.Cc.someMethod();',
- 'dummy.aa.someMethod();',
- ]
-
- self._AssertFixes(original, expected)
-
- def testExtraRequireOnFirstLine(self):
- """Tests handling of extra goog.require statement on the first line.
-
- There was a bug when fixjsstyle quits with an exception. It happened if
- - the first line of the file is an extra goog.require() statement,
- - goog.require() statements are not sorted.
- """
- original = [
- 'goog.require(\'dummy.aa\');',
- 'goog.require(\'dummy.cc\');',
- 'goog.require(\'dummy.bb\');',
- '',
- 'var x = new dummy.bb();',
- 'var y = new dummy.cc();',
- ]
-
- expected = [
- 'goog.require(\'dummy.bb\');',
- 'goog.require(\'dummy.cc\');',
- '',
- 'var x = new dummy.bb();',
- 'var y = new dummy.cc();',
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testUnsortedProvides(self):
- """Tests handling of unsorted goog.provide statements without header.
-
- Bug 8398202.
- """
- original = [
- 'goog.provide(\'dummy.aa\');',
- 'goog.provide(\'dummy.Cc\');',
- 'goog.provide(\'dummy.Dd\');',
- '',
- 'dummy.aa = function() {};'
- 'dummy.Cc = function() {};'
- 'dummy.Dd = function() {};'
- ]
-
- expected = [
- 'goog.provide(\'dummy.Cc\');',
- 'goog.provide(\'dummy.Dd\');',
- 'goog.provide(\'dummy.aa\');',
- '',
- 'dummy.aa = function() {};'
- 'dummy.Cc = function() {};'
- 'dummy.Dd = function() {};'
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testMissingExtraAndUnsortedProvides(self):
- """Tests handling of missing extra and unsorted goog.provide statements."""
- original = [
- 'goog.provide(\'dummy.aa\');',
- 'goog.provide(\'dummy.Cc\');',
- 'goog.provide(\'dummy.Dd\');',
- '',
- 'dummy.Cc = function() {};',
- 'dummy.Bb = function() {};',
- 'dummy.aa.someMethod = function();',
- ]
-
- expected = [
- 'goog.provide(\'dummy.Bb\');',
- 'goog.provide(\'dummy.Cc\');',
- 'goog.provide(\'dummy.aa\');',
- '',
- 'dummy.Cc = function() {};',
- 'dummy.Bb = function() {};',
- 'dummy.aa.someMethod = function();',
- ]
-
- self._AssertFixes(original, expected)
-
- def testNoRequires(self):
- """Tests positioning of missing requires without existing requires."""
- original = [
- 'goog.provide(\'dummy.Something\');',
- '',
- 'dummy.Something = function() {};',
- '',
- 'var x = new dummy.Bb();',
- ]
-
- expected = [
- 'goog.provide(\'dummy.Something\');',
- '',
- 'goog.require(\'dummy.Bb\');',
- '',
- 'dummy.Something = function() {};',
- '',
- 'var x = new dummy.Bb();',
- ]
-
- self._AssertFixes(original, expected)
-
- def testNoProvides(self):
- """Tests positioning of missing provides without existing provides."""
- original = [
- 'goog.require(\'dummy.Bb\');',
- '',
- 'dummy.Something = function() {};',
- '',
- 'var x = new dummy.Bb();',
- ]
-
- expected = [
- 'goog.provide(\'dummy.Something\');',
- '',
- 'goog.require(\'dummy.Bb\');',
- '',
- 'dummy.Something = function() {};',
- '',
- 'var x = new dummy.Bb();',
- ]
-
- self._AssertFixes(original, expected)
-
- def testOutputOkayWhenFirstTokenIsDeleted(self):
- """Tests that autofix output is is correct when first token is deleted.
-
- Regression test for bug 4581567
- """
- original = ['"use strict";']
- expected = ["'use strict';"]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testGoogScopeIndentation(self):
- """Tests Handling a typical end-of-scope indentation fix."""
- original = [
- 'goog.scope(function() {',
- ' // TODO(brain): Take over the world.',
- '}); // goog.scope',
- ]
-
- expected = [
- 'goog.scope(function() {',
- '// TODO(brain): Take over the world.',
- '}); // goog.scope',
- ]
-
- self._AssertFixes(original, expected)
-
- def testMissingEndOfScopeComment(self):
- """Tests Handling a missing comment at end of goog.scope."""
- original = [
- 'goog.scope(function() {',
- '});',
- ]
-
- expected = [
- 'goog.scope(function() {',
- '}); // goog.scope',
- ]
-
- self._AssertFixes(original, expected)
-
- def testMissingEndOfScopeCommentWithOtherComment(self):
- """Tests handling an irrelevant comment at end of goog.scope."""
- original = [
- 'goog.scope(function() {',
- "}); // I don't belong here!",
- ]
-
- expected = [
- 'goog.scope(function() {',
- '}); // goog.scope',
- ]
-
- self._AssertFixes(original, expected)
-
- def testMalformedEndOfScopeComment(self):
- """Tests Handling a malformed comment at end of goog.scope."""
- original = [
- 'goog.scope(function() {',
- '}); // goog.scope FTW',
- ]
-
- expected = [
- 'goog.scope(function() {',
- '}); // goog.scope',
- ]
-
- self._AssertFixes(original, expected)
-
- def testEndsWithIdentifier(self):
- """Tests Handling case where script ends with identifier. Bug 7643404."""
- original = [
- 'goog.provide(\'xyz\');',
- '',
- 'abc'
- ]
-
- expected = [
- 'goog.provide(\'xyz\');',
- '',
- 'abc;'
- ]
-
- self._AssertFixes(original, expected)
-
- def testFileStartsWithSemicolon(self):
- """Tests handling files starting with semicolon.
-
- b/10062516
- """
- original = [
- ';goog.provide(\'xyz\');',
- '',
- 'abc;'
- ]
-
- expected = [
- 'goog.provide(\'xyz\');',
- '',
- 'abc;'
- ]
-
- self._AssertFixes(original, expected, include_header=False)
-
- def testCodeStartsWithSemicolon(self):
- """Tests handling code in starting with semicolon after comments.
-
- b/10062516
- """
- original = [
- ';goog.provide(\'xyz\');',
- '',
- 'abc;'
- ]
-
- expected = [
- 'goog.provide(\'xyz\');',
- '',
- 'abc;'
- ]
-
- self._AssertFixes(original, expected)
-
- def _AssertFixes(self, original, expected, include_header=True):
- """Asserts that the error fixer corrects original to expected."""
- if include_header:
- original = self._GetHeader() + original
- expected = self._GetHeader() + expected
-
- actual = StringIO.StringIO()
- runner.Run('testing.js', error_fixer.ErrorFixer(actual), original)
- actual.seek(0)
-
- expected = [x + '\n' for x in expected]
-
- self.assertListEqual(actual.readlines(), expected)
-
- def _GetHeader(self):
- """Returns a fake header for a JavaScript file."""
- return [
- '// Copyright 2011 Google Inc. All Rights Reserved.',
- '',
- '/**',
- ' * @fileoverview Fake file overview.',
- ' * @author fake@google.com (Fake Person)',
- ' */',
- ''
- ]
-
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/closure_linter/full_test.py b/tools/closure_linter/closure_linter/full_test.py
deleted file mode 100755
index d0a1557dc2..0000000000
--- a/tools/closure_linter/closure_linter/full_test.py
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Full regression-type (Medium) tests for gjslint.
-
-Tests every error that can be thrown by gjslint. Based heavily on
-devtools/javascript/gpylint/full_test.py
-"""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-import os
-import sys
-import unittest
-
-import gflags as flags
-import unittest as googletest
-
-from closure_linter import error_check
-from closure_linter import errors
-from closure_linter import runner
-from closure_linter.common import filetestcase
-
-_RESOURCE_PREFIX = 'closure_linter/testdata'
-
-flags.FLAGS.strict = True
-flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
-flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
-flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
- 'limited_doc_checks.js')
-flags.FLAGS.jslint_error = error_check.Rule.ALL
-
-# List of files under testdata to test.
-# We need to list files explicitly since pyglib can't list directories.
-# TODO(user): Figure out how to list the directory.
-_TEST_FILES = [
- 'all_js_wrapped.js',
- 'blank_lines.js',
- 'ends_with_block.js',
- 'empty_file.js',
- 'externs.js',
- 'externs_jsdoc.js',
- 'goog_scope.js',
- 'html_parse_error.html',
- 'indentation.js',
- 'interface.js',
- 'jsdoc.js',
- 'limited_doc_checks.js',
- 'minimal.js',
- 'other.js',
- 'provide_blank.js',
- 'provide_extra.js',
- 'provide_missing.js',
- 'require_alias.js',
- 'require_all_caps.js',
- 'require_blank.js',
- 'require_extra.js',
- 'require_function.js',
- 'require_function_missing.js',
- 'require_function_through_both.js',
- 'require_function_through_namespace.js',
- 'require_interface.js',
- 'require_interface_alias.js',
- 'require_interface_base.js',
- 'require_lower_case.js',
- 'require_missing.js',
- 'require_numeric.js',
- 'require_provide_blank.js',
- 'require_provide_missing.js',
- 'require_provide_ok.js',
- 'semicolon_missing.js',
- 'simple.html',
- 'spaces.js',
- 'tokenizer.js',
- 'unparseable.js',
- 'unused_local_variables.js',
- 'unused_private_members.js',
- 'utf8.html',
-]
-
-
-class GJsLintTestSuite(unittest.TestSuite):
- """Test suite to run a GJsLintTest for each of several files.
-
- If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
- testdata to test. Otherwise, _TEST_FILES is used.
- """
-
- def __init__(self, tests=()):
- unittest.TestSuite.__init__(self, tests)
-
- argv = sys.argv and sys.argv[1:] or []
- if argv:
- test_files = argv
- else:
- test_files = _TEST_FILES
- for test_file in test_files:
- resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
- self.addTest(
- filetestcase.AnnotatedFileTestCase(
- resource_path,
- runner.Run,
- errors.ByName))
-
-if __name__ == '__main__':
- # Don't let main parse args; it happens in the TestSuite.
- googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')
diff --git a/tools/closure_linter/closure_linter/gjslint.py b/tools/closure_linter/closure_linter/gjslint.py
deleted file mode 100755
index 824e025dcb..0000000000
--- a/tools/closure_linter/closure_linter/gjslint.py
+++ /dev/null
@@ -1,319 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Checks JavaScript files for common style guide violations.
-
-gjslint.py is designed to be used as a PRESUBMIT script to check for javascript
-style guide violations. As of now, it checks for the following violations:
-
- * Missing and extra spaces
- * Lines longer than 80 characters
- * Missing newline at end of file
- * Missing semicolon after function declaration
- * Valid JsDoc including parameter matching
-
-Someday it will validate to the best of its ability against the entirety of the
-JavaScript style guide.
-
-This file is a front end that parses arguments and flags. The core of the code
-is in tokenizer.py and checker.py.
-"""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)',
- 'nnaze@google.com (Nathan Naze)',)
-
-import errno
-import itertools
-import os
-import platform
-import re
-import sys
-import time
-
-import gflags as flags
-
-from closure_linter import errorrecord
-from closure_linter import runner
-from closure_linter.common import erroraccumulator
-from closure_linter.common import simplefileflags as fileflags
-
-# Attempt import of multiprocessing (should be available in Python 2.6 and up).
-try:
- # pylint: disable=g-import-not-at-top
- import multiprocessing
-except ImportError:
- multiprocessing = None
-
-FLAGS = flags.FLAGS
-flags.DEFINE_boolean('unix_mode', False,
- 'Whether to emit warnings in standard unix format.')
-flags.DEFINE_boolean('beep', True, 'Whether to beep when errors are found.')
-flags.DEFINE_boolean('time', False, 'Whether to emit timing statistics.')
-flags.DEFINE_boolean('quiet', False, 'Whether to minimize logged messages. '
- 'Most useful for per-file linting, such as that performed '
- 'by the presubmit linter service.')
-flags.DEFINE_boolean('check_html', False,
- 'Whether to check javascript in html files.')
-flags.DEFINE_boolean('summary', False,
- 'Whether to show an error count summary.')
-flags.DEFINE_list('additional_extensions', None, 'List of additional file '
- 'extensions (not js) that should be treated as '
- 'JavaScript files.')
-flags.DEFINE_boolean('multiprocess',
- platform.system() is 'Linux' and bool(multiprocessing),
- 'Whether to attempt parallelized linting using the '
- 'multiprocessing module. Enabled by default on Linux '
- 'if the multiprocessing module is present (Python 2.6+). '
- 'Otherwise disabled by default. '
- 'Disabling may make debugging easier.')
-flags.ADOPT_module_key_flags(fileflags)
-flags.ADOPT_module_key_flags(runner)
-
-
-GJSLINT_ONLY_FLAGS = ['--unix_mode', '--beep', '--nobeep', '--time',
- '--check_html', '--summary', '--quiet']
-
-
-
-def _MultiprocessCheckPaths(paths):
- """Run _CheckPath over mutltiple processes.
-
- Tokenization, passes, and checks are expensive operations. Running in a
- single process, they can only run on one CPU/core. Instead,
- shard out linting over all CPUs with multiprocessing to parallelize.
-
- Args:
- paths: paths to check.
-
- Yields:
- errorrecord.ErrorRecords for any found errors.
- """
-
- pool = multiprocessing.Pool()
-
- path_results = pool.imap(_CheckPath, paths)
- for results in path_results:
- for result in results:
- yield result
-
- # Force destruct before returning, as this can sometimes raise spurious
- # "interrupted system call" (EINTR), which we can ignore.
- try:
- pool.close()
- pool.join()
- del pool
- except OSError as err:
- if err.errno is not errno.EINTR:
- raise err
-
-
-def _CheckPaths(paths):
- """Run _CheckPath on all paths in one thread.
-
- Args:
- paths: paths to check.
-
- Yields:
- errorrecord.ErrorRecords for any found errors.
- """
-
- for path in paths:
- results = _CheckPath(path)
- for record in results:
- yield record
-
-
-def _CheckPath(path):
- """Check a path and return any errors.
-
- Args:
- path: paths to check.
-
- Returns:
- A list of errorrecord.ErrorRecords for any found errors.
- """
-
- error_handler = erroraccumulator.ErrorAccumulator()
- runner.Run(path, error_handler)
-
- make_error_record = lambda err: errorrecord.MakeErrorRecord(path, err)
- return map(make_error_record, error_handler.GetErrors())
-
-
-def _GetFilePaths(argv):
- suffixes = ['.js']
- if FLAGS.additional_extensions:
- suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
- if FLAGS.check_html:
- suffixes += ['.html', '.htm']
- return fileflags.GetFileList(argv, 'JavaScript', suffixes)
-
-
-# Error printing functions
-
-
-def _PrintFileSummary(paths, records):
- """Print a detailed summary of the number of errors in each file."""
-
- paths = list(paths)
- paths.sort()
-
- for path in paths:
- path_errors = [e for e in records if e.path == path]
- print '%s: %d' % (path, len(path_errors))
-
-
-def _PrintFileSeparator(path):
- print '----- FILE : %s -----' % path
-
-
-def _PrintSummary(paths, error_records):
- """Print a summary of the number of errors and files."""
-
- error_count = len(error_records)
- all_paths = set(paths)
- all_paths_count = len(all_paths)
-
- if error_count is 0:
- print '%d files checked, no errors found.' % all_paths_count
-
- new_error_count = len([e for e in error_records if e.new_error])
-
- error_paths = set([e.path for e in error_records])
- error_paths_count = len(error_paths)
- no_error_paths_count = all_paths_count - error_paths_count
-
- if (error_count or new_error_count) and not FLAGS.quiet:
- error_noun = 'error' if error_count == 1 else 'errors'
- new_error_noun = 'error' if new_error_count == 1 else 'errors'
- error_file_noun = 'file' if error_paths_count == 1 else 'files'
- ok_file_noun = 'file' if no_error_paths_count == 1 else 'files'
- print ('Found %d %s, including %d new %s, in %d %s (%d %s OK).' %
- (error_count,
- error_noun,
- new_error_count,
- new_error_noun,
- error_paths_count,
- error_file_noun,
- no_error_paths_count,
- ok_file_noun))
-
-
-def _PrintErrorRecords(error_records):
- """Print error records strings in the expected format."""
-
- current_path = None
- for record in error_records:
-
- if current_path != record.path:
- current_path = record.path
- if not FLAGS.unix_mode:
- _PrintFileSeparator(current_path)
-
- print record.error_string
-
-
-def _FormatTime(t):
- """Formats a duration as a human-readable string.
-
- Args:
- t: A duration in seconds.
-
- Returns:
- A formatted duration string.
- """
- if t < 1:
- return '%dms' % round(t * 1000)
- else:
- return '%.2fs' % t
-
-
-
-
-def main(argv=None):
- """Main function.
-
- Args:
- argv: Sequence of command line arguments.
- """
- if argv is None:
- argv = flags.FLAGS(sys.argv)
-
- if FLAGS.time:
- start_time = time.time()
-
- suffixes = ['.js']
- if FLAGS.additional_extensions:
- suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
- if FLAGS.check_html:
- suffixes += ['.html', '.htm']
- paths = fileflags.GetFileList(argv, 'JavaScript', suffixes)
-
- if FLAGS.multiprocess:
- records_iter = _MultiprocessCheckPaths(paths)
- else:
- records_iter = _CheckPaths(paths)
-
- records_iter, records_iter_copy = itertools.tee(records_iter, 2)
- _PrintErrorRecords(records_iter_copy)
-
- error_records = list(records_iter)
- _PrintSummary(paths, error_records)
-
- exit_code = 0
-
- # If there are any errors
- if error_records:
- exit_code += 1
-
- # If there are any new errors
- if [r for r in error_records if r.new_error]:
- exit_code += 2
-
- if exit_code:
- if FLAGS.summary:
- _PrintFileSummary(paths, error_records)
-
- if FLAGS.beep:
- # Make a beep noise.
- sys.stdout.write(chr(7))
-
- # Write out instructions for using fixjsstyle script to fix some of the
- # reported errors.
- fix_args = []
- for flag in sys.argv[1:]:
- for f in GJSLINT_ONLY_FLAGS:
- if flag.startswith(f):
- break
- else:
- fix_args.append(flag)
-
- if not FLAGS.quiet:
- print """
-Some of the errors reported by GJsLint may be auto-fixable using the script
-fixjsstyle. Please double check any changes it makes and report any bugs. The
-script can be run by executing:
-
-fixjsstyle %s """ % ' '.join(fix_args)
-
- if FLAGS.time:
- print 'Done in %s.' % _FormatTime(time.time() - start_time)
-
- sys.exit(exit_code)
-
-
-if __name__ == '__main__':
- main()
diff --git a/tools/closure_linter/closure_linter/indentation.py b/tools/closure_linter/closure_linter/indentation.py
deleted file mode 100755
index d48ad2b862..0000000000
--- a/tools/closure_linter/closure_linter/indentation.py
+++ /dev/null
@@ -1,617 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Methods for checking EcmaScript files for indentation issues."""
-
-__author__ = ('robbyw@google.com (Robert Walker)')
-
-import gflags as flags
-
-from closure_linter import ecmametadatapass
-from closure_linter import errors
-from closure_linter import javascripttokens
-from closure_linter import tokenutil
-from closure_linter.common import error
-from closure_linter.common import position
-
-
-flags.DEFINE_boolean('debug_indentation', False,
- 'Whether to print debugging information for indentation.')
-
-
-# Shorthand
-Context = ecmametadatapass.EcmaContext
-Error = error.Error
-Position = position.Position
-Type = javascripttokens.JavaScriptTokenType
-
-
-# The general approach:
-#
-# 1. Build a stack of tokens that can affect indentation.
-# For each token, we determine if it is a block or continuation token.
-# Some tokens need to be temporarily overwritten in case they are removed
-# before the end of the line.
-# Much of the work here is determining which tokens to keep on the stack
-# at each point. Operators, for example, should be removed once their
-# expression or line is gone, while parentheses must stay until the matching
-# end parentheses is found.
-#
-# 2. Given that stack, determine the allowable indentations.
-# Due to flexible indentation rules in JavaScript, there may be many
-# allowable indentations for each stack. We follows the general
-# "no false positives" approach of GJsLint and build the most permissive
-# set possible.
-
-
-class TokenInfo(object):
- """Stores information about a token.
-
- Attributes:
- token: The token
- is_block: Whether the token represents a block indentation.
- is_transient: Whether the token should be automatically removed without
- finding a matching end token.
- overridden_by: TokenInfo for a token that overrides the indentation that
- this token would require.
- is_permanent_override: Whether the override on this token should persist
- even after the overriding token is removed from the stack. For example:
- x([
- 1],
- 2);
- needs this to be set so the last line is not required to be a continuation
- indent.
- line_number: The effective line number of this token. Will either be the
- actual line number or the one before it in the case of a mis-wrapped
- operator.
- """
-
- def __init__(self, token, is_block=False):
- """Initializes a TokenInfo object.
-
- Args:
- token: The token
- is_block: Whether the token represents a block indentation.
- """
- self.token = token
- self.overridden_by = None
- self.is_permanent_override = False
- self.is_block = is_block
- self.is_transient = not is_block and token.type not in (
- Type.START_PAREN, Type.START_PARAMETERS)
- self.line_number = token.line_number
-
- def __repr__(self):
- result = '\n %s' % self.token
- if self.overridden_by:
- result = '%s OVERRIDDEN [by "%s"]' % (
- result, self.overridden_by.token.string)
- result += ' {is_block: %s, is_transient: %s}' % (
- self.is_block, self.is_transient)
- return result
-
-
-class IndentationRules(object):
- """EmcaScript indentation rules.
-
- Can be used to find common indentation errors in JavaScript, ActionScript and
- other Ecma like scripting languages.
- """
-
- def __init__(self):
- """Initializes the IndentationRules checker."""
- self._stack = []
-
- # Map from line number to number of characters it is off in indentation.
- self._start_index_offset = {}
-
- def Finalize(self):
- if self._stack:
- old_stack = self._stack
- self._stack = []
- raise Exception('INTERNAL ERROR: indentation stack is not empty: %r' %
- old_stack)
-
- def CheckToken(self, token, state):
- """Checks a token for indentation errors.
-
- Args:
- token: The current token under consideration
- state: Additional information about the current tree state
-
- Returns:
- An error array [error code, error string, error token] if the token is
- improperly indented, or None if indentation is correct.
- """
-
- token_type = token.type
- indentation_errors = []
- stack = self._stack
- is_first = self._IsFirstNonWhitespaceTokenInLine(token)
-
- # Add tokens that could decrease indentation before checking.
- if token_type == Type.END_PAREN:
- self._PopTo(Type.START_PAREN)
-
- elif token_type == Type.END_PARAMETERS:
- self._PopTo(Type.START_PARAMETERS)
-
- elif token_type == Type.END_BRACKET:
- self._PopTo(Type.START_BRACKET)
-
- elif token_type == Type.END_BLOCK:
- start_token = self._PopTo(Type.START_BLOCK)
- # Check for required goog.scope comment.
- if start_token:
- goog_scope = tokenutil.GoogScopeOrNoneFromStartBlock(start_token.token)
- if goog_scope is not None:
- if not token.line.endswith('; // goog.scope\n'):
- if (token.line.find('//') > -1 and
- token.line.find('goog.scope') >
- token.line.find('//')):
- indentation_errors.append([
- errors.MALFORMED_END_OF_SCOPE_COMMENT,
- ('Malformed end of goog.scope comment. Please use the '
- 'exact following syntax to close the scope:\n'
- '}); // goog.scope'),
- token,
- Position(token.start_index, token.length)])
- else:
- indentation_errors.append([
- errors.MISSING_END_OF_SCOPE_COMMENT,
- ('Missing comment for end of goog.scope which opened at line '
- '%d. End the scope with:\n'
- '}); // goog.scope' %
- (start_token.line_number)),
- token,
- Position(token.start_index, token.length)])
-
- elif token_type == Type.KEYWORD and token.string in ('case', 'default'):
- self._Add(self._PopTo(Type.START_BLOCK))
-
- elif token_type == Type.SEMICOLON:
- self._PopTransient()
-
- if (is_first and
- token_type not in (Type.COMMENT, Type.DOC_PREFIX, Type.STRING_TEXT)):
- if flags.FLAGS.debug_indentation:
- print 'Line #%d: stack %r' % (token.line_number, stack)
-
- # Ignore lines that start in JsDoc since we don't check them properly yet.
- # TODO(robbyw): Support checking JsDoc indentation.
- # Ignore lines that start as multi-line strings since indentation is N/A.
- # Ignore lines that start with operators since we report that already.
- # Ignore lines with tabs since we report that already.
- expected = self._GetAllowableIndentations()
- actual = self._GetActualIndentation(token)
-
- # Special case comments describing else, case, and default. Allow them
- # to outdent to the parent block.
- if token_type in Type.COMMENT_TYPES:
- next_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
- if next_code and next_code.type == Type.END_BLOCK:
- next_code = tokenutil.SearchExcept(next_code, Type.NON_CODE_TYPES)
- if next_code and next_code.string in ('else', 'case', 'default'):
- # TODO(robbyw): This almost certainly introduces false negatives.
- expected |= self._AddToEach(expected, -2)
-
- if actual >= 0 and actual not in expected:
- expected = sorted(expected)
- indentation_errors.append([
- errors.WRONG_INDENTATION,
- 'Wrong indentation: expected any of {%s} but got %d' % (
- ', '.join('%d' % x for x in expected if x < 80), actual),
- token,
- Position(actual, expected[0])])
- self._start_index_offset[token.line_number] = expected[0] - actual
-
- # Add tokens that could increase indentation.
- if token_type == Type.START_BRACKET:
- self._Add(TokenInfo(
- token=token,
- is_block=token.metadata.context.type == Context.ARRAY_LITERAL))
-
- elif token_type == Type.START_BLOCK or token.metadata.is_implied_block:
- self._Add(TokenInfo(token=token, is_block=True))
-
- elif token_type in (Type.START_PAREN, Type.START_PARAMETERS):
- self._Add(TokenInfo(token=token, is_block=False))
-
- elif token_type == Type.KEYWORD and token.string == 'return':
- self._Add(TokenInfo(token))
-
- elif not token.IsLastInLine() and (
- token.IsAssignment() or token.IsOperator('?')):
- self._Add(TokenInfo(token=token))
-
- # Handle implied block closes.
- if token.metadata.is_implied_block_close:
- self._PopToImpliedBlock()
-
- # Add some tokens only if they appear at the end of the line.
- is_last = self._IsLastCodeInLine(token)
- if is_last:
- next_code_token = tokenutil.GetNextCodeToken(token)
- # Increase required indentation if this is an overlong wrapped statement
- # ending in an operator.
- if token_type == Type.OPERATOR:
- if token.string == ':':
- if stack and stack[-1].token.string == '?':
- # When a ternary : is on a different line than its '?', it doesn't
- # add indentation.
- if token.line_number == stack[-1].token.line_number:
- self._Add(TokenInfo(token))
- elif token.metadata.context.type == Context.CASE_BLOCK:
- # Pop transient tokens from say, line continuations, e.g.,
- # case x.
- # y:
- # Want to pop the transient 4 space continuation indent.
- self._PopTransient()
- # Starting the body of the case statement, which is a type of
- # block.
- self._Add(TokenInfo(token=token, is_block=True))
- elif token.metadata.context.type == Context.LITERAL_ELEMENT:
- # When in an object literal, acts as operator indicating line
- # continuations.
- self._Add(TokenInfo(token))
- else:
- # ':' might also be a statement label, no effect on indentation in
- # this case.
- pass
-
- elif token.string != ',':
- self._Add(TokenInfo(token))
- else:
- # The token is a comma.
- if token.metadata.context.type == Context.VAR:
- self._Add(TokenInfo(token))
- elif token.metadata.context.type != Context.PARAMETERS:
- self._PopTransient()
- # Increase required indentation if this is the end of a statement that's
- # continued with an operator on the next line (e.g. the '.').
- elif (next_code_token and next_code_token.type == Type.OPERATOR and
- not next_code_token.metadata.IsUnaryOperator()):
- self._Add(TokenInfo(token))
- elif token_type == Type.PARAMETERS and token.string.endswith(','):
- # Parameter lists.
- self._Add(TokenInfo(token))
- elif token.IsKeyword('var'):
- self._Add(TokenInfo(token))
- elif token.metadata.is_implied_semicolon:
- self._PopTransient()
- elif token.IsAssignment():
- self._Add(TokenInfo(token))
-
- return indentation_errors
-
- def _AddToEach(self, original, amount):
- """Returns a new set with the given amount added to each element.
-
- Args:
- original: The original set of numbers
- amount: The amount to add to each element
-
- Returns:
- A new set containing each element of the original set added to the amount.
- """
- return set([x + amount for x in original])
-
- _HARD_STOP_TYPES = (Type.START_PAREN, Type.START_PARAMETERS,
- Type.START_BRACKET)
-
- _HARD_STOP_STRINGS = ('return', '?')
-
- def _IsHardStop(self, token):
- """Determines if the given token can have a hard stop after it.
-
- Args:
- token: token to examine
-
- Returns:
- Whether the token can have a hard stop after it.
-
- Hard stops are indentations defined by the position of another token as in
- indentation lined up with return, (, [, and ?.
- """
- return (token.type in self._HARD_STOP_TYPES or
- token.string in self._HARD_STOP_STRINGS or
- token.IsAssignment())
-
- def _GetAllowableIndentations(self):
- """Computes the set of allowable indentations.
-
- Returns:
- The set of allowable indentations, given the current stack.
- """
- expected = set([0])
- hard_stops = set([])
-
- # Whether the tokens are still in the same continuation, meaning additional
- # indentation is optional. As an example:
- # x = 5 +
- # 6 +
- # 7;
- # The second '+' does not add any required indentation.
- in_same_continuation = False
-
- for token_info in self._stack:
- token = token_info.token
-
- # Handle normal additive indentation tokens.
- if not token_info.overridden_by and token.string != 'return':
- if token_info.is_block:
- expected = self._AddToEach(expected, 2)
- hard_stops = self._AddToEach(hard_stops, 2)
- in_same_continuation = False
- elif in_same_continuation:
- expected |= self._AddToEach(expected, 4)
- hard_stops |= self._AddToEach(hard_stops, 4)
- else:
- expected = self._AddToEach(expected, 4)
- hard_stops |= self._AddToEach(hard_stops, 4)
- in_same_continuation = True
-
- # Handle hard stops after (, [, return, =, and ?
- if self._IsHardStop(token):
- override_is_hard_stop = (token_info.overridden_by and
- self._IsHardStop(
- token_info.overridden_by.token))
- if token.type == Type.START_PAREN and token.previous:
- # For someFunction(...) we allow to indent at the beginning of the
- # identifier +4
- prev = token.previous
- if (prev.type == Type.IDENTIFIER and
- prev.line_number == token.line_number):
- hard_stops.add(prev.start_index + 4)
- if not override_is_hard_stop:
- start_index = token.start_index
- if token.line_number in self._start_index_offset:
- start_index += self._start_index_offset[token.line_number]
- if (token.type in (Type.START_PAREN, Type.START_PARAMETERS) and
- not token_info.overridden_by):
- hard_stops.add(start_index + 1)
-
- elif token.string == 'return' and not token_info.overridden_by:
- hard_stops.add(start_index + 7)
-
- elif token.type == Type.START_BRACKET:
- hard_stops.add(start_index + 1)
-
- elif token.IsAssignment():
- hard_stops.add(start_index + len(token.string) + 1)
-
- elif token.IsOperator('?') and not token_info.overridden_by:
- hard_stops.add(start_index + 2)
-
- return (expected | hard_stops) or set([0])
-
- def _GetActualIndentation(self, token):
- """Gets the actual indentation of the line containing the given token.
-
- Args:
- token: Any token on the line.
-
- Returns:
- The actual indentation of the line containing the given token. Returns
- -1 if this line should be ignored due to the presence of tabs.
- """
- # Move to the first token in the line
- token = tokenutil.GetFirstTokenInSameLine(token)
-
- # If it is whitespace, it is the indentation.
- if token.type == Type.WHITESPACE:
- if token.string.find('\t') >= 0:
- return -1
- else:
- return len(token.string)
- elif token.type == Type.PARAMETERS:
- return len(token.string) - len(token.string.lstrip())
- else:
- return 0
-
- def _IsFirstNonWhitespaceTokenInLine(self, token):
- """Determines if the given token is the first non-space token on its line.
-
- Args:
- token: The token.
-
- Returns:
- True if the token is the first non-whitespace token on its line.
- """
- if token.type in (Type.WHITESPACE, Type.BLANK_LINE):
- return False
- if token.IsFirstInLine():
- return True
- return (token.previous and token.previous.IsFirstInLine() and
- token.previous.type == Type.WHITESPACE)
-
- def _IsLastCodeInLine(self, token):
- """Determines if the given token is the last code token on its line.
-
- Args:
- token: The token.
-
- Returns:
- True if the token is the last code token on its line.
- """
- if token.type in Type.NON_CODE_TYPES:
- return False
- start_token = token
- while True:
- token = token.next
- if not token or token.line_number != start_token.line_number:
- return True
- if token.type not in Type.NON_CODE_TYPES:
- return False
-
- def _AllFunctionPropertyAssignTokens(self, start_token, end_token):
- """Checks if tokens are (likely) a valid function property assignment.
-
- Args:
- start_token: Start of the token range.
- end_token: End of the token range.
-
- Returns:
- True if all tokens between start_token and end_token are legal tokens
- within a function declaration and assignment into a property.
- """
- for token in tokenutil.GetTokenRange(start_token, end_token):
- fn_decl_tokens = (Type.FUNCTION_DECLARATION,
- Type.PARAMETERS,
- Type.START_PARAMETERS,
- Type.END_PARAMETERS,
- Type.END_PAREN)
- if (token.type not in fn_decl_tokens and
- token.IsCode() and
- not tokenutil.IsIdentifierOrDot(token) and
- not token.IsAssignment() and
- not (token.type == Type.OPERATOR and token.string == ',')):
- return False
- return True
-
- def _Add(self, token_info):
- """Adds the given token info to the stack.
-
- Args:
- token_info: The token information to add.
- """
- if self._stack and self._stack[-1].token == token_info.token:
- # Don't add the same token twice.
- return
-
- if token_info.is_block or token_info.token.type == Type.START_PAREN:
- scope_token = tokenutil.GoogScopeOrNoneFromStartBlock(token_info.token)
- token_info.overridden_by = TokenInfo(scope_token) if scope_token else None
-
- if (token_info.token.type == Type.START_BLOCK and
- token_info.token.metadata.context.type == Context.BLOCK):
- # Handle function() {} assignments: their block contents get special
- # treatment and are allowed to just indent by two whitespace.
- # For example
- # long.long.name = function(
- # a) {
- # In this case the { and the = are on different lines. But the
- # override should still apply for all previous stack tokens that are
- # part of an assignment of a block.
-
- has_assignment = any(x for x in self._stack if x.token.IsAssignment())
- if has_assignment:
- last_token = token_info.token.previous
- for stack_info in reversed(self._stack):
- if (last_token and
- not self._AllFunctionPropertyAssignTokens(stack_info.token,
- last_token)):
- break
- stack_info.overridden_by = token_info
- stack_info.is_permanent_override = True
- last_token = stack_info.token
-
- index = len(self._stack) - 1
- while index >= 0:
- stack_info = self._stack[index]
- stack_token = stack_info.token
-
- if stack_info.line_number == token_info.line_number:
- # In general, tokens only override each other when they are on
- # the same line.
- stack_info.overridden_by = token_info
- if (token_info.token.type == Type.START_BLOCK and
- (stack_token.IsAssignment() or
- stack_token.type in (Type.IDENTIFIER, Type.START_PAREN))):
- # Multi-line blocks have lasting overrides, as in:
- # callFn({
- # a: 10
- # },
- # 30);
- # b/11450054. If a string is not closed properly then close_block
- # could be null.
- close_block = token_info.token.metadata.context.end_token
- stack_info.is_permanent_override = close_block and (
- close_block.line_number != token_info.token.line_number)
- else:
- break
- index -= 1
-
- self._stack.append(token_info)
-
- def _Pop(self):
- """Pops the top token from the stack.
-
- Returns:
- The popped token info.
- """
- token_info = self._stack.pop()
- if token_info.token.type not in (Type.START_BLOCK, Type.START_BRACKET):
- # Remove any temporary overrides.
- self._RemoveOverrides(token_info)
- else:
- # For braces and brackets, which can be object and array literals, remove
- # overrides when the literal is closed on the same line.
- token_check = token_info.token
- same_type = token_check.type
- goal_type = None
- if token_info.token.type == Type.START_BRACKET:
- goal_type = Type.END_BRACKET
- else:
- goal_type = Type.END_BLOCK
- line_number = token_info.token.line_number
- count = 0
- while token_check and token_check.line_number == line_number:
- if token_check.type == goal_type:
- count -= 1
- if not count:
- self._RemoveOverrides(token_info)
- break
- if token_check.type == same_type:
- count += 1
- token_check = token_check.next
- return token_info
-
- def _PopToImpliedBlock(self):
- """Pops the stack until an implied block token is found."""
- while not self._Pop().token.metadata.is_implied_block:
- pass
-
- def _PopTo(self, stop_type):
- """Pops the stack until a token of the given type is popped.
-
- Args:
- stop_type: The type of token to pop to.
-
- Returns:
- The token info of the given type that was popped.
- """
- last = None
- while True:
- last = self._Pop()
- if last.token.type == stop_type:
- break
- return last
-
- def _RemoveOverrides(self, token_info):
- """Marks any token that was overridden by this token as active again.
-
- Args:
- token_info: The token that is being removed from the stack.
- """
- for stack_token in self._stack:
- if (stack_token.overridden_by == token_info and
- not stack_token.is_permanent_override):
- stack_token.overridden_by = None
-
- def _PopTransient(self):
- """Pops all transient tokens - i.e. not blocks, literals, or parens."""
- while self._stack and self._stack[-1].is_transient:
- self._Pop()
diff --git a/tools/closure_linter/closure_linter/javascriptlintrules.py b/tools/closure_linter/closure_linter/javascriptlintrules.py
deleted file mode 100644
index 9578009daa..0000000000
--- a/tools/closure_linter/closure_linter/javascriptlintrules.py
+++ /dev/null
@@ -1,754 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Methods for checking JS files for common style guide violations.
-
-These style guide violations should only apply to JavaScript and not an Ecma
-scripting languages.
-"""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)',
- 'jacobr@google.com (Jacob Richman)')
-
-import re
-
-from closure_linter import ecmalintrules
-from closure_linter import error_check
-from closure_linter import errors
-from closure_linter import javascripttokenizer
-from closure_linter import javascripttokens
-from closure_linter import requireprovidesorter
-from closure_linter import tokenutil
-from closure_linter.common import error
-from closure_linter.common import position
-
-# Shorthand
-Error = error.Error
-Position = position.Position
-Rule = error_check.Rule
-Type = javascripttokens.JavaScriptTokenType
-
-
-class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
- """JavaScript lint rules that catch JavaScript specific style errors."""
-
- def __init__(self, namespaces_info):
- """Initializes a JavaScriptLintRules instance."""
- ecmalintrules.EcmaScriptLintRules.__init__(self)
- self._namespaces_info = namespaces_info
- self._declared_private_member_tokens = {}
- self._declared_private_members = set()
- self._used_private_members = set()
- # A stack of dictionaries, one for each function scope entered. Each
- # dictionary is keyed by an identifier that defines a local variable and has
- # a token as its value.
- self._unused_local_variables_by_scope = []
-
- def HandleMissingParameterDoc(self, token, param_name):
- """Handle errors associated with a parameter missing a param tag."""
- self._HandleError(errors.MISSING_PARAMETER_DOCUMENTATION,
- 'Missing docs for parameter: "%s"' % param_name, token)
-
- # pylint: disable=too-many-statements
- def CheckToken(self, token, state):
- """Checks a token, given the current parser_state, for warnings and errors.
-
- Args:
- token: The current token under consideration
- state: parser_state object that indicates the current state in the page
- """
-
- # Call the base class's CheckToken function.
- super(JavaScriptLintRules, self).CheckToken(token, state)
-
- # Store some convenience variables
- namespaces_info = self._namespaces_info
-
- if error_check.ShouldCheck(Rule.UNUSED_LOCAL_VARIABLES):
- self._CheckUnusedLocalVariables(token, state)
-
- if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
- # Find all assignments to private members.
- if token.type == Type.SIMPLE_LVALUE:
- identifier = token.string
- if identifier.endswith('_') and not identifier.endswith('__'):
- doc_comment = state.GetDocComment()
- suppressed = doc_comment and (
- 'underscore' in doc_comment.suppressions or
- 'unusedPrivateMembers' in doc_comment.suppressions)
- if not suppressed:
- # Look for static members defined on a provided namespace.
- if namespaces_info:
- namespace = namespaces_info.GetClosurizedNamespace(identifier)
- provided_namespaces = namespaces_info.GetProvidedNamespaces()
- else:
- namespace = None
- provided_namespaces = set()
-
- # Skip cases of this.something_.somethingElse_.
- regex = re.compile(r'^this\.[a-zA-Z_]+$')
- if namespace in provided_namespaces or regex.match(identifier):
- variable = identifier.split('.')[-1]
- self._declared_private_member_tokens[variable] = token
- self._declared_private_members.add(variable)
- elif not identifier.endswith('__'):
- # Consider setting public members of private members to be a usage.
- for piece in identifier.split('.'):
- if piece.endswith('_'):
- self._used_private_members.add(piece)
-
- # Find all usages of private members.
- if token.type == Type.IDENTIFIER:
- for piece in token.string.split('.'):
- if piece.endswith('_'):
- self._used_private_members.add(piece)
-
- if token.type == Type.DOC_FLAG:
- flag = token.attached_object
-
- if flag.flag_type == 'param' and flag.name_token is not None:
- self._CheckForMissingSpaceBeforeToken(
- token.attached_object.name_token)
-
- if flag.type is not None and flag.name is not None:
- if error_check.ShouldCheck(Rule.VARIABLE_ARG_MARKER):
- # Check for variable arguments marker in type.
- if flag.jstype.IsVarArgsType() and flag.name != 'var_args':
- self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_NAME,
- 'Variable length argument %s must be renamed '
- 'to var_args.' % flag.name,
- token)
- elif not flag.jstype.IsVarArgsType() and flag.name == 'var_args':
- self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_TYPE,
- 'Variable length argument %s type must start '
- 'with \'...\'.' % flag.name,
- token)
-
- if error_check.ShouldCheck(Rule.OPTIONAL_TYPE_MARKER):
- # Check for optional marker in type.
- if (flag.jstype.opt_arg and
- not flag.name.startswith('opt_')):
- self._HandleError(errors.JSDOC_MISSING_OPTIONAL_PREFIX,
- 'Optional parameter name %s must be prefixed '
- 'with opt_.' % flag.name,
- token)
- elif (not flag.jstype.opt_arg and
- flag.name.startswith('opt_')):
- self._HandleError(errors.JSDOC_MISSING_OPTIONAL_TYPE,
- 'Optional parameter %s type must end with =.' %
- flag.name,
- token)
-
- if flag.flag_type in state.GetDocFlag().HAS_TYPE:
- # Check for both missing type token and empty type braces '{}'
- # Missing suppress types are reported separately and we allow enums,
- # const, private, public and protected without types.
- if (flag.flag_type not in state.GetDocFlag().CAN_OMIT_TYPE
- and (not flag.jstype or flag.jstype.IsEmpty())):
- self._HandleError(errors.MISSING_JSDOC_TAG_TYPE,
- 'Missing type in %s tag' % token.string, token)
-
- elif flag.name_token and flag.type_end_token and tokenutil.Compare(
- flag.type_end_token, flag.name_token) > 0:
- self._HandleError(
- errors.OUT_OF_ORDER_JSDOC_TAG_TYPE,
- 'Type should be immediately after %s tag' % token.string,
- token)
-
- elif token.type == Type.DOUBLE_QUOTE_STRING_START:
- next_token = token.next
- while next_token.type == Type.STRING_TEXT:
- if javascripttokenizer.JavaScriptTokenizer.SINGLE_QUOTE.search(
- next_token.string):
- break
- next_token = next_token.next
- else:
- self._HandleError(
- errors.UNNECESSARY_DOUBLE_QUOTED_STRING,
- 'Single-quoted string preferred over double-quoted string.',
- token,
- position=Position.All(token.string))
-
- elif token.type == Type.END_DOC_COMMENT:
- doc_comment = state.GetDocComment()
-
- # When @externs appears in a @fileoverview comment, it should trigger
- # the same limited doc checks as a special filename like externs.js.
- if doc_comment.HasFlag('fileoverview') and doc_comment.HasFlag('externs'):
- self._SetLimitedDocChecks(True)
-
- if (error_check.ShouldCheck(Rule.BLANK_LINES_AT_TOP_LEVEL) and
- not self._is_html and
- state.InTopLevel() and
- not state.InNonScopeBlock()):
-
- # Check if we're in a fileoverview or constructor JsDoc.
- is_constructor = (
- doc_comment.HasFlag('constructor') or
- doc_comment.HasFlag('interface'))
- # @fileoverview is an optional tag so if the dosctring is the first
- # token in the file treat it as a file level docstring.
- is_file_level_comment = (
- doc_comment.HasFlag('fileoverview') or
- not doc_comment.start_token.previous)
-
- # If the comment is not a file overview, and it does not immediately
- # precede some code, skip it.
- # NOTE: The tokenutil methods are not used here because of their
- # behavior at the top of a file.
- next_token = token.next
- if (not next_token or
- (not is_file_level_comment and
- next_token.type in Type.NON_CODE_TYPES)):
- return
-
- # Don't require extra blank lines around suppression of extra
- # goog.require errors.
- if (doc_comment.SuppressionOnly() and
- next_token.type == Type.IDENTIFIER and
- next_token.string in ['goog.provide', 'goog.require']):
- return
-
- # Find the start of this block (include comments above the block, unless
- # this is a file overview).
- block_start = doc_comment.start_token
- if not is_file_level_comment:
- token = block_start.previous
- while token and token.type in Type.COMMENT_TYPES:
- block_start = token
- token = token.previous
-
- # Count the number of blank lines before this block.
- blank_lines = 0
- token = block_start.previous
- while token and token.type in [Type.WHITESPACE, Type.BLANK_LINE]:
- if token.type == Type.BLANK_LINE:
- # A blank line.
- blank_lines += 1
- elif token.type == Type.WHITESPACE and not token.line.strip():
- # A line with only whitespace on it.
- blank_lines += 1
- token = token.previous
-
- # Log errors.
- error_message = False
- expected_blank_lines = 0
-
- # Only need blank line before file overview if it is not the beginning
- # of the file, e.g. copyright is first.
- if is_file_level_comment and blank_lines == 0 and block_start.previous:
- error_message = 'Should have a blank line before a file overview.'
- expected_blank_lines = 1
- elif is_constructor and blank_lines != 3:
- error_message = (
- 'Should have 3 blank lines before a constructor/interface.')
- expected_blank_lines = 3
- elif (not is_file_level_comment and not is_constructor and
- blank_lines != 2):
- error_message = 'Should have 2 blank lines between top-level blocks.'
- expected_blank_lines = 2
-
- if error_message:
- self._HandleError(
- errors.WRONG_BLANK_LINE_COUNT, error_message,
- block_start, position=Position.AtBeginning(),
- fix_data=expected_blank_lines - blank_lines)
-
- elif token.type == Type.END_BLOCK:
- if state.InFunction() and state.IsFunctionClose():
- is_immediately_called = (token.next and
- token.next.type == Type.START_PAREN)
-
- function = state.GetFunction()
- if not self._limited_doc_checks:
- if (function.has_return and function.doc and
- not is_immediately_called and
- not function.doc.HasFlag('return') and
- not function.doc.InheritsDocumentation() and
- not function.doc.HasFlag('constructor')):
- # Check for proper documentation of return value.
- self._HandleError(
- errors.MISSING_RETURN_DOCUMENTATION,
- 'Missing @return JsDoc in function with non-trivial return',
- function.doc.end_token, position=Position.AtBeginning())
- elif (not function.has_return and
- not function.has_throw and
- function.doc and
- function.doc.HasFlag('return') and
- not state.InInterfaceMethod()):
- flag = function.doc.GetFlag('return')
- valid_no_return_names = ['undefined', 'void', '*']
- invalid_return = flag.jstype is None or not any(
- sub_type.identifier in valid_no_return_names
- for sub_type in flag.jstype.IterTypeGroup())
-
- if invalid_return:
- self._HandleError(
- errors.UNNECESSARY_RETURN_DOCUMENTATION,
- 'Found @return JsDoc on function that returns nothing',
- flag.flag_token, position=Position.AtBeginning())
-
- # b/4073735. Method in object literal definition of prototype can
- # safely reference 'this'.
- prototype_object_literal = False
- block_start = None
- previous_code = None
- previous_previous_code = None
-
- # Search for cases where prototype is defined as object literal.
- # previous_previous_code
- # | previous_code
- # | | block_start
- # | | |
- # a.b.prototype = {
- # c : function() {
- # this.d = 1;
- # }
- # }
-
- # If in object literal, find first token of block so to find previous
- # tokens to check above condition.
- if state.InObjectLiteral():
- block_start = state.GetCurrentBlockStart()
-
- # If an object literal then get previous token (code type). For above
- # case it should be '='.
- if block_start:
- previous_code = tokenutil.SearchExcept(block_start,
- Type.NON_CODE_TYPES,
- reverse=True)
-
- # If previous token to block is '=' then get its previous token.
- if previous_code and previous_code.IsOperator('='):
- previous_previous_code = tokenutil.SearchExcept(previous_code,
- Type.NON_CODE_TYPES,
- reverse=True)
-
- # If variable/token before '=' ends with '.prototype' then its above
- # case of prototype defined with object literal.
- prototype_object_literal = (previous_previous_code and
- previous_previous_code.string.endswith(
- '.prototype'))
-
- if (function.has_this and function.doc and
- not function.doc.HasFlag('this') and
- not function.is_constructor and
- not function.is_interface and
- '.prototype.' not in function.name and
- not prototype_object_literal):
- self._HandleError(
- errors.MISSING_JSDOC_TAG_THIS,
- 'Missing @this JsDoc in function referencing "this". ('
- 'this usually means you are trying to reference "this" in '
- 'a static function, or you have forgotten to mark a '
- 'constructor with @constructor)',
- function.doc.end_token, position=Position.AtBeginning())
-
- elif token.type == Type.IDENTIFIER:
- if token.string == 'goog.inherits' and not state.InFunction():
- if state.GetLastNonSpaceToken().line_number == token.line_number:
- self._HandleError(
- errors.MISSING_LINE,
- 'Missing newline between constructor and goog.inherits',
- token,
- position=Position.AtBeginning())
-
- extra_space = state.GetLastNonSpaceToken().next
- while extra_space != token:
- if extra_space.type == Type.BLANK_LINE:
- self._HandleError(
- errors.EXTRA_LINE,
- 'Extra line between constructor and goog.inherits',
- extra_space)
- extra_space = extra_space.next
-
- # TODO(robbyw): Test the last function was a constructor.
- # TODO(robbyw): Test correct @extends and @implements documentation.
-
- elif (token.string == 'goog.provide' and
- not state.InFunction() and
- namespaces_info is not None):
- namespace = tokenutil.GetStringAfterToken(token)
-
- # Report extra goog.provide statement.
- if not namespace or namespaces_info.IsExtraProvide(token):
- if not namespace:
- msg = 'Empty namespace in goog.provide'
- else:
- msg = 'Unnecessary goog.provide: ' + namespace
-
- # Hint to user if this is a Test namespace.
- if namespace.endswith('Test'):
- msg += (' *Test namespaces must be mentioned in the '
- 'goog.setTestOnly() call')
-
- self._HandleError(
- errors.EXTRA_GOOG_PROVIDE,
- msg,
- token, position=Position.AtBeginning())
-
- if namespaces_info.IsLastProvide(token):
- # Report missing provide statements after the last existing provide.
- missing_provides = namespaces_info.GetMissingProvides()
- if missing_provides:
- self._ReportMissingProvides(
- missing_provides,
- tokenutil.GetLastTokenInSameLine(token).next,
- False)
-
- # If there are no require statements, missing requires should be
- # reported after the last provide.
- if not namespaces_info.GetRequiredNamespaces():
- missing_requires, illegal_alias_statements = (
- namespaces_info.GetMissingRequires())
- if missing_requires:
- self._ReportMissingRequires(
- missing_requires,
- tokenutil.GetLastTokenInSameLine(token).next,
- True)
- if illegal_alias_statements:
- self._ReportIllegalAliasStatement(illegal_alias_statements)
-
- elif (token.string == 'goog.require' and
- not state.InFunction() and
- namespaces_info is not None):
- namespace = tokenutil.GetStringAfterToken(token)
-
- # If there are no provide statements, missing provides should be
- # reported before the first require.
- if (namespaces_info.IsFirstRequire(token) and
- not namespaces_info.GetProvidedNamespaces()):
- missing_provides = namespaces_info.GetMissingProvides()
- if missing_provides:
- self._ReportMissingProvides(
- missing_provides,
- tokenutil.GetFirstTokenInSameLine(token),
- True)
-
- # Report extra goog.require statement.
- if not namespace or namespaces_info.IsExtraRequire(token):
- if not namespace:
- msg = 'Empty namespace in goog.require'
- else:
- msg = 'Unnecessary goog.require: ' + namespace
-
- self._HandleError(
- errors.EXTRA_GOOG_REQUIRE,
- msg,
- token, position=Position.AtBeginning())
-
- # Report missing goog.require statements.
- if namespaces_info.IsLastRequire(token):
- missing_requires, illegal_alias_statements = (
- namespaces_info.GetMissingRequires())
- if missing_requires:
- self._ReportMissingRequires(
- missing_requires,
- tokenutil.GetLastTokenInSameLine(token).next,
- False)
- if illegal_alias_statements:
- self._ReportIllegalAliasStatement(illegal_alias_statements)
-
- elif token.type == Type.OPERATOR:
- last_in_line = token.IsLastInLine()
- # If the token is unary and appears to be used in a unary context
- # it's ok. Otherwise, if it's at the end of the line or immediately
- # before a comment, it's ok.
- # Don't report an error before a start bracket - it will be reported
- # by that token's space checks.
- if (not token.metadata.IsUnaryOperator() and not last_in_line
- and not token.next.IsComment()
- and not token.next.IsOperator(',')
- and not tokenutil.IsDot(token)
- and token.next.type not in (Type.WHITESPACE, Type.END_PAREN,
- Type.END_BRACKET, Type.SEMICOLON,
- Type.START_BRACKET)):
- self._HandleError(
- errors.MISSING_SPACE,
- 'Missing space after "%s"' % token.string,
- token,
- position=Position.AtEnd(token.string))
- elif token.type == Type.WHITESPACE:
- first_in_line = token.IsFirstInLine()
- last_in_line = token.IsLastInLine()
- # Check whitespace length if it's not the first token of the line and
- # if it's not immediately before a comment.
- if not last_in_line and not first_in_line and not token.next.IsComment():
- # Ensure there is no space after opening parentheses.
- if (token.previous.type in (Type.START_PAREN, Type.START_BRACKET,
- Type.FUNCTION_NAME)
- or token.next.type == Type.START_PARAMETERS):
- self._HandleError(
- errors.EXTRA_SPACE,
- 'Extra space after "%s"' % token.previous.string,
- token,
- position=Position.All(token.string))
- elif token.type == Type.SEMICOLON:
- previous_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES,
- reverse=True)
- if not previous_token:
- self._HandleError(
- errors.REDUNDANT_SEMICOLON,
- 'Semicolon without any statement',
- token,
- position=Position.AtEnd(token.string))
- elif (previous_token.type == Type.KEYWORD and
- previous_token.string not in ['break', 'continue', 'return']):
- self._HandleError(
- errors.REDUNDANT_SEMICOLON,
- ('Semicolon after \'%s\' without any statement.'
- ' Looks like an error.' % previous_token.string),
- token,
- position=Position.AtEnd(token.string))
-
- def _CheckUnusedLocalVariables(self, token, state):
- """Checks for unused local variables in function blocks.
-
- Args:
- token: The token to check.
- state: The state tracker.
- """
- # We don't use state.InFunction because that disregards scope functions.
- in_function = state.FunctionDepth() > 0
- if token.type == Type.SIMPLE_LVALUE or token.type == Type.IDENTIFIER:
- if in_function:
- identifier = token.string
- # Check whether the previous token was var.
- previous_code_token = tokenutil.CustomSearch(
- token,
- lambda t: t.type not in Type.NON_CODE_TYPES,
- reverse=True)
- if previous_code_token and previous_code_token.IsKeyword('var'):
- # Add local variable declaration to the top of the unused locals
- # stack.
- self._unused_local_variables_by_scope[-1][identifier] = token
- elif token.type == Type.IDENTIFIER:
- # This covers most cases where the variable is used as an identifier.
- self._MarkLocalVariableUsed(token.string)
- elif token.type == Type.SIMPLE_LVALUE and '.' in identifier:
- # This covers cases where a value is assigned to a property of the
- # variable.
- self._MarkLocalVariableUsed(token.string)
- elif token.type == Type.START_BLOCK:
- if in_function and state.IsFunctionOpen():
- # Push a new map onto the stack
- self._unused_local_variables_by_scope.append({})
- elif token.type == Type.END_BLOCK:
- if state.IsFunctionClose():
- # Pop the stack and report any remaining locals as unused.
- unused_local_variables = self._unused_local_variables_by_scope.pop()
- for unused_token in unused_local_variables.values():
- self._HandleError(
- errors.UNUSED_LOCAL_VARIABLE,
- 'Unused local variable: %s.' % unused_token.string,
- unused_token)
- elif token.type == Type.DOC_FLAG:
- # Flags that use aliased symbols should be counted.
- flag = token.attached_object
- js_type = flag and flag.jstype
- if flag and flag.flag_type in state.GetDocFlag().HAS_TYPE and js_type:
- self._MarkAliasUsed(js_type)
-
- def _MarkAliasUsed(self, js_type):
- """Marks aliases in a type as used.
-
- Recursively iterates over all subtypes in a jsdoc type annotation and
- tracks usage of aliased symbols (which may be local variables).
- Marks the local variable as used in the scope nearest to the current
- scope that matches the given token.
-
- Args:
- js_type: The jsdoc type, a typeannotation.TypeAnnotation object.
- """
- if js_type.alias:
- self._MarkLocalVariableUsed(js_type.identifier)
- for sub_type in js_type.IterTypes():
- self._MarkAliasUsed(sub_type)
-
- def _MarkLocalVariableUsed(self, identifier):
- """Marks the local variable as used in the relevant scope.
-
- Marks the local variable in the scope nearest to the current scope that
- matches the given identifier as used.
-
- Args:
- identifier: The identifier representing the potential usage of a local
- variable.
- """
- identifier = identifier.split('.', 1)[0]
- # Find the first instance of the identifier in the stack of function scopes
- # and mark it used.
- for unused_local_variables in reversed(
- self._unused_local_variables_by_scope):
- if identifier in unused_local_variables:
- del unused_local_variables[identifier]
- break
-
- def _ReportMissingProvides(self, missing_provides, token, need_blank_line):
- """Reports missing provide statements to the error handler.
-
- Args:
- missing_provides: A dictionary of string(key) and integer(value) where
- each string(key) is a namespace that should be provided, but is not
- and integer(value) is first line number where it's required.
- token: The token where the error was detected (also where the new provides
- will be inserted.
- need_blank_line: Whether a blank line needs to be inserted after the new
- provides are inserted. May be True, False, or None, where None
- indicates that the insert location is unknown.
- """
-
- missing_provides_msg = 'Missing the following goog.provide statements:\n'
- missing_provides_msg += '\n'.join(['goog.provide(\'%s\');' % x for x in
- sorted(missing_provides)])
- missing_provides_msg += '\n'
-
- missing_provides_msg += '\nFirst line where provided: \n'
- missing_provides_msg += '\n'.join(
- [' %s : line %d' % (x, missing_provides[x]) for x in
- sorted(missing_provides)])
- missing_provides_msg += '\n'
-
- self._HandleError(
- errors.MISSING_GOOG_PROVIDE,
- missing_provides_msg,
- token, position=Position.AtBeginning(),
- fix_data=(missing_provides.keys(), need_blank_line))
-
- def _ReportMissingRequires(self, missing_requires, token, need_blank_line):
- """Reports missing require statements to the error handler.
-
- Args:
- missing_requires: A dictionary of string(key) and integer(value) where
- each string(key) is a namespace that should be required, but is not
- and integer(value) is first line number where it's required.
- token: The token where the error was detected (also where the new requires
- will be inserted.
- need_blank_line: Whether a blank line needs to be inserted before the new
- requires are inserted. May be True, False, or None, where None
- indicates that the insert location is unknown.
- """
-
- missing_requires_msg = 'Missing the following goog.require statements:\n'
- missing_requires_msg += '\n'.join(['goog.require(\'%s\');' % x for x in
- sorted(missing_requires)])
- missing_requires_msg += '\n'
-
- missing_requires_msg += '\nFirst line where required: \n'
- missing_requires_msg += '\n'.join(
- [' %s : line %d' % (x, missing_requires[x]) for x in
- sorted(missing_requires)])
- missing_requires_msg += '\n'
-
- self._HandleError(
- errors.MISSING_GOOG_REQUIRE,
- missing_requires_msg,
- token, position=Position.AtBeginning(),
- fix_data=(missing_requires.keys(), need_blank_line))
-
- def _ReportIllegalAliasStatement(self, illegal_alias_statements):
- """Reports alias statements that would need a goog.require."""
- for namespace, token in illegal_alias_statements.iteritems():
- self._HandleError(
- errors.ALIAS_STMT_NEEDS_GOOG_REQUIRE,
- 'The alias definition would need the namespace \'%s\' which is not '
- 'required through any other symbol.' % namespace,
- token, position=Position.AtBeginning())
-
- def Finalize(self, state):
- """Perform all checks that need to occur after all lines are processed."""
- # Call the base class's Finalize function.
- super(JavaScriptLintRules, self).Finalize(state)
-
- if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
- # Report an error for any declared private member that was never used.
- unused_private_members = (self._declared_private_members -
- self._used_private_members)
-
- for variable in unused_private_members:
- token = self._declared_private_member_tokens[variable]
- self._HandleError(errors.UNUSED_PRIVATE_MEMBER,
- 'Unused private member: %s.' % token.string,
- token)
-
- # Clear state to prepare for the next file.
- self._declared_private_member_tokens = {}
- self._declared_private_members = set()
- self._used_private_members = set()
-
- namespaces_info = self._namespaces_info
- if namespaces_info is not None:
- # If there are no provide or require statements, missing provides and
- # requires should be reported on line 1.
- if (not namespaces_info.GetProvidedNamespaces() and
- not namespaces_info.GetRequiredNamespaces()):
- missing_provides = namespaces_info.GetMissingProvides()
- if missing_provides:
- self._ReportMissingProvides(
- missing_provides, state.GetFirstToken(), None)
-
- missing_requires, illegal_alias = namespaces_info.GetMissingRequires()
- if missing_requires:
- self._ReportMissingRequires(
- missing_requires, state.GetFirstToken(), None)
- if illegal_alias:
- self._ReportIllegalAliasStatement(illegal_alias)
-
- self._CheckSortedRequiresProvides(state.GetFirstToken())
-
- def _CheckSortedRequiresProvides(self, token):
- """Checks that all goog.require and goog.provide statements are sorted.
-
- Note that this method needs to be run after missing statements are added to
- preserve alphabetical order.
-
- Args:
- token: The first token in the token stream.
- """
- sorter = requireprovidesorter.RequireProvideSorter()
- first_provide_token = sorter.CheckProvides(token)
- if first_provide_token:
- new_order = sorter.GetFixedProvideString(first_provide_token)
- self._HandleError(
- errors.GOOG_PROVIDES_NOT_ALPHABETIZED,
- 'goog.provide classes must be alphabetized. The correct code is:\n' +
- new_order,
- first_provide_token,
- position=Position.AtBeginning(),
- fix_data=first_provide_token)
-
- first_require_token = sorter.CheckRequires(token)
- if first_require_token:
- new_order = sorter.GetFixedRequireString(first_require_token)
- self._HandleError(
- errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
- 'goog.require classes must be alphabetized. The correct code is:\n' +
- new_order,
- first_require_token,
- position=Position.AtBeginning(),
- fix_data=first_require_token)
-
- def GetLongLineExceptions(self):
- """Gets a list of regexps for lines which can be longer than the limit.
-
- Returns:
- A list of regexps, used as matches (rather than searches).
- """
- return [
- re.compile(r'(var .+\s*=\s*)?goog\.require\(.+\);?\s*$'),
- re.compile(r'goog\.(provide|module|setTestOnly)\(.+\);?\s*$'),
- re.compile(r'[\s/*]*@visibility\s*{.*}[\s*/]*$'),
- ]
diff --git a/tools/closure_linter/closure_linter/javascriptstatetracker.py b/tools/closure_linter/closure_linter/javascriptstatetracker.py
deleted file mode 100755
index e0a42f66a8..0000000000
--- a/tools/closure_linter/closure_linter/javascriptstatetracker.py
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Parser for JavaScript files."""
-
-
-
-from closure_linter import javascripttokens
-from closure_linter import statetracker
-from closure_linter import tokenutil
-
-# Shorthand
-Type = javascripttokens.JavaScriptTokenType
-
-
-class JsDocFlag(statetracker.DocFlag):
- """Javascript doc flag object.
-
- Attribute:
- flag_type: param, return, define, type, etc.
- flag_token: The flag token.
- type_start_token: The first token specifying the flag JS type,
- including braces.
- type_end_token: The last token specifying the flag JS type,
- including braces.
- type: The type spec string.
- jstype: The type spec, a TypeAnnotation instance.
- name_token: The token specifying the flag name.
- name: The flag name
- description_start_token: The first token in the description.
- description_end_token: The end token in the description.
- description: The description.
- """
-
- # Please keep these lists alphabetized.
-
- # Some projects use the following extensions to JsDoc.
- # TODO(robbyw): determine which of these, if any, should be illegal.
- EXTENDED_DOC = frozenset([
- 'class', 'code', 'desc', 'final', 'hidden', 'inheritDoc', 'link',
- 'meaning', 'provideGoog', 'throws'])
-
- LEGAL_DOC = EXTENDED_DOC | statetracker.DocFlag.LEGAL_DOC
-
-
-class JavaScriptStateTracker(statetracker.StateTracker):
- """JavaScript state tracker.
-
- Inherits from the core EcmaScript StateTracker adding extra state tracking
- functionality needed for JavaScript.
- """
-
- def __init__(self):
- """Initializes a JavaScript token stream state tracker."""
- statetracker.StateTracker.__init__(self, JsDocFlag)
-
- def Reset(self):
- self._scope_depth = 0
- self._block_stack = []
- super(JavaScriptStateTracker, self).Reset()
-
- def InTopLevel(self):
- """Compute whether we are at the top level in the class.
-
- This function call is language specific. In some languages like
- JavaScript, a function is top level if it is not inside any parenthesis.
- In languages such as ActionScript, a function is top level if it is directly
- within a class.
-
- Returns:
- Whether we are at the top level in the class.
- """
- return self._scope_depth == self.ParenthesesDepth()
-
- def InFunction(self):
- """Returns true if the current token is within a function.
-
- This js-specific override ignores goog.scope functions.
-
- Returns:
- True if the current token is within a function.
- """
- return self._scope_depth != self.FunctionDepth()
-
- def InNonScopeBlock(self):
- """Compute whether we are nested within a non-goog.scope block.
-
- Returns:
- True if the token is not enclosed in a block that does not originate from
- a goog.scope statement. False otherwise.
- """
- return self._scope_depth != self.BlockDepth()
-
- def GetBlockType(self, token):
- """Determine the block type given a START_BLOCK token.
-
- Code blocks come after parameters, keywords like else, and closing parens.
-
- Args:
- token: The current token. Can be assumed to be type START_BLOCK
- Returns:
- Code block type for current token.
- """
- last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, reverse=True)
- if last_code.type in (Type.END_PARAMETERS, Type.END_PAREN,
- Type.KEYWORD) and not last_code.IsKeyword('return'):
- return self.CODE
- else:
- return self.OBJECT_LITERAL
-
- def GetCurrentBlockStart(self):
- """Gets the start token of current block.
-
- Returns:
- Starting token of current block. None if not in block.
- """
- if self._block_stack:
- return self._block_stack[-1]
- else:
- return None
-
- def HandleToken(self, token, last_non_space_token):
- """Handles the given token and updates state.
-
- Args:
- token: The token to handle.
- last_non_space_token: The last non space token encountered
- """
- if token.type == Type.START_BLOCK:
- self._block_stack.append(token)
- if token.type == Type.IDENTIFIER and token.string == 'goog.scope':
- self._scope_depth += 1
- if token.type == Type.END_BLOCK:
- start_token = self._block_stack.pop()
- if tokenutil.GoogScopeOrNoneFromStartBlock(start_token):
- self._scope_depth -= 1
- super(JavaScriptStateTracker, self).HandleToken(token,
- last_non_space_token)
diff --git a/tools/closure_linter/closure_linter/javascriptstatetracker_test.py b/tools/closure_linter/closure_linter/javascriptstatetracker_test.py
deleted file mode 100644
index 76dabd2c70..0000000000
--- a/tools/closure_linter/closure_linter/javascriptstatetracker_test.py
+++ /dev/null
@@ -1,278 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unit tests for the javascriptstatetracker module."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = ('nnaze@google.com (Nathan Naze)')
-
-
-import unittest as googletest
-
-from closure_linter import javascripttokens
-from closure_linter import testutil
-from closure_linter import tokenutil
-
-
-_FUNCTION_SCRIPT = """\
-var a = 3;
-
-function foo(aaa, bbb, ccc) {
- var b = 4;
-}
-
-
-/**
- * JSDoc comment.
- */
-var bar = function(ddd, eee, fff) {
-
-};
-
-
-/**
- * Verify that nested functions get their proper parameters recorded.
- */
-var baz = function(ggg, hhh, iii) {
- var qux = function(jjj, kkk, lll) {
- };
- // make sure that entering a new block does not change baz' parameters.
- {};
-};
-
-"""
-
-
-class FunctionTest(googletest.TestCase):
-
- def testFunctionParse(self):
- functions, _ = testutil.ParseFunctionsAndComments(_FUNCTION_SCRIPT)
- self.assertEquals(4, len(functions))
-
- # First function
- function = functions[0]
- self.assertEquals(['aaa', 'bbb', 'ccc'], function.parameters)
-
- start_token = function.start_token
- end_token = function.end_token
-
- self.assertEquals(
- javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
- function.start_token.type)
-
- self.assertEquals('function', start_token.string)
- self.assertEquals(3, start_token.line_number)
- self.assertEquals(0, start_token.start_index)
-
- self.assertEquals('}', end_token.string)
- self.assertEquals(5, end_token.line_number)
- self.assertEquals(0, end_token.start_index)
-
- self.assertEquals('foo', function.name)
-
- self.assertIsNone(function.doc)
-
- # Second function
- function = functions[1]
- self.assertEquals(['ddd', 'eee', 'fff'], function.parameters)
-
- start_token = function.start_token
- end_token = function.end_token
-
- self.assertEquals(
- javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
- function.start_token.type)
-
- self.assertEquals('function', start_token.string)
- self.assertEquals(11, start_token.line_number)
- self.assertEquals(10, start_token.start_index)
-
- self.assertEquals('}', end_token.string)
- self.assertEquals(13, end_token.line_number)
- self.assertEquals(0, end_token.start_index)
-
- self.assertEquals('bar', function.name)
-
- self.assertIsNotNone(function.doc)
-
- # Check function JSDoc
- doc = function.doc
- doc_tokens = tokenutil.GetTokenRange(doc.start_token, doc.end_token)
-
- comment_type = javascripttokens.JavaScriptTokenType.COMMENT
- comment_tokens = filter(lambda t: t.type is comment_type, doc_tokens)
-
- self.assertEquals('JSDoc comment.',
- tokenutil.TokensToString(comment_tokens).strip())
-
- # Third function
- function = functions[2]
- self.assertEquals(['ggg', 'hhh', 'iii'], function.parameters)
-
- start_token = function.start_token
- end_token = function.end_token
-
- self.assertEquals(
- javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
- function.start_token.type)
-
- self.assertEquals('function', start_token.string)
- self.assertEquals(19, start_token.line_number)
- self.assertEquals(10, start_token.start_index)
-
- self.assertEquals('}', end_token.string)
- self.assertEquals(24, end_token.line_number)
- self.assertEquals(0, end_token.start_index)
-
- self.assertEquals('baz', function.name)
- self.assertIsNotNone(function.doc)
-
- # Fourth function (inside third function)
- function = functions[3]
- self.assertEquals(['jjj', 'kkk', 'lll'], function.parameters)
-
- start_token = function.start_token
- end_token = function.end_token
-
- self.assertEquals(
- javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
- function.start_token.type)
-
- self.assertEquals('function', start_token.string)
- self.assertEquals(20, start_token.line_number)
- self.assertEquals(12, start_token.start_index)
-
- self.assertEquals('}', end_token.string)
- self.assertEquals(21, end_token.line_number)
- self.assertEquals(2, end_token.start_index)
-
- self.assertEquals('qux', function.name)
- self.assertIsNone(function.doc)
-
-
-
-class CommentTest(googletest.TestCase):
-
- def testGetDescription(self):
- comment = self._ParseComment("""
- /**
- * Comment targeting goog.foo.
- *
- * This is the second line.
- * @param {number} foo The count of foo.
- */
- target;""")
-
- self.assertEqual(
- 'Comment targeting goog.foo.\n\nThis is the second line.',
- comment.description)
-
- def testCommentGetTarget(self):
- self.assertCommentTarget('goog.foo', """
- /**
- * Comment targeting goog.foo.
- */
- goog.foo = 6;
- """)
-
- self.assertCommentTarget('bar', """
- /**
- * Comment targeting bar.
- */
- var bar = "Karate!";
- """)
-
- self.assertCommentTarget('doThing', """
- /**
- * Comment targeting doThing.
- */
- function doThing() {};
- """)
-
- self.assertCommentTarget('this.targetProperty', """
- goog.bar.Baz = function() {
- /**
- * Comment targeting targetProperty.
- */
- this.targetProperty = 3;
- };
- """)
-
- self.assertCommentTarget('goog.bar.prop', """
- /**
- * Comment targeting goog.bar.prop.
- */
- goog.bar.prop;
- """)
-
- self.assertCommentTarget('goog.aaa.bbb', """
- /**
- * Comment targeting goog.aaa.bbb.
- */
- (goog.aaa.bbb)
- """)
-
- self.assertCommentTarget('theTarget', """
- /**
- * Comment targeting symbol preceded by newlines, whitespace,
- * and parens -- things we ignore.
- */
- (theTarget)
- """)
-
- self.assertCommentTarget(None, """
- /**
- * @fileoverview File overview.
- */
- (notATarget)
- """)
-
- self.assertCommentTarget(None, """
- /**
- * Comment that doesn't find a target.
- */
- """)
-
- self.assertCommentTarget('theTarget.is.split.across.lines', """
- /**
- * Comment that addresses a symbol split across lines.
- */
- (theTarget.is.split
- .across.lines)
- """)
-
- self.assertCommentTarget('theTarget.is.split.across.lines', """
- /**
- * Comment that addresses a symbol split across lines.
- */
- (theTarget.is.split.
- across.lines)
- """)
-
- def _ParseComment(self, script):
- """Parse a script that contains one comment and return it."""
- _, comments = testutil.ParseFunctionsAndComments(script)
- self.assertEquals(1, len(comments))
- return comments[0]
-
- def assertCommentTarget(self, target, script):
- comment = self._ParseComment(script)
- self.assertEquals(target, comment.GetTargetIdentifier())
-
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/closure_linter/javascripttokenizer.py b/tools/closure_linter/closure_linter/javascripttokenizer.py
deleted file mode 100755
index 2ee5b81ee1..0000000000
--- a/tools/closure_linter/closure_linter/javascripttokenizer.py
+++ /dev/null
@@ -1,463 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Regular expression based JavaScript parsing classes."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-import copy
-import re
-
-from closure_linter import javascripttokens
-from closure_linter.common import matcher
-from closure_linter.common import tokenizer
-
-# Shorthand
-Type = javascripttokens.JavaScriptTokenType
-Matcher = matcher.Matcher
-
-
-class JavaScriptModes(object):
- """Enumeration of the different matcher modes used for JavaScript."""
- TEXT_MODE = 'text'
- SINGLE_QUOTE_STRING_MODE = 'single_quote_string'
- DOUBLE_QUOTE_STRING_MODE = 'double_quote_string'
- BLOCK_COMMENT_MODE = 'block_comment'
- DOC_COMMENT_MODE = 'doc_comment'
- DOC_COMMENT_LEX_SPACES_MODE = 'doc_comment_spaces'
- LINE_COMMENT_MODE = 'line_comment'
- PARAMETER_MODE = 'parameter'
- FUNCTION_MODE = 'function'
-
-
-class JavaScriptTokenizer(tokenizer.Tokenizer):
- """JavaScript tokenizer.
-
- Convert JavaScript code in to an array of tokens.
- """
-
- # Useful patterns for JavaScript parsing.
- IDENTIFIER_CHAR = r'A-Za-z0-9_$'
-
- # Number patterns based on:
- # http://www.mozilla.org/js/language/js20-2000-07/formal/lexer-grammar.html
- MANTISSA = r"""
- (\d+(?!\.)) | # Matches '10'
- (\d+\.(?!\d)) | # Matches '10.'
- (\d*\.\d+) # Matches '.5' or '10.5'
- """
- DECIMAL_LITERAL = r'(%s)([eE][-+]?\d+)?' % MANTISSA
- HEX_LITERAL = r'0[xX][0-9a-fA-F]+'
- NUMBER = re.compile(r"""
- ((%s)|(%s))
- """ % (HEX_LITERAL, DECIMAL_LITERAL), re.VERBOSE)
-
- # Strings come in three parts - first we match the start of the string, then
- # the contents, then the end. The contents consist of any character except a
- # backslash or end of string, or a backslash followed by any character, or a
- # backslash followed by end of line to support correct parsing of multi-line
- # strings.
- SINGLE_QUOTE = re.compile(r"'")
- SINGLE_QUOTE_TEXT = re.compile(r"([^'\\]|\\(.|$))+")
- DOUBLE_QUOTE = re.compile(r'"')
- DOUBLE_QUOTE_TEXT = re.compile(r'([^"\\]|\\(.|$))+')
-
- START_SINGLE_LINE_COMMENT = re.compile(r'//')
- END_OF_LINE_SINGLE_LINE_COMMENT = re.compile(r'//$')
-
- START_DOC_COMMENT = re.compile(r'/\*\*')
- START_BLOCK_COMMENT = re.compile(r'/\*')
- END_BLOCK_COMMENT = re.compile(r'\*/')
- BLOCK_COMMENT_TEXT = re.compile(r'([^*]|\*(?!/))+')
-
- # Comment text is anything that we are not going to parse into another special
- # token like (inline) flags or end comments. Complicated regex to match
- # most normal characters, and '*', '{', '}', and '@' when we are sure that
- # it is safe. Expression [^*{\s]@ must come first, or the other options will
- # match everything before @, and we won't match @'s that aren't part of flags
- # like in email addresses in the @author tag.
- DOC_COMMENT_TEXT = re.compile(r'([^*{}\s]@|[^*{}@]|\*(?!/))+')
- DOC_COMMENT_NO_SPACES_TEXT = re.compile(r'([^*{}\s]@|[^*{}@\s]|\*(?!/))+')
- # Match anything that is allowed in a type definition, except for tokens
- # needed to parse it (and the lookahead assertion for "*/").
- DOC_COMMENT_TYPE_TEXT = re.compile(r'([^*|!?=<>(){}:,\s]|\*(?!/))+')
-
- # Match the prefix ' * ' that starts every line of jsdoc. Want to include
- # spaces after the '*', but nothing else that occurs after a '*', and don't
- # want to match the '*' in '*/'.
- DOC_PREFIX = re.compile(r'\s*\*(\s+|(?!/))')
-
- START_BLOCK = re.compile('{')
- END_BLOCK = re.compile('}')
-
- REGEX_CHARACTER_CLASS = r"""
- \[ # Opening bracket
- ([^\]\\]|\\.)* # Anything but a ] or \,
- # or a backslash followed by anything
- \] # Closing bracket
- """
- # We ensure the regex is followed by one of the above tokens to avoid
- # incorrectly parsing something like x / y / z as x REGEX(/ y /) z
- POST_REGEX_LIST = [
- ';', ',', r'\.', r'\)', r'\]', '$', r'\/\/', r'\/\*', ':', '}']
-
- REGEX = re.compile(r"""
- / # opening slash
- (?!\*) # not the start of a comment
- (\\.|[^\[\/\\]|(%s))* # a backslash followed by anything,
- # or anything but a / or [ or \,
- # or a character class
- / # closing slash
- [gimsx]* # optional modifiers
- (?=\s*(%s))
- """ % (REGEX_CHARACTER_CLASS, '|'.join(POST_REGEX_LIST)),
- re.VERBOSE)
-
- ANYTHING = re.compile(r'.*')
- PARAMETERS = re.compile(r'[^\)]+')
- CLOSING_PAREN_WITH_SPACE = re.compile(r'\)\s*')
-
- FUNCTION_DECLARATION = re.compile(r'\bfunction\b')
-
- OPENING_PAREN = re.compile(r'\(')
- CLOSING_PAREN = re.compile(r'\)')
-
- OPENING_BRACKET = re.compile(r'\[')
- CLOSING_BRACKET = re.compile(r'\]')
-
- # We omit these JS keywords from the list:
- # function - covered by FUNCTION_DECLARATION.
- # delete, in, instanceof, new, typeof - included as operators.
- # this - included in identifiers.
- # null, undefined - not included, should go in some "special constant" list.
- KEYWORD_LIST = [
- 'break',
- 'case',
- 'catch',
- 'continue',
- 'default',
- 'do',
- 'else',
- 'finally',
- 'for',
- 'if',
- 'return',
- 'switch',
- 'throw',
- 'try',
- 'var',
- 'while',
- 'with',
- ]
-
- # List of regular expressions to match as operators. Some notes: for our
- # purposes, the comma behaves similarly enough to a normal operator that we
- # include it here. r'\bin\b' actually matches 'in' surrounded by boundary
- # characters - this may not match some very esoteric uses of the in operator.
- # Operators that are subsets of larger operators must come later in this list
- # for proper matching, e.g., '>>' must come AFTER '>>>'.
- OPERATOR_LIST = [
- ',',
- r'\+\+',
- '===',
- '!==',
- '>>>=',
- '>>>',
- '==',
- '>=',
- '<=',
- '!=',
- '<<=',
- '>>=',
- '<<',
- '>>',
- '=>',
- '>',
- '<',
- r'\+=',
- r'\+',
- '--',
- r'\^=',
- '-=',
- '-',
- '/=',
- '/',
- r'\*=',
- r'\*',
- '%=',
- '%',
- '&&',
- r'\|\|',
- '&=',
- '&',
- r'\|=',
- r'\|',
- '=',
- '!',
- ':',
- r'\?',
- r'\^',
- r'\bdelete\b',
- r'\bin\b',
- r'\binstanceof\b',
- r'\bnew\b',
- r'\btypeof\b',
- r'\bvoid\b',
- r'\.',
- ]
- OPERATOR = re.compile('|'.join(OPERATOR_LIST))
-
- WHITESPACE = re.compile(r'\s+')
- SEMICOLON = re.compile(r';')
- # Technically JavaScript identifiers can't contain '.', but we treat a set of
- # nested identifiers as a single identifier, except for trailing dots.
- NESTED_IDENTIFIER = r'[a-zA-Z_$]([%s]|\.[a-zA-Z_$])*' % IDENTIFIER_CHAR
- IDENTIFIER = re.compile(NESTED_IDENTIFIER)
-
- SIMPLE_LVALUE = re.compile(r"""
- (?P<identifier>%s) # a valid identifier
- (?=\s* # optional whitespace
- \= # look ahead to equal sign
- (?!=)) # not follwed by equal
- """ % NESTED_IDENTIFIER, re.VERBOSE)
-
- # A doc flag is a @ sign followed by non-space characters that appears at the
- # beginning of the line, after whitespace, or after a '{'. The look-behind
- # check is necessary to not match someone@google.com as a flag.
- DOC_FLAG = re.compile(r'(^|(?<=\s))@(?P<name>[a-zA-Z]+)')
- # To properly parse parameter names and complex doctypes containing
- # whitespace, we need to tokenize whitespace into a token after certain
- # doctags. All statetracker.HAS_TYPE that are not listed here must not contain
- # any whitespace in their types.
- DOC_FLAG_LEX_SPACES = re.compile(
- r'(^|(?<=\s))@(?P<name>%s)\b' %
- '|'.join([
- 'const',
- 'enum',
- 'extends',
- 'final',
- 'implements',
- 'param',
- 'private',
- 'protected',
- 'public',
- 'return',
- 'type',
- 'typedef'
- ]))
-
- DOC_INLINE_FLAG = re.compile(r'(?<={)@(?P<name>[a-zA-Z]+)')
-
- DOC_TYPE_BLOCK_START = re.compile(r'[<(]')
- DOC_TYPE_BLOCK_END = re.compile(r'[>)]')
- DOC_TYPE_MODIFIERS = re.compile(r'[!?|,:=]')
-
- # Star followed by non-slash, i.e a star that does not end a comment.
- # This is used for TYPE_GROUP below.
- SAFE_STAR = r'(\*(?!/))'
-
- COMMON_DOC_MATCHERS = [
- # Find the end of the comment.
- Matcher(END_BLOCK_COMMENT, Type.END_DOC_COMMENT,
- JavaScriptModes.TEXT_MODE),
-
- # Tokenize documented flags like @private.
- Matcher(DOC_INLINE_FLAG, Type.DOC_INLINE_FLAG),
- Matcher(DOC_FLAG_LEX_SPACES, Type.DOC_FLAG,
- JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE),
-
- # Encountering a doc flag should leave lex spaces mode.
- Matcher(DOC_FLAG, Type.DOC_FLAG, JavaScriptModes.DOC_COMMENT_MODE),
-
- # Tokenize braces so we can find types.
- Matcher(START_BLOCK, Type.DOC_START_BRACE),
- Matcher(END_BLOCK, Type.DOC_END_BRACE),
-
- # And some more to parse types.
- Matcher(DOC_TYPE_BLOCK_START, Type.DOC_TYPE_START_BLOCK),
- Matcher(DOC_TYPE_BLOCK_END, Type.DOC_TYPE_END_BLOCK),
-
- Matcher(DOC_TYPE_MODIFIERS, Type.DOC_TYPE_MODIFIER),
- Matcher(DOC_COMMENT_TYPE_TEXT, Type.COMMENT),
-
- Matcher(DOC_PREFIX, Type.DOC_PREFIX, None, True)]
-
- # When text is not matched, it is given this default type based on mode.
- # If unspecified in this map, the default default is Type.NORMAL.
- JAVASCRIPT_DEFAULT_TYPES = {
- JavaScriptModes.DOC_COMMENT_MODE: Type.COMMENT,
- JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: Type.COMMENT
- }
-
- @classmethod
- def BuildMatchers(cls):
- """Builds the token matcher group.
-
- The token matcher groups work as follows: it is a list of Matcher objects.
- The matchers will be tried in this order, and the first to match will be
- returned. Hence the order is important because the matchers that come first
- overrule the matchers that come later.
-
- Returns:
- The completed token matcher group.
- """
- # Match a keyword string followed by a non-identifier character in order to
- # not match something like doSomething as do + Something.
- keyword = re.compile('(%s)((?=[^%s])|$)' % (
- '|'.join(cls.KEYWORD_LIST), cls.IDENTIFIER_CHAR))
- return {
-
- # Matchers for basic text mode.
- JavaScriptModes.TEXT_MODE: [
- # Check a big group - strings, starting comments, and regexes - all
- # of which could be intertwined. 'string with /regex/',
- # /regex with 'string'/, /* comment with /regex/ and string */ (and
- # so on)
- Matcher(cls.START_DOC_COMMENT, Type.START_DOC_COMMENT,
- JavaScriptModes.DOC_COMMENT_MODE),
- Matcher(cls.START_BLOCK_COMMENT, Type.START_BLOCK_COMMENT,
- JavaScriptModes.BLOCK_COMMENT_MODE),
- Matcher(cls.END_OF_LINE_SINGLE_LINE_COMMENT,
- Type.START_SINGLE_LINE_COMMENT),
- Matcher(cls.START_SINGLE_LINE_COMMENT,
- Type.START_SINGLE_LINE_COMMENT,
- JavaScriptModes.LINE_COMMENT_MODE),
- Matcher(cls.SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_START,
- JavaScriptModes.SINGLE_QUOTE_STRING_MODE),
- Matcher(cls.DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_START,
- JavaScriptModes.DOUBLE_QUOTE_STRING_MODE),
- Matcher(cls.REGEX, Type.REGEX),
-
- # Next we check for start blocks appearing outside any of the items
- # above.
- Matcher(cls.START_BLOCK, Type.START_BLOCK),
- Matcher(cls.END_BLOCK, Type.END_BLOCK),
-
- # Then we search for function declarations.
- Matcher(cls.FUNCTION_DECLARATION, Type.FUNCTION_DECLARATION,
- JavaScriptModes.FUNCTION_MODE),
-
- # Next, we convert non-function related parens to tokens.
- Matcher(cls.OPENING_PAREN, Type.START_PAREN),
- Matcher(cls.CLOSING_PAREN, Type.END_PAREN),
-
- # Next, we convert brackets to tokens.
- Matcher(cls.OPENING_BRACKET, Type.START_BRACKET),
- Matcher(cls.CLOSING_BRACKET, Type.END_BRACKET),
-
- # Find numbers. This has to happen before operators because
- # scientific notation numbers can have + and - in them.
- Matcher(cls.NUMBER, Type.NUMBER),
-
- # Find operators and simple assignments
- Matcher(cls.SIMPLE_LVALUE, Type.SIMPLE_LVALUE),
- Matcher(cls.OPERATOR, Type.OPERATOR),
-
- # Find key words and whitespace.
- Matcher(keyword, Type.KEYWORD),
- Matcher(cls.WHITESPACE, Type.WHITESPACE),
-
- # Find identifiers.
- Matcher(cls.IDENTIFIER, Type.IDENTIFIER),
-
- # Finally, we convert semicolons to tokens.
- Matcher(cls.SEMICOLON, Type.SEMICOLON)],
-
- # Matchers for single quote strings.
- JavaScriptModes.SINGLE_QUOTE_STRING_MODE: [
- Matcher(cls.SINGLE_QUOTE_TEXT, Type.STRING_TEXT),
- Matcher(cls.SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_END,
- JavaScriptModes.TEXT_MODE)],
-
- # Matchers for double quote strings.
- JavaScriptModes.DOUBLE_QUOTE_STRING_MODE: [
- Matcher(cls.DOUBLE_QUOTE_TEXT, Type.STRING_TEXT),
- Matcher(cls.DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_END,
- JavaScriptModes.TEXT_MODE)],
-
- # Matchers for block comments.
- JavaScriptModes.BLOCK_COMMENT_MODE: [
- # First we check for exiting a block comment.
- Matcher(cls.END_BLOCK_COMMENT, Type.END_BLOCK_COMMENT,
- JavaScriptModes.TEXT_MODE),
-
- # Match non-comment-ending text..
- Matcher(cls.BLOCK_COMMENT_TEXT, Type.COMMENT)],
-
- # Matchers for doc comments.
- JavaScriptModes.DOC_COMMENT_MODE: cls.COMMON_DOC_MATCHERS + [
- Matcher(cls.DOC_COMMENT_TEXT, Type.COMMENT)],
-
- JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: cls.COMMON_DOC_MATCHERS + [
- Matcher(cls.WHITESPACE, Type.COMMENT),
- Matcher(cls.DOC_COMMENT_NO_SPACES_TEXT, Type.COMMENT)],
-
- # Matchers for single line comments.
- JavaScriptModes.LINE_COMMENT_MODE: [
- # We greedy match until the end of the line in line comment mode.
- Matcher(cls.ANYTHING, Type.COMMENT, JavaScriptModes.TEXT_MODE)],
-
- # Matchers for code after the function keyword.
- JavaScriptModes.FUNCTION_MODE: [
- # Must match open paren before anything else and move into parameter
- # mode, otherwise everything inside the parameter list is parsed
- # incorrectly.
- Matcher(cls.OPENING_PAREN, Type.START_PARAMETERS,
- JavaScriptModes.PARAMETER_MODE),
- Matcher(cls.WHITESPACE, Type.WHITESPACE),
- Matcher(cls.IDENTIFIER, Type.FUNCTION_NAME)],
-
- # Matchers for function parameters
- JavaScriptModes.PARAMETER_MODE: [
- # When in function parameter mode, a closing paren is treated
- # specially. Everything else is treated as lines of parameters.
- Matcher(cls.CLOSING_PAREN_WITH_SPACE, Type.END_PARAMETERS,
- JavaScriptModes.TEXT_MODE),
- Matcher(cls.PARAMETERS, Type.PARAMETERS,
- JavaScriptModes.PARAMETER_MODE)]}
-
- def __init__(self, parse_js_doc=True):
- """Create a tokenizer object.
-
- Args:
- parse_js_doc: Whether to do detailed parsing of javascript doc comments,
- or simply treat them as normal comments. Defaults to parsing JsDoc.
- """
- matchers = self.BuildMatchers()
- if not parse_js_doc:
- # Make a copy so the original doesn't get modified.
- matchers = copy.deepcopy(matchers)
- matchers[JavaScriptModes.DOC_COMMENT_MODE] = matchers[
- JavaScriptModes.BLOCK_COMMENT_MODE]
-
- tokenizer.Tokenizer.__init__(self, JavaScriptModes.TEXT_MODE, matchers,
- self.JAVASCRIPT_DEFAULT_TYPES)
-
- def _CreateToken(self, string, token_type, line, line_number, values=None):
- """Creates a new JavaScriptToken object.
-
- Args:
- string: The string of input the token contains.
- token_type: The type of token.
- line: The text of the line this token is in.
- line_number: The line number of the token.
- values: A dict of named values within the token. For instance, a
- function declaration may have a value called 'name' which captures the
- name of the function.
- """
- return javascripttokens.JavaScriptToken(string, token_type, line,
- line_number, values, line_number)
diff --git a/tools/closure_linter/closure_linter/javascripttokens.py b/tools/closure_linter/closure_linter/javascripttokens.py
deleted file mode 100755
index f5815d2bf8..0000000000
--- a/tools/closure_linter/closure_linter/javascripttokens.py
+++ /dev/null
@@ -1,153 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Classes to represent JavaScript tokens."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-from closure_linter.common import tokens
-
-class JavaScriptTokenType(tokens.TokenType):
- """Enumeration of JavaScript token types, and useful sets of token types."""
- NUMBER = 'number'
- START_SINGLE_LINE_COMMENT = '//'
- START_BLOCK_COMMENT = '/*'
- START_DOC_COMMENT = '/**'
- END_BLOCK_COMMENT = '*/'
- END_DOC_COMMENT = 'doc */'
- COMMENT = 'comment'
- SINGLE_QUOTE_STRING_START = "'string"
- SINGLE_QUOTE_STRING_END = "string'"
- DOUBLE_QUOTE_STRING_START = '"string'
- DOUBLE_QUOTE_STRING_END = 'string"'
- STRING_TEXT = 'string'
- START_BLOCK = '{'
- END_BLOCK = '}'
- START_PAREN = '('
- END_PAREN = ')'
- START_BRACKET = '['
- END_BRACKET = ']'
- REGEX = '/regex/'
- FUNCTION_DECLARATION = 'function(...)'
- FUNCTION_NAME = 'function functionName(...)'
- START_PARAMETERS = 'startparams('
- PARAMETERS = 'pa,ra,ms'
- END_PARAMETERS = ')endparams'
- SEMICOLON = ';'
- DOC_FLAG = '@flag'
- DOC_INLINE_FLAG = '{@flag ...}'
- DOC_START_BRACE = 'doc {'
- DOC_END_BRACE = 'doc }'
- DOC_PREFIX = 'comment prefix: * '
- DOC_TYPE_START_BLOCK = 'Type <'
- DOC_TYPE_END_BLOCK = 'Type >'
- DOC_TYPE_MODIFIER = 'modifier'
- SIMPLE_LVALUE = 'lvalue='
- KEYWORD = 'keyword'
- OPERATOR = 'operator'
- IDENTIFIER = 'identifier'
-
- STRING_TYPES = frozenset([
- SINGLE_QUOTE_STRING_START, SINGLE_QUOTE_STRING_END,
- DOUBLE_QUOTE_STRING_START, DOUBLE_QUOTE_STRING_END, STRING_TEXT])
-
- COMMENT_TYPES = frozenset([
- START_SINGLE_LINE_COMMENT, COMMENT,
- START_BLOCK_COMMENT, START_DOC_COMMENT,
- END_BLOCK_COMMENT, END_DOC_COMMENT,
- DOC_START_BRACE, DOC_END_BRACE,
- DOC_FLAG, DOC_INLINE_FLAG, DOC_PREFIX,
- DOC_TYPE_START_BLOCK, DOC_TYPE_END_BLOCK, DOC_TYPE_MODIFIER])
-
- FLAG_DESCRIPTION_TYPES = frozenset([
- DOC_INLINE_FLAG, COMMENT, DOC_START_BRACE, DOC_END_BRACE,
- DOC_TYPE_START_BLOCK, DOC_TYPE_END_BLOCK, DOC_TYPE_MODIFIER])
-
- FLAG_ENDING_TYPES = frozenset([DOC_FLAG, END_DOC_COMMENT])
-
- NON_CODE_TYPES = COMMENT_TYPES | frozenset([
- tokens.TokenType.WHITESPACE, tokens.TokenType.BLANK_LINE])
-
- UNARY_OPERATORS = ['!', 'new', 'delete', 'typeof', 'void']
-
- UNARY_OK_OPERATORS = ['--', '++', '-', '+'] + UNARY_OPERATORS
-
- UNARY_POST_OPERATORS = ['--', '++']
-
- # An expression ender is any token that can end an object - i.e. we could have
- # x.y or [1, 2], or (10 + 9) or {a: 10}.
- EXPRESSION_ENDER_TYPES = [tokens.TokenType.NORMAL, IDENTIFIER, NUMBER,
- SIMPLE_LVALUE, END_BRACKET, END_PAREN, END_BLOCK,
- SINGLE_QUOTE_STRING_END, DOUBLE_QUOTE_STRING_END]
-
-
-class JavaScriptToken(tokens.Token):
- """JavaScript token subclass of Token, provides extra instance checks.
-
- The following token types have data in attached_object:
- - All JsDoc flags: a parser.JsDocFlag object.
- """
-
- def IsKeyword(self, keyword):
- """Tests if this token is the given keyword.
-
- Args:
- keyword: The keyword to compare to.
-
- Returns:
- True if this token is a keyword token with the given name.
- """
- return self.type == JavaScriptTokenType.KEYWORD and self.string == keyword
-
- def IsOperator(self, operator):
- """Tests if this token is the given operator.
-
- Args:
- operator: The operator to compare to.
-
- Returns:
- True if this token is a operator token with the given name.
- """
- return self.type == JavaScriptTokenType.OPERATOR and self.string == operator
-
- def IsAssignment(self):
- """Tests if this token is an assignment operator.
-
- Returns:
- True if this token is an assignment operator.
- """
- return (self.type == JavaScriptTokenType.OPERATOR and
- self.string.endswith('=') and
- self.string not in ('==', '!=', '>=', '<=', '===', '!=='))
-
- def IsComment(self):
- """Tests if this token is any part of a comment.
-
- Returns:
- True if this token is any part of a comment.
- """
- return self.type in JavaScriptTokenType.COMMENT_TYPES
-
- def IsCode(self):
- """Tests if this token is code, as opposed to a comment or whitespace."""
- return self.type not in JavaScriptTokenType.NON_CODE_TYPES
-
- def __repr__(self):
- return '<JavaScriptToken: %d, %s, "%s", %r, %r>' % (self.line_number,
- self.type, self.string,
- self.values,
- self.metadata)
diff --git a/tools/closure_linter/closure_linter/not_strict_test.py b/tools/closure_linter/closure_linter/not_strict_test.py
deleted file mode 100755
index c92c13ee03..0000000000
--- a/tools/closure_linter/closure_linter/not_strict_test.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tests for gjslint --nostrict.
-
-Tests errors that can be thrown by gjslint when not in strict mode.
-"""
-
-
-
-import os
-import sys
-import unittest
-
-import gflags as flags
-import unittest as googletest
-
-from closure_linter import errors
-from closure_linter import runner
-from closure_linter.common import filetestcase
-
-_RESOURCE_PREFIX = 'closure_linter/testdata'
-
-flags.FLAGS.strict = False
-flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
-flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
-flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
- 'limited_doc_checks.js')
-
-
-# List of files under testdata to test.
-# We need to list files explicitly since pyglib can't list directories.
-_TEST_FILES = [
- 'not_strict.js'
- ]
-
-
-class GJsLintTestSuite(unittest.TestSuite):
- """Test suite to run a GJsLintTest for each of several files.
-
- If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
- testdata to test. Otherwise, _TEST_FILES is used.
- """
-
- def __init__(self, tests=()):
- unittest.TestSuite.__init__(self, tests)
-
- argv = sys.argv and sys.argv[1:] or []
- if argv:
- test_files = argv
- else:
- test_files = _TEST_FILES
- for test_file in test_files:
- resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
- self.addTest(filetestcase.AnnotatedFileTestCase(resource_path,
- runner.Run,
- errors.ByName))
-
-if __name__ == '__main__':
- # Don't let main parse args; it happens in the TestSuite.
- googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')
diff --git a/tools/closure_linter/closure_linter/requireprovidesorter.py b/tools/closure_linter/closure_linter/requireprovidesorter.py
deleted file mode 100755
index e7e08a13c2..0000000000
--- a/tools/closure_linter/closure_linter/requireprovidesorter.py
+++ /dev/null
@@ -1,329 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Contains logic for sorting goog.provide and goog.require statements.
-
-Closurized JavaScript files use goog.provide and goog.require statements at the
-top of the file to manage dependencies. These statements should be sorted
-alphabetically, however, it is common for them to be accompanied by inline
-comments or suppression annotations. In order to sort these statements without
-disrupting their comments and annotations, the association between statements
-and comments/annotations must be maintained while sorting.
-
- RequireProvideSorter: Handles checking/fixing of provide/require statements.
-"""
-
-
-
-from closure_linter import javascripttokens
-from closure_linter import tokenutil
-
# Shorthand
Type = javascripttokens.JavaScriptTokenType


class RequireProvideSorter(object):
  """Checks for and fixes alphabetization of provide and require statements.

  When alphabetizing, comments on the same line or comments directly above a
  goog.provide or goog.require statement are associated with that statement and
  stay with the statement as it gets sorted.
  """

  def CheckProvides(self, token):
    """Checks alphabetization of goog.provide statements.

    Iterates over tokens in given token stream, identifies goog.provide tokens,
    and checks that they occur in alphabetical order by the object being
    provided.

    Args:
      token: A token in the token stream before any goog.provide tokens.

    Returns:
      The first provide token in the token stream.

      None is returned if all goog.provide statements are already sorted.
    """
    provide_tokens = self._GetRequireOrProvideTokens(token, 'goog.provide')
    provide_strings = self._GetRequireOrProvideTokenStrings(provide_tokens)
    # Out of order iff the provided names differ from their sorted order.
    sorted_provide_strings = sorted(provide_strings)
    if provide_strings != sorted_provide_strings:
      return provide_tokens[0]
    return None

  def CheckRequires(self, token):
    """Checks alphabetization of goog.require statements.

    Iterates over tokens in given token stream, identifies goog.require tokens,
    and checks that they occur in alphabetical order by the dependency being
    required.

    Args:
      token: A token in the token stream before any goog.require tokens.

    Returns:
      The first require token in the token stream.

      None is returned if all goog.require statements are already sorted.
    """
    require_tokens = self._GetRequireOrProvideTokens(token, 'goog.require')
    require_strings = self._GetRequireOrProvideTokenStrings(require_tokens)
    # Out of order iff the required names differ from their sorted order.
    sorted_require_strings = sorted(require_strings)
    if require_strings != sorted_require_strings:
      return require_tokens[0]
    return None

  def FixProvides(self, token):
    """Sorts goog.provide statements in the given token stream alphabetically.

    Args:
      token: The first token in the token stream.
    """
    self._FixProvidesOrRequires(
        self._GetRequireOrProvideTokens(token, 'goog.provide'))

  def FixRequires(self, token):
    """Sorts goog.require statements in the given token stream alphabetically.

    Args:
      token: The first token in the token stream.
    """
    self._FixProvidesOrRequires(
        self._GetRequireOrProvideTokens(token, 'goog.require'))

  def _FixProvidesOrRequires(self, tokens):
    """Sorts goog.provide or goog.require statements.

    Args:
      tokens: A list of goog.provide or goog.require tokens in the order they
              appear in the token stream. i.e. the first token in this list must
              be the first goog.provide or goog.require token.
    """
    strings = self._GetRequireOrProvideTokenStrings(tokens)
    sorted_strings = sorted(strings)

    # Make a separate pass to remove any blank lines between goog.require/
    # goog.provide tokens.  Walk backwards so deletions don't disturb the
    # part of the stream still to be visited.
    first_token = tokens[0]
    last_token = tokens[-1]
    i = last_token
    while i != first_token and i is not None:
      if i.type is Type.BLANK_LINE:
        tokenutil.DeleteToken(i)
      i = i.previous

    # A map from required/provided object name to tokens that make up the line
    # it was on, including any comments immediately before it or after it on the
    # same line.
    tokens_map = self._GetTokensMap(tokens)

    # Iterate over the map removing all tokens.
    for name in tokens_map:
      tokens_to_delete = tokens_map[name]
      for i in tokens_to_delete:
        tokenutil.DeleteToken(i)

    # Save token to rest of file. Sorted token will be inserted before this.
    # strings[-1] is the name on the last statement line in stream order, so
    # its last associated token marks the end of the whole statement group.
    rest_of_file = tokens_map[strings[-1]][-1].next

    # Re-add all tokens in the map in alphabetical order.
    insert_after = tokens[0].previous
    for string in sorted_strings:
      for i in tokens_map[string]:
        if rest_of_file:
          tokenutil.InsertTokenBefore(i, rest_of_file)
        else:
          # The statements ran to the end of the file; append instead,
          # advancing the insertion point as we go.
          tokenutil.InsertTokenAfter(i, insert_after)
          insert_after = i

  def _GetRequireOrProvideTokens(self, token, token_string):
    """Gets all goog.provide or goog.require tokens in the given token stream.

    Args:
      token: The first token in the token stream.
      token_string: One of 'goog.provide' or 'goog.require' to indicate which
                    tokens to find.

    Returns:
      A list of goog.provide or goog.require tokens in the order they appear in
      the token stream.
    """
    tokens = []
    while token:
      if token.type == Type.IDENTIFIER:
        if token.string == token_string:
          tokens.append(token)
        elif token.string not in [
            'goog.provide', 'goog.require', 'goog.setTestOnly']:
          # These 3 identifiers are at the top of the file. So if any other
          # identifier is encountered, return.
          # TODO(user): Once it's decided what ordering goog.require
          # should use, add 'goog.module' to the list above and implement the
          # decision.
          break
      token = token.next

    return tokens

  def _GetRequireOrProvideTokenStrings(self, tokens):
    """Gets a list of strings corresponding to the given list of tokens.

    The string will be the next string in the token stream after each token in
    tokens. This is used to find the object being provided/required by a given
    goog.provide or goog.require token.

    Args:
      tokens: A list of goog.provide or goog.require tokens.

    Returns:
      A list of object names that are being provided or required by the given
      list of tokens. For example:

      ['object.a', 'object.c', 'object.b']
    """
    token_strings = []
    for token in tokens:
      # Skip tokens already removed from the stream (e.g. by an earlier fix).
      if not token.is_deleted:
        name = tokenutil.GetStringAfterToken(token)
        token_strings.append(name)
    return token_strings

  def _GetTokensMap(self, tokens):
    """Gets a map from object name to tokens associated with that object.

    Starting from the goog.provide/goog.require token, searches backwards in the
    token stream for any lines that start with a comment. These lines are
    associated with the goog.provide/goog.require token. Also associates any
    tokens on the same line as the goog.provide/goog.require token with that
    token.

    Args:
      tokens: A list of goog.provide or goog.require tokens.

    Returns:
      A dictionary that maps object names to the tokens associated with the
      goog.provide or goog.require of that object name. For example:

      {
        'object.a': [JavaScriptToken, JavaScriptToken, ...],
        'object.b': [...]
      }

      The list of tokens includes any comment lines above the goog.provide or
      goog.require statement and everything after the statement on the same
      line. For example, all of the following would be associated with
      'object.a':

      /** @suppress {extraRequire} */
      goog.require('object.a'); // Some comment.
    """
    tokens_map = {}
    for token in tokens:
      object_name = tokenutil.GetStringAfterToken(token)
      # If the previous line starts with a comment, presume that the comment
      # relates to the goog.require or goog.provide and keep them together when
      # sorting.
      first_token = token
      previous_first_token = tokenutil.GetFirstTokenInPreviousLine(first_token)
      while (previous_first_token and
             previous_first_token.IsAnyType(Type.COMMENT_TYPES)):
        first_token = previous_first_token
        previous_first_token = tokenutil.GetFirstTokenInPreviousLine(
            first_token)

      # Find the last token on the line.
      last_token = tokenutil.GetLastTokenInSameLine(token)

      all_tokens = self._GetTokenList(first_token, last_token)
      tokens_map[object_name] = all_tokens
    return tokens_map

  def _GetTokenList(self, first_token, last_token):
    """Gets a list of all tokens from first_token to last_token, inclusive.

    Args:
      first_token: The first token to get.
      last_token: The last token to get.

    Returns:
      A list of all tokens between first_token and last_token, including both
      first_token and last_token.

    Raises:
      Exception: If the token stream ends before last_token is reached.
    """
    token_list = []
    token = first_token
    while token != last_token:
      if not token:
        raise Exception('ran out of tokens')
      token_list.append(token)
      token = token.next
    token_list.append(last_token)

    return token_list

  def GetFixedRequireString(self, token):
    """Get fixed/sorted order of goog.require statements.

    Args:
      token: The first token in the token stream.

    Returns:
      A string for correct sorted order of goog.require.
    """
    return self._GetFixedRequireOrProvideString(
        self._GetRequireOrProvideTokens(token, 'goog.require'))

  def GetFixedProvideString(self, token):
    """Get fixed/sorted order of goog.provide statements.

    Args:
      token: The first token in the token stream.

    Returns:
      A string for correct sorted order of goog.provide.
    """
    return self._GetFixedRequireOrProvideString(
        self._GetRequireOrProvideTokens(token, 'goog.provide'))

  def _GetFixedRequireOrProvideString(self, tokens):
    """Sorts goog.provide or goog.require statements.

    Args:
      tokens: A list of goog.provide or goog.require tokens in the order they
              appear in the token stream. i.e. the first token in this list must
              be the first goog.provide or goog.require token.

    Returns:
      A string for sorted goog.require or goog.provide statements
    """

    # A map from required/provided object name to tokens that make up the line
    # it was on, including any comments immediately before it or after it on the
    # same line.
    tokens_map = self._GetTokensMap(tokens)
    sorted_strings = sorted(tokens_map.keys())

    # Render each statement's tokens in sorted-name order, re-inserting a
    # newline wherever a token ended its original line.
    new_order = ''
    for string in sorted_strings:
      for i in tokens_map[string]:
        new_order += i.string
        if i.IsLastInLine():
          new_order += '\n'

    return new_order
diff --git a/tools/closure_linter/closure_linter/requireprovidesorter_test.py b/tools/closure_linter/closure_linter/requireprovidesorter_test.py
deleted file mode 100644
index fecb6d04da..0000000000
--- a/tools/closure_linter/closure_linter/requireprovidesorter_test.py
+++ /dev/null
@@ -1,155 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unit tests for RequireProvideSorter."""
-
-
-
-import unittest as googletest
-from closure_linter import javascripttokens
-from closure_linter import requireprovidesorter
-from closure_linter import testutil
-
# pylint: disable=g-bad-name
TokenType = javascripttokens.JavaScriptTokenType


class RequireProvideSorterTest(googletest.TestCase):
  """Tests for RequireProvideSorter."""

  def testGetFixedProvideString(self):
    """Tests that fixed string contains proper comments also."""
    input_lines = [
        'goog.provide(\'package.xyz\');',
        '/** @suppress {extraprovide} **/',
        'goog.provide(\'package.abcd\');'
    ]

    # The comment above package.abcd must travel with it to the front.
    expected_lines = [
        '/** @suppress {extraprovide} **/',
        'goog.provide(\'package.abcd\');',
        'goog.provide(\'package.xyz\');'
    ]

    token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)

    sorter = requireprovidesorter.RequireProvideSorter()
    fixed_provide_string = sorter.GetFixedProvideString(token)

    self.assertEquals(expected_lines, fixed_provide_string.splitlines())

  def testGetFixedRequireString(self):
    """Tests that fixed string contains proper comments also."""
    input_lines = [
        'goog.require(\'package.xyz\');',
        '/** This is needed for scope. **/',
        'goog.require(\'package.abcd\');'
    ]

    # The comment above package.abcd must travel with it to the front.
    expected_lines = [
        '/** This is needed for scope. **/',
        'goog.require(\'package.abcd\');',
        'goog.require(\'package.xyz\');'
    ]

    token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)

    sorter = requireprovidesorter.RequireProvideSorter()
    fixed_require_string = sorter.GetFixedRequireString(token)

    self.assertEquals(expected_lines, fixed_require_string.splitlines())

  def testFixRequires_removeBlankLines(self):
    """Tests that blank lines are omitted in sorted goog.require statements."""
    input_lines = [
        'goog.provide(\'package.subpackage.Whatever\');',
        '',
        'goog.require(\'package.subpackage.ClassB\');',
        '',
        'goog.require(\'package.subpackage.ClassA\');'
    ]
    # Blank lines between requires vanish; the one after the provide stays.
    expected_lines = [
        'goog.provide(\'package.subpackage.Whatever\');',
        '',
        'goog.require(\'package.subpackage.ClassA\');',
        'goog.require(\'package.subpackage.ClassB\');'
    ]
    token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)

    sorter = requireprovidesorter.RequireProvideSorter()
    sorter.FixRequires(token)

    self.assertEquals(expected_lines, self._GetLines(token))

  def fixRequiresTest_withTestOnly(self, position):
    """Regression-tests sorting even with a goog.setTestOnly statement.

    Args:
      position: The position in the list where to insert the goog.setTestOnly
                statement. Will be used to test all possible combinations for
                this test.
    """
    input_lines = [
        'goog.provide(\'package.subpackage.Whatever\');',
        '',
        'goog.require(\'package.subpackage.ClassB\');',
        'goog.require(\'package.subpackage.ClassA\');'
    ]
    expected_lines = [
        'goog.provide(\'package.subpackage.Whatever\');',
        '',
        'goog.require(\'package.subpackage.ClassA\');',
        'goog.require(\'package.subpackage.ClassB\');'
    ]
    # goog.setTestOnly goes at the same index in both lists so only the
    # require ordering should differ after fixing.
    input_lines.insert(position, 'goog.setTestOnly();')
    expected_lines.insert(position, 'goog.setTestOnly();')

    token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)

    sorter = requireprovidesorter.RequireProvideSorter()
    sorter.FixRequires(token)

    self.assertEquals(expected_lines, self._GetLines(token))

  def testFixRequires_withTestOnly(self):
    """Regression-tests sorting even after a goog.setTestOnly statement."""

    # goog.setTestOnly at first line.
    self.fixRequiresTest_withTestOnly(position=0)

    # goog.setTestOnly after goog.provide.
    self.fixRequiresTest_withTestOnly(position=1)

    # goog.setTestOnly before goog.require.
    self.fixRequiresTest_withTestOnly(position=2)

    # goog.setTestOnly after goog.require.
    self.fixRequiresTest_withTestOnly(position=4)

  def _GetLines(self, token):
    """Returns an array of lines based on the specified token stream."""
    lines = []
    line = ''
    while token:
      line += token.string
      if token.IsLastInLine():
        lines.append(line)
        line = ''
      token = token.next
    return lines
-
# Allow running the tests directly from the command line.
if __name__ == '__main__':
  googletest.main()
diff --git a/tools/closure_linter/closure_linter/runner.py b/tools/closure_linter/closure_linter/runner.py
deleted file mode 100644
index 04e7fa4ac8..0000000000
--- a/tools/closure_linter/closure_linter/runner.py
+++ /dev/null
@@ -1,198 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Main lint function. Tokenizes file, runs passes, and feeds to checker."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = 'nnaze@google.com (Nathan Naze)'
-
-import traceback
-
-import gflags as flags
-
-from closure_linter import checker
-from closure_linter import ecmalintrules
-from closure_linter import ecmametadatapass
-from closure_linter import error_check
-from closure_linter import errors
-from closure_linter import javascriptstatetracker
-from closure_linter import javascripttokenizer
-
-from closure_linter.common import error
-from closure_linter.common import htmlutil
-from closure_linter.common import tokens
-
-flags.DEFINE_list('limited_doc_files', ['dummy.js', 'externs.js'],
- 'List of files with relaxed documentation checks. Will not '
- 'report errors for missing documentation, some missing '
- 'descriptions, or methods whose @return tags don\'t have a '
- 'matching return statement.')
-flags.DEFINE_boolean('error_trace', False,
- 'Whether to show error exceptions.')
-flags.ADOPT_module_key_flags(checker)
-flags.ADOPT_module_key_flags(ecmalintrules)
-flags.ADOPT_module_key_flags(error_check)
-
-
def _GetLastNonWhiteSpaceToken(start_token):
  """Return the last non-whitespace token in a token stream, or None."""
  skip_types = frozenset(
      [tokens.TokenType.WHITESPACE, tokens.TokenType.BLANK_LINE])

  # Remember the most recent token that is not whitespace/blank-line; the
  # value left after the full scan is the last such token.
  last_seen = None
  for current in start_token:
    if current.type not in skip_types:
      last_seen = current
  return last_seen
-
-
-def _IsHtml(filename):
- return filename.endswith('.html') or filename.endswith('.htm')
-
-
def _Tokenize(fileobj):
  """Tokenize a file.

  Args:
    fileobj: file-like object (or iterable lines) with the source.

  Returns:
    The first token in the token stream and the ending mode of the tokenizer.
  """
  tokenizer = javascripttokenizer.JavaScriptTokenizer()
  start_token = tokenizer.TokenizeFile(fileobj)
  # The final mode lets the caller detect input that ended mid-block; Run()
  # reports FILE_IN_BLOCK when this is not TEXT_MODE.
  return start_token, tokenizer.mode
-
-
-def _IsLimitedDocCheck(filename, limited_doc_files):
- """Whether this this a limited-doc file.
-
- Args:
- filename: The filename.
- limited_doc_files: Iterable of strings. Suffixes of filenames that should
- be limited doc check.
-
- Returns:
- Whether the file should be limited check.
- """
- for limited_doc_filename in limited_doc_files:
- if filename.endswith(limited_doc_filename):
- return True
- return False
-
-
def Run(filename, error_handler, source=None):
  """Tokenize, run passes, and check the given file.

  Args:
    filename: The path of the file to check
    error_handler: The error handler to report errors to.
    source: A file-like object with the file source. If omitted, the file will
        be read from the filename path.
  """
  if not source:
    try:
      source = open(filename)
    except IOError:
      # Report the missing file as a lint error and bail out early.
      error_handler.HandleFile(filename, None)
      error_handler.HandleError(
          error.Error(errors.FILE_NOT_FOUND, 'File not found'))
      error_handler.FinishFile()
      return
  # NOTE(review): the file object opened above is never explicitly closed;
  # presumably relying on GC -- consider a try/finally or 'with'.

  if _IsHtml(filename):
    # Lint only the <script> content of HTML files.
    source_file = htmlutil.GetScriptLines(source)
  else:
    source_file = source

  token, tokenizer_mode = _Tokenize(source_file)

  error_handler.HandleFile(filename, token)

  # If we did not end in the basic mode, this a failed parse.
  if tokenizer_mode is not javascripttokenizer.JavaScriptModes.TEXT_MODE:
    error_handler.HandleError(
        error.Error(errors.FILE_IN_BLOCK,
                    'File ended in mode "%s".' % tokenizer_mode,
                    _GetLastNonWhiteSpaceToken(token)))

  # Run the ECMA pass
  error_token = None

  ecma_pass = ecmametadatapass.EcmaMetaDataPass()
  error_token = RunMetaDataPass(token, ecma_pass, error_handler, filename)

  is_limited_doc_check = (
      _IsLimitedDocCheck(filename, flags.FLAGS.limited_doc_files))

  # If the metadata pass failed, error_token makes the checker stop at the
  # offending token rather than checking the unparseable remainder.
  _RunChecker(token, error_handler,
              is_limited_doc_check,
              is_html=_IsHtml(filename),
              stop_token=error_token)

  error_handler.FinishFile()
-
-
def RunMetaDataPass(start_token, metadata_pass, error_handler, filename=''):
  """Run a metadata pass over a token stream.

  Args:
    start_token: The first token in a token stream.
    metadata_pass: Metadata pass to run.
    error_handler: The error handler to report errors to.
    filename: Filename of the source.

  Returns:
    The token where the error occurred (if any).
  """

  try:
    metadata_pass.Process(start_token)
  except ecmametadatapass.ParseError, parse_err:
    # A structured parse failure: report it against the offending token and
    # hand that token back so the caller can stop checking there.
    if flags.FLAGS.error_trace:
      traceback.print_exc()
    error_token = parse_err.token
    error_msg = str(parse_err)
    error_handler.HandleError(
        error.Error(errors.FILE_DOES_NOT_PARSE,
                    ('Error parsing file at token "%s". Unable to '
                     'check the rest of file.'
                     '\nError "%s"' % (error_token, error_msg)), error_token))
    return error_token
  except Exception:  # pylint: disable=broad-except
    # Unexpected failure inside the pass itself: report it against the file;
    # falls through and returns None, so checking continues from the start.
    traceback.print_exc()
    error_handler.HandleError(
        error.Error(
            errors.FILE_DOES_NOT_PARSE,
            'Internal error in %s' % filename))
-
-
def _RunChecker(start_token, error_handler,
                limited_doc_checks, is_html,
                stop_token=None):
  """Run the JavaScript style checker over a token stream.

  Builds a fresh state tracker and checker, then checks from start_token,
  optionally stopping at stop_token (e.g. where a parse error occurred).
  """
  tracker = javascriptstatetracker.JavaScriptStateTracker()

  js_checker = checker.JavaScriptStyleChecker(
      state_tracker=tracker,
      error_handler=error_handler)

  js_checker.Check(start_token,
                   is_html=is_html,
                   limited_doc_checks=limited_doc_checks,
                   stop_token=stop_token)
diff --git a/tools/closure_linter/closure_linter/runner_test.py b/tools/closure_linter/closure_linter/runner_test.py
deleted file mode 100644
index da5857d309..0000000000
--- a/tools/closure_linter/closure_linter/runner_test.py
+++ /dev/null
@@ -1,101 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unit tests for the runner module."""
-
-__author__ = ('nnaze@google.com (Nathan Naze)')
-
-import StringIO
-
-
-import mox
-
-
-import unittest as googletest
-
-from closure_linter import errors
-from closure_linter import runner
-from closure_linter.common import error
-from closure_linter.common import errorhandler
-from closure_linter.common import tokens
-
-
class LimitedDocTest(googletest.TestCase):
  """Tests for runner._IsLimitedDocCheck suffix matching."""

  def testIsLimitedDocCheck(self):
    # A file matches when its name ends with one of the given suffixes.
    self.assertTrue(runner._IsLimitedDocCheck('foo_test.js', ['_test.js']))
    self.assertFalse(runner._IsLimitedDocCheck('foo_bar.js', ['_test.js']))

    # Any one matching suffix from the list is sufficient.
    self.assertTrue(runner._IsLimitedDocCheck(
        'foo_moo.js', ['moo.js', 'quack.js']))
    self.assertFalse(runner._IsLimitedDocCheck(
        'foo_moo.js', ['woof.js', 'quack.js']))
-
-
class RunnerTest(googletest.TestCase):
  """End-to-end tests for runner.Run using a mocked error handler."""

  def setUp(self):
    self.mox = mox.Mox()

  def testRunOnMissingFile(self):
    mock_error_handler = self.mox.CreateMock(errorhandler.ErrorHandler)

    def ValidateError(err):
      # The reported error must be FILE_NOT_FOUND with no associated token.
      return (isinstance(err, error.Error) and
              err.code is errors.FILE_NOT_FOUND and
              err.token is None)

    # Record the expected handler call sequence; VerifyAll() checks it.
    mock_error_handler.HandleFile('does_not_exist.js', None)
    mock_error_handler.HandleError(mox.Func(ValidateError))
    mock_error_handler.FinishFile()

    self.mox.ReplayAll()

    runner.Run('does_not_exist.js', mock_error_handler)

    self.mox.VerifyAll()

  def testBadTokenization(self):
    mock_error_handler = self.mox.CreateMock(errorhandler.ErrorHandler)

    def ValidateError(err):
      # The unterminated comment should surface as FILE_IN_BLOCK ending
      # at the final '}' token.
      return (isinstance(err, error.Error) and
              err.code is errors.FILE_IN_BLOCK and
              err.token.string == '}')

    # Record the expected handler call sequence; VerifyAll() checks it.
    mock_error_handler.HandleFile('foo.js', mox.IsA(tokens.Token))
    mock_error_handler.HandleError(mox.Func(ValidateError))
    mock_error_handler.HandleError(mox.IsA(error.Error))
    mock_error_handler.FinishFile()

    self.mox.ReplayAll()

    source = StringIO.StringIO(_BAD_TOKENIZATION_SCRIPT)
    runner.Run('foo.js', mock_error_handler, source)

    self.mox.VerifyAll()
-
-
# A deliberately broken script: the /* comment is never closed, so the
# tokenizer finishes inside a comment block (used by testBadTokenization).
_BAD_TOKENIZATION_SCRIPT = """
function foo () {
  var a = 3;
  var b = 2;
  return b + a; /* Comment not closed
}
"""


if __name__ == '__main__':
  googletest.main()
diff --git a/tools/closure_linter/closure_linter/scopeutil.py b/tools/closure_linter/closure_linter/scopeutil.py
deleted file mode 100644
index a7ca9b630a..0000000000
--- a/tools/closure_linter/closure_linter/scopeutil.py
+++ /dev/null
@@ -1,206 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tools to match goog.scope alias statements."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = ('nnaze@google.com (Nathan Naze)')
-
-import itertools
-
-from closure_linter import ecmametadatapass
-from closure_linter import tokenutil
-from closure_linter.javascripttokens import JavaScriptTokenType
-
-
-
def IsGoogScopeBlock(context):
  """Whether the given context is a goog.scope block.

  This function only checks that the block is a function block inside
  a goog.scope() call.

  TODO(nnaze): Implement goog.scope checks that verify the call is
  in the root context and contains only a single function literal.

  Args:
    context: An EcmaContext of type block.

  Returns:
    Whether the context is a goog.scope block.
  """

  # Must be a block that is the body of a parameterless function literal.
  if (context.type != ecmametadatapass.EcmaContext.BLOCK or
      not _IsFunctionLiteralBlock(context)):
    return False

  # That function must be contained by a group of form "goog.scope(...)".
  enclosing = context.parent
  if not enclosing or enclosing.type is not ecmametadatapass.EcmaContext.GROUP:
    return False

  # The code token just before the group's start must be the identifier
  # 'goog.scope', i.e. the call target.
  preceding_code = enclosing.start_token.metadata.last_code
  return bool(preceding_code and
              preceding_code.type is JavaScriptTokenType.IDENTIFIER and
              preceding_code.string == 'goog.scope')
-
-
def _IsFunctionLiteralBlock(block_context):
  """Check if a context is a function literal block (without parameters).

  Example function literal block: 'function() {}'

  Args:
    block_context: An EcmaContext of type block.

  Returns:
    Whether this context is a function literal block.
  """

  # Walk backwards from the block's start token, skipping non-code tokens
  # (comments, whitespace, etc.).  The filter must test the token's *type*:
  # previously it compared the token object itself against NON_CODE_TYPES,
  # which never matches, so non-code tokens were not actually skipped.
  previous_code_tokens_iter = itertools.ifilter(
      lambda token: token.type not in JavaScriptTokenType.NON_CODE_TYPES,
      reversed(block_context.start_token))

  # Ignore the current token
  next(previous_code_tokens_iter, None)

  # Grab the previous three tokens and put them in correct order.
  previous_code_tokens = list(itertools.islice(previous_code_tokens_iter, 3))
  previous_code_tokens.reverse()

  # There aren't three previous tokens.  (Use '!=', not 'is not': identity
  # comparison with an int literal is a CPython implementation accident.)
  if len(previous_code_tokens) != 3:
    return False

  # Check that the previous three code tokens are "function ()"
  previous_code_token_types = [token.type for token in previous_code_tokens]
  return previous_code_token_types == [
      JavaScriptTokenType.FUNCTION_DECLARATION,
      JavaScriptTokenType.START_PARAMETERS,
      JavaScriptTokenType.END_PARAMETERS]
-
-
def IsInClosurizedNamespace(symbol, closurized_namespaces):
  """Match a goog.scope alias.

  Args:
    symbol: An identifier like 'goog.events.Event'.
    closurized_namespaces: Iterable of valid Closurized namespaces (strings).

  Returns:
    True if symbol is an identifier in a Closurized namespace, otherwise False.
  """
  # A bare namespace name ('goog') does not count; only dotted members do.
  return any(symbol.startswith(namespace + '.')
             for namespace in closurized_namespaces)
-
-
def _GetVarAssignmentTokens(context):
  """Returns the tokens from context if it is a var assignment.

  Args:
    context: An EcmaContext.

  Returns:
    If a var assignment, the tokens contained within it w/o the trailing
    semicolon.
  """
  if context.type != ecmametadatapass.EcmaContext.VAR:
    return

  # Get the tokens in this statement.
  if context.start_token and context.end_token:
    statement_tokens = tokenutil.GetTokenRange(context.start_token,
                                               context.end_token)
  else:
    return

  # And now just those tokens that are actually code.
  # NOTE: under Python 2, filter() returns a list, which the .pop() and
  # indexing below depend on.
  is_non_code_type = lambda t: t.type not in JavaScriptTokenType.NON_CODE_TYPES
  code_tokens = filter(is_non_code_type, statement_tokens)

  # Pop off the semicolon if present.
  if code_tokens and code_tokens[-1].IsType(JavaScriptTokenType.SEMICOLON):
    code_tokens.pop()

  # Shortest valid assignment is 4 tokens: var <lvalue> = <rhs token>.
  if len(code_tokens) < 4:
    return

  if (code_tokens[0].IsKeyword('var') and
      code_tokens[1].IsType(JavaScriptTokenType.SIMPLE_LVALUE) and
      code_tokens[2].IsOperator('=')):
    return code_tokens
-
-
def MatchAlias(context):
  """Match an alias statement (some identifier assigned to a variable).

  Example alias: var MyClass = proj.longNamespace.MyClass.

  Args:
    context: An EcmaContext of type EcmaContext.VAR.

  Returns:
    If a valid alias, returns a tuple of alias and symbol, otherwise None.
  """
  assignment_tokens = _GetVarAssignmentTokens(context)
  if assignment_tokens is None:
    return

  # Everything right of '=' must be a plain dotted identifier, as in
  # "var Foo = bar.Foo;"; otherwise this is not an alias.
  if not all(tokenutil.IsIdentifierOrDot(t) for t in assignment_tokens[3:]):
    return

  alias_token = assignment_tokens[1]
  symbol_token = assignment_tokens[3]
  # Mark both tokens as an alias definition to not count them as usages.
  alias_token.metadata.is_alias_definition = True
  symbol_token.metadata.is_alias_definition = True
  return alias_token.string, tokenutil.GetIdentifierForToken(symbol_token)
-
-
def MatchModuleAlias(context):
  """Match an alias statement in a goog.module style import.

  Example alias: var MyClass = goog.require('proj.longNamespace.MyClass').

  Args:
    context: An EcmaContext.

  Returns:
    If a valid alias, returns a tuple of alias and symbol, otherwise None.
  """
  assignment_tokens = _GetVarAssignmentTokens(context)
  if assignment_tokens is None:
    return

  # The right-hand side must begin with the identifier 'goog.require', as in
  # "var Foo = goog.require('bar.Foo');".
  require_token = assignment_tokens[3]
  if (not require_token.IsType(JavaScriptTokenType.IDENTIFIER) or
      require_token.string != 'goog.require'):
    return

  symbol = tokenutil.GetStringAfterToken(require_token)
  if not symbol:
    return

  # Mark the alias token as a definition so it is not counted as a usage.
  alias_token = assignment_tokens[1]
  alias_token.metadata.is_alias_definition = True
  return alias_token.string, symbol
diff --git a/tools/closure_linter/closure_linter/scopeutil_test.py b/tools/closure_linter/closure_linter/scopeutil_test.py
deleted file mode 100644
index 722a953900..0000000000
--- a/tools/closure_linter/closure_linter/scopeutil_test.py
+++ /dev/null
@@ -1,222 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unit tests for the scopeutil module."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = ('nnaze@google.com (Nathan Naze)')
-
-
-import unittest as googletest
-
-from closure_linter import ecmametadatapass
-from closure_linter import scopeutil
-from closure_linter import testutil
-
-
-def _FindContexts(start_token):
- """Depth first search of all contexts referenced by a token stream.
-
- Includes contexts' parents, which might not be directly referenced
- by any token in the stream.
-
- Args:
- start_token: First token in the token stream.
-
- Yields:
- All contexts referenced by this token stream.
- """
-
- seen_contexts = set()
-
- # For each token, yield the context if we haven't seen it before.
- for token in start_token:
-
- token_context = token.metadata.context
- contexts = [token_context]
-
- # Also grab all the context's ancestors.
- parent = token_context.parent
- while parent:
- contexts.append(parent)
- parent = parent.parent
-
- # Yield each of these contexts if we've not seen them.
- for context in contexts:
- if context not in seen_contexts:
- yield context
-
- seen_contexts.add(context)
-
-
-def _FindFirstContextOfType(token, context_type):
- """Returns the first statement context."""
- for context in _FindContexts(token):
- if context.type == context_type:
- return context
-
-
-def _ParseAssignment(script):
- start_token = testutil.TokenizeSourceAndRunEcmaPass(script)
- statement = _FindFirstContextOfType(
- start_token, ecmametadatapass.EcmaContext.VAR)
- return statement
-
-
-class StatementTest(googletest.TestCase):
-
- def assertAlias(self, expected_match, script):
- statement = _ParseAssignment(script)
- match = scopeutil.MatchAlias(statement)
- self.assertEquals(expected_match, match)
-
- def assertModuleAlias(self, expected_match, script):
- statement = _ParseAssignment(script)
- match = scopeutil.MatchModuleAlias(statement)
- self.assertEquals(expected_match, match)
-
- def testSimpleAliases(self):
- self.assertAlias(
- ('foo', 'goog.foo'),
- 'var foo = goog.foo;')
-
- self.assertAlias(
- ('foo', 'goog.foo'),
- 'var foo = goog.foo') # No semicolon
-
- def testAliasWithComment(self):
- self.assertAlias(
- ('Component', 'goog.ui.Component'),
- 'var Component = /* comment */ goog.ui.Component;')
-
- def testMultilineAlias(self):
- self.assertAlias(
- ('Component', 'goog.ui.Component'),
- 'var Component = \n goog.ui.\n Component;')
-
- def testNonSymbolAliasVarStatements(self):
- self.assertAlias(None, 'var foo = 3;')
- self.assertAlias(None, 'var foo = function() {};')
- self.assertAlias(None, 'var foo = bar ? baz : qux;')
-
- def testModuleAlias(self):
- self.assertModuleAlias(
- ('foo', 'goog.foo'),
- 'var foo = goog.require("goog.foo");')
- self.assertModuleAlias(
- None,
- 'var foo = goog.require(notastring);')
-
-
-class ScopeBlockTest(googletest.TestCase):
-
- @staticmethod
- def _GetBlocks(source):
- start_token = testutil.TokenizeSourceAndRunEcmaPass(source)
- for context in _FindContexts(start_token):
- if context.type is ecmametadatapass.EcmaContext.BLOCK:
- yield context
-
- def assertNoBlocks(self, script):
- blocks = list(self._GetBlocks(script))
- self.assertEquals([], blocks)
-
- def testNotBlocks(self):
- # Ensure these are not considered blocks.
- self.assertNoBlocks('goog.scope(if{});')
- self.assertNoBlocks('goog.scope(for{});')
- self.assertNoBlocks('goog.scope(switch{});')
- self.assertNoBlocks('goog.scope(function foo{});')
-
- def testNonScopeBlocks(self):
-
- blocks = list(self._GetBlocks('goog.scope(try{});'))
- self.assertEquals(1, len(blocks))
- self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
-
- blocks = list(self._GetBlocks('goog.scope(function(a,b){});'))
- self.assertEquals(1, len(blocks))
- self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
-
- blocks = list(self._GetBlocks('goog.scope(try{} catch(){});'))
- # Two blocks: try and catch.
- self.assertEquals(2, len(blocks))
- self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
- self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
-
- blocks = list(self._GetBlocks('goog.scope(try{} catch(){} finally {});'))
- self.assertEquals(3, len(blocks))
- self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
- self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
- self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
-
-
-class AliasTest(googletest.TestCase):
-
- def setUp(self):
- self.start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
-
- def testMatchAliasStatement(self):
- matches = set()
- for context in _FindContexts(self.start_token):
- match = scopeutil.MatchAlias(context)
- if match:
- matches.add(match)
-
- self.assertEquals(
- set([('bar', 'baz'),
- ('foo', 'this.foo_'),
- ('Component', 'goog.ui.Component'),
- ('MyClass', 'myproject.foo.MyClass'),
- ('NonClosurizedClass', 'aaa.bbb.NonClosurizedClass')]),
- matches)
-
- def testMatchAliasStatement_withClosurizedNamespaces(self):
-
- closurized_namepaces = frozenset(['goog', 'myproject'])
-
- matches = set()
- for context in _FindContexts(self.start_token):
- match = scopeutil.MatchAlias(context)
- if match:
- unused_alias, symbol = match
- if scopeutil.IsInClosurizedNamespace(symbol, closurized_namepaces):
- matches.add(match)
-
- self.assertEquals(
- set([('MyClass', 'myproject.foo.MyClass'),
- ('Component', 'goog.ui.Component')]),
- matches)
-
-_TEST_SCRIPT = """
-goog.scope(function() {
- var Component = goog.ui.Component; // scope alias
- var MyClass = myproject.foo.MyClass; // scope alias
-
- // Scope alias of non-Closurized namespace.
- var NonClosurizedClass = aaa.bbb.NonClosurizedClass;
-
- var foo = this.foo_; // non-scope object property alias
- var bar = baz; // variable alias
-
- var component = new Component();
-});
-
-"""
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/closure_linter/statetracker.py b/tools/closure_linter/closure_linter/statetracker.py
deleted file mode 100644
index 52e363972f..0000000000
--- a/tools/closure_linter/closure_linter/statetracker.py
+++ /dev/null
@@ -1,1294 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Light weight EcmaScript state tracker that reads tokens and tracks state."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-import re
-
-from closure_linter import javascripttokenizer
-from closure_linter import javascripttokens
-from closure_linter import tokenutil
-from closure_linter import typeannotation
-
-# Shorthand
-Type = javascripttokens.JavaScriptTokenType
-
-
-class DocFlag(object):
- """Generic doc flag object.
-
- Attribute:
- flag_type: param, return, define, type, etc.
- flag_token: The flag token.
- type_start_token: The first token specifying the flag type,
- including braces.
- type_end_token: The last token specifying the flag type,
- including braces.
- type: The type spec string.
- jstype: The type spec, a TypeAnnotation instance.
- name_token: The token specifying the flag name.
- name: The flag name
- description_start_token: The first token in the description.
- description_end_token: The end token in the description.
- description: The description.
- """
-
- # Please keep these lists alphabetized.
-
- # The list of standard jsdoc tags is from
- STANDARD_DOC = frozenset([
- 'author',
- 'bug',
- 'classTemplate',
- 'consistentIdGenerator',
- 'const',
- 'constructor',
- 'define',
- 'deprecated',
- 'dict',
- 'enum',
- 'export',
- 'expose',
- 'extends',
- 'externs',
- 'fileoverview',
- 'idGenerator',
- 'implements',
- 'implicitCast',
- 'interface',
- 'lends',
- 'license',
- 'ngInject', # This annotation is specific to AngularJS.
- 'noalias',
- 'nocompile',
- 'nosideeffects',
- 'override',
- 'owner',
- 'package',
- 'param',
- 'preserve',
- 'private',
- 'protected',
- 'public',
- 'return',
- 'see',
- 'stableIdGenerator',
- 'struct',
- 'supported',
- 'template',
- 'this',
- 'type',
- 'typedef',
- 'unrestricted',
- ])
-
- ANNOTATION = frozenset(['preserveTry', 'suppress'])
-
- LEGAL_DOC = STANDARD_DOC | ANNOTATION
-
- # Includes all Closure Compiler @suppress types.
- # Not all of these annotations are interpreted by Closure Linter.
- #
- # Specific cases:
- # - accessControls is supported by the compiler at the expression
- # and method level to suppress warnings about private/protected
- # access (method level applies to all references in the method).
- # The linter mimics the compiler behavior.
- SUPPRESS_TYPES = frozenset([
- 'accessControls',
- 'ambiguousFunctionDecl',
- 'checkDebuggerStatement',
- 'checkRegExp',
- 'checkStructDictInheritance',
- 'checkTypes',
- 'checkVars',
- 'const',
- 'constantProperty',
- 'deprecated',
- 'duplicate',
- 'es5Strict',
- 'externsValidation',
- 'extraProvide',
- 'extraRequire',
- 'fileoverviewTags',
- 'globalThis',
- 'internetExplorerChecks',
- 'invalidCasts',
- 'missingProperties',
- 'missingProvide',
- 'missingRequire',
- 'missingReturn',
- 'nonStandardJsDocs',
- 'strictModuleDepCheck',
- 'suspiciousCode',
- 'tweakValidation',
- 'typeInvalidation',
- 'undefinedNames',
- 'undefinedVars',
- 'underscore',
- 'unknownDefines',
- 'unnecessaryCasts',
- 'unusedPrivateMembers',
- 'uselessCode',
- 'visibility',
- 'with',
- ])
-
- HAS_DESCRIPTION = frozenset([
- 'define',
- 'deprecated',
- 'desc',
- 'fileoverview',
- 'license',
- 'param',
- 'preserve',
- 'return',
- 'supported',
- ])
-
- # Docflags whose argument should be parsed using the typeannotation parser.
- HAS_TYPE = frozenset([
- 'const',
- 'define',
- 'enum',
- 'extends',
- 'final',
- 'implements',
- 'mods',
- 'package',
- 'param',
- 'private',
- 'protected',
- 'public',
- 'return',
- 'suppress',
- 'type',
- 'typedef',
- ])
-
- # Docflags for which it's ok to omit the type (flag without an argument).
- CAN_OMIT_TYPE = frozenset([
- 'const',
- 'enum',
- 'final',
- 'package',
- 'private',
- 'protected',
- 'public',
- 'suppress', # We'll raise a separate INCORRECT_SUPPRESS_SYNTAX instead.
- ])
-
- # Docflags that only take a type as an argument and should not parse a
- # following description.
- TYPE_ONLY = frozenset([
- 'const',
- 'enum',
- 'extends',
- 'implements',
- 'package',
- 'suppress',
- 'type',
- ])
-
- HAS_NAME = frozenset(['param'])
-
- EMPTY_COMMENT_LINE = re.compile(r'^\s*\*?\s*$')
- EMPTY_STRING = re.compile(r'^\s*$')
-
- def __init__(self, flag_token, error_handler=None):
- """Creates the DocFlag object and attaches it to the given start token.
-
- Args:
- flag_token: The starting token of the flag.
- error_handler: An optional error handler for errors occurring while
- parsing the doctype.
- """
- self.flag_token = flag_token
- self.flag_type = flag_token.string.strip().lstrip('@')
-
- # Extract type, if applicable.
- self.type = None
- self.jstype = None
- self.type_start_token = None
- self.type_end_token = None
- if self.flag_type in self.HAS_TYPE:
- brace = tokenutil.SearchUntil(flag_token, [Type.DOC_START_BRACE],
- Type.FLAG_ENDING_TYPES)
- if brace:
- end_token, contents = _GetMatchingEndBraceAndContents(brace)
- self.type = contents
- self.jstype = typeannotation.Parse(brace, end_token,
- error_handler)
- self.type_start_token = brace
- self.type_end_token = end_token
- elif (self.flag_type in self.TYPE_ONLY and
- flag_token.next.type not in Type.FLAG_ENDING_TYPES and
- flag_token.line_number == flag_token.next.line_number):
- # b/10407058. If the flag is expected to be followed by a type then
- # search for type in same line only. If no token after flag in same
- # line then conclude that no type is specified.
- self.type_start_token = flag_token.next
- self.type_end_token, self.type = _GetEndTokenAndContents(
- self.type_start_token)
- if self.type is not None:
- self.type = self.type.strip()
- self.jstype = typeannotation.Parse(flag_token, self.type_end_token,
- error_handler)
-
- # Extract name, if applicable.
- self.name_token = None
- self.name = None
- if self.flag_type in self.HAS_NAME:
- # Handle bad case, name could be immediately after flag token.
- self.name_token = _GetNextPartialIdentifierToken(flag_token)
-
- # Handle good case, if found token is after type start, look for
- # a identifier (substring to cover cases like [cnt] b/4197272) after
- # type end, since types contain identifiers.
- if (self.type and self.name_token and
- tokenutil.Compare(self.name_token, self.type_start_token) > 0):
- self.name_token = _GetNextPartialIdentifierToken(self.type_end_token)
-
- if self.name_token:
- self.name = self.name_token.string
-
- # Extract description, if applicable.
- self.description_start_token = None
- self.description_end_token = None
- self.description = None
- if self.flag_type in self.HAS_DESCRIPTION:
- search_start_token = flag_token
- if self.name_token and self.type_end_token:
- if tokenutil.Compare(self.type_end_token, self.name_token) > 0:
- search_start_token = self.type_end_token
- else:
- search_start_token = self.name_token
- elif self.name_token:
- search_start_token = self.name_token
- elif self.type:
- search_start_token = self.type_end_token
-
- interesting_token = tokenutil.Search(search_start_token,
- Type.FLAG_DESCRIPTION_TYPES | Type.FLAG_ENDING_TYPES)
- if interesting_token.type in Type.FLAG_DESCRIPTION_TYPES:
- self.description_start_token = interesting_token
- self.description_end_token, self.description = (
- _GetEndTokenAndContents(interesting_token))
-
- def HasType(self):
- """Returns whether this flag should have a type annotation."""
- return self.flag_type in self.HAS_TYPE
-
- def __repr__(self):
- return '<Flag: %s, type:%s>' % (self.flag_type, repr(self.jstype))
-
-
-class DocComment(object):
- """JavaScript doc comment object.
-
- Attributes:
- ordered_params: Ordered list of parameters documented.
- start_token: The token that starts the doc comment.
- end_token: The token that ends the doc comment.
- suppressions: Map of suppression type to the token that added it.
- """
- def __init__(self, start_token):
- """Create the doc comment object.
-
- Args:
- start_token: The first token in the doc comment.
- """
- self.__flags = []
- self.start_token = start_token
- self.end_token = None
- self.suppressions = {}
- self.invalidated = False
-
- @property
- def ordered_params(self):
- """Gives the list of parameter names as a list of strings."""
- params = []
- for flag in self.__flags:
- if flag.flag_type == 'param' and flag.name:
- params.append(flag.name)
- return params
-
- def Invalidate(self):
- """Indicate that the JSDoc is well-formed but we had problems parsing it.
-
- This is a short-circuiting mechanism so that we don't emit false
- positives about well-formed doc comments just because we don't support
- hot new syntaxes.
- """
- self.invalidated = True
-
- def IsInvalidated(self):
- """Test whether Invalidate() has been called."""
- return self.invalidated
-
- def AddSuppression(self, token):
- """Add a new error suppression flag.
-
- Args:
- token: The suppression flag token.
- """
- flag = token and token.attached_object
- if flag and flag.jstype:
- for suppression in flag.jstype.IterIdentifiers():
- self.suppressions[suppression] = token
-
- def SuppressionOnly(self):
- """Returns whether this comment contains only suppression flags."""
- if not self.__flags:
- return False
-
- for flag in self.__flags:
- if flag.flag_type != 'suppress':
- return False
-
- return True
-
- def AddFlag(self, flag):
- """Add a new document flag.
-
- Args:
- flag: DocFlag object.
- """
- self.__flags.append(flag)
-
- def InheritsDocumentation(self):
- """Test if the jsdoc implies documentation inheritance.
-
- Returns:
- True if documentation may be pulled off the superclass.
- """
- return self.HasFlag('inheritDoc') or self.HasFlag('override')
-
- def HasFlag(self, flag_type):
- """Test if the given flag has been set.
-
- Args:
- flag_type: The type of the flag to check.
-
- Returns:
- True if the flag is set.
- """
- for flag in self.__flags:
- if flag.flag_type == flag_type:
- return True
- return False
-
- def GetFlag(self, flag_type):
- """Gets the last flag of the given type.
-
- Args:
- flag_type: The type of the flag to get.
-
- Returns:
- The last instance of the given flag type in this doc comment.
- """
- for flag in reversed(self.__flags):
- if flag.flag_type == flag_type:
- return flag
-
- def GetDocFlags(self):
- """Return the doc flags for this comment."""
- return list(self.__flags)
-
- def _YieldDescriptionTokens(self):
- for token in self.start_token:
-
- if (token is self.end_token or
- token.type is javascripttokens.JavaScriptTokenType.DOC_FLAG or
- token.type not in javascripttokens.JavaScriptTokenType.COMMENT_TYPES):
- return
-
- if token.type not in [
- javascripttokens.JavaScriptTokenType.START_DOC_COMMENT,
- javascripttokens.JavaScriptTokenType.END_DOC_COMMENT,
- javascripttokens.JavaScriptTokenType.DOC_PREFIX]:
- yield token
-
- @property
- def description(self):
- return tokenutil.TokensToString(
- self._YieldDescriptionTokens())
-
- def GetTargetIdentifier(self):
- """Returns the identifier (as a string) that this is a comment for.
-
- Note that this uses method uses GetIdentifierForToken to get the full
- identifier, even if broken up by whitespace, newlines, or comments,
- and thus could be longer than GetTargetToken().string.
-
- Returns:
- The identifier for the token this comment is for.
- """
- token = self.GetTargetToken()
- if token:
- return tokenutil.GetIdentifierForToken(token)
-
- def GetTargetToken(self):
- """Get this comment's target token.
-
- Returns:
- The token that is the target of this comment, or None if there isn't one.
- """
-
- # File overviews describe the file, not a token.
- if self.HasFlag('fileoverview'):
- return
-
- skip_types = frozenset([
- Type.WHITESPACE,
- Type.BLANK_LINE,
- Type.START_PAREN])
-
- target_types = frozenset([
- Type.FUNCTION_NAME,
- Type.IDENTIFIER,
- Type.SIMPLE_LVALUE])
-
- token = self.end_token.next
- while token:
- if token.type in target_types:
- return token
-
- # Handles the case of a comment on "var foo = ...'
- if token.IsKeyword('var'):
- next_code_token = tokenutil.CustomSearch(
- token,
- lambda t: t.type not in Type.NON_CODE_TYPES)
-
- if (next_code_token and
- next_code_token.IsType(Type.SIMPLE_LVALUE)):
- return next_code_token
-
- return
-
- # Handles the case of a comment on "function foo () {}"
- if token.type is Type.FUNCTION_DECLARATION:
- next_code_token = tokenutil.CustomSearch(
- token,
- lambda t: t.type not in Type.NON_CODE_TYPES)
-
- if next_code_token.IsType(Type.FUNCTION_NAME):
- return next_code_token
-
- return
-
- # Skip types will end the search.
- if token.type not in skip_types:
- return
-
- token = token.next
-
- def CompareParameters(self, params):
- """Computes the edit distance and list from the function params to the docs.
-
- Uses the Levenshtein edit distance algorithm, with code modified from
- http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#Python
-
- Args:
- params: The parameter list for the function declaration.
-
- Returns:
- The edit distance, the edit list.
- """
- source_len, target_len = len(self.ordered_params), len(params)
- edit_lists = [[]]
- distance = [[]]
- for i in range(target_len+1):
- edit_lists[0].append(['I'] * i)
- distance[0].append(i)
-
- for j in range(1, source_len+1):
- edit_lists.append([['D'] * j])
- distance.append([j])
-
- for i in range(source_len):
- for j in range(target_len):
- cost = 1
- if self.ordered_params[i] == params[j]:
- cost = 0
-
- deletion = distance[i][j+1] + 1
- insertion = distance[i+1][j] + 1
- substitution = distance[i][j] + cost
-
- edit_list = None
- best = None
- if deletion <= insertion and deletion <= substitution:
- # Deletion is best.
- best = deletion
- edit_list = list(edit_lists[i][j+1])
- edit_list.append('D')
-
- elif insertion <= substitution:
- # Insertion is best.
- best = insertion
- edit_list = list(edit_lists[i+1][j])
- edit_list.append('I')
- edit_lists[i+1].append(edit_list)
-
- else:
- # Substitution is best.
- best = substitution
- edit_list = list(edit_lists[i][j])
- if cost:
- edit_list.append('S')
- else:
- edit_list.append('=')
-
- edit_lists[i+1].append(edit_list)
- distance[i+1].append(best)
-
- return distance[source_len][target_len], edit_lists[source_len][target_len]
-
- def __repr__(self):
- """Returns a string representation of this object.
-
- Returns:
- A string representation of this object.
- """
- return '<DocComment: %s, %s>' % (
- str(self.ordered_params), str(self.__flags))
-
-
-#
-# Helper methods used by DocFlag and DocComment to parse out flag information.
-#
-
-
-def _GetMatchingEndBraceAndContents(start_brace):
- """Returns the matching end brace and contents between the two braces.
-
- If any FLAG_ENDING_TYPE token is encountered before a matching end brace, then
- that token is used as the matching ending token. Contents will have all
- comment prefixes stripped out of them, and all comment prefixes in between the
- start and end tokens will be split out into separate DOC_PREFIX tokens.
-
- Args:
- start_brace: The DOC_START_BRACE token immediately before desired contents.
-
- Returns:
- The matching ending token (DOC_END_BRACE or FLAG_ENDING_TYPE) and a string
- of the contents between the matching tokens, minus any comment prefixes.
- """
- open_count = 1
- close_count = 0
- contents = []
-
- # We don't consider the start brace part of the type string.
- token = start_brace.next
- while open_count != close_count:
- if token.type == Type.DOC_START_BRACE:
- open_count += 1
- elif token.type == Type.DOC_END_BRACE:
- close_count += 1
-
- if token.type != Type.DOC_PREFIX:
- contents.append(token.string)
-
- if token.type in Type.FLAG_ENDING_TYPES:
- break
- token = token.next
-
- #Don't include the end token (end brace, end doc comment, etc.) in type.
- token = token.previous
- contents = contents[:-1]
-
- return token, ''.join(contents)
-
-
-def _GetNextPartialIdentifierToken(start_token):
- """Returns the first token having identifier as substring after a token.
-
- Searches each token after the start to see if it contains an identifier.
- If found, token is returned. If no identifier is found returns None.
- Search is abandoned when a FLAG_ENDING_TYPE token is found.
-
- Args:
- start_token: The token to start searching after.
-
- Returns:
- The token found containing identifier, None otherwise.
- """
- token = start_token.next
-
- while token and token.type not in Type.FLAG_ENDING_TYPES:
- match = javascripttokenizer.JavaScriptTokenizer.IDENTIFIER.search(
- token.string)
- if match is not None and token.type == Type.COMMENT:
- return token
-
- token = token.next
-
- return None
-
-
-def _GetEndTokenAndContents(start_token):
- """Returns last content token and all contents before FLAG_ENDING_TYPE token.
-
- Comment prefixes are split into DOC_PREFIX tokens and stripped from the
- returned contents.
-
- Args:
- start_token: The token immediately before the first content token.
-
- Returns:
- The last content token and a string of all contents including start and
- end tokens, with comment prefixes stripped.
- """
- iterator = start_token
- last_line = iterator.line_number
- last_token = None
- contents = ''
- doc_depth = 0
- while not iterator.type in Type.FLAG_ENDING_TYPES or doc_depth > 0:
- if (iterator.IsFirstInLine() and
- DocFlag.EMPTY_COMMENT_LINE.match(iterator.line)):
- # If we have a blank comment line, consider that an implicit
- # ending of the description. This handles a case like:
- #
- # * @return {boolean} True
- # *
- # * Note: This is a sentence.
- #
- # The note is not part of the @return description, but there was
- # no definitive ending token. Rather there was a line containing
- # only a doc comment prefix or whitespace.
- break
-
- # b/2983692
- # don't prematurely match against a @flag if inside a doc flag
- # need to think about what is the correct behavior for unterminated
- # inline doc flags
- if (iterator.type == Type.DOC_START_BRACE and
- iterator.next.type == Type.DOC_INLINE_FLAG):
- doc_depth += 1
- elif (iterator.type == Type.DOC_END_BRACE and
- doc_depth > 0):
- doc_depth -= 1
-
- if iterator.type in Type.FLAG_DESCRIPTION_TYPES:
- contents += iterator.string
- last_token = iterator
-
- iterator = iterator.next
- if iterator.line_number != last_line:
- contents += '\n'
- last_line = iterator.line_number
-
- end_token = last_token
- if DocFlag.EMPTY_STRING.match(contents):
- contents = None
- else:
- # Strip trailing newline.
- contents = contents[:-1]
-
- return end_token, contents
-
-
-class Function(object):
- """Data about a JavaScript function.
-
- Attributes:
- block_depth: Block depth the function began at.
- doc: The DocComment associated with the function.
- has_return: If the function has a return value.
- has_this: If the function references the 'this' object.
- is_assigned: If the function is part of an assignment.
- is_constructor: If the function is a constructor.
- name: The name of the function, whether given in the function keyword or
- as the lvalue the function is assigned to.
- start_token: First token of the function (the function' keyword token).
- end_token: Last token of the function (the closing '}' token).
- parameters: List of parameter names.
- """
-
- def __init__(self, block_depth, is_assigned, doc, name):
- self.block_depth = block_depth
- self.is_assigned = is_assigned
- self.is_constructor = doc and doc.HasFlag('constructor')
- self.is_interface = doc and doc.HasFlag('interface')
- self.has_return = False
- self.has_throw = False
- self.has_this = False
- self.name = name
- self.doc = doc
- self.start_token = None
- self.end_token = None
- self.parameters = None
-
-
-class StateTracker(object):
- """EcmaScript state tracker.
-
- Tracks block depth, function names, etc. within an EcmaScript token stream.
- """
-
- OBJECT_LITERAL = 'o'
- CODE = 'c'
-
- def __init__(self, doc_flag=DocFlag):
- """Initializes a JavaScript token stream state tracker.
-
- Args:
- doc_flag: An optional custom DocFlag used for validating
- documentation flags.
- """
- self._doc_flag = doc_flag
- self.Reset()
-
- def Reset(self):
- """Resets the state tracker to prepare for processing a new page."""
- self._block_depth = 0
- self._is_block_close = False
- self._paren_depth = 0
- self._function_stack = []
- self._functions_by_name = {}
- self._last_comment = None
- self._doc_comment = None
- self._cumulative_params = None
- self._block_types = []
- self._last_non_space_token = None
- self._last_line = None
- self._first_token = None
- self._documented_identifiers = set()
- self._variables_in_scope = []
-
- def DocFlagPass(self, start_token, error_handler):
- """Parses doc flags.
-
- This pass needs to be executed before the aliaspass and we don't want to do
- a full-blown statetracker dry run for these.
-
- Args:
- start_token: The token at which to start iterating
- error_handler: An error handler for error reporting.
- """
- if not start_token:
- return
- doc_flag_types = (Type.DOC_FLAG, Type.DOC_INLINE_FLAG)
- for token in start_token:
- if token.type in doc_flag_types:
- token.attached_object = self._doc_flag(token, error_handler)
-
- def InFunction(self):
- """Returns true if the current token is within a function.
-
- Returns:
- True if the current token is within a function.
- """
- return bool(self._function_stack)
-
- def InConstructor(self):
- """Returns true if the current token is within a constructor.
-
- Returns:
- True if the current token is within a constructor.
- """
- return self.InFunction() and self._function_stack[-1].is_constructor
-
- def InInterfaceMethod(self):
- """Returns true if the current token is within an interface method.
-
- Returns:
- True if the current token is within an interface method.
- """
- if self.InFunction():
- if self._function_stack[-1].is_interface:
- return True
- else:
- name = self._function_stack[-1].name
- prototype_index = name.find('.prototype.')
- if prototype_index != -1:
- class_function_name = name[0:prototype_index]
- if (class_function_name in self._functions_by_name and
- self._functions_by_name[class_function_name].is_interface):
- return True
-
- return False
-
- def InTopLevelFunction(self):
- """Returns true if the current token is within a top level function.
-
- Returns:
- True if the current token is within a top level function.
- """
- return len(self._function_stack) == 1 and self.InTopLevel()
-
- def InAssignedFunction(self):
- """Returns true if the current token is within a function variable.
-
- Returns:
- True if if the current token is within a function variable
- """
- return self.InFunction() and self._function_stack[-1].is_assigned
-
- def IsFunctionOpen(self):
- """Returns true if the current token is a function block open.
-
- Returns:
- True if the current token is a function block open.
- """
- return (self._function_stack and
- self._function_stack[-1].block_depth == self._block_depth - 1)
-
- def IsFunctionClose(self):
- """Returns true if the current token is a function block close.
-
- Returns:
- True if the current token is a function block close.
- """
- return (self._function_stack and
- self._function_stack[-1].block_depth == self._block_depth)
-
- def InBlock(self):
- """Returns true if the current token is within a block.
-
- Returns:
- True if the current token is within a block.
- """
- return bool(self._block_depth)
-
- def IsBlockClose(self):
- """Returns true if the current token is a block close.
-
- Returns:
- True if the current token is a block close.
- """
- return self._is_block_close
-
- def InObjectLiteral(self):
- """Returns true if the current token is within an object literal.
-
- Returns:
- True if the current token is within an object literal.
- """
- return self._block_depth and self._block_types[-1] == self.OBJECT_LITERAL
-
- def InObjectLiteralDescendant(self):
- """Returns true if the current token has an object literal ancestor.
-
- Returns:
- True if the current token has an object literal ancestor.
- """
- return self.OBJECT_LITERAL in self._block_types
-
- def InParentheses(self):
- """Returns true if the current token is within parentheses.
-
- Returns:
- True if the current token is within parentheses.
- """
- return bool(self._paren_depth)
-
- def ParenthesesDepth(self):
- """Returns the number of parens surrounding the token.
-
- Returns:
- The number of parenthesis surrounding the token.
- """
- return self._paren_depth
-
- def BlockDepth(self):
- """Returns the number of blocks in which the token is nested.
-
- Returns:
- The number of blocks in which the token is nested.
- """
- return self._block_depth
-
- def FunctionDepth(self):
- """Returns the number of functions in which the token is nested.
-
- Returns:
- The number of functions in which the token is nested.
- """
- return len(self._function_stack)
-
- def InTopLevel(self):
- """Whether we are at the top level in the class.
-
- This function call is language specific. In some languages like
- JavaScript, a function is top level if it is not inside any parenthesis.
- In languages such as ActionScript, a function is top level if it is directly
- within a class.
- """
- raise TypeError('Abstract method InTopLevel not implemented')
-
- def GetBlockType(self, token):
- """Determine the block type given a START_BLOCK token.
-
- Code blocks come after parameters, keywords like else, and closing parens.
-
- Args:
- token: The current token. Can be assumed to be type START_BLOCK.
- Returns:
- Code block type for current token.
- """
- raise TypeError('Abstract method GetBlockType not implemented')
-
- def GetParams(self):
- """Returns the accumulated input params as an array.
-
- In some EcmasSript languages, input params are specified like
- (param:Type, param2:Type2, ...)
- in other they are specified just as
- (param, param2)
- We handle both formats for specifying parameters here and leave
- it to the compilers for each language to detect compile errors.
- This allows more code to be reused between lint checkers for various
- EcmaScript languages.
-
- Returns:
- The accumulated input params as an array.
- """
- params = []
- if self._cumulative_params:
- params = re.compile(r'\s+').sub('', self._cumulative_params).split(',')
- # Strip out the type from parameters of the form name:Type.
- params = map(lambda param: param.split(':')[0], params)
-
- return params
-
- def GetLastComment(self):
- """Return the last plain comment that could be used as documentation.
-
- Returns:
- The last plain comment that could be used as documentation.
- """
- return self._last_comment
-
- def GetDocComment(self):
- """Return the most recent applicable documentation comment.
-
- Returns:
- The last applicable documentation comment.
- """
- return self._doc_comment
-
- def HasDocComment(self, identifier):
- """Returns whether the identifier has been documented yet.
-
- Args:
- identifier: The identifier.
-
- Returns:
- Whether the identifier has been documented yet.
- """
- return identifier in self._documented_identifiers
-
- def InDocComment(self):
- """Returns whether the current token is in a doc comment.
-
- Returns:
- Whether the current token is in a doc comment.
- """
- return self._doc_comment and self._doc_comment.end_token is None
-
- def GetDocFlag(self):
- """Returns the current documentation flags.
-
- Returns:
- The current documentation flags.
- """
- return self._doc_flag
-
- def IsTypeToken(self, t):
- if self.InDocComment() and t.type not in (Type.START_DOC_COMMENT,
- Type.DOC_FLAG, Type.DOC_INLINE_FLAG, Type.DOC_PREFIX):
- f = tokenutil.SearchUntil(t, [Type.DOC_FLAG], [Type.START_DOC_COMMENT],
- None, True)
- if (f and f.attached_object.type_start_token is not None and
- f.attached_object.type_end_token is not None):
- return (tokenutil.Compare(t, f.attached_object.type_start_token) > 0 and
- tokenutil.Compare(t, f.attached_object.type_end_token) < 0)
- return False
-
- def GetFunction(self):
- """Return the function the current code block is a part of.
-
- Returns:
- The current Function object.
- """
- if self._function_stack:
- return self._function_stack[-1]
-
- def GetBlockDepth(self):
- """Return the block depth.
-
- Returns:
- The current block depth.
- """
- return self._block_depth
-
- def GetLastNonSpaceToken(self):
- """Return the last non whitespace token."""
- return self._last_non_space_token
-
- def GetLastLine(self):
- """Return the last line."""
- return self._last_line
-
- def GetFirstToken(self):
- """Return the very first token in the file."""
- return self._first_token
-
- def IsVariableInScope(self, token_string):
- """Checks if string is variable in current scope.
-
- For given string it checks whether the string is a defined variable
- (including function param) in current state.
-
- E.g. if variables defined (variables in current scope) is docs
- then docs, docs.length etc will be considered as variable in current
- scope. This will help in avoding extra goog.require for variables.
-
- Args:
- token_string: String to check if its is a variable in current scope.
-
- Returns:
- true if given string is a variable in current scope.
- """
- for variable in self._variables_in_scope:
- if (token_string == variable
- or token_string.startswith(variable + '.')):
- return True
-
- return False
-
- def HandleToken(self, token, last_non_space_token):
- """Handles the given token and updates state.
-
- Args:
- token: The token to handle.
- last_non_space_token:
- """
- self._is_block_close = False
-
- if not self._first_token:
- self._first_token = token
-
- # Track block depth.
- type = token.type
- if type == Type.START_BLOCK:
- self._block_depth += 1
-
- # Subclasses need to handle block start very differently because
- # whether a block is a CODE or OBJECT_LITERAL block varies significantly
- # by language.
- self._block_types.append(self.GetBlockType(token))
-
- # When entering a function body, record its parameters.
- if self.InFunction():
- function = self._function_stack[-1]
- if self._block_depth == function.block_depth + 1:
- function.parameters = self.GetParams()
-
- # Track block depth.
- elif type == Type.END_BLOCK:
- self._is_block_close = not self.InObjectLiteral()
- self._block_depth -= 1
- self._block_types.pop()
-
- # Track parentheses depth.
- elif type == Type.START_PAREN:
- self._paren_depth += 1
-
- # Track parentheses depth.
- elif type == Type.END_PAREN:
- self._paren_depth -= 1
-
- elif type == Type.COMMENT:
- self._last_comment = token.string
-
- elif type == Type.START_DOC_COMMENT:
- self._last_comment = None
- self._doc_comment = DocComment(token)
-
- elif type == Type.END_DOC_COMMENT:
- self._doc_comment.end_token = token
-
- elif type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
- # Don't overwrite flags if they were already parsed in a previous pass.
- if token.attached_object is None:
- flag = self._doc_flag(token)
- token.attached_object = flag
- else:
- flag = token.attached_object
- self._doc_comment.AddFlag(flag)
-
- if flag.flag_type == 'suppress':
- self._doc_comment.AddSuppression(token)
-
- elif type == Type.FUNCTION_DECLARATION:
- last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, None,
- True)
- doc = None
- # Only top-level functions are eligible for documentation.
- if self.InTopLevel():
- doc = self._doc_comment
-
- name = ''
- is_assigned = last_code and (last_code.IsOperator('=') or
- last_code.IsOperator('||') or last_code.IsOperator('&&') or
- (last_code.IsOperator(':') and not self.InObjectLiteral()))
- if is_assigned:
- # TODO(robbyw): This breaks for x[2] = ...
- # Must use loop to find full function name in the case of line-wrapped
- # declarations (bug 1220601) like:
- # my.function.foo.
- # bar = function() ...
- identifier = tokenutil.Search(last_code, Type.SIMPLE_LVALUE, None, True)
- while identifier and tokenutil.IsIdentifierOrDot(identifier):
- name = identifier.string + name
- # Traverse behind us, skipping whitespace and comments.
- while True:
- identifier = identifier.previous
- if not identifier or not identifier.type in Type.NON_CODE_TYPES:
- break
-
- else:
- next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
- while next_token and next_token.IsType(Type.FUNCTION_NAME):
- name += next_token.string
- next_token = tokenutil.Search(next_token, Type.FUNCTION_NAME, 2)
-
- function = Function(self._block_depth, is_assigned, doc, name)
- function.start_token = token
-
- self._function_stack.append(function)
- self._functions_by_name[name] = function
-
- # Add a delimiter in stack for scope variables to define start of
- # function. This helps in popping variables of this function when
- # function declaration ends.
- self._variables_in_scope.append('')
-
- elif type == Type.START_PARAMETERS:
- self._cumulative_params = ''
-
- elif type == Type.PARAMETERS:
- self._cumulative_params += token.string
- self._variables_in_scope.extend(self.GetParams())
-
- elif type == Type.KEYWORD and token.string == 'return':
- next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
- if not next_token.IsType(Type.SEMICOLON):
- function = self.GetFunction()
- if function:
- function.has_return = True
-
- elif type == Type.KEYWORD and token.string == 'throw':
- function = self.GetFunction()
- if function:
- function.has_throw = True
-
- elif type == Type.KEYWORD and token.string == 'var':
- function = self.GetFunction()
- next_token = tokenutil.Search(token, [Type.IDENTIFIER,
- Type.SIMPLE_LVALUE])
-
- if next_token:
- if next_token.type == Type.SIMPLE_LVALUE:
- self._variables_in_scope.append(next_token.values['identifier'])
- else:
- self._variables_in_scope.append(next_token.string)
-
- elif type == Type.SIMPLE_LVALUE:
- identifier = token.values['identifier']
- jsdoc = self.GetDocComment()
- if jsdoc:
- self._documented_identifiers.add(identifier)
-
- self._HandleIdentifier(identifier, True)
-
- elif type == Type.IDENTIFIER:
- self._HandleIdentifier(token.string, False)
-
- # Detect documented non-assignments.
- next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
- if next_token and next_token.IsType(Type.SEMICOLON):
- if (self._last_non_space_token and
- self._last_non_space_token.IsType(Type.END_DOC_COMMENT)):
- self._documented_identifiers.add(token.string)
-
- def _HandleIdentifier(self, identifier, is_assignment):
- """Process the given identifier.
-
- Currently checks if it references 'this' and annotates the function
- accordingly.
-
- Args:
- identifier: The identifer to process.
- is_assignment: Whether the identifer is being written to.
- """
- if identifier == 'this' or identifier.startswith('this.'):
- function = self.GetFunction()
- if function:
- function.has_this = True
-
- def HandleAfterToken(self, token):
- """Handle updating state after a token has been checked.
-
- This function should be used for destructive state changes such as
- deleting a tracked object.
-
- Args:
- token: The token to handle.
- """
- type = token.type
- if type == Type.SEMICOLON or type == Type.END_PAREN or (
- type == Type.END_BRACKET and
- self._last_non_space_token.type not in (
- Type.SINGLE_QUOTE_STRING_END, Type.DOUBLE_QUOTE_STRING_END)):
- # We end on any numeric array index, but keep going for string based
- # array indices so that we pick up manually exported identifiers.
- self._doc_comment = None
- self._last_comment = None
-
- elif type == Type.END_BLOCK:
- self._doc_comment = None
- self._last_comment = None
-
- if self.InFunction() and self.IsFunctionClose():
- # TODO(robbyw): Detect the function's name for better errors.
- function = self._function_stack.pop()
- function.end_token = token
-
- # Pop all variables till delimiter ('') those were defined in the
- # function being closed so make them out of scope.
- while self._variables_in_scope and self._variables_in_scope[-1]:
- self._variables_in_scope.pop()
-
- # Pop delimiter
- if self._variables_in_scope:
- self._variables_in_scope.pop()
-
- elif type == Type.END_PARAMETERS and self._doc_comment:
- self._doc_comment = None
- self._last_comment = None
-
- if not token.IsAnyType(Type.WHITESPACE, Type.BLANK_LINE):
- self._last_non_space_token = token
-
- self._last_line = token.line
diff --git a/tools/closure_linter/closure_linter/statetracker_test.py b/tools/closure_linter/closure_linter/statetracker_test.py
deleted file mode 100755
index 494dc642fc..0000000000
--- a/tools/closure_linter/closure_linter/statetracker_test.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unit tests for the statetracker module."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = ('nnaze@google.com (Nathan Naze)')
-
-
-
-import unittest as googletest
-
-from closure_linter import javascripttokens
-from closure_linter import statetracker
-from closure_linter import testutil
-
-
-class _FakeDocFlag(object):
-
- def __repr__(self):
- return '@%s %s' % (self.flag_type, self.name)
-
-
-class IdentifierTest(googletest.TestCase):
-
- def testJustIdentifier(self):
- a = javascripttokens.JavaScriptToken(
- 'abc', javascripttokens.JavaScriptTokenType.IDENTIFIER, 'abc', 1)
-
- st = statetracker.StateTracker()
- st.HandleToken(a, None)
-
-
-class DocCommentTest(googletest.TestCase):
-
- @staticmethod
- def _MakeDocFlagFake(flag_type, name=None):
- flag = _FakeDocFlag()
- flag.flag_type = flag_type
- flag.name = name
- return flag
-
- def testDocFlags(self):
- comment = statetracker.DocComment(None)
-
- a = self._MakeDocFlagFake('param', 'foo')
- comment.AddFlag(a)
-
- b = self._MakeDocFlagFake('param', '')
- comment.AddFlag(b)
-
- c = self._MakeDocFlagFake('param', 'bar')
- comment.AddFlag(c)
-
- self.assertEquals(
- ['foo', 'bar'],
- comment.ordered_params)
-
- self.assertEquals(
- [a, b, c],
- comment.GetDocFlags())
-
- def testInvalidate(self):
- comment = statetracker.DocComment(None)
-
- self.assertFalse(comment.invalidated)
- self.assertFalse(comment.IsInvalidated())
-
- comment.Invalidate()
-
- self.assertTrue(comment.invalidated)
- self.assertTrue(comment.IsInvalidated())
-
- def testSuppressionOnly(self):
- comment = statetracker.DocComment(None)
-
- self.assertFalse(comment.SuppressionOnly())
- comment.AddFlag(self._MakeDocFlagFake('suppress'))
- self.assertTrue(comment.SuppressionOnly())
- comment.AddFlag(self._MakeDocFlagFake('foo'))
- self.assertFalse(comment.SuppressionOnly())
-
- def testRepr(self):
- comment = statetracker.DocComment(None)
- comment.AddFlag(self._MakeDocFlagFake('param', 'foo'))
- comment.AddFlag(self._MakeDocFlagFake('param', 'bar'))
-
- self.assertEquals(
- '<DocComment: [\'foo\', \'bar\'], [@param foo, @param bar]>',
- repr(comment))
-
- def testDocFlagParam(self):
- comment = self._ParseComment("""
- /**
- * @param {string} [name] Name of customer.
- */""")
- flag = comment.GetFlag('param')
- self.assertEquals('string', flag.type)
- self.assertEquals('string', flag.jstype.ToString())
- self.assertEquals('[name]', flag.name)
-
- def _ParseComment(self, script):
- """Parse a script that contains one comment and return it."""
- _, comments = testutil.ParseFunctionsAndComments(script)
- self.assertEquals(1, len(comments))
- return comments[0]
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/closure_linter/strict_test.py b/tools/closure_linter/closure_linter/strict_test.py
deleted file mode 100755
index 2634456874..0000000000
--- a/tools/closure_linter/closure_linter/strict_test.py
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2013 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tests for gjslint --strict.
-
-Tests errors that can be thrown by gjslint when in strict mode.
-"""
-
-
-
-import unittest
-
-import gflags as flags
-import unittest as googletest
-
-from closure_linter import errors
-from closure_linter import runner
-from closure_linter.common import erroraccumulator
-
-flags.FLAGS.strict = True
-
-
-class StrictTest(unittest.TestCase):
- """Tests scenarios where strict generates warnings."""
-
- def testUnclosedString(self):
- """Tests warnings are reported when nothing is disabled.
-
- b/11450054.
- """
- original = [
- 'bug = function() {',
- ' (\'foo\'\');',
- '};',
- '',
- ]
-
- expected = [errors.FILE_DOES_NOT_PARSE, errors.MULTI_LINE_STRING,
- errors.FILE_IN_BLOCK]
- self._AssertErrors(original, expected)
-
- def _AssertErrors(self, original, expected_errors):
- """Asserts that the error fixer corrects original to expected."""
-
- # Trap gjslint's output parse it to get messages added.
- error_accumulator = erroraccumulator.ErrorAccumulator()
- runner.Run('testing.js', error_accumulator, source=original)
- error_nums = [e.code for e in error_accumulator.GetErrors()]
-
- error_nums.sort()
- expected_errors.sort()
- self.assertListEqual(error_nums, expected_errors)
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/closure_linter/testdata/all_js_wrapped.js b/tools/closure_linter/closure_linter/testdata/all_js_wrapped.js
deleted file mode 100644
index 6eb3b38f52..0000000000
--- a/tools/closure_linter/closure_linter/testdata/all_js_wrapped.js
+++ /dev/null
@@ -1,5 +0,0 @@
-(function($) {
- // My code goes here.
- // linter should not throw random exceptions because the file starts with
- // an open paren. Regression test for bug 2966755.
-})(jQuery);
diff --git a/tools/closure_linter/closure_linter/testdata/blank_lines.js b/tools/closure_linter/closure_linter/testdata/blank_lines.js
deleted file mode 100644
index 1dc3da2905..0000000000
--- a/tools/closure_linter/closure_linter/testdata/blank_lines.js
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * @fileoverview This is a file overview with no lines above it, at the top of
- * the file (GOOD).
- */
-/** // WRONG_BLANK_LINE_COUNT
- * @fileoverview This is a file overview with no lines above it (BAD).
- */
-
-/**
- * @fileoverview This is a file overview with one line above it (GOOD).
- */
-
-
-/**
- * @fileoverview This is a file overview with two lines above it (GOOD).
- */
-
-/** // WRONG_BLANK_LINE_COUNT
- * A constructor with 1 line above it (BAD).
- * @constructor
- */
-function someFunction() {}
-
-
-/** // WRONG_BLANK_LINE_COUNT
- * A constructor with 2 lines above it (BAD).
- * @constructor
- */
-function someFunction() {}
-
-
-
-/**
- * A constructor with 3 lines above it (GOOD).
- * @constructor
- */
-function someFunction() {}
-
-
-
-
-/** // WRONG_BLANK_LINE_COUNT
- * A constructor with 4 lines above it (BAD).
- * @constructor
- */
-function someFunction() {}
-
-/** // WRONG_BLANK_LINE_COUNT
- * Top level block with 1 line above it (BAD).
- */
-function someFunction() {}
-
-
-/**
- * Top level block with 2 lines above it (GOOD).
- */
-function someFunction() {}
-
-
-
-/** // WRONG_BLANK_LINE_COUNT
- * Top level block with 3 lines above it (BAD).
- */
-function someFunction() {}
-
-
-// -1: EXTRA_SPACE
-/**
- * Top level block with 2 lines above it, one contains whitespace (GOOD).
- */
-function someFunction() {}
-
-
-// This comment should be ignored.
-/**
- * Top level block with 2 lines above it (GOOD).
- */
-function someFunction() {}
-
-// Should not check jsdocs which are inside a block.
-var x = {
- /**
- * @constructor
- */
-};
-
-/**
- * This jsdoc-style comment should not be required to have two lines above it
- * since it does not immediately precede any code.
- */
-// This is a comment.
-
-/**
- * This jsdoc-style comment should not be required to have two lines above it
- * since it does not immediately precede any code.
- */
-/**
- * This is a comment.
- */
-
-/**
- * This jsdoc-style comment should not be required to have two lines above it
- * since it does not immediately precede any code.
- */
diff --git a/tools/closure_linter/closure_linter/testdata/bugs.js b/tools/closure_linter/closure_linter/testdata/bugs.js
deleted file mode 100644
index 735200545d..0000000000
--- a/tools/closure_linter/closure_linter/testdata/bugs.js
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// A file full of known bugs - this file serves only as a reference and is not
-// tested in any way.
-
-/**
- * @param {{foo} x This is a bad record type.
- * @param {{foo}} y This is a good record type with bad spacing.
- * @param {{foo}} This is a good record type with no parameter name.
- */
-function f(x, y, z) {
-}
-
-
-// Should report extra space errors.
-var magicProps = { renderRow: 0 };
-
-// No error reported here for missing space before {.
-if (x){
-}
-
-// Should have a "brace on wrong line" error.
-if (x)
-{
-}
-
-// We could consider not reporting it when wrapping makes it necessary, as in:
-if (aLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongCondition)
- {
- // Code here.
-}
diff --git a/tools/closure_linter/closure_linter/testdata/empty_file.js b/tools/closure_linter/closure_linter/testdata/empty_file.js
deleted file mode 100644
index e69de29bb2..0000000000
--- a/tools/closure_linter/closure_linter/testdata/empty_file.js
+++ /dev/null
diff --git a/tools/closure_linter/closure_linter/testdata/ends_with_block.js b/tools/closure_linter/closure_linter/testdata/ends_with_block.js
deleted file mode 100644
index 40aa872e30..0000000000
--- a/tools/closure_linter/closure_linter/testdata/ends_with_block.js
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2009 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Regression test for an old crasher.
-
-if (x) {
- alert('Hello world');
-}
diff --git a/tools/closure_linter/closure_linter/testdata/externs.js b/tools/closure_linter/closure_linter/testdata/externs.js
deleted file mode 100644
index 94e2ad38a4..0000000000
--- a/tools/closure_linter/closure_linter/testdata/externs.js
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * Externs files are treated specially. We don't require documentation or
- * return statements in functions when they are documented.
- *
- * @externs
- */
-
-
-function VXMLBaseElement() {}
-
-
-/**
- * Should not complain about return tag with no return statement in
- * externs.js file.
- * @param {string} attrName The name of the attribute.
- * @return {string}
- */
-VXMLBaseElement.prototype.getAttribute = function(attrName) {};
-
-VXMLBaseElement.prototype.undocumentedMethod = function() {};
diff --git a/tools/closure_linter/closure_linter/testdata/externs_jsdoc.js b/tools/closure_linter/closure_linter/testdata/externs_jsdoc.js
deleted file mode 100644
index 926593f7c4..0000000000
--- a/tools/closure_linter/closure_linter/testdata/externs_jsdoc.js
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Tests that the externs tag within a fileoverview comment is
- * sufficient to identify an externs file.
- * @externs
- *
- * Externs files are treated specially. We don't require documentation or
- * return statements in functions when they are documented.
- *
- */
-
-
-function VXMLBaseElement() {}
-
-
-/**
- * Should not complain about return tag with no return statement in
- * an externs file.
- * @param {string} attrName The name of the attribute.
- * @return {string}
- */
-VXMLBaseElement.prototype.getAttribute = function(attrName) {};
-
-VXMLBaseElement.prototype.undocumentedMethod = function() {};
diff --git a/tools/closure_linter/closure_linter/testdata/file_level_comment.js b/tools/closure_linter/closure_linter/testdata/file_level_comment.js
deleted file mode 100644
index 86581155a0..0000000000
--- a/tools/closure_linter/closure_linter/testdata/file_level_comment.js
+++ /dev/null
@@ -1,13 +0,0 @@
-/**
- * File level comment as the first thing in a file (GOOD).
- * @license Copyright 2009 SomeThirdParty.
- */
-/** // WRONG_BLANK_LINE_COUNT
- * Comment block that is not the first thing in a file (BAD).
- * @license Copyright 2009 SomeThirdParty.
- */
-
-/** // WRONG_BLANK_LINE_COUNT
- * Top level comment with a single line above it (BAD).
- * @license Copyright 2009 SomeThirdParty.
- */
diff --git a/tools/closure_linter/closure_linter/testdata/fixjsstyle.html.in.html b/tools/closure_linter/closure_linter/testdata/fixjsstyle.html.in.html
deleted file mode 100644
index c341bb9a39..0000000000
--- a/tools/closure_linter/closure_linter/testdata/fixjsstyle.html.in.html
+++ /dev/null
@@ -1,52 +0,0 @@
-<!DOCTYPE HTML>
-<html>
-<!--
- Copyright 2012 Google Inc. All rights reserved.
--->
-<head>
-<meta http-equiv="pragma" content="no-cache">
-<title>Tests</title>
-</head>
-<body>
-<!-- An empty script tag with src.-->
-<script type="text/javascript" src="../../deps_html.js"></script>
-<!-- Missing a require statement and unsorted.-->
-<script type="text/javascript">
-goog.require('goog.testing.MockControl');
-goog.require('goog.SomeTestVar');
- goog.require('goog.SomeMoreExtra');
-goog.require('goog.SomeExtra');
-</script>
-<script type="text/javascript">
-
-var mc;
-var objectToTest;
-
-function setUp() {
- mc = new goog.testing.MockControl();
-
- objectToTest = new goog.AMissingObject();
-}
-<!-- some extra blank lines here-->
-
-
-
-
-function testEnterMeetingId_startsMeeting() {
- var someTestVar = new goog.SomeTestVar();
- objectToTest.test();
-}
-
-
-
-/** @private */
-function somePrivateHelper_() {
-
-}
-
-
-</script>
-<!-- An empty script tag with no src.-->
-<script type="text/javascript"></script>
-</body>
-</html>
diff --git a/tools/closure_linter/closure_linter/testdata/fixjsstyle.html.out.html b/tools/closure_linter/closure_linter/testdata/fixjsstyle.html.out.html
deleted file mode 100644
index bb9a16f003..0000000000
--- a/tools/closure_linter/closure_linter/testdata/fixjsstyle.html.out.html
+++ /dev/null
@@ -1,51 +0,0 @@
-<!DOCTYPE HTML>
-<html>
-<!--
- Copyright 2012 Google Inc. All rights reserved.
--->
-<head>
-<meta http-equiv="pragma" content="no-cache">
-<title>Tests</title>
-</head>
-<body>
-<!-- An empty script tag with src.-->
-<script type="text/javascript" src="../../deps_html.js"></script>
-<!-- Missing a require statement and unsorted.-->
-<script type="text/javascript">
-goog.require('goog.AMissingObject');
-goog.require('goog.SomeTestVar');
-goog.require('goog.testing.MockControl');
-</script>
-<script type="text/javascript">
-
-var mc;
-var objectToTest;
-
-function setUp() {
- mc = new goog.testing.MockControl();
-
- objectToTest = new goog.AMissingObject();
-}
-<!-- some extra blank lines here-->
-
-
-
-
-function testEnterMeetingId_startsMeeting() {
- var someTestVar = new goog.SomeTestVar();
- objectToTest.test();
-}
-
-
-
-/** @private */
-function somePrivateHelper_() {
-
-}
-
-
-</script>
-<!-- An empty script tag with no src.-->
-<script type="text/javascript"></script>
-</body>
-</html>
diff --git a/tools/closure_linter/closure_linter/testdata/fixjsstyle.in.js b/tools/closure_linter/closure_linter/testdata/fixjsstyle.in.js
deleted file mode 100644
index 3a1ccb1f1f..0000000000
--- a/tools/closure_linter/closure_linter/testdata/fixjsstyle.in.js
+++ /dev/null
@@ -1,293 +0,0 @@
-// Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-/**
- * @fileoverview Autofix test script.
- *
- * @author robbyw@google.com (Robby Walker)
- * @author robbyw@google.com (Robby Walker)
- * @author robbyw@google.com(Robby Walker)
- * @author robbyw@google.com
- * @author robbyw@google.com Robby
- */
-
-goog.provide('w');
-goog.provide('Y');
-goog.provide('X');
-goog.provide('Z');
-
-// Some comment about why this is suppressed top.
-/** @suppress {extraRequire} */
-goog.require('dummy.NotUsedTop'); // Comment top.
-goog.require('dummy.Bb');
-/** @suppress {extraRequire} */
-// Some comment about why this is suppressed different.
-goog.require('dummy.NotUsedDifferentComment');
-goog.require('dummy.Cc');
-// Some comment about why this is suppressed middle.
-/** @suppress {extraRequire} */
-goog.require('dummy.NotUsedMiddle'); // Comment middle.
-goog.require('dummy.Dd');
-goog.require('dummy.aa');
-// Some comment about why this is suppressed bottom.
-/** @suppress {extraRequire} */
-goog.require('dummy.NotUsedBottom'); // Comment bottom.
-
-var x = new dummy.Bb();
-dummy.Cc.someMethod();
-dummy.aa.someMethod();
-
-
-/**
- * @param {number|null} badTypeWithExtraSpace |null -> ?.
- * @returns {number} returns -> return.
- */
-x.y = function( badTypeWithExtraSpace) {
-}
-
-
-/** @type {function():null|Array.<string|null>} only 2nd |null -> ? */
-x.badType;
-
-
-/** @type {Array.<number|string|null>|null} only 2nd |null -> ? */
-x.wickedType;
-
-
-/** @type { string | null } null -> ? */
-x.nullWithSpace;
-
-spaceBeforeSemicolon = 10 ;
-spaceBeforeParen = 10 +(5 * 2);
-arrayNoSpace =[10];
-arrayExtraSpace [10] = 10;
-spaceBeforeClose = ([10 ] );
-spaceAfterStart = ( [ 10]);
-extraSpaceAfterPlus = 10 + 20;
-extraSpaceBeforeOperator = x ++;
-extraSpaceBeforeOperator = x --;
-extraSpaceBeforeComma = x(y , z);
-missingSpaceBeforeOperator = x+ y;
-missingSpaceAfterOperator = x +y;
-missingBothSpaces = x+y;
-equalsSpacing= 10;
-equalsSpacing =10;
-equalsSpacing=10;
-equalsSpacing=[10];
-reallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyLongName=1000;
-
-"string should be single quotes";
-
-// Regression test for interaction between space fixing and semicolon fixing -
-// previously the fix for the missing space caused the function to be seen as
-// a non-assigned function and then its semicolon was being stripped.
-x=function() {
-};
-
-/**
- * Missing a newline.
- * @constructor
- * @extends {a.b.c}
- */
-x.y.z = function() {
-};goog.inherits(x.y.z, a.b.c);
-
-/**
- * Extra blank line.
- * @constructor
- * @extends {a.b.c}
- */
-x.y.z = function() {
-};
-
-goog.inherits(x.y.z, a.b.c);
-
-/**
- * Perfect!
- * @constructor
- * @extends {a.b.c}
- */
-x.y.z = function() {
-};
-goog.inherits(x.y.z, a.b.c);
-
-// Whitespace at end of comment.
-var removeWhiteSpaceAtEndOfLine;
-
-/**
- * Whitespace at EOL (here and the line of code and the one below it).
- * @type {string}
- * @param {string} Description with whitespace at EOL.
- */
-x = 10;
-
-/**
- * @type number
- */
-foo.bar = 3;
-
-/**
- * @enum {boolean
- */
-bar.baz = true;
-
-/**
- * @extends Object}
- */
-bar.foo = x;
-
-/**
- * @type function(string, boolean) : void
- */
-baz.bar = goog.nullFunction;
-
-/** {@inheritDoc} */
-baz.baz = function() {
-};
-
-TR_Node.splitDomTreeAt(splitNode, clone, /** @type Node */ (quoteNode));
-
-x = [1, 2, 3,];
-x = {
- a: 1,
-};
-
-if (x) {
-};
-
-for (i = 0;i < 10; i++) {
-}
-for (i = 0; i < 10;i++) {
-}
-for ( i = 0; i < 10; i++) {
-}
-for (i = 0 ; i < 10; i++) {
-}
-for (i = 0; i < 10 ; i++) {
-}
-for (i = 0; i < 10; i++ ) {
-}
-for (i = 0; i < 10; i++) {
-}
-for (i = 0; i < 10; i++) {
-}
-for (i = 0 ;i < 10; i++) {
-}
-
-var x = 10
-var y = 100;
-
-
-/**
- * This is to test the ability to add or remove a = in type to mark optional
- * parameters.
- * @param {number=} firstArg Incorrect the name should start with opt_. Don't
- * handle the fix (yet).
- * @param {function(string=):number} opt_function This should end with a =.
- * @param {function(number)} opt_otherFunc This should end with a =.
- * @param {string} opt_otherArg Incorrect this should be string=.
- * @param {{string, number}} opt_recordArg Incorrect this should
- * be {string, number}=.
- */
-function someFunction(firstArg, opt_function, opt_otherFunc, opt_otherArg,
- opt_recordArg) {
-}
-
-
-/**
- * This is to test the ability to add '...' in type with variable arguments.
- * @param {number} firstArg First argument.
- * @param {string} var_args This should start with '...'.
- */
-function varArgFunction(firstArg, var_args) {
-}
-
-
-/**
- * This is to test the ability to add '...' in type with variable arguments.
- * @param {number} firstArg First argument.
- * @param {{a, b}} var_args This should start with '...'.
- */
-function varArgRecordTypeFunction(firstArg, var_args) {
-}
-
-var indent = 'correct';
- indent = 'too far';
-if (indent) {
-indent = 'too short';
-}
-indent = function() {
- return a +
- b;
-};
-
-
-/**
- * Regression test, must insert whitespace before the 'b' when fixing
- * indentation. Its different from below case of bug 3473113 as has spaces
- * before parameter which was not working in part of the bug fix.
- */
-indentWrongSpaces = function(
- b) {
-};
-
-
-/**
- * Regression test, must insert whitespace before the 'b' when fixing
- * indentation.
- * @bug 3473113
- */
-indent = function(
-b) {
-};
-
-
-
-/**
- * This is to test the ability to remove multiple extra lines before a top-level
- * block.
- */
-function someFunction() {}
-/**
- * This is to test the ability to add multiple extra lines before a top-level
- * block.
- */
-function someFunction() {}
-
-
-// This is a comment.
-/**
- * This is to test that blank lines removed before a top level block skips any
- * comments above the block.
- */
-function someFunction() {}
-// This is a comment.
-/**
- * This is to test that blank lines added before a top level block skips any
- * comments above the block.
- */
-function someFunction() {}
-
-
-/**
- * Parameters don't have proper spaces.
- * @param {number} a
- * @param {number} b
- * @param {number} d
- * @param {number} e
- * @param {number} f
- */
-function someFunction(a, b,d, e, f) {
-}
-
-// File does not end with newline \ No newline at end of file
diff --git a/tools/closure_linter/closure_linter/testdata/fixjsstyle.indentation.out.js b/tools/closure_linter/closure_linter/testdata/fixjsstyle.indentation.out.js
deleted file mode 100644
index 4d7c3853c8..0000000000
--- a/tools/closure_linter/closure_linter/testdata/fixjsstyle.indentation.out.js
+++ /dev/null
@@ -1,465 +0,0 @@
-// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Test file for indentation.
- * @author robbyw@google.com (Robert Walker)
- */
-
-goog.provide('goog.editor.SeamlessField');
-goog.provide('goog.something');
-
-goog.require('goog.events.KeyCodes');
-goog.require('goog.userAgent');
-
-// Some good indentation examples.
-
-var x = 10;
-var y = 'some really really really really really really really long string',
- z = 14;
-if (x == 10) {
- x = 12;
-}
-if (x == 10 ||
- x == 12) {
- x = 14;
-}
-if (x == 14) {
- if (z >= x) {
- y = 'test';
- }
-}
-x = x +
- 10 + (
- 14
- );
-something =
- 5;
-var arr = [
- 1, 2, 3];
-var arr2 = [
- 1,
- 2,
- 3];
-var obj = {
- a: 10,
- b: 20
-};
-callAFunction(10, [100, 200],
- 300);
-callAFunction([
- 100,
- 200
-],
-300);
-callAFunction('abc' +
- 'def' +
- 'ghi');
-
-x.reallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyLongName
- .someMember = 10;
-
-
-// confused on allowed indentation in continued function assignments vs overlong
-// wrapped function calls.
-some.sample() // LINE_ENDS_WITH_DOT
- .then(function(response) {
- return 1;
- });
-
-
-/**
- * Some function.
- * @return {number} The number ten.
- */
-goog.something.x = function() {
- return 10 +
- 20;
-};
-
-
-/**
- * Some function.
- * @param {number} longParameterName1 Some number.
- * @param {number} longParameterName2 Some number.
- * @param {number} longParameterName3 Some number.
- * @return {number} Sum number.
- */
-goog.something.y = function(longParameterName1, longParameterName2,
- longParameterName3) {
- return longParameterName1 + longParameterName2 + longParameterName3;
-};
-
-
-/**
- * Some function.
- * @param {number} longParameterName1 Some number.
- * @param {number} longParameterName2 Some number.
- * @param {number} longParameterName3 Some number.
- * @return {number} Sum number.
- */
-goog.something.z = function(longParameterName1, longParameterName2,
- longParameterName3) {
- return longParameterName1 + longParameterName2 + longParameterName3;
-};
-
-if (opt_rootTagName) {
- doc.appendChild(doc.createNode(3,
- opt_rootTagName,
- opt_namespaceUri || ''));
-}
-
-
-/**
- * For a while this errored because the function call parens were overriding
- * the other opening paren.
- */
-goog.something.q = function() {
- goog.something.x(a.getStartNode(),
- a.getStartOffset(), a.getEndNode(), a.getEndOffset());
-};
-
-function doSomething() {
- var titleElement = goog.something(x, // UNUSED_LOCAL_VARIABLE
- y);
-}
-
-switch (x) {
- case 10:
- y = 100;
- break;
-
- // This should be allowed.
- case 20:
- if (y) {
- z = 0;
- }
- break;
-
- // This should be allowed,
- // even with mutliple lines.
- case 30:
- if (y) {
- z = 0;
- }
- break;
-
- case SadThatYouSwitch
- .onSomethingLikeThis:
- z = 10;
-
- case 40:
- z = 20;
-
- default:
- break;
-}
-
-// Description of if case.
-if (x) {
-
-// Description of else case should be allowed at this indent.
-// Multiple lines is ok.
-} else {
-
-}
-
-
-/** @inheritDoc */
-goog.editor.SeamlessField.prototype.setupMutationEventHandlersGecko =
- function() {
- var x = 10;
- x++;
-};
-
-
-// Regression test for '.' at the end confusing the indentation checker if it is
-// not considered to be part of the identifier.
-/** @inheritDoc */
-goog.editor.SeamlessField.prototype
- .setupMutationEventHandlersGecko = function() {
- // -2: LINE_ENDS_WITH_DOT
- var x = 10;
- x++;
-};
-
-var someReallyReallyLongVariableName =
- y ? /veryVeryVeryVeryVeryVeryVeryVeryLongRegex1/gi :
- /slightlyLessLongRegex2/gi;
-
-var somethingOrOther = z ?
- a :
- b;
-
-var z = x ? y :
- 'bar';
-
-var z = x ?
- y :
- a;
-
-var z = z ?
- a ? b : c :
- d ? e : f;
-
-var z = z ?
- a ? b :
- c :
- d ? e : f;
-
-var z = z ?
- a ?
- b :
- c :
- d ? e : f;
-
-var z = z ?
- a ? b : c :
- d ? e :
- f ? g : h;
-
-var z = z ?
- a +
- i ?
- b +
- j : c :
- d ? e :
- f ? g : h;
-
-
-if (x) {
- var block =
- // some comment
- // and some more comment
- (e.keyCode == goog.events.KeyCodes.TAB && !this.dispatchBeforeTab_(e)) ||
- // #2: to block a Firefox-specific bug where Macs try to navigate
- // back a page when you hit command+left arrow or comamnd-right arrow.
- // See https://bugzilla.mozilla.org/show_bug.cgi?id=341886
- // get Firefox to fix this.
- (goog.userAgent.GECKO && e.metaKey &&
- (e.keyCode == goog.events.KeyCodes.LEFT ||
- e.keyCode == goog.events.KeyCodes.RIGHT));
-}
-
-if (x) {
-}
-
-var somethingElse = {
- HAS_W3C_RANGES: goog.userAgent.GECKO || goog.userAgent.WEBKIT ||
- goog.userAgent.OPERA,
-
- // A reasonably placed comment.
- SOME_KEY: goog.userAgent.IE
-};
-
-var x = {
- ySomethingReallyReallyLong:
- 'foo',
- z: 'bar'
-};
-
-// Some bad indentation.
-
-var a = 10; // WRONG_INDENTATION
-var b = 10,
- c = 12; // WRONG_INDENTATION
-x = x +
- 10; // WRONG_INDENTATION
-if (x == 14) {
- x = 15; // WRONG_INDENTATION
- x = 16; // WRONG_INDENTATION
-}
-
-var longFunctionName = function(opt_element) {
- return opt_element ?
- new z(q(opt_element)) : 100;
- // -1: WRONG_INDENTATION
-};
-
-longFunctionName(a, b, c,
- d, e, f); // WRONG_INDENTATION
-longFunctionName(a, b,
- c, // WRONG_INDENTATION
- d); // WRONG_INDENTATION
-
-x = a ? b :
- c; // WRONG_INDENTATION
-y = a ?
- b : c; // WRONG_INDENTATION
-
-switch (x) {
- case 10:
- break; // WRONG_INDENTATION
- case 20: // WRONG_INDENTATION
- break;
- default: // WRONG_INDENTATION
- break;
-}
-
-while (true) {
- x = 10; // WRONG_INDENTATION
- break; // WRONG_INDENTATION
-}
-
-function foo() {
- return entryUrlTemplate
- .replace(
- '${authorResourceId}',
- this.sanitizer_.sanitize(authorResourceId));
-}
-
-return [new x(
- 10)];
-return [
- new x(10)];
-
-return [new x(
- 10)]; // WRONG_INDENTATION
-return [new x(
- 10)]; // WRONG_INDENTATION
-
-return {x: y(
- z)};
-return {
- x: y(z)
-};
-
-return {x: y(
- z)}; // WRONG_INDENTATION
-return {x: y(
- z)}; // WRONG_INDENTATION
-
-return /** @type {Window} */ (x(
- 'javascript:"' + encodeURI(loadingMessage) + '"')); // WRONG_INDENTATION
-
-x = {
- y: function() {}
-};
-
-x = {
- y: foo,
- z: bar +
- baz // WRONG_INDENTATION
-};
-
-x({
- a: b
-},
-10);
-
-z = function(arr, f, val, opt_obj) {
- x(arr, function(val, index) {
- rval = f.call(opt_obj, rval, val, index, arr);
- });
-};
-
-var xyz = [100,
- 200,
- 300];
-
-var def = [100,
- 200]; // WRONG_INDENTATION
-
-var ghi = [100,
- 200]; // WRONG_INDENTATION
-
-var abcdefg = ('a' +
- 'b');
-
-var x9 = z('7: ' +
- x(x)); // WRONG_INDENTATION
-
-function abc() {
- var z = d('div', // UNUSED_LOCAL_VARIABLE
- {
- a: 'b'
- });
-}
-
-abcdefg('p', {x: 10},
- 'Para 1');
-
-function bar1() {
- return 3 +
- 4; // WRONG_INDENTATION
-}
-
-function bar2() {
- return 3 + // WRONG_INDENTATION
- 4; // WRONG_INDENTATION
-}
-
-function bar3() {
- return 3 + // WRONG_INDENTATION
- 4;
-}
-
-// Regression test for unfiled bug. Wrongly going into implied block after else
-// when there was an explicit block (was an else if) caused false positive
-// indentation errors.
-if (true) {
-} else if (doc.foo(
- doc.getBar(baz))) {
- var x = 3;
-}
-
-// Regression tests for function indent + 4.
-// (The first example is from the styleguide.)
-if (veryLongFunctionNameA(
- veryLongArgumentName) ||
- veryLongFunctionNameB(
- veryLongArgumentName)) {
- veryLongFunctionNameC(veryLongFunctionNameD(
- veryLongFunctioNameE(
- veryLongFunctionNameF)));
-}
-
-if (outer(middle(
- inner(first)))) {}
-if (outer(middle(
- inner(second)),
- outer_second)) {}
-if (nested.outer(
- first)) {}
-if (nested.outer(nested.middle(
- first))) {}
-if (nested
- .outer(nested.middle(
- first))) {}
-if (nested.outer(first
- .middle(
- second),
- third)) {}
-
-// goog.scope should not increase indentation.
-goog.scope(function() {
-var x = 5;
-while (x > 0) {
- --x;
-}
-}); // goog.scope
-
-
-goog.scope(function() { // EXTRA_GOOG_SCOPE_USAGE
-// +1: UNUSED_LOCAL_VARIABLE
-var x = 5; // WRONG_INDENTATION
-}); // goog.scope
-
-goog.scope(function() { // EXTRA_GOOG_SCOPE_USAGE
-var x = 5; // UNUSED_LOCAL_VARIABLE
-}); // goog.scope
-
-goog.scope(function() { // EXTRA_GOOG_SCOPE_USAGE
-var x = 5; // UNUSED_LOCAL_VARIABLE
-}); // goog.scope
diff --git a/tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.in.js b/tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.in.js
deleted file mode 100644
index 974af915d3..0000000000
--- a/tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.in.js
+++ /dev/null
@@ -1,21 +0,0 @@
-// Correct dot placement:
-var x = window.some()
- .method()
- .calls();
-
-// Wrong dots:
-window.
- some().
- // With a comment in between.
- method().
- calls();
-
-// Wrong plus operator:
-var y = 'hello'
- + 'world'
- // With a comment in between.
- + '!';
-
-// Correct plus operator (untouched):
-var y = 'hello' +
- 'world';
diff --git a/tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.out.js b/tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.out.js
deleted file mode 100644
index c03e11730a..0000000000
--- a/tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.out.js
+++ /dev/null
@@ -1,21 +0,0 @@
-// Correct dot placement:
-var x = window.some()
- .method()
- .calls();
-
-// Wrong dots:
-window
- .some()
- // With a comment in between.
- .method()
- .calls();
-
-// Wrong plus operator:
-var y = 'hello' +
- 'world' +
- // With a comment in between.
- '!';
-
-// Correct plus operator (untouched):
-var y = 'hello' +
- 'world';
diff --git a/tools/closure_linter/closure_linter/testdata/fixjsstyle.out.js b/tools/closure_linter/closure_linter/testdata/fixjsstyle.out.js
deleted file mode 100644
index 37fe2b8b3c..0000000000
--- a/tools/closure_linter/closure_linter/testdata/fixjsstyle.out.js
+++ /dev/null
@@ -1,310 +0,0 @@
-// Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Autofix test script.
- *
- * @author robbyw@google.com (Robby Walker)
- * @author robbyw@google.com (Robby Walker)
- * @author robbyw@google.com (Robby Walker)
- * @author robbyw@google.com (Robby Walker)
- * @author robbyw@google.com (Robby)
- * @author robbyw@google.com
- * @author robbyw@google.com Robby
- */
-
-goog.provide('X');
-goog.provide('Y');
-goog.provide('Z');
-goog.provide('w');
-
-goog.require('dummy.Bb');
-goog.require('dummy.Cc');
-// Some comment about why this is suppressed bottom.
-/** @suppress {extraRequire} */
-goog.require('dummy.NotUsedBottom'); // Comment bottom.
-/** @suppress {extraRequire} */
-// Some comment about why this is suppressed different.
-goog.require('dummy.NotUsedDifferentComment');
-// Some comment about why this is suppressed middle.
-/** @suppress {extraRequire} */
-goog.require('dummy.NotUsedMiddle'); // Comment middle.
-// Some comment about why this is suppressed top.
-/** @suppress {extraRequire} */
-goog.require('dummy.NotUsedTop'); // Comment top.
-goog.require('dummy.aa');
-
-var x = new dummy.Bb();
-dummy.Cc.someMethod();
-dummy.aa.someMethod();
-
-
-/**
- * @param {?number} badTypeWithExtraSpace |null -> ?.
- * @return {number} returns -> return.
- */
-x.y = function(badTypeWithExtraSpace) {
-};
-
-
-/** @type {function():null|Array.<?string>} only 2nd |null -> ? */
-x.badType;
-
-
-/** @type {?Array.<number|string|null>} only 2nd |null -> ? */
-x.wickedType;
-
-
-/** @type {? string } null -> ? */
-x.nullWithSpace;
-
-spaceBeforeSemicolon = 10;
-spaceBeforeParen = 10 + (5 * 2);
-arrayNoSpace = [10];
-arrayExtraSpace[10] = 10;
-spaceBeforeClose = ([10]);
-spaceAfterStart = ([10]);
-extraSpaceAfterPlus = 10 + 20;
-extraSpaceBeforeOperator = x++;
-extraSpaceBeforeOperator = x--;
-extraSpaceBeforeComma = x(y, z);
-missingSpaceBeforeOperator = x + y;
-missingSpaceAfterOperator = x + y;
-missingBothSpaces = x + y;
-equalsSpacing = 10;
-equalsSpacing = 10;
-equalsSpacing = 10;
-equalsSpacing = [10];
-reallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyLongName = 1000;
-
-'string should be single quotes';
-
-// Regression test for interaction between space fixing and semicolon fixing -
-// previously the fix for the missing space caused the function to be seen as
-// a non-assigned function and then its semicolon was being stripped.
-x = function() {
-};
-
-
-
-/**
- * Missing a newline.
- * @constructor
- * @extends {a.b.c}
- */
-x.y.z = function() {
-};
-goog.inherits(x.y.z, a.b.c);
-
-
-
-/**
- * Extra blank line.
- * @constructor
- * @extends {a.b.c}
- */
-x.y.z = function() {
-};
-goog.inherits(x.y.z, a.b.c);
-
-
-
-/**
- * Perfect!
- * @constructor
- * @extends {a.b.c}
- */
-x.y.z = function() {
-};
-goog.inherits(x.y.z, a.b.c);
-
-// Whitespace at end of comment.
-var removeWhiteSpaceAtEndOfLine;
-
-
-/**
- * Whitespace at EOL (here and the line of code and the one below it).
- * @type {string}
- * @param {string} Description with whitespace at EOL.
- */
-x = 10;
-
-
-/**
- * @type {number}
- */
-foo.bar = 3;
-
-
-/**
- * @enum {boolean}
- */
-bar.baz = true;
-
-
-/**
- * @extends {Object}
- */
-bar.foo = x;
-
-
-/**
- * @type {function(string, boolean) : void}
- */
-baz.bar = goog.nullFunction;
-
-
-/** @inheritDoc */
-baz.baz = function() {
-};
-
-TR_Node.splitDomTreeAt(splitNode, clone, /** @type {Node} */ (quoteNode));
-
-x = [1, 2, 3,];
-x = {
- a: 1,
-};
-
-if (x) {
-}
-
-for (i = 0; i < 10; i++) {
-}
-for (i = 0; i < 10; i++) {
-}
-for (i = 0; i < 10; i++) {
-}
-for (i = 0; i < 10; i++) {
-}
-for (i = 0; i < 10; i++) {
-}
-for (i = 0; i < 10; i++) {
-}
-for (i = 0; i < 10; i++) {
-}
-for (i = 0; i < 10; i++) {
-}
-for (i = 0; i < 10; i++) {
-}
-
-var x = 10;
-var y = 100;
-
-
-/**
- * This is to test the ability to add or remove a = in type to mark optional
- * parameters.
- * @param {number=} firstArg Incorrect the name should start with opt_. Don't
- * handle the fix (yet).
- * @param {function(string=):number=} opt_function This should end with a =.
- * @param {function(number)=} opt_otherFunc This should end with a =.
- * @param {string=} opt_otherArg Incorrect this should be string=.
- * @param {{string, number}=} opt_recordArg Incorrect this should
- * be {string, number}=.
- */
-function someFunction(firstArg, opt_function, opt_otherFunc, opt_otherArg,
- opt_recordArg) {
-}
-
-
-/**
- * This is to test the ability to add '...' in type with variable arguments.
- * @param {number} firstArg First argument.
- * @param {...string} var_args This should start with '...'.
- */
-function varArgFunction(firstArg, var_args) {
-}
-
-
-/**
- * This is to test the ability to add '...' in type with variable arguments.
- * @param {number} firstArg First argument.
- * @param {...{a, b}} var_args This should start with '...'.
- */
-function varArgRecordTypeFunction(firstArg, var_args) {
-}
-
-var indent = 'correct';
-indent = 'too far';
-if (indent) {
- indent = 'too short';
-}
-indent = function() {
- return a +
- b;
-};
-
-
-/**
- * Regression test, must insert whitespace before the 'b' when fixing
- * indentation. Its different from below case of bug 3473113 as has spaces
- * before parameter which was not working in part of the bug fix.
- */
-indentWrongSpaces = function(
- b) {
-};
-
-
-/**
- * Regression test, must insert whitespace before the 'b' when fixing
- * indentation.
- * @bug 3473113
- */
-indent = function(
- b) {
-};
-
-
-/**
- * This is to test the ability to remove multiple extra lines before a top-level
- * block.
- */
-function someFunction() {}
-
-
-/**
- * This is to test the ability to add multiple extra lines before a top-level
- * block.
- */
-function someFunction() {}
-
-
-// This is a comment.
-/**
- * This is to test that blank lines removed before a top level block skips any
- * comments above the block.
- */
-function someFunction() {}
-
-
-// This is a comment.
-/**
- * This is to test that blank lines added before a top level block skips any
- * comments above the block.
- */
-function someFunction() {}
-
-
-/**
- * Parameters don't have proper spaces.
- * @param {number} a
- * @param {number} b
- * @param {number} d
- * @param {number} e
- * @param {number} f
- */
-function someFunction(a, b, d, e, f) {
-}
-
-// File does not end with newline
diff --git a/tools/closure_linter/closure_linter/testdata/goog_scope.js b/tools/closure_linter/closure_linter/testdata/goog_scope.js
deleted file mode 100644
index aa655d8e6d..0000000000
--- a/tools/closure_linter/closure_linter/testdata/goog_scope.js
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2011 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Tests provides/requires in the presence of goog.scope.
- * There should be no errors for missing provides or requires.
- *
- * @author nicksantos@google.com (Nick Santos)
- */
-
-goog.provide('goog.something.Something');
-
-goog.require('goog.util.Else');
-
-goog.scope(function() {
-var Else = goog.util.Else;
-var something = goog.something;
-
-/** // WRONG_BLANK_LINE_COUNT
- * This is a something.
- * @constructor
- */
-something.Something = function() {
- /**
- * This is an else.
- * @type {Else}
- */
- this.myElse = new Else();
-
- /** @type {boolean} */
- this.private_ = false; // MISSING_PRIVATE, UNUSED_PRIVATE_MEMBER
-};
-
-/** // WRONG_BLANK_LINE_COUNT
- * // +3: MISSING_PRIVATE
- * Missing private.
- */
-something.withTrailingUnderscore_ = 'should be declared @private';
-
-/** // WRONG_BLANK_LINE_COUNT
- * Does nothing.
- */
-something.Something.prototype.noOp = function() {};
-
-
-/**
- * Does something.
- * Tests for included semicolon in function expression in goog.scope.
- */
-something.Something.prototype.someOp = function() {
-} // MISSING_SEMICOLON_AFTER_FUNCTION
-}); // goog.scope
diff --git a/tools/closure_linter/closure_linter/testdata/html_parse_error.html b/tools/closure_linter/closure_linter/testdata/html_parse_error.html
deleted file mode 100644
index df61da10c7..0000000000
--- a/tools/closure_linter/closure_linter/testdata/html_parse_error.html
+++ /dev/null
@@ -1,32 +0,0 @@
-<!DOCTYPE html>
-<html lang="en" dir="ltr">
-<head>
- <!--
- Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS-IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
- <title>GJsLint HTML JavaScript extraction tests</title>
-</head>
-<body>
- Text outside the script tag should not be linted as JavaScript.
- Stray closing brace: }
- <script type="text/javascript">
- /**
- * @type {!Array.<!Array.<string>>}
- */
- var badParse = [['a']];
- </script>
- <div>Some more non-JavaScript text with missing whitespace: (a+b).</div>
-</body>
-</html>
diff --git a/tools/closure_linter/closure_linter/testdata/indentation.js b/tools/closure_linter/closure_linter/testdata/indentation.js
deleted file mode 100644
index 10d2ad0174..0000000000
--- a/tools/closure_linter/closure_linter/testdata/indentation.js
+++ /dev/null
@@ -1,465 +0,0 @@
-// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Test file for indentation.
- * @author robbyw@google.com (Robert Walker)
- */
-
-goog.provide('goog.editor.SeamlessField');
-goog.provide('goog.something');
-
-goog.require('goog.events.KeyCodes');
-goog.require('goog.userAgent');
-
-// Some good indentation examples.
-
-var x = 10;
-var y = 'some really really really really really really really long string',
- z = 14;
-if (x == 10) {
- x = 12;
-}
-if (x == 10 ||
- x == 12) {
- x = 14;
-}
-if (x == 14) {
- if (z >= x) {
- y = 'test';
- }
-}
-x = x +
- 10 + (
- 14
- );
-something =
- 5;
-var arr = [
- 1, 2, 3];
-var arr2 = [
- 1,
- 2,
- 3];
-var obj = {
- a: 10,
- b: 20
-};
-callAFunction(10, [100, 200],
- 300);
-callAFunction([
- 100,
- 200
-],
-300);
-callAFunction('abc' +
- 'def' +
- 'ghi');
-
-x.reallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyLongName
- .someMember = 10;
-
-
-// confused on allowed indentation in continued function assignments vs overlong
-// wrapped function calls.
-some.sample(). // LINE_ENDS_WITH_DOT
- then(function(response) {
- return 1;
- });
-
-
-/**
- * Some function.
- * @return {number} The number ten.
- */
-goog.something.x = function() {
- return 10 +
- 20;
-};
-
-
-/**
- * Some function.
- * @param {number} longParameterName1 Some number.
- * @param {number} longParameterName2 Some number.
- * @param {number} longParameterName3 Some number.
- * @return {number} Sum number.
- */
-goog.something.y = function(longParameterName1, longParameterName2,
- longParameterName3) {
- return longParameterName1 + longParameterName2 + longParameterName3;
-};
-
-
-/**
- * Some function.
- * @param {number} longParameterName1 Some number.
- * @param {number} longParameterName2 Some number.
- * @param {number} longParameterName3 Some number.
- * @return {number} Sum number.
- */
-goog.something.z = function(longParameterName1, longParameterName2,
- longParameterName3) {
- return longParameterName1 + longParameterName2 + longParameterName3;
-};
-
-if (opt_rootTagName) {
- doc.appendChild(doc.createNode(3,
- opt_rootTagName,
- opt_namespaceUri || ''));
-}
-
-
-/**
- * For a while this errored because the function call parens were overriding
- * the other opening paren.
- */
-goog.something.q = function() {
- goog.something.x(a.getStartNode(),
- a.getStartOffset(), a.getEndNode(), a.getEndOffset());
-};
-
-function doSomething() {
- var titleElement = goog.something(x, // UNUSED_LOCAL_VARIABLE
- y);
-}
-
-switch (x) {
- case 10:
- y = 100;
- break;
-
- // This should be allowed.
- case 20:
- if (y) {
- z = 0;
- }
- break;
-
- // This should be allowed,
- // even with mutliple lines.
- case 30:
- if (y) {
- z = 0;
- }
- break;
-
- case SadThatYouSwitch
- .onSomethingLikeThis:
- z = 10;
-
- case 40:
- z = 20;
-
- default:
- break;
-}
-
-// Description of if case.
-if (x) {
-
-// Description of else case should be allowed at this indent.
-// Multiple lines is ok.
-} else {
-
-}
-
-
-/** @inheritDoc */
-goog.editor.SeamlessField.prototype.setupMutationEventHandlersGecko =
- function() {
- var x = 10;
- x++;
-};
-
-
-// Regression test for '.' at the end confusing the indentation checker if it is
-// not considered to be part of the identifier.
-/** @inheritDoc */
-goog.editor.SeamlessField.prototype.
- setupMutationEventHandlersGecko = function() {
- // -2: LINE_ENDS_WITH_DOT
- var x = 10;
- x++;
-};
-
-var someReallyReallyLongVariableName =
- y ? /veryVeryVeryVeryVeryVeryVeryVeryLongRegex1/gi :
- /slightlyLessLongRegex2/gi;
-
-var somethingOrOther = z ?
- a :
- b;
-
-var z = x ? y :
- 'bar';
-
-var z = x ?
- y :
- a;
-
-var z = z ?
- a ? b : c :
- d ? e : f;
-
-var z = z ?
- a ? b :
- c :
- d ? e : f;
-
-var z = z ?
- a ?
- b :
- c :
- d ? e : f;
-
-var z = z ?
- a ? b : c :
- d ? e :
- f ? g : h;
-
-var z = z ?
- a +
- i ?
- b +
- j : c :
- d ? e :
- f ? g : h;
-
-
-if (x) {
- var block =
- // some comment
- // and some more comment
- (e.keyCode == goog.events.KeyCodes.TAB && !this.dispatchBeforeTab_(e)) ||
- // #2: to block a Firefox-specific bug where Macs try to navigate
- // back a page when you hit command+left arrow or comamnd-right arrow.
- // See https://bugzilla.mozilla.org/show_bug.cgi?id=341886
- // get Firefox to fix this.
- (goog.userAgent.GECKO && e.metaKey &&
- (e.keyCode == goog.events.KeyCodes.LEFT ||
- e.keyCode == goog.events.KeyCodes.RIGHT));
-}
-
-if (x) {
-}
-
-var somethingElse = {
- HAS_W3C_RANGES: goog.userAgent.GECKO || goog.userAgent.WEBKIT ||
- goog.userAgent.OPERA,
-
- // A reasonably placed comment.
- SOME_KEY: goog.userAgent.IE
-};
-
-var x = {
- ySomethingReallyReallyLong:
- 'foo',
- z: 'bar'
-};
-
-// Some bad indentation.
-
- var a = 10; // WRONG_INDENTATION
-var b = 10,
- c = 12; // WRONG_INDENTATION
-x = x +
- 10; // WRONG_INDENTATION
-if (x == 14) {
- x = 15; // WRONG_INDENTATION
- x = 16; // WRONG_INDENTATION
-}
-
-var longFunctionName = function(opt_element) {
- return opt_element ?
- new z(q(opt_element)) : 100;
- // -1: WRONG_INDENTATION
-};
-
-longFunctionName(a, b, c,
- d, e, f); // WRONG_INDENTATION
-longFunctionName(a, b,
- c, // WRONG_INDENTATION
- d); // WRONG_INDENTATION
-
-x = a ? b :
- c; // WRONG_INDENTATION
-y = a ?
- b : c; // WRONG_INDENTATION
-
-switch (x) {
- case 10:
- break; // WRONG_INDENTATION
- case 20: // WRONG_INDENTATION
- break;
-default: // WRONG_INDENTATION
- break;
-}
-
-while (true) {
- x = 10; // WRONG_INDENTATION
- break; // WRONG_INDENTATION
-}
-
-function foo() {
- return entryUrlTemplate
- .replace(
- '${authorResourceId}',
- this.sanitizer_.sanitize(authorResourceId));
-}
-
-return [new x(
- 10)];
-return [
- new x(10)];
-
-return [new x(
- 10)]; // WRONG_INDENTATION
-return [new x(
- 10)]; // WRONG_INDENTATION
-
-return {x: y(
- z)};
-return {
- x: y(z)
-};
-
-return {x: y(
- z)}; // WRONG_INDENTATION
-return {x: y(
- z)}; // WRONG_INDENTATION
-
-return /** @type {Window} */ (x(
-'javascript:"' + encodeURI(loadingMessage) + '"')); // WRONG_INDENTATION
-
-x = {
- y: function() {}
-};
-
-x = {
- y: foo,
- z: bar +
- baz // WRONG_INDENTATION
-};
-
-x({
- a: b
-},
-10);
-
-z = function(arr, f, val, opt_obj) {
- x(arr, function(val, index) {
- rval = f.call(opt_obj, rval, val, index, arr);
- });
-};
-
-var xyz = [100,
- 200,
- 300];
-
-var def = [100,
- 200]; // WRONG_INDENTATION
-
-var ghi = [100,
- 200]; // WRONG_INDENTATION
-
-var abcdefg = ('a' +
- 'b');
-
-var x9 = z('7: ' +
-x(x)); // WRONG_INDENTATION
-
-function abc() {
- var z = d('div', // UNUSED_LOCAL_VARIABLE
- {
- a: 'b'
- });
-}
-
-abcdefg('p', {x: 10},
- 'Para 1');
-
-function bar1() {
- return 3 +
- 4; // WRONG_INDENTATION
-}
-
-function bar2() {
- return 3 + // WRONG_INDENTATION
- 4; // WRONG_INDENTATION
-}
-
-function bar3() {
- return 3 + // WRONG_INDENTATION
- 4;
-}
-
-// Regression test for unfiled bug. Wrongly going into implied block after else
-// when there was an explicit block (was an else if) caused false positive
-// indentation errors.
-if (true) {
-} else if (doc.foo(
- doc.getBar(baz))) {
- var x = 3;
-}
-
-// Regression tests for function indent + 4.
-// (The first example is from the styleguide.)
-if (veryLongFunctionNameA(
- veryLongArgumentName) ||
- veryLongFunctionNameB(
- veryLongArgumentName)) {
- veryLongFunctionNameC(veryLongFunctionNameD(
- veryLongFunctioNameE(
- veryLongFunctionNameF)));
-}
-
-if (outer(middle(
- inner(first)))) {}
-if (outer(middle(
- inner(second)),
- outer_second)) {}
-if (nested.outer(
- first)) {}
-if (nested.outer(nested.middle(
- first))) {}
-if (nested
- .outer(nested.middle(
- first))) {}
-if (nested.outer(first
- .middle(
- second),
- third)) {}
-
-// goog.scope should not increase indentation.
-goog.scope(function() {
-var x = 5;
-while (x > 0) {
- --x;
-}
-}); // goog.scope
-
-
-goog.scope(function() { // EXTRA_GOOG_SCOPE_USAGE
-// +1: UNUSED_LOCAL_VARIABLE
- var x = 5; // WRONG_INDENTATION
-}); // goog.scope
-
-goog.scope(function() { // EXTRA_GOOG_SCOPE_USAGE
-var x = 5; // UNUSED_LOCAL_VARIABLE
-}); // MISSING_END_OF_SCOPE_COMMENT
-
-goog.scope(function() { // EXTRA_GOOG_SCOPE_USAGE
-var x = 5; // UNUSED_LOCAL_VARIABLE
-}); // malformed goog.scope comment // MALFORMED_END_OF_SCOPE_COMMENT
diff --git a/tools/closure_linter/closure_linter/testdata/interface.js b/tools/closure_linter/closure_linter/testdata/interface.js
deleted file mode 100644
index 7daeee3ca8..0000000000
--- a/tools/closure_linter/closure_linter/testdata/interface.js
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Test file for interfaces.
- * @author robbyw@google.com (Robert Walker)
- */
-
-goog.provide('sample.BadInterface');
-goog.provide('sample.GoodInterface');
-
-
-
-/**
- * Sample interface to demonstrate correct style.
- * @interface
- */
-sample.GoodInterface = function() {
-};
-
-
-/**
- * Legal methods can take parameters and have a return type.
- * @param {string} param1 First parameter.
- * @param {Object} param2 Second parameter.
- * @return {number} Some return value.
- */
-sample.GoodInterface.prototype.legalMethod = function(param1, param2) {
-};
-
-
-/**
- * Legal methods can also take no parameters and return nothing.
- */
-sample.GoodInterface.prototype.legalMethod2 = function() {
- // Comments should be allowed.
-};
-
-
-/**
- * Legal methods can also be omitted, even with params and return values.
- * @param {string} param1 First parameter.
- * @param {Object} param2 Second parameter.
- * @return {number} Some return value.
- */
-sample.GoodInterface.prototype.legalMethod3;
-
-
-/**
- * Legal methods can also be set to abstract, even with params and return
- * values.
- * @param {string} param1 First parameter.
- * @param {Object} param2 Second parameter.
- * @return {number} Some return value.
- */
-sample.GoodInterface.prototype.legalMethod4 = goog.abstractMethod;
-
-
-
-/**
- * Sample interface to demonstrate style errors.
- * @param {string} a This is illegal.
- * @interface
- */
-sample.BadInterface = function(a) { // INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS
- this.x = a; // INTERFACE_METHOD_CANNOT_HAVE_CODE
-};
-
-
-/**
- * It is illegal to include code in an interface method.
- * @param {string} param1 First parameter.
- * @param {Object} param2 Second parameter.
- * @return {number} Some return value.
- */
-sample.BadInterface.prototype.illegalMethod = function(param1, param2) {
- return 10; // INTERFACE_METHOD_CANNOT_HAVE_CODE
-};
diff --git a/tools/closure_linter/closure_linter/testdata/jsdoc.js b/tools/closure_linter/closure_linter/testdata/jsdoc.js
deleted file mode 100644
index d62fd3c5fc..0000000000
--- a/tools/closure_linter/closure_linter/testdata/jsdoc.js
+++ /dev/null
@@ -1,1455 +0,0 @@
-// Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Errors related to JsDoc.
- *
- * @author robbyw@google.com (Robby Walker)
- *
- * @author robbyw@google.com (Robby Walker) // EXTRA_SPACE, EXTRA_SPACE
- * @author robbyw@google.com(Robby Walker) // MISSING_SPACE
- *
- * @author robbyw@google.com () // INVALID_AUTHOR_TAG_DESCRIPTION
- * @author robbyw@google.com // INVALID_AUTHOR_TAG_DESCRIPTION
- *
- * @owner ajp@google.com (Andy Perelson)
- * @badtag // INVALID_JSDOC_TAG
- * @customtag This tag is passed as a flag in full_test.py
- * @requires anotherCustomTagPassedInFromFullTestThatShouldAllowASingleWordLongerThan80Lines
- * @requires firstWord, secondWordWhichShouldMakeThisLineTooLongSinceThereIsAFirstWord
- * @wizmodule
- * @wizModule // INVALID_JSDOC_TAG
- */
-// -4: LINE_TOO_LONG
-
-goog.provide('MyClass');
-goog.provide('goog.NumberLike');
-goog.provide('goog.math.Vec2.sum');
-
-goog.require('goog.array');
-goog.require('goog.color');
-goog.require('goog.dom.Range');
-goog.require('goog.math.Matrix');
-goog.require('goog.math.Vec2');
-
-
-/**
- * Test the "no compilation should be done after annotation processing" tag.
- * @nocompile
- */
-
-
-/**
- * @returns // INVALID_JSDOC_TAG
- * @params // INVALID_JSDOC_TAG
- * @defines // INVALID_JSDOC_TAG
- * @nginject // INVALID_JSDOC_TAG
- * @wizAction // INVALID_JSDOC_TAG
- */
-function badTags() {
-}
-
-
-// +4: MISSING_JSDOC_TAG_DESCRIPTION
-/**
- * @license Description.
- * @preserve Good tag, missing punctuation
- * @preserve
- */
-function goodTags() {
- /** @preserveTry */
- try {
- hexColor = goog.color.parse(value).hex;
- } catch (ext) {
- // Regression test. The preserveTry tag was incorrectly causing a warning
- // for a missing period at the end of tag description. Parsed as
- // flag: preserve, description: Try.
- }
-}
-
-
-/**
- * Some documentation goes here.
- *
- * @param {Object} object Good docs.
- * @ngInject
- * @wizaction
- */
-function good(object) {
-}
-
-
-/**
- * Some documentation goes here.
- * @param {function(string, string) : string} f A function.
- */
-function setConcatFunc(f) {
-}
-
-
-/**
- * Some docs.
- */
-function missingParam(object) { // MISSING_PARAMETER_DOCUMENTATION
-}
-
-
-/**
- * @return {number} Hiya.
- * @override
- */
-function missingParamButInherit(object) {
- return 3;
-}
-
-
-/**
- * @inheritDoc
- */
-function missingParamButInherit(object) {
-}
-
-
-/**
- * @override
- */
-function missingParamButOverride(object) {
-}
-
-
-// +2: UNNECESSARY_BRACES_AROUND_INHERIT_DOC
-/**
- * {@inheritDoc}
- */
-function missingParamButInherit(object) {
-}
-
-
-/**
- * Some docs.
- *
- * @param {Object} object Docs.
- */
-function mismatchedParam(elem) { // WRONG_PARAMETER_DOCUMENTATION
- /** @param {number} otherElem */
- function nestedFunction(elem) { // WRONG_PARAMETER_DOCUMENTATION
- };
-}
-
-
-/**
- * @return {boolean} A boolean primitive.
- */
-function goodReturn() {
- return something;
-}
-
-
-/**
- * @return {some.long.type.that.will.make.the.description.start.on.next.line}
- * An object.
- */
-function anotherGoodReturn() {
- return something;
-}
-
-
-// +2: MISSING_JSDOC_TAG_TYPE
-/**
- * @return false.
- */
-function missingReturnType() {
- return something;
-}
-
-
-// +2: MISSING_SPACE
-/**
- * @return{type}
- */
-function missingSpaceOnReturnType() {
- return something;
-}
-
-
-// +2: MISSING_JSDOC_TAG_TYPE
-/**
- * @return
- */
-function missingReturnType() {
- return something;
-}
-
-class.missingDocs = function() { // MISSING_MEMBER_DOCUMENTATION
-};
-
-
-/**
- * No return doc needed.
- */
-function okMissingReturnDoc() {
- return;
-}
-
-
-// +2: UNNECESSARY_RETURN_DOCUMENTATION
-/**
- * @return {number} Unnecessary return doc.
- */
-function unnecessaryMissingReturnDoc() {
-}
-
-
-/**
- * The "suppress" causes the compiler to ignore the 'debugger' statement.
- * @suppress {checkDebuggerStatement}
- */
-function checkDebuggerStatementWithSuppress() {
- debugger;
-}
-
-
-/**
- * Return doc is present, but the function doesn't have a 'return' statement.
- * The "suppress" causes the compiler to ignore the error.
- * @suppress {missingReturn}
- * @return {string}
- */
-function unnecessaryMissingReturnDocWithSuppress() {
- if (false) {
- return '';
- } else {
- // Missing return statement in this branch.
- }
-}
-
-
-// +3: MISSING_JSDOC_TAG_TYPE
-// +2: UNNECESSARY_RETURN_DOCUMENTATION
-/**
- * @return
- */
-function unnecessaryMissingReturnNoType() {
-}
-
-
-/**
- * @return {undefined} Ok unnecessary return doc.
- */
-function okUnnecessaryMissingReturnDoc() {
-}
-
-
-/**
- * @return {*} Ok unnecessary return doc.
- */
-function okUnnecessaryMissingReturnDoc2() {
-}
-
-
-/**
- * @return {void} Ok unnecessary return doc.
- */
-function okUnnecessaryMissingReturnDoc3() {
-}
-
-
-/**
- * This function doesn't return anything, but it does contain the string return.
- */
-function makeSureReturnTokenizesRight() {
- fn(returnIsNotSomethingHappeningHere);
-}
-
-
-/**
- * @return {number|undefined} Ok unnecessary return doc.
- */
-function okUnnecessaryMissingReturnDoc3() {
-}
-
-
-/**
- * @return {number} Ok unnecessary return doc.
- */
-function okUnnecessaryReturnWithThrow() {
- throw 'foo';
-}
-
-
-/** @inheritDoc */
-function okNoReturnWithInheritDoc() {
- return 10;
-}
-
-
-/** @override */
-function okNoReturnWithOverride() {
- return 10;
-}
-
-
-/**
- * No return doc.
- */ // MISSING_RETURN_DOCUMENTATION
-function badMissingReturnDoc() {
- return 10;
-}
-
-
-
-/**
- * Constructor so we should not have a return jsdoc tag.
- * @constructor
- */
-function OkNoReturnWithConstructor() {
- return this;
-}
-
-
-/**
- * Type of array is known, so the cast is unnecessary.
- * @suppress {unnecessaryCasts}
- */
-function unnecessaryCastWithSuppress() {
- var numberArray = /** @type {!Array.<number>} */ ([]);
- /** @type {number} */ (goog.array.peek(numberArray));
-}
-
-
-
-/**
- * Make sure the 'unrestricted' annotation is accepted.
- * @constructor @unrestricted
- */
-function UnrestrictedClass() {}
-
-
-
-/**
- * Check definition of fields in constructors.
- * @constructor
- */
-function AConstructor() {
- /**
- * A field.
- * @type {string}
- * @private
- */
- this.isOk_ = 'ok';
-
- // +5: MISSING_PRIVATE
- /**
- * Another field.
- * @type {string}
- */
- this.isBad_ = 'missing private';
-
- /**
- * This is ok, but a little weird.
- * @type {number}
- * @private
- */
- var x = this.x_ = 10;
-
- // At first, this block mis-attributed the first typecast as a member doc,
- // and therefore expected it to contain @private.
- if (goog.math.Matrix.isValidArray(/** @type {Array} */ (m))) {
- this.array_ = goog.array.clone(/** @type {Array.<Array.<number>>} */ (m));
- }
-
- // Use the private and local variables we've defined so they don't generate a
- // warning.
- var y = [
- this.isOk_,
- this.isBad_,
- this.array_,
- this.x_,
- y,
- x
- ];
-}
-
-
-/**
- * @desc This message description is allowed.
- */
-var MSG_YADDA_YADDA_YADDA = 'A great message!';
-
-
-/**
- * @desc So is this one.
- * @hidden
- * @meaning Some unusual meaning.
- */
-x.y.z.MSG_YADDA_YADDA_YADDA = 'A great message!';
-
-
-/**
- * @desc But desc can only apply to messages.
- */
-var x = 10; // INVALID_USE_OF_DESC_TAG
-
-
-/**
- * Same with hidden.
- * @hidden
- */
-var x = 10; // INVALID_USE_OF_DESC_TAG
-
-
-/**
- * Same with meaning.
- * @meaning Some unusual meaning.
- */
-var x = 10; // INVALID_USE_OF_DESC_TAG
-
-
-// +9: MISSING_SPACE
-// +9: MISSING_JSDOC_TAG_TYPE
-// +10: OUT_OF_ORDER_JSDOC_TAG_TYPE
-// +10: MISSING_JSDOC_TAG_TYPE, MISSING_SPACE
-/**
- * Lots of problems in this documentation.
- *
- * @param {Object} q params b & d are missing descriptions.
- * @param {Object} a param d is missing a type (oh my).
- * @param {Object}b
- * @param d
- * @param {Object} x param desc.
- * @param z {type} Out of order type.
- * @param{} y Empty type and missing space.
- * @param {Object} omega mis-matched param.
- */
-function manyProblems(a, b, c, d, x, z, y, alpha) {
- // -1: MISSING_PARAMETER_DOCUMENTATION, EXTRA_PARAMETER_DOCUMENTATION
- // -2: WRONG_PARAMETER_DOCUMENTATION
-}
-
-
-/**
- * Good docs
- *
- * @param {really.really.really.really.really.really.really.long.type} good
- * My param description.
- * @param {really.really.really.really.really.really.really.really.long.type}
- * okay My param description.
- * @param
- * {really.really.really.really.really.really.really.really.really.really.long.type}
- * fine Wow that's a lot of wrapping.
- */
-function wrappedParams(good, okay, fine) {
-}
-
-
-// +4: MISSING_JSDOC_TAG_TYPE
-// +3: MISSING_JSDOC_PARAM_NAME
-/**
- * Really bad
- * @param
- */
-function reallyBadParam(a) { // MISSING_PARAMETER_DOCUMENTATION
-}
-
-
-/**
- * Some docs.
- *
- * @private
- */
-class.goodPrivate_ = function() {
-};
-
-
-/**
- * Some docs.
- */
-class.missingPrivate_ = function() { // MISSING_PRIVATE
-};
-
-
-/**
- * Some docs.
- *
- * @private
- */
-class.extraPrivate = function() { // EXTRA_PRIVATE
-};
-
-
-/**
- * Anything ending with two underscores is not treated as private.
- */
-class.__iterator__ = function() {
-};
-
-
-/**
- * Some docs.
- * @package
- */
-class.goodPackage = function() {
-};
-
-
-/**
- * Some docs.
- * @package
- */
-class.badPackage_ = function() { // MISSING_PRIVATE
-};
-
-
-/**
- * Some docs.
- * @protected
- */
-class.goodProtected = function() {
-};
-
-
-/**
- * Some docs.
- * @protected
- */
-class.badProtected_ = function() { // MISSING_PRIVATE
-};
-
-
-/**
- * Example of a legacy name.
- * @protected
- * @suppress {underscore}
- */
-class.dom_ = function() {
- /** @suppress {with} */
- with ({}) {}
-};
-
-
-/**
- * Legacy names must be protected.
- * @suppress {underscore}
- */
-class.dom_ = function() {
-};
-
-
-/**
- * Allow compound suppression.
- * @private
- */
-class.dom_ = function() {
- /** @suppress {visibility|with} */
- with ({}) {}
-};
-
-
-/**
- * Allow compound suppression.
- * @private
- */
-class.dom_ = function() {
- /** @suppress {visibility,with} */
- with ({}) {}
-};
-
-
-// +4: UNNECESSARY_SUPPRESS
-/**
- * Some docs.
- * @private
- * @suppress {underscore}
- */
-class.unnecessarySuppress_ = function() {
-};
-
-
-/**
- * Some docs.
- * @public
- */
-class.goodProtected = function() {
-};
-
-
-/**
- * Some docs.
- * @public
- */
-class.badProtected_ = function() { // MISSING_PRIVATE
-};
-
-
-/**
- * Example of a legacy name.
- * @public
- * @suppress {underscore}
- */
-class.dom_ = function() {
-};
-
-
-// +5: JSDOC_PREFER_QUESTION_TO_PIPE_NULL
-// +7: JSDOC_PREFER_QUESTION_TO_PIPE_NULL
-/**
- * Check JsDoc type annotations.
- * @param {Object?} good A good one.
- * @param {Object|null} bad A bad one.
- * @param {Object|Element?} ok1 This is acceptable.
- * @param {Object|Element|null} right The right way to do the above.
- * @param {null|Object} bad2 Another bad one.
- * @param {Object?|Element} ok2 Not good but acceptable.
- * @param {Array.<string|number>?} complicated A good one that was reported as
- * bad. See bug 1154506.
- */
-class.sampleFunction = function(good, bad, ok1, right, bad2, ok2,
- complicated) {
-};
-
-
-/**
- * @return {Object?} A good return.
- */
-class.goodReturn = function() {
- return something;
-};
-
-
-/** @type {Array.<Object|null>} // JSDOC_PREFER_QUESTION_TO_PIPE_NULL */
-class.badType;
-
-
-/**
- * For template types, the ?TYPE notation is not parsed correctly by the
- * compiler, so don't warn here.
- * @type {Array.<TYPE|null>}
- * @template TYPE
- */
-class.goodTemplateType;
-
-
-// As the syntax may look ambivalent: The function returns just null.
-/** @type {function():null|Object} */
-class.goodType;
-
-
-/** @type {function():(null|Object)} // JSDOC_PREFER_QUESTION_TO_PIPE_NULL */
-class.badType;
-
-
-// As the syntax may look ambivalent: The function returns just Object.
-/** @type {function():Object|null} // JSDOC_PREFER_QUESTION_TO_PIPE_NULL */
-class.badType;
-
-
-/** @type {(function():Object)|null} // JSDOC_PREFER_QUESTION_TO_PIPE_NULL */
-class.badType;
-
-
-/** @type {function(null,Object)} */
-class.goodType;
-
-
-/** @type {{a:null,b:Object}} */
-class.goodType;
-
-
-// +2: JSDOC_PREFER_QUESTION_TO_PIPE_NULL
-/**
- * @return {Object|null} A bad return.
- */
-class.badReturn = function() {
- return something;
-};
-
-
-/**
- * @return {Object|Element?} An not so pretty return, but acceptable.
- */
-class.uglyReturn = function() {
- return something;
-};
-
-
-/**
- * @return {Object|Element|null} The right way to do the above.
- */
-class.okReturn = function() {
- return something;
-};
-
-
-// +2: MISSING_SPACE, MISSING_SPACE
-/**
- * @return{mytype}Something.
- */
-class.missingSpacesReturn = function() {
- return something;
-};
-
-
-/**
- * A good type in the new notation.
- * @type {Object?}
- */
-class.otherGoodType = null;
-
-
-/**
- * A complex type that should allow both ? and |.
- * @bug 1570763
- * @type {function(number?, Object|undefined):void}
- */
-class.complexGoodType = goog.nullFunction;
-
-
-/**
- * A complex bad type that we can catch, though there are many we can't.
- * Its acceptable.
- * @type {Array.<string>|string?}
- */
-class.complexBadType = x || 'foo';
-
-
-/**
- * A strange good type that caught a bad version of type checking from
- * other.js, so I added it here too just because.
- * @type {number|string|Object|Element|Array.<Object>|null}
- */
-class.aStrangeGoodType = null;
-
-
-/**
- * A type that includes spaces.
- * @type {function() : void}
- */
-class.assignedFunc = goog.nullFunction;
-
-
-// +4: JSDOC_PREFER_QUESTION_TO_PIPE_NULL
-// +3: MISSING_BRACES_AROUND_TYPE
-/**
- * A bad type.
- * @type Object|null
- */
-class.badType = null;
-
-
-// +3: JSDOC_PREFER_QUESTION_TO_PIPE_NULL
-/**
- * A bad type, in the new notation.
- * @type {Object|null}
- */
-class.badType = null;
-
-
-/**
- * An not pretty type, but acceptable.
- * @type {Object|Element?}
- */
-class.uglyType = null;
-
-
-/**
- * The right way to do the above.
- * @type {Object|Element|null}
- */
-class.okType = null;
-
-
-/**
- * @type {boolean} Is it okay to have a description here?
- */
-class.maybeOkType = null;
-
-
-/**
- * A property whose type will be infered from the right hand side since it is
- * constant.
- * @const
- */
-class.okWithoutType = 'stout';
-
-
-/**
- * Const property without type and text in next line. b/10407058.
- * @const
- * TODO(user): Nothing to do, just for scenario.
- */
-class.okWithoutType = 'string';
-
-
-/**
- * Another constant property, but we should use the type tag if the type can't
- * be inferred.
- * @type {string}
- * @const
- */
-class.useTypeWithConst = functionWithUntypedReturnValue();
-
-
-/**
- * Another constant property, but using type with const if the type can't
- * be inferred.
- * @const {string}
- */
-class.useTypeWithConst = functionWithUntypedReturnValue();
-
-
-// +3: MISSING_BRACES_AROUND_TYPE
-/**
- * Constant property without proper type.
- * @const string
- */
-class.useImproperTypeWithConst = functionWithUntypedReturnValue();
-
-
-/**
- * @define {boolean} A define.
- */
-var COMPILED = false;
-
-
-// +2: MISSING_JSDOC_TAG_TYPE
-/**
- * @define A define without type info.
- */
-var UNTYPED_DEFINE = false;
-
-
-// +4: MISSING_JSDOC_TAG_DESCRIPTION, MISSING_SPACE
-/**
- * A define without a description and missing a space.
- *
- * @define{boolean}
- */
-var UNDESCRIBED_DEFINE = false;
-
-
-// Test where to check for docs.
-/**
- * Docs for member object.
- * @type {Object}
- */
-x.objectContainingFunctionNeedsNoDocs = {
- x: function(params, params) {}
-};
-
-if (test) {
- x.functionInIfBlockNeedsDocs = function() { // MISSING_MEMBER_DOCUMENTATION
- x.functionInFunctionNeedsNoDocs = function() {
- };
- };
-} else {
- x.functionInElseBlockNeedsDocs = function() { // MISSING_MEMBER_DOCUMENTATION
- x.functionInFunctionNeedsNoDocs = function() {
- };
- };
-}
-
-
-/**
- * Regression test.
- * @param {goog.math.Vec2} a
- * @param {goog.math.Vec2} b
- * @return {goog.math.Vec2} The sum vector.
- */
-goog.math.Vec2.sum = function(a, b) {
- return new goog.math.Vec2(a.x + b.x, a.y + b.y);
-};
-
-
-// +6: JSDOC_MISSING_OPTIONAL_PREFIX
-// +8: JSDOC_MISSING_OPTIONAL_PREFIX
-// +8: JSDOC_MISSING_OPTIONAL_TYPE
-// +8: JSDOC_MISSING_OPTIONAL_TYPE
-/**
- * Optional parameters test.
- * @param {number=} numberOptional The name should be prefixed by opt_.
- * @param {function(number=)} funcOk Ok.
- * @param {number} numberOk The type is ok.
- * @param {function(string=):number=} funcOpt Param name need opt_ prefix.
- * @param {string} opt_stringMissing The type miss an ending =.
- * @param {function(number=)} opt_func The type miss an ending =.
- * @param {string=} opt_ok The type is ok.
- * @param {function(string=):number=} opt_funcOk Type is ok.
- */
-goog.math.Vec2.aFunction = function(
- numberOptional, funcOk, numberOk, funcOpt, opt_stringMissing, opt_func,
- opt_ok, opt_funcOk) {
-};
-
-
-/**
- * Good documentation!
- *
- * @override
- */
-class.goodOverrideDocs = function() {
-};
-
-
-/**
- * Test that flags embedded in docs don't trigger ends with invalid character
- * error.
- * @bug 2983692
- * @deprecated Please use the {@code @hidden} annotation.
- */
-function goodEndChar() {
-}
-
-
-/**
- * Test that previous case handles unballanced doc tags.
- * @param {boolean} a Whether we should honor '{' characters in the string.
- */
-function goodEndChar2(a) {
-}
-
-
-/**
- * Regression test for braces in description invalidly being matched as types.
- * This caused a false error for missing punctuation because the bad token
- * caused us to incorrectly calculate the full description.
- * @bug 1406513
- * @return {Object|undefined} A hash containing the attributes for the found url
- * as in: {url: "page1.html", title: "First page"}
- * or undefined if no match was found.
- */
-x.z.a = function() {
- return a;
-};
-
-
-/**
- * @bug 1492606 HTML parse error for JSDoc descriptions grashed gjslint.
- * @param {string} description a long email or common name, e.g.,
- * "John Doe <john.doe@gmail.com>" or "Birthdays Calendar"
- */
-function calendar(description) {
-}
-
-
-/**
- * @bug 1492606 HTML parse error for JSDoc descriptions grashed gjslint.
- * @param {string} description a long email or common name, e.g.,
- * "John Doe <john.doe@gmail.com>" or <b>"Birthdays Calendar".</b>
- */
-function calendar(description) {
-}
-
-
-/**
- * Regression test for invoked functions, this code used to report missing
- * param and missing return errors.
- * @type {number}
- */
-x.y.z = (function(x) {
- return x + 1;
-})();
-
-
-/**
- * Test for invoked function as part of an expression. It should not return
- * an error for missing docs for x.
- */
-goog.currentTime = something.Else || (function(x) {
- //...
-})(10);
-
-
-/**
- * @type boolean //MISSING_BRACES_AROUND_TYPE
- */
-foo.bar = true;
-
-
-/**
- * @enum {null //MISSING_BRACES_AROUND_TYPE
- */
-bar.foo = null;
-
-
-/**
- * @extends Object} //MISSING_BRACES_AROUND_TYPE
- */ // JSDOC_DOES_NOT_PARSE
-bar.baz = x;
-
-
-/** @inheritDoc */ // INVALID_INHERIT_DOC_PRIVATE
-x.privateFoo_ = function() { // MISSING_PRIVATE
-};
-
-
-/**
- * Does bar.
- * @override // INVALID_OVERRIDE_PRIVATE
- */
-x.privateBar_ = function() { // MISSING_PRIVATE
-};
-
-
-/**
- * Inherits private baz_ method (evil, wrong behavior, but we have no choice).
- * @override
- * @suppress {accessControls}
- */
-x.prototype.privateBaz_ = function() {
-};
-
-
-/**
- * This looks like a function but it's a function call.
- * @type {number}
- */
-test.x = function() {
- return 3;
-}();
-
-
-/**
- * Invalid reference to this.
- */ // MISSING_JSDOC_TAG_THIS
-test.x.y = function() {
- var x = this.x; // UNUSED_LOCAL_VARIABLE
-};
-
-
-/**
- * Invalid write to this.
- */ // MISSING_JSDOC_TAG_THIS
-test.x.y = function() {
- this.x = 10;
-};
-
-
-/**
- * Invalid standalone this.
- */ // MISSING_JSDOC_TAG_THIS
-test.x.y = function() {
- some.func.call(this);
-};
-
-
-/**
- * Invalid reference to this.
- */ // MISSING_JSDOC_TAG_THIS
-function a() {
- var x = this.x; // UNUSED_LOCAL_VARIABLE
-}
-
-
-/**
- * Invalid write to this.
- */ // MISSING_JSDOC_TAG_THIS
-function b() {
- this.x = 10;
-}
-
-
-/**
- * Invalid standalone this.
- */ // MISSING_JSDOC_TAG_THIS
-function c() {
- some.func.call(this);
-}
-
-
-/**
- * Ok to do any in a prototype.
- */
-test.prototype.x = function() {
- var x = this.x;
- this.y = x;
- some.func.call(this);
-};
-
-
-/**
- * Ok to do any in a prototype that ends in a hex-like number.
- */
-test.prototype.getColorX2 = function() {
- var x = this.x;
- this.y = x;
- some.func.call(this);
-};
-
-
-/**
- * Ok to do any in a function with documented this usage.
- * @this {test.x.y} Object bound to this via goog.bind.
- */
-function a() {
- var x = this.x;
- this.y = x;
- some.func.call(this);
-}
-
-
-/**
- * Ok to do any in a function with documented this usage.
- * @this {test.x.y} Object bound to this via goog.bind.
- */
-test.x.y = function() {
- var x = this.x;
- this.y = x;
- some.func.call(this);
-};
-
-
-/**
- * Regression test for bug 1220601. Wrapped function declarations shouldn't
- * cause need for an (at)this flag, which I can't write out or it would get
- * parsed as being here.
- * @param {Event} e The event.
- */
-detroit.commands.ChangeOwnerCommand
- .prototype.handleDocumentStoreCompleteEvent = function(e) {
- this.x = e.target;
-};
-
-
-
-/**
- * Ok to do any in a constructor.
- * @constructor
- */
-test.x.y = function() {
- this.y = x;
- var x = this.y; // UNUSED_LOCAL_VARIABLE
- some.func.call(this);
-};
-
-// Test that anonymous function doesn't throw an error.
-window.setTimeout(function() {
- var x = 10; // UNUSED_LOCAL_VARIABLE
-}, 0);
-
-
-/**
- * @bug 1234567
- */
-function testGoodBug() {
-}
-
-
-/**
- * @bug 1234567 Descriptions are allowed.
- */
-function testGoodBugWithDescription() {
-}
-
-
-// +2: NO_BUG_NUMBER_AFTER_BUG_TAG
-/**
- * @bug Wrong
- */
-function testBadBugNumber() {
-}
-
-
-// +2: NO_BUG_NUMBER_AFTER_BUG_TAG
-/**
- * @bug Wrong
- */
-function testMissingBugNumber() {
-}
-
-
-
-/**
- * @interface
- */
-function testInterface() {
-}
-
-
-
-/**
- * @implements {testInterface}
- * @constructor
- */
-function testImplements() {
-}
-
-
-/**
- * Function that has an export jsdoc tag.
- * @export
- */
-function testExport() {
-}
-
-
-/**
- * Declare and doc this member here, without assigning to it.
- * @bug 1473402
- * @type {number}
- */
-x.declareOnly;
-
-if (!someCondition) {
- x.declareOnly = 10;
-}
-
-
-/**
- * JsDoc describing array x.y as an array of function(arg). The missing
- * semicolon caused the original bug.
- * @type {Array.<Function>}
- */
-x.y = [] // MISSING_SEMICOLON
-x.y[0] = function(arg) {};
-x.y[1] = function(arg) {};
-
-
-/**
- * Regression test for unfiled bug where descriptions didn't properly exclude
- * the star-slash that must end doc comments.
- * @return {Function} A factory method.
- */
-x.y.foo = function() {
- /** @return {goog.dom.Range} A range. */
- return function() {
- return goog.dom.Range.createRangeFromNothing();
- };
-};
-
-
-// +4: INCORRECT_SUPPRESS_SYNTAX
-// +4: INVALID_SUPPRESS_TYPE
-/**
- * Docs...
- * @suppress
- * @suppress {fake}
- */
-class.x = 10;
-
-
-/**
- * These docs are OK. They used to not parse the identifier due to the use of
- * array indices.
- * @bug 1640846
- * @private
- */
-window['goog']['forms']['Validation'].prototype.form_ = null;
-
-
-/**
- * Check JsDoc multiline type annotations.
- * @param {string|
- * number} multiline description.
- */
-function testMultiline(multiline) {
-}
-
-
-/**
- * Check JsDoc nosideeffects annotations.
- * @nosideeffects
- */
-function testNoSideEffects() {
-}
-
-
-/**
- * @enum {google.visualization.DateFormat|google.visualization.NumberFormat|
- * google.visualization.PatternFormat}
- */
-MultiLineEnumTypeTest = {
- FOO: 1,
- BAR: 2,
- BAZ: 3
-};
-
-
-/**
- * @enum {google.visualization.DateFormat|google.visualization.NumberFormat|google.visualization.PatternFormat}
- */
-AllowedLongLineEnum = {
- CAT: 1,
- DOG: 2,
- RAT: 3
-};
-
-
-/**
- * Typeless enum test
- * @enum
- */
-TypelessEnumTest = {
- OK: 0,
- CHECKING: 1,
- DOWNLOADING: 2,
- FAILURE: 3
-};
-
-// Regression test for bug 1880803, shouldn't need to document assignments to
-// prototype.
-x.prototype = {};
-
-y
- .prototype = {};
-
-x.y
- .z.prototype = {};
-
-x.myprototype = {}; // MISSING_MEMBER_DOCUMENTATION
-
-x.prototype.y = 5; // MISSING_MEMBER_DOCUMENTATION
-
-x.prototype
- .y.z = {}; // MISSING_MEMBER_DOCUMENTATION
-
-
-/** @typedef {(string|number)} */
-goog.NumberLike;
-
-
-/**
- * Something from the html5 externs file.
- * @type {string}
- * @implicitCast
- */
-CanvasRenderingContext2D.prototype.fillStyle;
-
-
-
-/**
- * Regression test.
- * @bug 2994247
- * @inheritDoc
- * @extends {Bar}
- * @constructor
- * @private
- */
-Foo_ = function() {
-};
-
-
-/**
- * @param {function(this:T,...)} fn The function.
- * @param {T} obj The object.
- * @template T
- */
-function bind(fn, obj) {
-}
-
-
-
-/**
- * @constructor
- * @classTemplate T
- */
-function MyClass() {
-}
-
-
-foo(/** @lends {T} */ ({foo: 'bar'}));
-
-
-
-/**
- * @param {*} x .
- * @constructor
- * @struct
- */
-function StructMaker(x) { this.x = x; }
-
-var structObjLit = /** @struct */ { x: 123 };
-
-
-
-/**
- * @param {*} x .
- * @constructor
- * @dict
- */
-function DictMaker(x) { this['x'] = x; }
-
-var dictObjLit = /** @dict */ { x: 123 };
-
-
-/**
- * @idGenerator
- * @param {string} x .
- * @return {string} .
- */
-function makeId(x) {
- return '';
-}
-
-
-/**
- * @consistentIdGenerator
- * @param {string} x .
- * @return {string} .
- */
-function makeConsistentId(x) {
- return '';
-}
-
-
-/**
- * @stableIdGenerator
- * @param {string} x .
- * @return {string} .
- */
-function makeStableId(x) {
- return '';
-}
-
-
-/**
- * Test to make sure defining object with object literal doest not produce
- * doc warning for @this.
- * Regression test for b/4073735.
- */
-var Foo = function();
-Foo.prototype = {
- /**
- * @return {number} Never.
- */
- method: function() {
- return this.method();
- }
-};
-
-/** Regression tests for annotation types with spaces. */
-
-
-/** @enum {goog.events.Event<string, number>} */
-var Bar;
-
-
-
-/**
- * @constructor
- * @implements {goog.dom.Range<string, number>}
- */
-var Foo = function() {
- /** @final {goog.events.Event<string, number>} */
- this.bar = null;
-};
-
-/* Regression tests for not ending block comments. Keep at end of file! **/
-/**
- * When there are multiple asteriks. In the failure case we would get an
- * error that the file ended mid comment, with no end comment token***/
-/**
- * Was a separate bug 2950646 when the closing bit was on it's own line
- * because the ending star was being put into a different token type: DOC_PREFIX
- * rather than DOC_COMMENT.
- **/
diff --git a/tools/closure_linter/closure_linter/testdata/limited_doc_checks.js b/tools/closure_linter/closure_linter/testdata/limited_doc_checks.js
deleted file mode 100644
index 701cce9892..0000000000
--- a/tools/closure_linter/closure_linter/testdata/limited_doc_checks.js
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Test file for limited doc checks.
- */
-
-
-/**
- * Don't require documentation of parameters.
- * @param {boolean}
- * @param {boolean} c
- * @param {boolean} d No check for punctuation
- * @bug 3259564
- */
-x.y = function(a, b, c, d) {
- return a;
-};
diff --git a/tools/closure_linter/closure_linter/testdata/minimal.js b/tools/closure_linter/closure_linter/testdata/minimal.js
deleted file mode 100644
index 6dbe7333f4..0000000000
--- a/tools/closure_linter/closure_linter/testdata/minimal.js
+++ /dev/null
@@ -1 +0,0 @@
-function f(x) {} // Regression test for old parsing bug.
diff --git a/tools/closure_linter/closure_linter/testdata/not_strict.js b/tools/closure_linter/closure_linter/testdata/not_strict.js
deleted file mode 100644
index f8ede3dc1d..0000000000
--- a/tools/closure_linter/closure_linter/testdata/not_strict.js
+++ /dev/null
@@ -1,42 +0,0 @@
-/** // _WRONG_BLANK_LINE_COUNT
- * @fileoverview This file has errors that could trigger both in strict and non
- * strict mode. The errors beginning with _ should not be triggered when strict
- * flag is false.
- * // -1: _INVALID_AUTHOR_TAG_DESCRIPTION
- */
-
-/** // _WRONG_BLANK_LINE_COUNT
- * A constructor with 1 line above it (BAD).
- * // +1: MISSING_JSDOC_TAG_TYPE
- * @param a A parameter.
- * @privtae // INVALID_JSDOC_TAG
- * @constructor
- */
-function someFunction(a) {
- /** +1: _MISSING_BRACES_AROUND_TYPE
- * @type number
- */
- this.a = 0;
- someReallyReallyReallyReallyReallyReallyReallyReallyLongiName = quiteBigValue; // LINE_TOO_LONG
- if (this.a == 0) {
- // _WRONG_INDENTATION
- return // MISSING_SEMICOLON
- }
-}; // ILLEGAL_SEMICOLON_AFTER_FUNCTION
-
-
-// +1: _UNNECESSARY_BRACES_AROUND_INHERIT_DOC
-/** {@inheritDoc} */
-function someFunction.prototype.toString() {
-}
-
-
-/**
- * When not strict, there is no problem with optional markers in types.
- * @param {string=} a A string.
- * @param {string} aOk An other string.
- * @param {number} opt_b An optional number.
- * @param {number=} opt_bOk An other optional number.
- */
-someFunction.optionalParams = function(a, aOk, opt_b, opt_bOk) {
-};
diff --git a/tools/closure_linter/closure_linter/testdata/other.js b/tools/closure_linter/closure_linter/testdata/other.js
deleted file mode 100644
index 1e424ce3f2..0000000000
--- a/tools/closure_linter/closure_linter/testdata/other.js
+++ /dev/null
@@ -1,459 +0,0 @@
-// Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Miscellaneous style errors.
- *
- * @author robbyw@google.com (Robby Walker)
- */
-
-goog.provide('goog.dom');
-
-goog.require('goog.events.EventHandler');
-
-var this_is_a_really_long_line = 100000000000000000000000000000000000000000000000; // LINE_TOO_LONG
-
-// Declaration in multiple lines.
-// Regression test for b/3009648
-var
- a,
- b = 10;
-
-// http://this.comment.should.be.allowed/because/it/is/a/URL/that/can't/be/broken/up
-
-
-/**
- * Types are allowed to be long even though they contain spaces.
- * @type {function(ReallyReallyReallyReallyLongType, AnotherExtremelyLongType) : LongReturnType}
- */
-x.z = 1000;
-
-
-/**
- * Params are also allowed to be long even though they contain spaces.
- * @param {function(ReallyReallyReallyReallyLongType, AnotherExtremelyLongType) : LongReturnType} fn
- * The function to call.
- */
-x.z = function(fn) {
-};
-
-
-/**
- * Visibility tags are allowed to have type, therefore they allowed to be long.
- * @private {function(ReallyReallyReallyReallyLongType, AnotherExtremelyLongType) : LongReturnType}
- */
-x.z_ = 1000;
-
-
-/**
- * Visibility tags are allowed to have type, therefore they allowed to be long.
- * @public {function(ReallyReallyReallyReallyLongType, AnotherExtremelyLongType) : LongReturnType}
- */
-x.z = 1000;
-
-
-/**
- * Visibility tags are allowed to have type, therefore they allowed to be long.
- * @protected {function(ReallyReallyReallyReallyLongType, AnotherExtremelyLongType) : LongReturnType}
- */
-x.z = 1000;
-
-
-/**
- * Visibility tags are allowed to have type, therefore they allowed to be long.
- * @package {function(ReallyReallyReallyReallyLongType,AnotherExtremelyLongType):LongReturnType}
- */
-x.z = 1000;
-
-// +2: LINE_TOO_LONG
-var x =
- a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.tooLongEvenThoughNoSpaces;
-
-// +1: LINE_TOO_LONG
-getSomeExtremelyLongNamedFunctionWowThisNameIsSoLongItIsAlmostUnbelievable().dispose();
-
-
-/**
- * @param {number|string|Object|Element|Array.<Object>|null} aReallyReallyReallyStrangeParameter
- * @param {number|string|Object|Element|goog.a.really.really.really.really.really.really.really.really.long.Type|null} shouldThisParameterWrap
- * @return {goog.a.really.really.really.really.really.really.really.really.long.Type}
- */
-x.y = function(aReallyReallyReallyStrangeParameter, shouldThisParameterWrap) {
- return something;
-};
-
-
-/**
- * @type {goog.a.really.really.really.really.really.really.really.really.long.Type?}
- */
-x.y = null;
-
-function doesEndWithSemicolon() {
-}; // ILLEGAL_SEMICOLON_AFTER_FUNCTION
-
-function doesNotEndWithSemicolon() {
-}
-
-doesEndWithSemicolon = function() {
- // +1: UNUSED_LOCAL_VARIABLE
- var shouldEndWithSemicolon = function() {
- } // MISSING_SEMICOLON_AFTER_FUNCTION
-};
-
-doesNotEndWithSemicolon = function() {
-} // MISSING_SEMICOLON_AFTER_FUNCTION
-
-doesEndWithSemicolon['100'] = function() {
-};
-
-doesNotEndWithSemicolon['100'] = function() {
-} // MISSING_SEMICOLON_AFTER_FUNCTION
-
-if (some_flag) {
- function doesEndWithSemicolon() {
- }; // ILLEGAL_SEMICOLON_AFTER_FUNCTION
-
- function doesNotEndWithSemicolon() {
- }
-
- doesEndWithSemicolon = function() {
- };
-
- doesNotEndWithSemicolon = function() {
- } // MISSING_SEMICOLON_AFTER_FUNCTION
-}
-
-// No semicolon for expressions that are immediately called.
-var immediatelyCalledFunctionReturnValue = function() {
-}();
-
-
-/**
- * Regression test for function expressions treating semicolons wrong.
- * @bug 1044052
- */
-goog.now = Date.now || function() {
- //...
-};
-
-
-/**
- * Regression test for function expressions treating semicolons wrong.
- * @bug 1044052
- */
-goog.now = Date.now || function() {
- //...
-} // MISSING_SEMICOLON_AFTER_FUNCTION
-
-
-/**
- * Function defined in ternary operator
- * @bug 1413743
- * @param {string} id The ID of the element.
- * @return {Element} The matching element.
- */
-goog.dom.$ = document.getElementById ?
- function(id) {
- return document.getElementById(id);
- } :
- function(id) {
- return document.all[id];
- };
-
-
-/**
- * Test function in object literal needs no semicolon.
- * @type {Object}
- */
-x.y = {
- /**
- * @return {number} Doc the inner function too.
- */
- a: function() {
- return 10;
- }
-};
-
-// Semicolon required at end of object literal.
-var throwObjectLiteral = function() {
- throw {
- x: 0,
- y: 1
- } // MISSING_SEMICOLON
-};
-
-var testRegex = /(\([^\)]*\))|(\[[^\]]*\])|({[^}]*})|(&lt;[^&]*&gt;)/g;
-var testRegex2 = /abc/gimsx;
-
-var x = 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100
- + 20; // LINE_STARTS_WITH_OPERATOR
-
-var x = 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 +
- -20; // unary minus is ok
-
-var x = z++
- + 20; // LINE_STARTS_WITH_OPERATOR
-
-var x = z. // LINE_ENDS_WITH_DOT
- y();
-
-// Regression test: This line was incorrectly not reporting an error
-var marginHeight = x.layout.getSpacing_(elem, 'marginTop')
- + x.layout.getSpacing_(elem, 'marginBottom');
-// -1: LINE_STARTS_WITH_OPERATOR
-
-// Regression test: This line was correctly reporting an error
-x.layout.setHeight(elem, totalHeight - paddingHeight - borderHeight
- - marginHeight); // LINE_STARTS_WITH_OPERATOR
-
-// Regression test: This line was incorrectly reporting spacing and binary
-// operator errors
-if (i == index) {
-}
-++i;
-
-var twoSemicolons = 10;; // REDUNDANT_SEMICOLON
-
-if (i == index) {
-} else; // REDUNDANT_SEMICOLON
-i++;
-
-do; // REDUNDANT_SEMICOLON
-{
-} while (i == index);
-
-twoSemicolons = 10;
-// A more interesting example of two semicolons
- ; // EXTRA_SPACE, WRONG_INDENTATION, REDUNDANT_SEMICOLON
-
-
-/** @bug 1598895 */
-for (;;) {
- // Do nothing.
-}
-
-for (var x = 0, foo = blah(), bar = {};; x = update(x)) {
- // A ridiculous case that should probably never happen, but I suppose is
- // valid.
-}
-
-var x = "allow'd double quoted string";
-var x = "unnecessary double quotes string"; // UNNECESSARY_DOUBLE_QUOTED_STRING
-// +1: MULTI_LINE_STRING, UNNECESSARY_DOUBLE_QUOTED_STRING,
-var x = "multi-line unnecessary double quoted \
- string.";
-
-
-// Regression test: incorrectly reported missing doc for variable used in global
-// scope.
-/**
- * Whether the "Your browser isn't fully supported..." warning should be shown
- * to the user; defaults to false.
- * @type {boolean}
- * @private
- */
-init.browserWarning_ = false;
-
-init.browserWarning_ = true;
-
-if (someCondition) {
- delete this.foo_[bar];
-}
-
-// Commas at the end of literals used to be forbidden.
-x = [1, 2, 3,];
-x = [1, 2, 3, /* A comment */];
-x = [
- 1,
- 2,
- 3,
-];
-x = {
- a: 1,
-};
-
-// Make sure we don't screw up typing for Lvalues and think b:c is a type value
-// pair.
-x = a ? b : c = 34;
-x = a ? b:c; // MISSING_SPACE, MISSING_SPACE
-x = (a ? b:c = 34); // MISSING_SPACE, MISSING_SPACE
-
-if (x) {
- x += 10;
-}; // REDUNDANT_SEMICOLON
-
-
-/**
- * Bad assignment of array to prototype.
- * @type {Array}
- */
-x.prototype.badArray = []; // ILLEGAL_PROTOTYPE_MEMBER_VALUE
-
-
-/**
- * Bad assignment of object to prototype.
- * @type {Object}
- */
-x.prototype.badObject = {}; // ILLEGAL_PROTOTYPE_MEMBER_VALUE
-
-
-/**
- * Bad assignment of class instance to prototype.
- * @type {goog.events.EventHandler}
- */
-x.prototype.badInstance = new goog.events.EventHandler();
-// -1: ILLEGAL_PROTOTYPE_MEMBER_VALUE
-
-// Check that some basic structures cause no errors.
-x = function() {
- try {
- } finally {
- y = 10;
- }
-};
-
-switch (x) {
- case 10:
- break;
- case 20:
- // Fallthrough.
- case 30:
- break;
- case 40: {
- break;
- }
- default:
- break;
-}
-
-do {
- x += 10;
-} while (x < 100);
-
-do {
- x += 10;
-} while (x < 100) // MISSING_SEMICOLON
-
-// Missing semicolon checks.
-x = 10 // MISSING_SEMICOLON
-x = someOtherVariable // MISSING_SEMICOLON
-x = fnCall() // MISSING_SEMICOLON
-x = {a: 10, b: 20} // MISSING_SEMICOLON
-x = [10, 20, 30] // MISSING_SEMICOLON
-x = (1 + 2) // MISSING_SEMICOLON
-x = {
- a: [
- 10, 20, (30 +
- 40)
- ]
-} // MISSING_SEMICOLON
-x = a
- .b
- .c(). // LINE_ENDS_WITH_DOT
- d;
-
-// Test that blocks without braces don't generate incorrect semicolon and
-// indentation errors. TODO: consider disallowing blocks without braces.
-if (x)
- y = 10;
-
-if (x)
- y = 8 // MISSING_SEMICOLON
-
-// Regression test for bug 2973408, bad missing semi-colon error when else
-// is not followed by an opening brace.
-if (x)
- y = 3;
-else
- z = 4;
-
-// We used to erroneously report a missing semicolon error.
-if (x)
-{
-}
-
-while (x)
- y = 10;
-
-for (x = 0; x < 10; x++)
- y += 10;
- z += 10; // WRONG_INDENTATION
-
-var x = 100 // MISSING_SEMICOLON
-
-// Also regression test for bug 2973407 Parse error on nested ternary statments.
-foo = bar ? baz ? 1 : 2 : 3 // MISSING_SEMICOLON
-foo = bar ? 1 : baz ? 2 : 3;
-bar ? 1 : baz ? 2 : bat ? 3 : 4;
-bar ? 1 : baz ? bat ? 3 : 4 : baq ? 5 : 6;
-foo = bar ? 1 : 2;
-
-foo = {
- str: bar ? baz ? blah ? 1 : 2 : 3 : 4
-} // MISSING_SEMICOLON
-
-
-// Regression tests for bug 2969408 GJsLint doesn't like labeled statements.
-mainLoop: while (!y) {
-}
-
-myLabel1: myLabel2: var x;
-
-for (var i = 0; i < n; i++) {
- myLabel3:
- while (true) {
- break myLabel3;
- }
-}
-
-myLabelA : myLabelB : x > y ? 0 : 1; // EXTRA_SPACE, EXTRA_SPACE, EXTRA_SPACE
-
-// Regression test for bug 4269466.
-var a = new Scheme({default: 0});
-switch (foo) {
- default:
- var a = new Scheme({default: 0});
- break;
-}
-
-
-/** @private Some text is allowed after tag */
-x.y_ = function() {
-};
-
-
-/** @private Some text is allowed after tag but not the long oneeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee.*/ // LINE_TOO_LONG
-x.y_ = function() {
-};
-
-
-/** @private {number} Some text is allowed after tag */
-x.z_ = 200;
-
-
-/** @private {number} Some text is allowed after tag but not the long oneeeeeeeeeeeeeeee. */ // LINE_TOO_LONG
-x.z_ = 200;
-
-// Regression tests for b/16298424.
-var z = function() {}.bind();
-window.alert(function() {}.bind());
-function() {
-}.bind();
-var y = function() {
-}.bind();
-var y = function() {
- }
- .bind();
-
-/* comment not closed // FILE_MISSING_NEWLINE, FILE_IN_BLOCK \ No newline at end of file
diff --git a/tools/closure_linter/closure_linter/testdata/provide_blank.js b/tools/closure_linter/closure_linter/testdata/provide_blank.js
deleted file mode 100644
index a4e0716419..0000000000
--- a/tools/closure_linter/closure_linter/testdata/provide_blank.js
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Checks that missing provides are reported at the first require
- * when there are no other provides in the file.
- */
-
-goog.require('dummy.package.ClassName'); // MISSING_GOOG_PROVIDE
-
-
-
-/**
- * @constructor
- */
-dummy.Something = function() {};
-
-var x = new dummy.package.ClassName();
diff --git a/tools/closure_linter/closure_linter/testdata/provide_extra.js b/tools/closure_linter/closure_linter/testdata/provide_extra.js
deleted file mode 100644
index 3370950e3b..0000000000
--- a/tools/closure_linter/closure_linter/testdata/provide_extra.js
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed 2to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Checks for extra goog.provides.
- *
- */
-
-goog.provide(''); // EXTRA_GOOG_PROVIDE
-
-goog.provide('dummy.AnotherThingTest'); // ok since mentioned in setTestOnly
-goog.provide('dummy.AnotherTrulyLongNamespaceToMakeItExceedEightyCharactersThingTest');
-
-goog.provide('dummy.Something');
-goog.provide('dummy.Something'); // EXTRA_GOOG_PROVIDE
-goog.provide('dummy.SomethingElse'); // EXTRA_GOOG_PROVIDE
-
-goog.provide('dummy.YetAnotherThingTest'); // EXTRA_GOOG_PROVIDE
-
-goog.setTestOnly('dummy.AnotherThingTest');
-goog.setTestOnly('dummy.AnotherTrulyLongNamespaceToMakeItExceedEightyCharactersThingTest');
-
-
-
-/**
- * @constructor
- */
-dummy.Something = function() {};
diff --git a/tools/closure_linter/closure_linter/testdata/provide_missing.js b/tools/closure_linter/closure_linter/testdata/provide_missing.js
deleted file mode 100644
index 42de489671..0000000000
--- a/tools/closure_linter/closure_linter/testdata/provide_missing.js
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2011 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// We are missing a provide of goog.something.Else.
-// -15: MISSING_GOOG_PROVIDE
-
-/**
- * @fileoverview Tests missing provides and the usage of the missing provide
- * suppression annotation.
- *
- */
-
-
-
-/**
- * Constructor for Something.
- * @constructor
- * @suppress {missingProvide}
- */
-goog.something.Something = function() {};
-
-
-
-/**
- * Constructor for Else. We should get an error about providing this, but not
- * about the constructor for Something.
- * @constructor
- */
-goog.something.Else = function() {};
diff --git a/tools/closure_linter/closure_linter/testdata/require_alias.js b/tools/closure_linter/closure_linter/testdata/require_alias.js
deleted file mode 100644
index 804b2ed142..0000000000
--- a/tools/closure_linter/closure_linter/testdata/require_alias.js
+++ /dev/null
@@ -1,14 +0,0 @@
-// We are missing a require of goog.sample.UsedType
-goog.provide('goog.something.Else'); // +1: MISSING_GOOG_REQUIRE
-
-
-goog.scope(function() {
-var unused = goog.events.unused; // UNUSED_LOCAL_VARIABLE
-var used = goog.events.used; // ALIAS_STMT_NEEDS_GOOG_REQUIRE
-var UsedType = goog.sample.UsedType;
-var other = goog.sample.other;
-
-
-/** @type {used.UsedAlias|other.UsedAlias} */
-goog.something.Else = UsedType.create();
-}); // goog.scope
diff --git a/tools/closure_linter/closure_linter/testdata/require_all_caps.js b/tools/closure_linter/closure_linter/testdata/require_all_caps.js
deleted file mode 100644
index 49344f2c18..0000000000
--- a/tools/closure_linter/closure_linter/testdata/require_all_caps.js
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview A should come before B.
- *
- */
-
-goog.provide('XX'); // GOOG_PROVIDES_NOT_ALPHABETIZED
-goog.provide('ZZ');
-goog.provide('YY');
-
-goog.require('dummy.AA'); // GOOG_REQUIRES_NOT_ALPHABETIZED
-goog.require('dummy.CC');
-goog.require('dummy.BB');
-
-dummy.AA();
-dummy.CC();
-dummy.BB();
diff --git a/tools/closure_linter/closure_linter/testdata/require_blank.js b/tools/closure_linter/closure_linter/testdata/require_blank.js
deleted file mode 100644
index 060781ce7c..0000000000
--- a/tools/closure_linter/closure_linter/testdata/require_blank.js
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Checks that missing requires are reported just after the last
- * provide when there are no other requires in the file.
- */
-
-goog.provide('dummy.Something'); // +1: MISSING_GOOG_REQUIRE
-
-
-
-/**
- * @constructor
- */
-dummy.Something = function() {};
-
-var x = new dummy.package.ClassName();
diff --git a/tools/closure_linter/closure_linter/testdata/require_extra.js b/tools/closure_linter/closure_linter/testdata/require_extra.js
deleted file mode 100644
index 3ee39c73a8..0000000000
--- a/tools/closure_linter/closure_linter/testdata/require_extra.js
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Checks for extra goog.requires.
- *
- */
-
-goog.require(''); // EXTRA_GOOG_REQUIRE
-goog.require('dummy.Aa');
-goog.require('dummy.Aa.CONSTANT'); // EXTRA_GOOG_REQUIRE
-goog.require('dummy.Aa.Enum'); // EXTRA_GOOG_REQUIRE
-goog.require('dummy.Bb');
-goog.require('dummy.Ff'); // EXTRA_GOOG_REQUIRE
-goog.require('dummy.Gg'); // EXTRA_GOOG_REQUIRE
-goog.require('dummy.cc');
-goog.require('dummy.cc'); // EXTRA_GOOG_REQUIRE
-goog.require('dummy.hh'); // EXTRA_GOOG_REQUIRE
-
-new dummy.Aa();
-dummy.Bb.someMethod();
-dummy.cc();
-var x = dummy.Aa.Enum.VALUE;
-var y = dummy.Aa.CONSTANT;
diff --git a/tools/closure_linter/closure_linter/testdata/require_function.js b/tools/closure_linter/closure_linter/testdata/require_function.js
deleted file mode 100644
index 532bb67103..0000000000
--- a/tools/closure_linter/closure_linter/testdata/require_function.js
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Description of this file.
- */
-
-goog.require('goog.mobile.paging.getPage');
-
-
-goog.mobile.paging.getPage();
diff --git a/tools/closure_linter/closure_linter/testdata/require_function_missing.js b/tools/closure_linter/closure_linter/testdata/require_function_missing.js
deleted file mode 100644
index 33bec21eab..0000000000
--- a/tools/closure_linter/closure_linter/testdata/require_function_missing.js
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// -14: MISSING_GOOG_REQUIRE
-
-/**
- * @fileoverview Description of this file.
- */
-
-
-
-goog.mobile.paging.getPage();
-goog.mobile.paging.getOtherPage();
diff --git a/tools/closure_linter/closure_linter/testdata/require_function_through_both.js b/tools/closure_linter/closure_linter/testdata/require_function_through_both.js
deleted file mode 100644
index d9525ec4e1..0000000000
--- a/tools/closure_linter/closure_linter/testdata/require_function_through_both.js
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Description of this file.
- */
-
-goog.require('goog.mobile.paging');
-goog.require('goog.mobile.paging.getPage');
-
-
-goog.mobile.paging.getPage();
diff --git a/tools/closure_linter/closure_linter/testdata/require_function_through_namespace.js b/tools/closure_linter/closure_linter/testdata/require_function_through_namespace.js
deleted file mode 100644
index 55628fccfb..0000000000
--- a/tools/closure_linter/closure_linter/testdata/require_function_through_namespace.js
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Description of this file.
- */
-
-goog.require('goog.mobile.paging');
-
-
-goog.mobile.paging.getPage();
diff --git a/tools/closure_linter/closure_linter/testdata/require_interface.js b/tools/closure_linter/closure_linter/testdata/require_interface.js
deleted file mode 100644
index d6e83024b4..0000000000
--- a/tools/closure_linter/closure_linter/testdata/require_interface.js
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Contains a test to verify that interfaces implemented in a file
- * are goog.require'd.
- *
- */
-
-// We're missing a goog.require of goog.something.SomeInterface.
-goog.provide('goog.something.SomeClass'); // +1: MISSING_GOOG_REQUIRE
-
-
-
-/**
- * Constructor for SomeClass.
- * @constructor
- * @implements {goog.something.SomeInterface}
- */
-goog.something.SomeClass = function() {};
diff --git a/tools/closure_linter/closure_linter/testdata/require_interface_alias.js b/tools/closure_linter/closure_linter/testdata/require_interface_alias.js
deleted file mode 100644
index c71b29ca3c..0000000000
--- a/tools/closure_linter/closure_linter/testdata/require_interface_alias.js
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Contains a test to verify that aliased interfaces
- * are goog.require'd.
- */
-
-// We're missing a goog.require of goog.something.SomeInterface.
-goog.provide('goog.something.SomeClass'); // +1: MISSING_GOOG_REQUIRE
-
-goog.scope(function() {
-var something = goog.something;
-
-
-
-/**
- * Constructor for SomeClass.
- * @constructor
- * @implements {something.SomeInterface}
- */
-something.SomeClass = function() {};
-}); // goog.scope
diff --git a/tools/closure_linter/closure_linter/testdata/require_interface_base.js b/tools/closure_linter/closure_linter/testdata/require_interface_base.js
deleted file mode 100644
index c8bb1f67a5..0000000000
--- a/tools/closure_linter/closure_linter/testdata/require_interface_base.js
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2011 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Contains a test to verify that parent interfaces
- * implemented are goog.require'd.
- *
- */
-
-// We're missing a goog.require of goog.something.BaseInterface.
-goog.provide('goog.something.SomeInterface'); // +1: MISSING_GOOG_REQUIRE
-
-
-
-/**
- * Constructor for SomeInterface.
- * @interface
- * @extends {goog.something.BaseInterface}
- */
-goog.something.SomeInterface = function() {};
diff --git a/tools/closure_linter/closure_linter/testdata/require_lower_case.js b/tools/closure_linter/closure_linter/testdata/require_lower_case.js
deleted file mode 100644
index c1fff4a3ed..0000000000
--- a/tools/closure_linter/closure_linter/testdata/require_lower_case.js
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview The B should come before the b.
- *
- */
-
-goog.provide('x'); // GOOG_PROVIDES_NOT_ALPHABETIZED
-goog.provide('X');
-goog.provide('Y');
-
-goog.require('dummy.bb'); // GOOG_REQUIRES_NOT_ALPHABETIZED
-goog.require('dummy.Bb');
-goog.require('dummy.Cc');
-
-var x = dummy.bb.a();
-var y = dummy.Bb.a();
-var z = dummy.Cc.a();
diff --git a/tools/closure_linter/closure_linter/testdata/require_missing.js b/tools/closure_linter/closure_linter/testdata/require_missing.js
deleted file mode 100644
index 3539c9489d..0000000000
--- a/tools/closure_linter/closure_linter/testdata/require_missing.js
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2011 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Tests missing requires around the usage of the require
- * suppression annotation.
- *
- */
-
-// We are missing a require of goog.foo.
-goog.provide('goog.something.Else'); // +1: MISSING_GOOG_REQUIRE
-
-
-
-/**
- * Constructor for Else.
- * @constructor
- */
-goog.something.Else = function() {
- /** @suppress {missingRequire} */
- this.control.createConstructorMock(
- goog.foo.bar, 'Baz');
-
- // Previous suppress should only be scoped to that statement.
- this.control.createConstructorMock(
- goog.foo.bar, 'Baz');
-
- this.control.invoke(goog.foo.bar, 'Test');
-};
diff --git a/tools/closure_linter/closure_linter/testdata/require_numeric.js b/tools/closure_linter/closure_linter/testdata/require_numeric.js
deleted file mode 100644
index 29d8377afa..0000000000
--- a/tools/closure_linter/closure_linter/testdata/require_numeric.js
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Numbers should come before letters.
- *
- */
-
-goog.provide('xa'); // GOOG_PROVIDES_NOT_ALPHABETIZED
-goog.provide('x1');
-goog.provide('xb');
-
-goog.require('dummy.aa'); // GOOG_REQUIRES_NOT_ALPHABETIZED
-goog.require('dummy.a1');
-goog.require('dummy.ab');
-
-dummy.aa.a;
-dummy.a1.a;
-dummy.ab.a;
diff --git a/tools/closure_linter/closure_linter/testdata/require_provide_blank.js b/tools/closure_linter/closure_linter/testdata/require_provide_blank.js
deleted file mode 100644
index 0e0c188ca5..0000000000
--- a/tools/closure_linter/closure_linter/testdata/require_provide_blank.js
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// -14: MISSING_GOOG_PROVIDE
-// -15: MISSING_GOOG_REQUIRE
-
-/**
- * @fileoverview Checks that missing requires and provides are reported at the
- * top of the file when there are no existing goog.requires or provides in the
- * file.
- */
-
-
-
-/**
- * @constructor
- */
-dummy.Something = function() {};
-
-var x = new dummy.package.ClassName();
diff --git a/tools/closure_linter/closure_linter/testdata/require_provide_missing.js b/tools/closure_linter/closure_linter/testdata/require_provide_missing.js
deleted file mode 100644
index a56f4d0007..0000000000
--- a/tools/closure_linter/closure_linter/testdata/require_provide_missing.js
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview The same code as require_provide_ok, but missing a provide
- * and a require call.
- *
- */
-
-goog.provide('goog.something'); // +1: MISSING_GOOG_PROVIDE
-// Missing provide of goog.something.Else and goog.something.SomeTypeDef.
-
-goog.require('goog.Class');
-goog.require('goog.package'); // +1: MISSING_GOOG_REQUIRE
-// Missing requires of goog.Class.Enum and goog.otherThing.Class.Enum.
-
-
-var x = new goog.Class();
-goog.package.staticFunction();
-
-var y = goog.Class.Enum.VALUE;
-
-
-/**
- * @typedef {string}
- */
-goog.something.SomeTypeDef;
-
-
-/**
- * Private variable.
- * @type {number}
- * @private
- */
-goog.something.private_ = 10;
-
-
-/**
- * Use private variables defined in this file so they don't cause a warning.
- */
-goog.something.usePrivateVariables = function() {
- var x = [
- goog.something.private_,
- x
- ];
-};
-
-
-/**
- * Static function.
- */
-goog.something.staticFunction = function() {
-};
-
-
-
-/**
- * Constructor for Else.
- * @constructor
- */
-goog.something.Else = function() {
- // Bug 1801608: Provide goog.otherThing.Class.Enum isn't missing.
- var enum = goog.otherThing.Class.Enum;
- goog.otherThing.Class.Enum = enum;
-};
diff --git a/tools/closure_linter/closure_linter/testdata/require_provide_ok.js b/tools/closure_linter/closure_linter/testdata/require_provide_ok.js
deleted file mode 100644
index 01ddafe490..0000000000
--- a/tools/closure_linter/closure_linter/testdata/require_provide_ok.js
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview There is nothing wrong w/ this javascript.
- *
- */
-goog.module('goog.super.long.DependencyNameThatForcesMethodDefinitionToSpanMultipleLinesFooBar');
-goog.provide('goog.something');
-goog.provide('goog.something.Else');
-goog.provide('goog.something.Else.Enum');
-/** @suppress {extraProvide} */
-goog.provide('goog.something.Extra');
-goog.provide('goog.something.SomeTypeDef');
-goog.provide('goog.somethingelse.someMethod');
-goog.provide('goog.super.long.DependencyNameThatForcesTheLineToBeOverEightyCharacters');
-goog.provide('notInClosurizedNamespacesSoNotExtra');
-
-goog.require('dummy.foo');
-goog.require('dummy.foo.someSpecificallyRequiredMethod');
-goog.require('goog.Class');
-/** @suppress {extraRequire} */
-goog.require('goog.extra.require');
-goog.require('goog.package');
-goog.require('goog.package.ClassName');
-goog.require('goog.package.OtherClassName');
-/** @suppress {extraRequire} Legacy dependency on enum */
-goog.require('goog.package.OuterClassName.InnerClassName');
-goog.require('goog.super.long.DependencyNameThatForcesMethodDefinitionToSpanMultipleLinesFooBar');
-goog.require('goog.super.long.DependencyNameThatForcesTheLineToBeOverEightyCharacters2');
-goog.require('goog.super.long.DependencyNameThatForcesTheLineToBeOverEightyCharacters3');
-goog.require('notInClosurizedNamespacesSoNotExtra');
-
-dummy.foo.someMethod();
-dummy.foo.someSpecificallyRequiredMethod();
-
-
-// Regression test for bug 3473189. Both of these 'goog.provide' tokens should
-// be completely ignored by alphabetization checks.
-if (typeof goog != 'undefined' && typeof goog.provide == 'function') {
- goog.provide('goog.something.SomethingElse');
-}
-
-
-var x = new goog.Class();
-goog.package.staticFunction();
-
-var y = goog.Class.Enum.VALUE;
-
-
-// This should not trigger a goog.require.
-var somethingPrivate = goog.somethingPrivate.PrivateEnum_.VALUE;
-
-
-/**
- * This method is provided directly, instead of its namespace.
- */
-goog.somethingelse.someMethod = function() {};
-
-
-/**
- * Defining a private property on a required namespace should not trigger a
- * provide of that namespace. Yes, people actually do this.
- * @private
- */
-goog.Class.privateProperty_ = 1;
-
-
-/**
- * @typedef {string}
- */
-goog.something.SomeTypeDef;
-
-
-/**
- * @typedef {string}
- * @private
- */
-goog.something.SomePrivateTypeDef_;
-
-
-/**
- * Some variable that is declared but not initialized.
- * @type {string|undefined}
- * @private
- */
-goog.something.somePrivateVariable_;
-
-
-/**
- * Private variable.
- * @type {number}
- * @private
- */
-goog.something.private_ = 10;
-
-
-/**
- * Use private variables defined in this file so they don't cause a warning.
- */
-goog.something.usePrivateVariables = function() {
- var x = [
- goog.something.private_,
- goog.Class.privateProperty_,
- x
- ];
-};
-
-
-
-/**
- * A really long class name to provide and usage of a really long class name to
- * be required.
- * @constructor
- */
-goog.super.long.DependencyNameThatForcesTheLineToBeOverEightyCharacters =
- function() {
- var x = new goog.super.long. // LINE_ENDS_WITH_DOT
- DependencyNameThatForcesTheLineToBeOverEightyCharacters2();
- var x = new goog.super.long
- .DependencyNameThatForcesTheLineToBeOverEightyCharacters3();
- // Use x to avoid a warning.
- var x = [x];
-};
-
-
-/**
- * A really long class name to to force a method definition to be greater than
- * 80 lines. We should be grabbing the whole identifier regardless of how many
- * lines it is on.
- */
-goog.super.long
- .DependencyNameThatForcesMethodDefinitionToSpanMultipleLinesFooBar
- .prototype.someMethod = function() {
-};
-
-
-/**
- * Static function.
- */
-goog.something.staticFunction = function() {
- // Tests that namespace usages are identified using 'namespace.' not just
- // 'namespace'.
- googSomething.property;
- dummySomething.property;
- goog.package.ClassName // A comment in between the identifier pieces.
- .IDENTIFIER_SPLIT_OVER_MULTIPLE_LINES;
- goog.package.OtherClassName.property = 1;
-
- // Test case where inner class needs to be required explicitly.
- new goog.package.OuterClassName.InnerClassName();
-
- // Don't just use goog.bar for missing namespace, hard coded to never require
- // goog since it's never provided.
- control.createConstructorMock(
- /** @suppress {missingRequire} */ goog.foo.bar, 'Baz');
-
- goog.require('goog.shouldBeIgnored');
-};
-
-
-
-/**
- * Constructor for Else.
- * @constructor
- */
-goog.something.Else = function() {
- /** @suppress {missingRequire} */
- this.control.createConstructorMock(goog.foo.bar, 'Baz');
-};
-
-
-/**
- * Enum attached to Else. Should not need to be provided explicitly, but
- * should not generate an extra require warning either.
- * @enum {number}
- */
-goog.something.Else.Enum = {
- 'key': 1
-};
-
-
-/**
- * Sample of a typedef. This should not need a provide as it is an inner
- * element like an enum.
- *
- * @typedef {{val1: string, val2: boolean, val3: number}}
- */
-goog.something.Else.Typedef;
-
-
-
-/**
- * Constructor for SomethingElse.
- * @constructor
- */
-goog.something.SomethingElse = function() {};
-
-
-/**
- * @suppress {missingProvide}
- */
-goog.suppress.someMethod = function() {};
diff --git a/tools/closure_linter/closure_linter/testdata/semicolon_missing.js b/tools/closure_linter/closure_linter/testdata/semicolon_missing.js
deleted file mode 100644
index 5601db8dec..0000000000
--- a/tools/closure_linter/closure_linter/testdata/semicolon_missing.js
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * @fileoverview This is for regression testing of scenario where semicolon is
- * missing at EOF. b/10801776.
- */
-
-goog.provide('dummy.foo.DATA');
-
-/**
- * @type {string}
- * @const
- *
- * For repeating the bug blank comment line above this is needed.
- */
-
-// +3: MISSING_SEMICOLON
-dummy.foo.DATA =
- 'SFSDFSDdfgdfgdftreterterterterterggsdfsrrwerwerwsfwerwerwere55454ss' +
- 'SFSDFSDdfgdfgdftretertertertertergg'
diff --git a/tools/closure_linter/closure_linter/testdata/simple.html b/tools/closure_linter/closure_linter/testdata/simple.html
deleted file mode 100644
index 42ebab97d0..0000000000
--- a/tools/closure_linter/closure_linter/testdata/simple.html
+++ /dev/null
@@ -1,33 +0,0 @@
-<!DOCTYPE html>
-<html lang="en" dir="ltr">
-<head>
- <!--
- Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS-IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
- <title>GJsLint HTML JavaScript extraction tests</title>
- <script src="some_file.js">Ignore this</script>
- <script>
- x =10; // MISSING_SPACE
- </script>
-<body>
- <script type="text/javascript">
- // +1: EXTRA_SPACE
- x = 10; </script>
- <script type="text/javascript">
- // Not extra space.
- x = 10;
- </script>
-</body>
-</html>
diff --git a/tools/closure_linter/closure_linter/testdata/spaces.js b/tools/closure_linter/closure_linter/testdata/spaces.js
deleted file mode 100644
index 85a36e53bf..0000000000
--- a/tools/closure_linter/closure_linter/testdata/spaces.js
+++ /dev/null
@@ -1,354 +0,0 @@
-// Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Errors relating to whitespace.
- *
- * @author robbyw@google.com (Robby Walker)
- */
-
-if(needs_space) { // MISSING_SPACE
-}
-
-if ( too_much_space) { // EXTRA_SPACE
-}
-
-if (different_extra_space ) { // EXTRA_SPACE
-}
-
-switch(needs_space) { // MISSING_SPACE
-}
-
-var x = 'if(not_an_error)';
-
-var y = afjkljkl + ajklasdflj + ajkadfjkasdfklj + aadskfasdjklf + jkasdfa + (
- kasdfkjlasdfjkl / jklasdfjklasdfjkl);
-
-x = 5+ 8; // MISSING_SPACE
-x = 5 +8; // MISSING_SPACE
-x= 5; // MISSING_SPACE
-x = 6; // EXTRA_SPACE
-x = 7; // EXTRA_SPACE
-x = 6 + 2; // EXTRA_SPACE
-x += 10;
-
-throw Error('Selector not supported yet('+ opt_selector + ')'); // MISSING_SPACE
-throw Error('Selector not supported yet(' +opt_selector + ')'); // MISSING_SPACE
-throw Error(
- 'Selector not supported yet' +
- '(' +(opt_selector ? 'foo' : 'bar') + ')'); // MISSING_SPACE
-
-x++;
-x ++; // EXTRA_SPACE
-x++ ; // EXTRA_SPACE
-y = a + ++b;
-for (var i = 0; i < 10; ++i) {
-}
-
-// We omit the update section of the for loop to test that a space is allowed
-// in this special case.
-for (var part; part = parts.shift(); ) {
-}
-
-if (x == y) {
-}
-
-x = 10; // no error here
-x = -1;
-x++;
-++x;
-
-x = bool ? -1 : -1;
-
-x = {a: 10};
-x = {a:10}; // MISSING_SPACE
-
-x = !!y;
-
-x >>= 0;
-x <<= 10;
-
-x[100] = 10;
-x[ 100] = 10; // EXTRA_SPACE
-x[100 ] = 10; // EXTRA_SPACE
-x [100] = 10; // EXTRA_SPACE
-x[10]= 5; // MISSING_SPACE
-var x = [];
-x = [[]];
-x = [[x]];
-x = [[[x, y]]];
-var craziness = ([1, 2, 3])[1];
-var crazinessError = ([1, 2, 3]) [1]; // EXTRA_SPACE
-var multiArray = x[1][2][3][4];
-var multiArrayError = x[1] [2][3][4]; // EXTRA_SPACE
-
-array[aReallyLooooooooooooooooooooooooooooongIndex1][
- anotherVeryLoooooooooooooooooooooooooooooooooooongIndex
-] = 10;
-
-if (x) {
- array[aReallyLooooooooooooooooooooooooooooongIndex1][
- anotherVeryLoooooooooooooooooooooooooooooooooooongIndex
- ] = 10;
-}
-
-
-/**
- * Docs.
- * @param {Number} x desc.
- * @return {boolean} Some boolean value.
- */
-function functionName( x) { // EXTRA_SPACE
- return !!x;
-}
-
-
-/**
- * Docs.
- * @param {Number} x desc.
- */
-function functionName(x ) { // EXTRA_SPACE
- return;
-}
-
-
-/**
- * Docs.
- * @param {Number} x desc.
- * @param {Number} y desc.
- */
-function functionName(x,y) { // MISSING_SPACE
-}
-
-
-/**
- * Docs.
- * @param {Number} x desc.
- * @param {Number} y desc.
- */
-function functionName(x, y) {
-}
-
-
-/**
- * Docs.
- */
-function functionName() { // EXTRA_SPACE
-}
-
-
-/**
- * Docs.
- */
-function functionName(){ // MISSING_SPACE
-}
-
-functionName (); // EXTRA_SPACE
-
-
-/**
- * Docs.
- */
-function functionName () { // EXTRA_SPACE
-}
-
-
-/**
- * Docs.
- */
-var foo = function () { // EXTRA_SPACE
-};
-
-
-
-/**
- * Missing a newline.
- * @constructor
- * @extends {a.b.c}
- */
-x.y.z = function() {
-};goog.inherits(x.y.z, a.b.c); // MISSING_LINE
-
-
-
-/**
- * Extra space.
- * @constructor
- * @extends {a.b.c}
- */
-x.y.z = function() {
-};
- goog.inherits(x.y.z, a.b.c); // WRONG_INDENTATION
-
-
-
-/**
- * Extra blank line.
- * @constructor
- * @extends {a.b.c}
- */
-x.y.z = function() {
-};
-
-goog.inherits(x.y.z, a.b.c); // -1: EXTRA_LINE
-
-
-
-/**
- * Perfect!
- * @constructor
- * @extends {a.b.c}
- */
-x.y.z = function() {
-};
-goog.inherits(x.y.z, a.b.c);
-
-if (flag) {
- /**
- * Also ok!
- * @constructor
- * @extends {a.b.c}
- */
- x.y.z = function() {
- };
- goog.inherits(x.y.z, a.b.c);
-}
-
-
-/**
- * Docs.
- */
-x.finally = function() {
-};
-
-x.finally();
-x
- .finally();
-x.finally (); // EXTRA_SPACE
-x
- .finally (); // EXTRA_SPACE
-try {
-} finally (e) {
-}
-try {
-} finally(e) { // MISSING_SPACE
-}
-
-functionName(x , y); // EXTRA_SPACE
-functionName(x,y); // MISSING_SPACE
-functionName(x, y);
-
-var really_really_really_really_really_really_really_really_really_long_name =
- 2;
-
-var current = arr[cursorRead++];
-
-var x = -(y + z);
-
-// Tab before +
-var foo + 3; // ILLEGAL_TAB
-if (something) {
- var x = 4; // ILLEGAL_TAB
-}
-
-// +1: ILLEGAL_TAB
-// Tab <-- in a comment.
-
-
-// +3: ILLEGAL_TAB
-// +3: ILLEGAL_TAB
-/**
- * An inline flag with a tab {@code asdfasd}.
- * @return {string} Illegal <-- tab in a doc description.
- */
-function x() {
- return '';
-}
-
-
-// +2: ILLEGAL_TAB
-/**
- * @type {tabBeforeMe}
- */
-
-// +1: EXTRA_SPACE
-var whitespaceAtEndOfLine;
-
-// +1: EXTRA_SPACE
-// Whitespace at end of comment.
-
-
-// +4: EXTRA_SPACE
-// +4: EXTRA_SPACE
-// +4: EXTRA_SPACE
-// +4: EXTRA_SPACE
-/*
- * Whitespace at EOL.
- * @type {string}
- * @param {string} Description with whitespace at EOL.
- */
-x = 10;
-
-
-/**
- * @param {?{foo, bar: number}} x This is a valid annotation.
- * @return {{baz}} This is also a valid annotation.
- */
-function recordTypeFunction(x) {
- return x;
-}
-
-if (y) {
- // Colons are difficult.
- y = x ? 1 : 2;
- y = x ? 1: 2; // MISSING_SPACE
-
- x = {
- b: 'Good',
- d : 'Space before colon is bad', // EXTRA_SPACE
- f: abc ? def : ghi // These colons should be treated differently
- };
-
- x = {language: langCode}; // EXTRA_SPACE
-}
-
-// 1094445 - should produce missing space error before +.
-// +1: MISSING_SPACE
-throw Error('Selector not supported yet ('+ opt_selector + ')');
-
-// This code is ok.
-for (i = 0; i < len; ++i) {
-}
-
-for (i = 0;i < 10; i++) { // MISSING_SPACE
-}
-for (i = 0; i < 10;i++) { // MISSING_SPACE
-}
-for ( i = 0; i < 10; i++) { // EXTRA_SPACE
-}
-for (i = 0 ; i < 10; i++) { // EXTRA_SPACE
-}
-for (i = 0; i < 10 ; i++) { // EXTRA_SPACE
-}
-for (i = 0; i < 10; i++ ) { // EXTRA_SPACE
-}
-for (i = 0; i < 10; i++) { // EXTRA_SPACE
-}
-for (i = 0; i < 10; i++) { // EXTRA_SPACE
-}
-for (i = 0 ;i < 10; i++) { // EXTRA_SPACE, MISSING_SPACE
-}
-
-// Regression test for bug 3508480, parse error when tab as last token.
-// +1: ILLEGAL_TAB, EXTRA_SPACE
diff --git a/tools/closure_linter/closure_linter/testdata/tokenizer.js b/tools/closure_linter/closure_linter/testdata/tokenizer.js
deleted file mode 100644
index 1fbcf4bd41..0000000000
--- a/tools/closure_linter/closure_linter/testdata/tokenizer.js
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Errors relating to tokenizing.
- *
- * @author robbyw@google.com (Robby Walker)
- */
-
-// Regression test: if regular expressions parse incorrectly this will emit an
-// error such as: Missing space after '/'
-x = /[^\']/; // and all the other chars
-
-// Regression test: if regular expressions parse incorrectly this will emit an
-// error such as: Missing space before +
-var regExp = fromStart ? / ^[\t\r\n]+/ : /[ \t\r\n]+$/;
-
-// Regression test for bug 1032312: test for correct parsing of multiline
-// strings
-// +2: MULTI_LINE_STRING
-var RG_MONTH_EVENT_TEMPLATE_SINGLE_QUOTE = new Template(
- '\
-<div id="${divID}" class=month_event \
- style="top:${top}px;left:${left}px;width:${width}px;height:${height}px;\
- z-index:' + Z_INDEX_MONTH_EVENT);
-
-// +2: MULTI_LINE_STRING
-var RG_MONTH_EVENT_TEMPLATE_DOUBLE_QUOTE = new Template(
- "\
-<div id='${divID}' class=month_event \
- style='top:${top}px;left:${left}px;width:${width}px;height:${height}px;\
- z-index:" + Z_INDEX_MONTH_EVENT);
-
-// Regression test for bug 1032312: test for correct parsing of single line
-// comment at end of line. If it's parsed incorrectly, it reads the entire next
-// line as a comment.
-//
-if (x) {
- // If the above line is treated as a comment, the closing brace below will
- // cause the linter to crash.
-}
-
-// Regression test for bitwise operators '^=', '>>>' and '>>>=' that weren't
-// recognized as operators.
-a -= b; a -= c; a ^= c >>> 13; a >>>= 1;
-
-// Regression test as xor was not allowed on the end of a line.
-x = 1000 ^
- 45;
-
-// Regression test for proper number parsing. If parsed incorrectly, some of
-// these notations can lead to missing spaces errors.
-var x = 1e-6 + 1e+6 + 0. + .5 + 0.5 + 0.e-6 + .5e-6 + 0.5e-6 + 0x123abc +
- 0X1Ab3 + 1E7;
-
-// Regression test for keyword parsing - making sure the fact that the "do"
-// keyword is a part of the identifier below doesn't break anything.
-this.undoRedoManager_.undo();
-
-// Regression test for regex as object value not matching.
-x = {x: /./};
-
-// Regression test for regex as last array element not matching.
-x = [/./];
-
-// Syntax tests for ES6:
-x = x => x;
diff --git a/tools/closure_linter/closure_linter/testdata/unparseable.js b/tools/closure_linter/closure_linter/testdata/unparseable.js
deleted file mode 100644
index e842614286..0000000000
--- a/tools/closure_linter/closure_linter/testdata/unparseable.js
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-
-
-/**
- * Constructs and initializes a new good object.
- * @constructor
- */
-goog.good = function() {
-};
-
-
-/**
- * Makes this good object go bad.
- * @param {number} badnessLevel How bad this object is going.
- */
-goog.good.prototype.goBad = function() { // EXTRA_PARAMETER_DOCUMENTATION
-};
-
-if (x)
- // Cannot parse ending block because beginning block is missing.
-} // FILE_DOES_NOT_PARSE
-
-
-/**
- * Unecessary return documentation error is not reported because file checking
- * stopped at token causing parse error.
- *
- * @return {boolean} Whether reform was sucessful.
- */
-goog.good.prototype.reform = function() {
-};
diff --git a/tools/closure_linter/closure_linter/testdata/unused_local_variables.js b/tools/closure_linter/closure_linter/testdata/unused_local_variables.js
deleted file mode 100644
index e9e51a1164..0000000000
--- a/tools/closure_linter/closure_linter/testdata/unused_local_variables.js
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2013 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Checks that unused local variables result in an error.
- */
-
-goog.provide('dummy.Something');
-
-
-
-/**
- * @constructor
- */
-dummy.Something = function() {
- // This variable isn't really used, but we can't tell for sure.
- var usedVariable = [];
- usedVariable.length = 1;
-
- var variableUsedInAClosure = [];
- var functionUsedByInvoking = function() {
- variableUsedInAClosure[1] = 'abc';
- };
- functionUsedByInvoking();
-
- var variableUsedTwoLevelsDeep = [];
- var firstLevelFunction = function() {
- function() {
- variableUsedTwoLevelsDeep.append(1);
- }
- };
- firstLevelFunction();
-
- // This variable isn't being declared so is unchecked.
- undeclaredLocal = 1;
-
- var unusedVariable;
-
- // Check that using a variable as member name doesn't trigger
- // usage.
- this.unusedVariable = 0;
- this.unusedVariable = this.unusedVariable + 1;
-
- // Check that declaring a variable twice doesn't trigger
- // usage.
- var unusedVariable; // UNUSED_LOCAL_VARIABLE
-
- var unusedVariableWithReassignment = []; // UNUSED_LOCAL_VARIABLE
- unusedVariableWithReassignment = 'a';
-
- var unusedFunction = function() {}; // UNUSED_LOCAL_VARIABLE
-
- var unusedHiddenVariable = 1; // UNUSED_LOCAL_VARIABLE
- firstLevelFunction = function() {
- // This variable is actually used in the function below, but hides the outer
- // variable with the same name.
- var unusedHiddenVariable = 1;
- function() {
- delete unusedHiddenVariable;
- }
- };
-};
-
-
-goog.scope(function() {
-var unusedAlias = dummy.Something; // UNUSED_LOCAL_VARIABLE
-var UsedTypeAlias = dummy.Something;
-var AnotherUsedTypeAlias = dummy.Something;
-
-
-/** @protected {AnotherUsedTypeAlias.Something|UsedTypeAlias} */
-var usedAlias = dummy.Something;
-new usedAlias();
-}); // goog.scope
-
-// Unused top level variables are not checked.
-var unusedTopLevelVariable;
diff --git a/tools/closure_linter/closure_linter/testdata/unused_private_members.js b/tools/closure_linter/closure_linter/testdata/unused_private_members.js
deleted file mode 100644
index 76c0865bcc..0000000000
--- a/tools/closure_linter/closure_linter/testdata/unused_private_members.js
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS-IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * @fileoverview Checks that an unused private members result in an error.
- */
-
-goog.provide('dummy.Something');
-
-
-
-/**
- * @constructor
- */
-dummy.Something = function() {
- /**
- * @type {number}
- * @private
- */
- this.normalVariable_ = 1;
-
- // +5: UNUSED_PRIVATE_MEMBER
- /**
- * @type {number}
- * @private
- */
- this.unusedVariable_ = 1;
-
- /**
- * @type {number}
- * @private
- * @suppress {unusedPrivateMembers}
- */
- this.suppressedUnusedVariable_ = 1;
-};
-
-
-/**
- * @type {number}
- * @private
- */
-dummy.Something.NORMAL_CONSTANT_ = 1;
-
-
-// +5: UNUSED_PRIVATE_MEMBER
-/**
- * @type {number}
- * @private
- */
-dummy.Something.UNUSED_CONSTANT_ = 1;
-
-
-/**
- * @type {number}
- * @private
- * @suppress {unusedPrivateMembers}
- */
-dummy.Something.SUPPRESSED_UNUSED_CONSTANT_ = 1;
-
-
-/**
- * @type {number}
- * @private
- */
-dummy.Something.normalStaticVariable_ = 1;
-
-
-// +5: UNUSED_PRIVATE_MEMBER
-/**
- * @type {number}
- * @private
- */
-dummy.Something.unusedStaticVariable_ = 1;
-
-
-/**
- * @type {number}
- * @private
- * @suppress {unusedPrivateMembers}
- */
-dummy.Something.suppressedUnusedStaticVariable_ = 1;
-
-
-/**
- * @type {number}
- * @private
- */
-dummy.Something.prototype.normalVariableOnPrototype_ = 1;
-
-
-// +5: UNUSED_PRIVATE_MEMBER
-/**
- * @type {number}
- * @private
- */
-dummy.Something.prototype.unusedVariableOnPrototype_ = 1;
-
-
-/**
- * @type {number}
- * @private
- * @suppress {unusedPrivateMembers}
- */
-dummy.Something.prototype.suppressedUnusedVariableOnPrototype_ = 1;
-
-
-/**
- * Check edge cases that should not be reported.
- */
-dummy.Something.prototype.checkFalsePositives = function() {
- this.__iterator__ = 1;
- this.normalVariable_.unknownChainedVariable_ = 1;
- othernamespace.unusedVariable_ = 1;
-
- this.element_ = 1;
- this.element_.modifyPublicMember = 1;
-
- /** @suppress {underscore} */
- this.suppressedUnderscore_ = true;
-};
-
-
-/**
- * Use all the normal variables.
- */
-dummy.Something.prototype.useAllTheThings = function() {
- var x = [
- dummy.Something.NORMAL_CONSTANT_,
- this.normalStaticVariable_,
- this.normalVariable_,
- this.normalVariableOnPrototype_,
- dummy.Something.normalStaticMethod_(),
- this.normalMethod_(),
- x
- ];
-};
-
-
-// +5: UNUSED_PRIVATE_MEMBER
-/**
- * Unused static method.
- * @private
- */
-dummy.Something.unusedStaticMethod_ = function() {
- // Do nothing.
-};
-
-
-/**
- * Unused static method.
- * @private
- * @suppress {unusedPrivateMembers}
- */
-dummy.Something.suppressedUnusedStaticMethod_ = function() {
- // Do nothing.
-};
-
-
-/**
- * Normal static method.
- * @private
- */
-dummy.Something.normalStaticMethod_ = function() {
- // Do nothing.
-};
-
-
-// +5: UNUSED_PRIVATE_MEMBER
-/**
- * Unused non-static method.
- * @private
- */
-dummy.Something.prototype.unusedMethod_ = function() {
- // Do nothing.
-};
-
-
-/**
- * Unused non-static method that is suppressed.
- * @private
- * @suppress {unusedPrivateMembers}
- */
-dummy.Something.prototype.suppressedUnusedMethod_ = function() {
- // Do nothing.
-};
-
-
-/**
- * Normal non-static method.
- * @private
- */
-dummy.Something.prototype.normalMethod_ = function() {
- // Do nothing.
-};
diff --git a/tools/closure_linter/closure_linter/testdata/utf8.html b/tools/closure_linter/closure_linter/testdata/utf8.html
deleted file mode 100644
index 29517d0941..0000000000
--- a/tools/closure_linter/closure_linter/testdata/utf8.html
+++ /dev/null
@@ -1,26 +0,0 @@
-<!--
- Copyright 2009 The Closure Linter Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS-IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-
-<meta http-equiv="Content-Type" content="charset=UTF-8"></meta>
-<script src=../somescript.js></script>
-<script>
-/** @bug 1735846 */
-window.onload = function() {
- // +1: UNUSED_LOCAL_VARIABLE
- var notFoundMsg = 'ぐーぐるぐるぐるという場所は見つかりませんでした。';
-};
-</script>
diff --git a/tools/closure_linter/closure_linter/testutil.py b/tools/closure_linter/closure_linter/testutil.py
deleted file mode 100644
index f7084ee37b..0000000000
--- a/tools/closure_linter/closure_linter/testutil.py
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Utility functions for testing gjslint components."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = ('nnaze@google.com (Nathan Naze)')
-
-import StringIO
-
-from closure_linter import ecmametadatapass
-from closure_linter import javascriptstatetracker
-from closure_linter import javascripttokenizer
-
-
-def TokenizeSource(source):
- """Convert a source into a string of tokens.
-
- Args:
- source: A source file as a string or file-like object (iterates lines).
-
- Returns:
- The first token of the resulting token stream.
- """
-
- if isinstance(source, basestring):
- source = StringIO.StringIO(source)
-
- tokenizer = javascripttokenizer.JavaScriptTokenizer()
- return tokenizer.TokenizeFile(source)
-
-
-def TokenizeSourceAndRunEcmaPass(source):
- """Tokenize a source and run the EcmaMetaDataPass on it.
-
- Args:
- source: A source file as a string or file-like object (iterates lines).
-
- Returns:
- The first token of the resulting token stream.
- """
- start_token = TokenizeSource(source)
- ecma_pass = ecmametadatapass.EcmaMetaDataPass()
- ecma_pass.Process(start_token)
- return start_token
-
-
-def ParseFunctionsAndComments(source, error_handler=None):
- """Run the tokenizer and tracker and return comments and functions found.
-
- Args:
- source: A source file as a string or file-like object (iterates lines).
- error_handler: An error handler.
-
- Returns:
- The functions and comments as a tuple.
- """
- start_token = TokenizeSourceAndRunEcmaPass(source)
-
- tracker = javascriptstatetracker.JavaScriptStateTracker()
- if error_handler is not None:
- tracker.DocFlagPass(start_token, error_handler)
-
- functions = []
- comments = []
- for token in start_token:
- tracker.HandleToken(token, tracker.GetLastNonSpaceToken())
-
- function = tracker.GetFunction()
- if function and function not in functions:
- functions.append(function)
-
- comment = tracker.GetDocComment()
- if comment and comment not in comments:
- comments.append(comment)
-
- tracker.HandleAfterToken(token)
-
- return functions, comments
diff --git a/tools/closure_linter/closure_linter/tokenutil.py b/tools/closure_linter/closure_linter/tokenutil.py
deleted file mode 100755
index 11e3ccc68b..0000000000
--- a/tools/closure_linter/closure_linter/tokenutil.py
+++ /dev/null
@@ -1,697 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Token utility functions."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-import copy
-import StringIO
-
-from closure_linter.common import tokens
-from closure_linter.javascripttokens import JavaScriptToken
-from closure_linter.javascripttokens import JavaScriptTokenType
-
-# Shorthand
-Type = tokens.TokenType
-
-
-def GetFirstTokenInSameLine(token):
- """Returns the first token in the same line as token.
-
- Args:
- token: Any token in the line.
-
- Returns:
- The first token in the same line as token.
- """
- while not token.IsFirstInLine():
- token = token.previous
- return token
-
-
-def GetFirstTokenInPreviousLine(token):
- """Returns the first token in the previous line as token.
-
- Args:
- token: Any token in the line.
-
- Returns:
- The first token in the previous line as token, or None if token is on the
- first line.
- """
- first_in_line = GetFirstTokenInSameLine(token)
- if first_in_line.previous:
- return GetFirstTokenInSameLine(first_in_line.previous)
-
- return None
-
-
-def GetLastTokenInSameLine(token):
- """Returns the last token in the same line as token.
-
- Args:
- token: Any token in the line.
-
- Returns:
- The last token in the same line as token.
- """
- while not token.IsLastInLine():
- token = token.next
- return token
-
-
-def GetAllTokensInSameLine(token):
- """Returns all tokens in the same line as the given token.
-
- Args:
- token: Any token in the line.
-
- Returns:
- All tokens on the same line as the given token.
- """
- first_token = GetFirstTokenInSameLine(token)
- last_token = GetLastTokenInSameLine(token)
-
- tokens_in_line = []
- while first_token != last_token:
- tokens_in_line.append(first_token)
- first_token = first_token.next
- tokens_in_line.append(last_token)
-
- return tokens_in_line
-
-
-def CustomSearch(start_token, func, end_func=None, distance=None,
- reverse=False):
- """Returns the first token where func is True within distance of this token.
-
- Args:
- start_token: The token to start searching from
- func: The function to call to test a token for applicability
- end_func: The function to call to test a token to determine whether to abort
- the search.
- distance: The number of tokens to look through before failing search. Must
- be positive. If unspecified, will search until the end of the token
- chain
- reverse: When true, search the tokens before this one instead of the tokens
- after it
-
- Returns:
- The first token matching func within distance of this token, or None if no
- such token is found.
- """
- token = start_token
- if reverse:
- while token and (distance is None or distance > 0):
- previous = token.previous
- if previous:
- if func(previous):
- return previous
- if end_func and end_func(previous):
- return None
-
- token = previous
- if distance is not None:
- distance -= 1
-
- else:
- while token and (distance is None or distance > 0):
- next_token = token.next
- if next_token:
- if func(next_token):
- return next_token
- if end_func and end_func(next_token):
- return None
-
- token = next_token
- if distance is not None:
- distance -= 1
-
- return None
-
-
-def Search(start_token, token_types, distance=None, reverse=False):
- """Returns the first token of type in token_types within distance.
-
- Args:
- start_token: The token to start searching from
- token_types: The allowable types of the token being searched for
- distance: The number of tokens to look through before failing search. Must
- be positive. If unspecified, will search until the end of the token
- chain
- reverse: When true, search the tokens before this one instead of the tokens
- after it
-
- Returns:
- The first token of any type in token_types within distance of this token, or
- None if no such token is found.
- """
- return CustomSearch(start_token, lambda token: token.IsAnyType(token_types),
- None, distance, reverse)
-
-
-def SearchExcept(start_token, token_types, distance=None, reverse=False):
- """Returns the first token not of any type in token_types within distance.
-
- Args:
- start_token: The token to start searching from
- token_types: The unallowable types of the token being searched for
- distance: The number of tokens to look through before failing search. Must
- be positive. If unspecified, will search until the end of the token
- chain
- reverse: When true, search the tokens before this one instead of the tokens
- after it
-
- Returns:
- The first token of any type in token_types within distance of this token, or
- None if no such token is found.
- """
- return CustomSearch(start_token,
- lambda token: not token.IsAnyType(token_types),
- None, distance, reverse)
-
-
-def SearchUntil(start_token, token_types, end_types, distance=None,
- reverse=False):
- """Returns the first token of type in token_types before a token of end_type.
-
- Args:
- start_token: The token to start searching from.
- token_types: The allowable types of the token being searched for.
- end_types: Types of tokens to abort search if we find.
- distance: The number of tokens to look through before failing search. Must
- be positive. If unspecified, will search until the end of the token
- chain
- reverse: When true, search the tokens before this one instead of the tokens
- after it
-
- Returns:
- The first token of any type in token_types within distance of this token
- before any tokens of type in end_type, or None if no such token is found.
- """
- return CustomSearch(start_token, lambda token: token.IsAnyType(token_types),
- lambda token: token.IsAnyType(end_types),
- distance, reverse)
-
-
-def DeleteToken(token):
- """Deletes the given token from the linked list.
-
- Args:
- token: The token to delete
- """
- # When deleting a token, we do not update the deleted token itself to make
- # sure the previous and next pointers are still pointing to tokens which are
- # not deleted. Also it is very hard to keep track of all previously deleted
- # tokens to update them when their pointers become invalid. So we add this
- # flag that any token linked list iteration logic can skip deleted node safely
- # when its current token is deleted.
- token.is_deleted = True
- if token.previous:
- token.previous.next = token.next
-
- if token.next:
- token.next.previous = token.previous
-
- following_token = token.next
- while following_token and following_token.metadata.last_code == token:
- following_token.metadata.last_code = token.metadata.last_code
- following_token = following_token.next
-
-
-def DeleteTokens(token, token_count):
- """Deletes the given number of tokens starting with the given token.
-
- Args:
- token: The token to start deleting at.
- token_count: The total number of tokens to delete.
- """
- for i in xrange(1, token_count):
- DeleteToken(token.next)
- DeleteToken(token)
-
-
-def InsertTokenBefore(new_token, token):
- """Insert new_token before token.
-
- Args:
- new_token: A token to be added to the stream
- token: A token already in the stream
- """
- new_token.next = token
- new_token.previous = token.previous
-
- new_token.metadata = copy.copy(token.metadata)
-
- if new_token.IsCode():
- old_last_code = token.metadata.last_code
- following_token = token
- while (following_token and
- following_token.metadata.last_code == old_last_code):
- following_token.metadata.last_code = new_token
- following_token = following_token.next
-
- token.previous = new_token
- if new_token.previous:
- new_token.previous.next = new_token
-
- if new_token.start_index is None:
- if new_token.line_number == token.line_number:
- new_token.start_index = token.start_index
- else:
- previous_token = new_token.previous
- if previous_token:
- new_token.start_index = (previous_token.start_index +
- len(previous_token.string))
- else:
- new_token.start_index = 0
-
- iterator = new_token.next
- while iterator and iterator.line_number == new_token.line_number:
- iterator.start_index += len(new_token.string)
- iterator = iterator.next
-
-
-def InsertTokenAfter(new_token, token):
- """Insert new_token after token.
-
- Args:
- new_token: A token to be added to the stream
- token: A token already in the stream
- """
- new_token.previous = token
- new_token.next = token.next
-
- new_token.metadata = copy.copy(token.metadata)
-
- if token.IsCode():
- new_token.metadata.last_code = token
-
- if new_token.IsCode():
- following_token = token.next
- while following_token and following_token.metadata.last_code == token:
- following_token.metadata.last_code = new_token
- following_token = following_token.next
-
- token.next = new_token
- if new_token.next:
- new_token.next.previous = new_token
-
- if new_token.start_index is None:
- if new_token.line_number == token.line_number:
- new_token.start_index = token.start_index + len(token.string)
- else:
- new_token.start_index = 0
-
- iterator = new_token.next
- while iterator and iterator.line_number == new_token.line_number:
- iterator.start_index += len(new_token.string)
- iterator = iterator.next
-
-
-def InsertTokensAfter(new_tokens, token):
- """Insert multiple tokens after token.
-
- Args:
- new_tokens: An array of tokens to be added to the stream
- token: A token already in the stream
- """
- # TODO(user): It would be nicer to have InsertTokenAfter defer to here
- # instead of vice-versa.
- current_token = token
- for new_token in new_tokens:
- InsertTokenAfter(new_token, current_token)
- current_token = new_token
-
-
-def InsertSpaceTokenAfter(token):
- """Inserts a space token after the given token.
-
- Args:
- token: The token to insert a space token after
-
- Returns:
- A single space token
- """
- space_token = JavaScriptToken(' ', Type.WHITESPACE, token.line,
- token.line_number)
- InsertTokenAfter(space_token, token)
-
-
-def InsertBlankLineAfter(token):
- """Inserts a blank line after the given token.
-
- Args:
- token: The token to insert a blank line after
-
- Returns:
- A single space token
- """
- blank_token = JavaScriptToken('', Type.BLANK_LINE, '',
- token.line_number + 1)
- InsertLineAfter(token, [blank_token])
-
-
-def InsertLineAfter(token, new_tokens):
- """Inserts a new line consisting of new_tokens after the given token.
-
- Args:
- token: The token to insert after.
- new_tokens: The tokens that will make up the new line.
- """
- insert_location = token
- for new_token in new_tokens:
- InsertTokenAfter(new_token, insert_location)
- insert_location = new_token
-
- # Update all subsequent line numbers.
- next_token = new_tokens[-1].next
- while next_token:
- next_token.line_number += 1
- next_token = next_token.next
-
-
-def SplitToken(token, position):
- """Splits the token into two tokens at position.
-
- Args:
- token: The token to split
- position: The position to split at. Will be the beginning of second token.
-
- Returns:
- The new second token.
- """
- new_string = token.string[position:]
- token.string = token.string[:position]
-
- new_token = JavaScriptToken(new_string, token.type, token.line,
- token.line_number)
- InsertTokenAfter(new_token, token)
-
- return new_token
-
-
-def Compare(token1, token2):
- """Compares two tokens and determines their relative order.
-
- Args:
- token1: The first token to compare.
- token2: The second token to compare.
-
- Returns:
- A negative integer, zero, or a positive integer as the first token is
- before, equal, or after the second in the token stream.
- """
- if token2.line_number != token1.line_number:
- return token1.line_number - token2.line_number
- else:
- return token1.start_index - token2.start_index
-
-
-def GoogScopeOrNoneFromStartBlock(token):
- """Determines if the given START_BLOCK is part of a goog.scope statement.
-
- Args:
- token: A token of type START_BLOCK.
-
- Returns:
- The goog.scope function call token, or None if such call doesn't exist.
- """
- if token.type != JavaScriptTokenType.START_BLOCK:
- return None
-
- # Search for a goog.scope statement, which will be 5 tokens before the
- # block. Illustration of the tokens found prior to the start block:
- # goog.scope(function() {
- # 5 4 3 21 ^
-
- maybe_goog_scope = token
- for unused_i in xrange(5):
- maybe_goog_scope = (maybe_goog_scope.previous if maybe_goog_scope and
- maybe_goog_scope.previous else None)
- if maybe_goog_scope and maybe_goog_scope.string == 'goog.scope':
- return maybe_goog_scope
-
-
-def GetTokenRange(start_token, end_token):
- """Returns a list of tokens between the two given, inclusive.
-
- Args:
- start_token: Start token in the range.
- end_token: End token in the range.
-
- Returns:
- A list of tokens, in order, from start_token to end_token (including start
- and end). Returns none if the tokens do not describe a valid range.
- """
-
- token_range = []
- token = start_token
-
- while token:
- token_range.append(token)
-
- if token == end_token:
- return token_range
-
- token = token.next
-
-
-def TokensToString(token_iterable):
- """Convert a number of tokens into a string.
-
- Newlines will be inserted whenever the line_number of two neighboring
- strings differ.
-
- Args:
- token_iterable: The tokens to turn to a string.
-
- Returns:
- A string representation of the given tokens.
- """
-
- buf = StringIO.StringIO()
- token_list = list(token_iterable)
- if not token_list:
- return ''
-
- line_number = token_list[0].line_number
-
- for token in token_list:
-
- while line_number < token.line_number:
- line_number += 1
- buf.write('\n')
-
- if line_number > token.line_number:
- line_number = token.line_number
- buf.write('\n')
-
- buf.write(token.string)
-
- return buf.getvalue()
-
-
-def GetPreviousCodeToken(token):
- """Returns the code token before the specified token.
-
- Args:
- token: A token.
-
- Returns:
- The code token before the specified token or None if no such token
- exists.
- """
-
- return CustomSearch(
- token,
- lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES,
- reverse=True)
-
-
-def GetNextCodeToken(token):
- """Returns the next code token after the specified token.
-
- Args:
- token: A token.
-
- Returns:
- The next code token after the specified token or None if no such token
- exists.
- """
-
- return CustomSearch(
- token,
- lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES,
- reverse=False)
-
-
-def GetIdentifierStart(token):
- """Returns the first token in an identifier.
-
- Given a token which is part of an identifier, returns the token at the start
- of the identifier.
-
- Args:
- token: A token which is part of an identifier.
-
- Returns:
- The token at the start of the identifier or None if the identifier was not
- of the form 'a.b.c' (e.g. "['a']['b'].c").
- """
-
- start_token = token
- previous_code_token = GetPreviousCodeToken(token)
-
- while (previous_code_token and (
- previous_code_token.IsType(JavaScriptTokenType.IDENTIFIER) or
- IsDot(previous_code_token))):
- start_token = previous_code_token
- previous_code_token = GetPreviousCodeToken(previous_code_token)
-
- if IsDot(start_token):
- return None
-
- return start_token
-
-
-def GetIdentifierForToken(token):
- """Get the symbol specified by a token.
-
- Given a token, this function additionally concatenates any parts of an
- identifying symbol being identified that are split by whitespace or a
- newline.
-
- The function will return None if the token is not the first token of an
- identifier.
-
- Args:
- token: The first token of a symbol.
-
- Returns:
- The whole symbol, as a string.
- """
-
- # Search backward to determine if this token is the first token of the
- # identifier. If it is not the first token, return None to signal that this
- # token should be ignored.
- prev_token = token.previous
- while prev_token:
- if (prev_token.IsType(JavaScriptTokenType.IDENTIFIER) or
- IsDot(prev_token)):
- return None
-
- if (prev_token.IsType(tokens.TokenType.WHITESPACE) or
- prev_token.IsAnyType(JavaScriptTokenType.COMMENT_TYPES)):
- prev_token = prev_token.previous
- else:
- break
-
- # A "function foo()" declaration.
- if token.type is JavaScriptTokenType.FUNCTION_NAME:
- return token.string
-
- # A "var foo" declaration (if the previous token is 'var')
- previous_code_token = GetPreviousCodeToken(token)
-
- if previous_code_token and previous_code_token.IsKeyword('var'):
- return token.string
-
- # Otherwise, this is potentially a namespaced (goog.foo.bar) identifier that
- # could span multiple lines or be broken up by whitespace. We need
- # to concatenate.
- identifier_types = set([
- JavaScriptTokenType.IDENTIFIER,
- JavaScriptTokenType.SIMPLE_LVALUE
- ])
-
- assert token.type in identifier_types
-
- # Start with the first token
- symbol_tokens = [token]
-
- if token.next:
- for t in token.next:
- last_symbol_token = symbol_tokens[-1]
-
- # A dot is part of the previous symbol.
- if IsDot(t):
- symbol_tokens.append(t)
- continue
-
- # An identifier is part of the previous symbol if the previous one was a
- # dot.
- if t.type in identifier_types:
- if IsDot(last_symbol_token):
- symbol_tokens.append(t)
- continue
- else:
- break
-
- # Skip any whitespace
- if t.type in JavaScriptTokenType.NON_CODE_TYPES:
- continue
-
- # This is the end of the identifier. Stop iterating.
- break
-
- if symbol_tokens:
- return ''.join([t.string for t in symbol_tokens])
-
-
-def GetStringAfterToken(token):
- """Get string after token.
-
- Args:
- token: Search will be done after this token.
-
- Returns:
- String if found after token else None (empty string will also
- return None).
-
- Search until end of string as in case of empty string Type.STRING_TEXT is not
- present/found and don't want to return next string.
- E.g.
- a = '';
- b = 'test';
- When searching for string after 'a' if search is not limited by end of string
- then it will return 'test' which is not desirable as there is a empty string
- before that.
-
- This will return None for cases where string is empty or no string found
- as in both cases there is no Type.STRING_TEXT.
- """
- string_token = SearchUntil(token, JavaScriptTokenType.STRING_TEXT,
- [JavaScriptTokenType.SINGLE_QUOTE_STRING_END,
- JavaScriptTokenType.DOUBLE_QUOTE_STRING_END])
- if string_token:
- return string_token.string
- else:
- return None
-
-
-def IsDot(token):
- """Whether the token represents a "dot" operator (foo.bar)."""
- return token.type is JavaScriptTokenType.OPERATOR and token.string == '.'
-
-
-def IsIdentifierOrDot(token):
- """Whether the token is either an identifier or a '.'."""
- return (token.type in [JavaScriptTokenType.IDENTIFIER,
- JavaScriptTokenType.SIMPLE_LVALUE] or
- IsDot(token))
diff --git a/tools/closure_linter/closure_linter/tokenutil_test.py b/tools/closure_linter/closure_linter/tokenutil_test.py
deleted file mode 100644
index c7d3854776..0000000000
--- a/tools/closure_linter/closure_linter/tokenutil_test.py
+++ /dev/null
@@ -1,297 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unit tests for the scopeutil module."""
-
-# Allow non-Google copyright
-# pylint: disable=g-bad-file-header
-
-__author__ = ('nnaze@google.com (Nathan Naze)')
-
-import unittest as googletest
-
-from closure_linter import ecmametadatapass
-from closure_linter import javascripttokens
-from closure_linter import testutil
-from closure_linter import tokenutil
-
-
-class FakeToken(object):
- pass
-
-
-class TokenUtilTest(googletest.TestCase):
-
- def testGetTokenRange(self):
-
- a = FakeToken()
- b = FakeToken()
- c = FakeToken()
- d = FakeToken()
- e = FakeToken()
-
- a.next = b
- b.next = c
- c.next = d
-
- self.assertEquals([a, b, c, d], tokenutil.GetTokenRange(a, d))
-
- # This is an error as e does not come after a in the token chain.
- self.assertRaises(Exception, lambda: tokenutil.GetTokenRange(a, e))
-
- def testTokensToString(self):
-
- a = FakeToken()
- b = FakeToken()
- c = FakeToken()
- d = FakeToken()
- e = FakeToken()
-
- a.string = 'aaa'
- b.string = 'bbb'
- c.string = 'ccc'
- d.string = 'ddd'
- e.string = 'eee'
-
- a.line_number = 5
- b.line_number = 6
- c.line_number = 6
- d.line_number = 10
- e.line_number = 11
-
- self.assertEquals(
- 'aaa\nbbbccc\n\n\n\nddd\neee',
- tokenutil.TokensToString([a, b, c, d, e]))
-
- self.assertEquals(
- 'ddd\neee\naaa\nbbbccc',
- tokenutil.TokensToString([d, e, a, b, c]),
- 'Neighboring tokens not in line_number order should have a newline '
- 'between them.')
-
- def testGetPreviousCodeToken(self):
-
- tokens = testutil.TokenizeSource("""
-start1. // comment
- /* another comment */
- end1
-""")
-
- def _GetTokenStartingWith(token_starts_with):
- for t in tokens:
- if t.string.startswith(token_starts_with):
- return t
-
- self.assertEquals(
- None,
- tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('start1')))
-
- self.assertEquals(
- '.',
- tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('end1')).string)
-
- self.assertEquals(
- 'start1',
- tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('.')).string)
-
- def testGetNextCodeToken(self):
-
- tokens = testutil.TokenizeSource("""
-start1. // comment
- /* another comment */
- end1
-""")
-
- def _GetTokenStartingWith(token_starts_with):
- for t in tokens:
- if t.string.startswith(token_starts_with):
- return t
-
- self.assertEquals(
- '.',
- tokenutil.GetNextCodeToken(_GetTokenStartingWith('start1')).string)
-
- self.assertEquals(
- 'end1',
- tokenutil.GetNextCodeToken(_GetTokenStartingWith('.')).string)
-
- self.assertEquals(
- None,
- tokenutil.GetNextCodeToken(_GetTokenStartingWith('end1')))
-
- def testGetIdentifierStart(self):
-
- tokens = testutil.TokenizeSource("""
-start1 . // comment
- prototype. /* another comment */
- end1
-
-['edge'][case].prototype.
- end2 = function() {}
-""")
-
- def _GetTokenStartingWith(token_starts_with):
- for t in tokens:
- if t.string.startswith(token_starts_with):
- return t
-
- self.assertEquals(
- 'start1',
- tokenutil.GetIdentifierStart(_GetTokenStartingWith('end1')).string)
-
- self.assertEquals(
- 'start1',
- tokenutil.GetIdentifierStart(_GetTokenStartingWith('start1')).string)
-
- self.assertEquals(
- None,
- tokenutil.GetIdentifierStart(_GetTokenStartingWith('end2')))
-
- def testInsertTokenBefore(self):
-
- self.AssertInsertTokenAfterBefore(False)
-
- def testInsertTokenAfter(self):
-
- self.AssertInsertTokenAfterBefore(True)
-
- def AssertInsertTokenAfterBefore(self, after):
-
- new_token = javascripttokens.JavaScriptToken(
- 'a', javascripttokens.JavaScriptTokenType.IDENTIFIER, 1, 1)
-
- existing_token1 = javascripttokens.JavaScriptToken(
- 'var', javascripttokens.JavaScriptTokenType.KEYWORD, 1, 1)
- existing_token1.start_index = 0
- existing_token1.metadata = ecmametadatapass.EcmaMetaData()
-
- existing_token2 = javascripttokens.JavaScriptToken(
- ' ', javascripttokens.JavaScriptTokenType.WHITESPACE, 1, 1)
- existing_token2.start_index = 3
- existing_token2.metadata = ecmametadatapass.EcmaMetaData()
- existing_token2.metadata.last_code = existing_token1
-
- existing_token1.next = existing_token2
- existing_token2.previous = existing_token1
-
- if after:
- tokenutil.InsertTokenAfter(new_token, existing_token1)
- else:
- tokenutil.InsertTokenBefore(new_token, existing_token2)
-
- self.assertEquals(existing_token1, new_token.previous)
- self.assertEquals(existing_token2, new_token.next)
-
- self.assertEquals(new_token, existing_token1.next)
- self.assertEquals(new_token, existing_token2.previous)
-
- self.assertEquals(existing_token1, new_token.metadata.last_code)
- self.assertEquals(new_token, existing_token2.metadata.last_code)
-
- self.assertEquals(0, existing_token1.start_index)
- self.assertEquals(3, new_token.start_index)
- self.assertEquals(4, existing_token2.start_index)
-
- def testGetIdentifierForToken(self):
-
- tokens = testutil.TokenizeSource("""
-start1.abc.def.prototype.
- onContinuedLine
-
-(start2.abc.def
- .hij.klm
- .nop)
-
-start3.abc.def
- .hij = function() {};
-
-// An absurd multi-liner.
-start4.abc.def.
- hij.
- klm = function() {};
-
-start5 . aaa . bbb . ccc
- shouldntBePartOfThePreviousSymbol
-
-start6.abc.def ghi.shouldntBePartOfThePreviousSymbol
-
-var start7 = 42;
-
-function start8() {
-
-}
-
-start9.abc. // why is there a comment here?
- def /* another comment */
- shouldntBePart
-
-start10.abc // why is there a comment here?
- .def /* another comment */
- shouldntBePart
-
-start11.abc. middle1.shouldNotBeIdentifier
-""")
-
- def _GetTokenStartingWith(token_starts_with):
- for t in tokens:
- if t.string.startswith(token_starts_with):
- return t
-
- self.assertEquals(
- 'start1.abc.def.prototype.onContinuedLine',
- tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start1')))
-
- self.assertEquals(
- 'start2.abc.def.hij.klm.nop',
- tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start2')))
-
- self.assertEquals(
- 'start3.abc.def.hij',
- tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start3')))
-
- self.assertEquals(
- 'start4.abc.def.hij.klm',
- tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start4')))
-
- self.assertEquals(
- 'start5.aaa.bbb.ccc',
- tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start5')))
-
- self.assertEquals(
- 'start6.abc.def',
- tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start6')))
-
- self.assertEquals(
- 'start7',
- tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start7')))
-
- self.assertEquals(
- 'start8',
- tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start8')))
-
- self.assertEquals(
- 'start9.abc.def',
- tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start9')))
-
- self.assertEquals(
- 'start10.abc.def',
- tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start10')))
-
- self.assertIsNone(
- tokenutil.GetIdentifierForToken(_GetTokenStartingWith('middle1')))
-
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/closure_linter/typeannotation.py b/tools/closure_linter/closure_linter/typeannotation.py
deleted file mode 100644
index 00604c13a7..0000000000
--- a/tools/closure_linter/closure_linter/typeannotation.py
+++ /dev/null
@@ -1,401 +0,0 @@
-#!/usr/bin/env python
-#*-* coding: utf-8
-"""Closure typeannotation parsing and utilities."""
-
-
-
-from closure_linter import errors
-from closure_linter import javascripttokens
-from closure_linter.common import error
-
-# Shorthand
-TYPE = javascripttokens.JavaScriptTokenType
-
-
-class TypeAnnotation(object):
- """Represents a structured view of a closure type annotation.
-
- Attribute:
- identifier: The name of the type.
- key_type: The name part before a colon.
- sub_types: The list of sub_types used e.g. for Array.<…>
- or_null: The '?' annotation
- not_null: The '!' annotation
- type_group: If this a a grouping (a|b), but does not include function(a).
- return_type: The return type of a function definition.
- alias: The actual type set by closurizednamespaceinfo if the identifier uses
- an alias to shorten the name.
- tokens: An ordered list of tokens used for this type. May contain
- TypeAnnotation instances for sub_types, key_type or return_type.
- """
-
- IMPLICIT_TYPE_GROUP = 2
-
- NULLABILITY_UNKNOWN = 2
-
- # Frequently used known non-nullable types.
- NON_NULLABLE = frozenset([
- 'boolean', 'function', 'number', 'string', 'undefined'])
- # Frequently used known nullable types.
- NULLABLE_TYPE_WHITELIST = frozenset([
- 'Array', 'Document', 'Element', 'Function', 'Node', 'NodeList',
- 'Object'])
-
- def __init__(self):
- self.identifier = ''
- self.sub_types = []
- self.or_null = False
- self.not_null = False
- self.type_group = False
- self.alias = None
- self.key_type = None
- self.record_type = False
- self.opt_arg = False
- self.return_type = None
- self.tokens = []
-
- def IsFunction(self):
- """Determines whether this is a function definition."""
- return self.identifier == 'function'
-
- def IsConstructor(self):
- """Determines whether this is a function definition for a constructor."""
- key_type = self.sub_types and self.sub_types[0].key_type
- return self.IsFunction() and key_type.identifier == 'new'
-
- def IsRecordType(self):
- """Returns True if this type is a record type."""
- return (self.record_type or
- bool([t for t in self.sub_types if t.IsRecordType()]))
-
- def IsVarArgsType(self):
- """Determines if the type is a var_args type, i.e. starts with '...'."""
- return self.identifier.startswith('...') or (
- self.type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP and
- self.sub_types[0].identifier.startswith('...'))
-
- def IsEmpty(self):
- """Returns True if the type is empty."""
- return not self.tokens
-
- def IsUnknownType(self):
- """Returns True if this is the unknown type {?}."""
- return (self.or_null
- and not self.identifier
- and not self.sub_types
- and not self.return_type)
-
- def Append(self, item):
- """Adds a sub_type to this type and finalizes it.
-
- Args:
- item: The TypeAnnotation item to append.
- """
- # item is a TypeAnnotation instance, so pylint: disable=protected-access
- self.sub_types.append(item._Finalize(self))
-
- def __repr__(self):
- """Reconstructs the type definition."""
- append = ''
- if self.sub_types:
- separator = (',' if not self.type_group else '|')
- if self.identifier == 'function':
- surround = '(%s)'
- else:
- surround = {False: '{%s}' if self.record_type else '<%s>',
- True: '(%s)',
- self.IMPLICIT_TYPE_GROUP: '%s'}[self.type_group]
- append = surround % separator.join([repr(t) for t in self.sub_types])
- if self.return_type:
- append += ':%s' % repr(self.return_type)
- append += '=' if self.opt_arg else ''
- prefix = '' + ('?' if self.or_null else '') + ('!' if self.not_null else '')
- keyword = '%s:' % repr(self.key_type) if self.key_type else ''
- return keyword + prefix + '%s' % (self.alias or self.identifier) + append
-
- def ToString(self):
- """Concats the type's tokens to form a string again."""
- ret = []
- for token in self.tokens:
- if not isinstance(token, TypeAnnotation):
- ret.append(token.string)
- else:
- ret.append(token.ToString())
- return ''.join(ret)
-
- def Dump(self, indent=''):
- """Dumps this type's structure for debugging purposes."""
- result = []
- for t in self.tokens:
- if isinstance(t, TypeAnnotation):
- result.append(indent + str(t) + ' =>\n' + t.Dump(indent + ' '))
- else:
- result.append(indent + str(t))
- return '\n'.join(result)
-
- def IterIdentifiers(self):
- """Iterates over all identifiers in this type and its subtypes."""
- if self.identifier:
- yield self.identifier
- for subtype in self.IterTypes():
- for identifier in subtype.IterIdentifiers():
- yield identifier
-
- def IterTypeGroup(self):
- """Iterates over all types in the type group including self.
-
- Yields:
- If this is a implicit or manual type-group: all sub_types.
- Otherwise: self
- E.g. for @type {Foo.<Bar>} this will yield only Foo.<Bar>,
- for @type {Foo|(Bar|Sample)} this will yield Foo, Bar and Sample.
-
- """
- if self.type_group:
- for sub_type in self.sub_types:
- for sub_type in sub_type.IterTypeGroup():
- yield sub_type
- else:
- yield self
-
- def IterTypes(self):
- """Iterates over each subtype as well as return and key types."""
- if self.return_type:
- yield self.return_type
-
- if self.key_type:
- yield self.key_type
-
- for sub_type in self.sub_types:
- yield sub_type
-
- def GetNullability(self, modifiers=True):
- """Computes whether the type may be null.
-
- Args:
- modifiers: Whether the modifiers ? and ! should be considered in the
- evaluation.
- Returns:
- True if the type allows null, False if the type is strictly non nullable
- and NULLABILITY_UNKNOWN if the nullability cannot be determined.
- """
-
- # Explicitly marked nullable types or 'null' are nullable.
- if (modifiers and self.or_null) or self.identifier == 'null':
- return True
-
- # Explicitly marked non-nullable types or non-nullable base types:
- if ((modifiers and self.not_null) or self.record_type
- or self.identifier in self.NON_NULLABLE):
- return False
-
- # A type group is nullable if any of its elements are nullable.
- if self.type_group:
- maybe_nullable = False
- for sub_type in self.sub_types:
- nullability = sub_type.GetNullability()
- if nullability == self.NULLABILITY_UNKNOWN:
- maybe_nullable = nullability
- elif nullability:
- return True
- return maybe_nullable
-
- # Whitelisted types are nullable.
- if self.identifier.rstrip('.') in self.NULLABLE_TYPE_WHITELIST:
- return True
-
- # All other types are unknown (most should be nullable, but
- # enums are not and typedefs might not be).
- return self.NULLABILITY_UNKNOWN
-
- def WillAlwaysBeNullable(self):
- """Computes whether the ! flag is illegal for this type.
-
- This is the case if this type or any of the subtypes is marked as
- explicitly nullable.
-
- Returns:
- True if the ! flag would be illegal.
- """
- if self.or_null or self.identifier == 'null':
- return True
-
- if self.type_group:
- return bool([t for t in self.sub_types if t.WillAlwaysBeNullable()])
-
- return False
-
- def _Finalize(self, parent):
- """Fixes some parsing issues once the TypeAnnotation is complete."""
-
- # Normalize functions whose definition ended up in the key type because
- # they defined a return type after a colon.
- if self.key_type and self.key_type.identifier == 'function':
- current = self.key_type
- current.return_type = self
- self.key_type = None
- # opt_arg never refers to the return type but to the function itself.
- current.opt_arg = self.opt_arg
- self.opt_arg = False
- return current
-
- # If a typedef just specified the key, it will not end up in the key type.
- if parent.record_type and not self.key_type:
- current = TypeAnnotation()
- current.key_type = self
- current.tokens.append(self)
- return current
- return self
-
- def FirstToken(self):
- """Returns the first token used in this type or any of its subtypes."""
- first = self.tokens[0]
- return first.FirstToken() if isinstance(first, TypeAnnotation) else first
-
-
-def Parse(token, token_end, error_handler):
- """Parses a type annotation and returns a TypeAnnotation object."""
- return TypeAnnotationParser(error_handler).Parse(token.next, token_end)
-
-
-class TypeAnnotationParser(object):
- """A parser for type annotations constructing the TypeAnnotation object."""
-
- def __init__(self, error_handler):
- self._stack = []
- self._error_handler = error_handler
- self._closing_error = False
-
- def Parse(self, token, token_end):
- """Parses a type annotation and returns a TypeAnnotation object."""
- root = TypeAnnotation()
- self._stack.append(root)
- current = TypeAnnotation()
- root.tokens.append(current)
-
- while token and token != token_end:
- if token.type in (TYPE.DOC_TYPE_START_BLOCK, TYPE.DOC_START_BRACE):
- if token.string == '(':
- if (current.identifier and
- current.identifier not in ['function', '...']):
- self.Error(token,
- 'Invalid identifier for (): "%s"' % current.identifier)
- current.type_group = current.identifier != 'function'
- elif token.string == '{':
- current.record_type = True
- current.tokens.append(token)
- self._stack.append(current)
- current = TypeAnnotation()
- self._stack[-1].tokens.append(current)
-
- elif token.type in (TYPE.DOC_TYPE_END_BLOCK, TYPE.DOC_END_BRACE):
- prev = self._stack.pop()
- prev.Append(current)
- current = prev
-
- # If an implicit type group was created, close it as well.
- if prev.type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP:
- prev = self._stack.pop()
- prev.Append(current)
- current = prev
- current.tokens.append(token)
-
- elif token.type == TYPE.DOC_TYPE_MODIFIER:
- if token.string == '!':
- current.tokens.append(token)
- current.not_null = True
- elif token.string == '?':
- current.tokens.append(token)
- current.or_null = True
- elif token.string == ':':
- current.tokens.append(token)
- prev = current
- current = TypeAnnotation()
- prev.tokens.append(current)
- current.key_type = prev
- elif token.string == '=':
- # For implicit type groups the '=' refers to the parent.
- try:
- if self._stack[-1].type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP:
- self._stack[-1].tokens.append(token)
- self._stack[-1].opt_arg = True
- else:
- current.tokens.append(token)
- current.opt_arg = True
- except IndexError:
- self.ClosingError(token)
- elif token.string == '|':
- # If a type group has explicitly been opened do a normal append.
- # Otherwise we have to open the type group and move the current
- # type into it, before appending
- if not self._stack[-1].type_group:
- type_group = TypeAnnotation()
- if current.key_type and current.key_type.identifier != 'function':
- type_group.key_type = current.key_type
- current.key_type = None
- type_group.type_group = TypeAnnotation.IMPLICIT_TYPE_GROUP
- # Fix the token order
- prev = self._stack[-1].tokens.pop()
- self._stack[-1].tokens.append(type_group)
- type_group.tokens.append(prev)
- self._stack.append(type_group)
- self._stack[-1].tokens.append(token)
- self.Append(current, error_token=token)
- current = TypeAnnotation()
- self._stack[-1].tokens.append(current)
- elif token.string == ',':
- self.Append(current, error_token=token)
- current = TypeAnnotation()
- self._stack[-1].tokens.append(token)
- self._stack[-1].tokens.append(current)
- else:
- current.tokens.append(token)
- self.Error(token, 'Invalid token')
-
- elif token.type == TYPE.COMMENT:
- current.tokens.append(token)
- current.identifier += token.string.strip()
-
- elif token.type in [TYPE.DOC_PREFIX, TYPE.WHITESPACE]:
- current.tokens.append(token)
-
- else:
- current.tokens.append(token)
- self.Error(token, 'Unexpected token')
-
- token = token.next
-
- self.Append(current, error_token=token)
- try:
- ret = self._stack.pop()
- except IndexError:
- self.ClosingError(token)
- # The type is screwed up, but let's return something.
- return current
-
- if self._stack and (len(self._stack) != 1 or
- ret.type_group != TypeAnnotation.IMPLICIT_TYPE_GROUP):
- self.Error(token, 'Too many opening items.')
-
- return ret if len(ret.sub_types) > 1 else ret.sub_types[0]
-
- def Append(self, type_obj, error_token):
- """Appends a new TypeAnnotation object to the current parent."""
- if self._stack:
- self._stack[-1].Append(type_obj)
- else:
- self.ClosingError(error_token)
-
- def ClosingError(self, token):
- """Reports an error about too many closing items, but only once."""
- if not self._closing_error:
- self._closing_error = True
- self.Error(token, 'Too many closing items.')
-
- def Error(self, token, message):
- """Calls the error_handler to post an error message."""
- if self._error_handler:
- self._error_handler.HandleError(error.Error(
- errors.JSDOC_DOES_NOT_PARSE,
- 'Error parsing jsdoc type at token "%s" (column: %d): %s' %
- (token.string, token.start_index, message), token))
diff --git a/tools/closure_linter/closure_linter/typeannotation_test.py b/tools/closure_linter/closure_linter/typeannotation_test.py
deleted file mode 100755
index da9dfa369f..0000000000
--- a/tools/closure_linter/closure_linter/typeannotation_test.py
+++ /dev/null
@@ -1,232 +0,0 @@
-#!/usr/bin/env python
-"""Unit tests for the typeannotation module."""
-
-
-
-
-import unittest as googletest
-
-from closure_linter import testutil
-from closure_linter.common import erroraccumulator
-
-CRAZY_TYPE = ('Array.<!function(new:X,{a:null},...(c|d)):'
- 'function(...(Object.<string>))>')
-
-
-class TypeErrorException(Exception):
- """Exception for TypeErrors."""
-
- def __init__(self, errors):
- super(TypeErrorException, self).__init__()
- self.errors = errors
-
-
-class TypeParserTest(googletest.TestCase):
- """Tests for typeannotation parsing."""
-
- def _ParseComment(self, script):
- """Parse a script that contains one comment and return it."""
- accumulator = erroraccumulator.ErrorAccumulator()
- _, comments = testutil.ParseFunctionsAndComments(script, accumulator)
- if accumulator.GetErrors():
- raise TypeErrorException(accumulator.GetErrors())
- self.assertEquals(1, len(comments))
- return comments[0]
-
- def _ParseType(self, type_str):
- """Creates a comment to parse and returns the parsed type."""
- comment = self._ParseComment('/** @type {%s} **/' % type_str)
- return comment.GetDocFlags()[0].jstype
-
- def assertProperReconstruction(self, type_str, matching_str=None):
- """Parses the type and asserts the its repr matches the type.
-
- If matching_str is specified, it will assert that the repr matches this
- string instead.
-
- Args:
- type_str: The type string to parse.
- matching_str: A string the __repr__ of the parsed type should match.
- Returns:
- The parsed js_type.
- """
- parsed_type = self._ParseType(type_str)
- # Use listEqual assertion to more easily identify the difference
- self.assertListEqual(list(matching_str or type_str),
- list(repr(parsed_type)))
- self.assertEquals(matching_str or type_str, repr(parsed_type))
-
- # Newlines will be inserted by the file writer.
- self.assertEquals(type_str.replace('\n', ''), parsed_type.ToString())
- return parsed_type
-
- def assertNullable(self, type_str, nullable=True):
- parsed_type = self.assertProperReconstruction(type_str)
- self.assertEquals(nullable, parsed_type.GetNullability(),
- '"%s" should %sbe nullable' %
- (type_str, 'not ' if nullable else ''))
-
- def assertNotNullable(self, type_str):
- return self.assertNullable(type_str, nullable=False)
-
- def testReconstruction(self):
- self.assertProperReconstruction('*')
- self.assertProperReconstruction('number')
- self.assertProperReconstruction('(((number)))')
- self.assertProperReconstruction('!number')
- self.assertProperReconstruction('?!number')
- self.assertProperReconstruction('number=')
- self.assertProperReconstruction('number=!?', '?!number=')
- self.assertProperReconstruction('number|?string')
- self.assertProperReconstruction('(number|string)')
- self.assertProperReconstruction('?(number|string)')
- self.assertProperReconstruction('Object.<number,string>')
- self.assertProperReconstruction('function(new:Object)')
- self.assertProperReconstruction('function(new:Object):number')
- self.assertProperReconstruction('function(new:Object,Element):number')
- self.assertProperReconstruction('function(this:T,...)')
- self.assertProperReconstruction('{a:?number}')
- self.assertProperReconstruction('{a:?number,b:(number|string)}')
- self.assertProperReconstruction('{c:{nested_element:*}|undefined}')
- self.assertProperReconstruction('{handleEvent:function(?):?}')
- self.assertProperReconstruction('function():?|null')
- self.assertProperReconstruction('null|function():?|bar')
-
- def testOptargs(self):
- self.assertProperReconstruction('number=')
- self.assertProperReconstruction('number|string=')
- self.assertProperReconstruction('(number|string)=')
- self.assertProperReconstruction('(number|string=)')
- self.assertProperReconstruction('(number=|string)')
- self.assertProperReconstruction('function(...):number=')
-
- def testIndepth(self):
- # Do an deeper check of the crazy identifier
- crazy = self.assertProperReconstruction(CRAZY_TYPE)
- self.assertEquals('Array.', crazy.identifier)
- self.assertEquals(1, len(crazy.sub_types))
- func1 = crazy.sub_types[0]
- func2 = func1.return_type
- self.assertEquals('function', func1.identifier)
- self.assertEquals('function', func2.identifier)
- self.assertEquals(3, len(func1.sub_types))
- self.assertEquals(1, len(func2.sub_types))
- self.assertEquals('Object.', func2.sub_types[0].sub_types[0].identifier)
-
- def testIterIdentifiers(self):
- nested_identifiers = self._ParseType('(a|{b:(c|function(new:d):e)})')
- for identifier in ('a', 'b', 'c', 'd', 'e'):
- self.assertIn(identifier, nested_identifiers.IterIdentifiers())
-
- def testIsEmpty(self):
- self.assertTrue(self._ParseType('').IsEmpty())
- self.assertFalse(self._ParseType('?').IsEmpty())
- self.assertFalse(self._ParseType('!').IsEmpty())
- self.assertFalse(self._ParseType('<?>').IsEmpty())
-
- def testIsConstructor(self):
- self.assertFalse(self._ParseType('').IsConstructor())
- self.assertFalse(self._ParseType('Array.<number>').IsConstructor())
- self.assertTrue(self._ParseType('function(new:T)').IsConstructor())
-
- def testIsVarArgsType(self):
- self.assertTrue(self._ParseType('...number').IsVarArgsType())
- self.assertTrue(self._ParseType('...Object|Array').IsVarArgsType())
- self.assertTrue(self._ParseType('...(Object|Array)').IsVarArgsType())
- self.assertFalse(self._ParseType('Object|...Array').IsVarArgsType())
- self.assertFalse(self._ParseType('(...Object|Array)').IsVarArgsType())
-
- def testIsUnknownType(self):
- self.assertTrue(self._ParseType('?').IsUnknownType())
- self.assertTrue(self._ParseType('Foo.<?>').sub_types[0].IsUnknownType())
- self.assertFalse(self._ParseType('?|!').IsUnknownType())
- self.assertTrue(self._ParseType('?|!').sub_types[0].IsUnknownType())
- self.assertFalse(self._ParseType('!').IsUnknownType())
-
- long_type = 'function():?|{handleEvent:function(?=):?,sample:?}|?='
- record = self._ParseType(long_type)
- # First check that there's not just one type with 3 return types, but three
- # top-level types.
- self.assertEquals(3, len(record.sub_types))
-
- # Now extract all unknown type instances and verify that they really are.
- handle_event, sample = record.sub_types[1].sub_types
- for i, sub_type in enumerate([
- record.sub_types[0].return_type,
- handle_event.return_type,
- handle_event.sub_types[0],
- sample,
- record.sub_types[2]]):
- self.assertTrue(sub_type.IsUnknownType(),
- 'Type %d should be the unknown type: %s\n%s' % (
- i, sub_type.tokens, record.Dump()))
-
- def testTypedefNames(self):
- easy = self._ParseType('{a}')
- self.assertTrue(easy.record_type)
-
- easy = self.assertProperReconstruction('{a}', '{a:}').sub_types[0]
- self.assertEquals('a', easy.key_type.identifier)
- self.assertEquals('', easy.identifier)
-
- easy = self.assertProperReconstruction('{a:b}').sub_types[0]
- self.assertEquals('a', easy.key_type.identifier)
- self.assertEquals('b', easy.identifier)
-
- def assertTypeError(self, type_str):
- """Asserts that parsing the given type raises a linter error."""
- self.assertRaises(TypeErrorException, self._ParseType, type_str)
-
- def testParseBadTypes(self):
- """Tests that several errors in types don't break the parser."""
- self.assertTypeError('<')
- self.assertTypeError('>')
- self.assertTypeError('Foo.<Bar')
- self.assertTypeError('Foo.Bar>=')
- self.assertTypeError('Foo.<Bar>>=')
- self.assertTypeError('(')
- self.assertTypeError(')')
- self.assertTypeError('Foo.<Bar)>')
- self._ParseType(':')
- self._ParseType(':foo')
- self.assertTypeError(':)foo')
- self.assertTypeError('(a|{b:(c|function(new:d):e')
-
- def testNullable(self):
- self.assertNullable('null')
- self.assertNullable('Object')
- self.assertNullable('?string')
- self.assertNullable('?number')
-
- self.assertNotNullable('string')
- self.assertNotNullable('number')
- self.assertNotNullable('boolean')
- self.assertNotNullable('function(Object)')
- self.assertNotNullable('function(Object):Object')
- self.assertNotNullable('function(?Object):?Object')
- self.assertNotNullable('!Object')
-
- self.assertNotNullable('boolean|string')
- self.assertNotNullable('(boolean|string)')
-
- self.assertNullable('(boolean|string|null)')
- self.assertNullable('(?boolean)')
- self.assertNullable('?(boolean)')
-
- self.assertNullable('(boolean|Object)')
- self.assertNotNullable('(boolean|(string|{a:}))')
-
- def testSpaces(self):
- """Tests that spaces don't change the outcome."""
- type_str = (' A < b | ( c | ? ! d e f ) > | '
- 'function ( x : . . . ) : { y : z = } ')
- two_spaces = type_str.replace(' ', ' ')
- no_spaces = type_str.replace(' ', '')
- newlines = type_str.replace(' ', '\n * ')
- self.assertProperReconstruction(no_spaces)
- self.assertProperReconstruction(type_str, no_spaces)
- self.assertProperReconstruction(two_spaces, no_spaces)
- self.assertProperReconstruction(newlines, no_spaces)
-
-if __name__ == '__main__':
- googletest.main()
diff --git a/tools/closure_linter/dist/closure_linter-2.3.17-py2.7.egg b/tools/closure_linter/dist/closure_linter-2.3.17-py2.7.egg
deleted file mode 100644
index e9a847ca28..0000000000
--- a/tools/closure_linter/dist/closure_linter-2.3.17-py2.7.egg
+++ /dev/null
Binary files differ
diff --git a/tools/closure_linter/setup.py b/tools/closure_linter/setup.py
deleted file mode 100755
index d320b65d17..0000000000
--- a/tools/closure_linter/setup.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-try:
- from setuptools import setup
-except ImportError:
- from distutils.core import setup
-
-setup(name='closure_linter',
- version='2.3.17',
- description='Closure Linter',
- license='Apache',
- author='The Closure Linter Authors',
- author_email='opensource@google.com',
- url='http://code.google.com/p/closure-linter',
- install_requires=['python-gflags'],
- package_dir={'closure_linter': 'closure_linter'},
- packages=['closure_linter', 'closure_linter.common'],
- entry_points = {
- 'console_scripts': [
- 'gjslint = closure_linter.gjslint:main',
- 'fixjsstyle = closure_linter.fixjsstyle:main'
- ]
- }
-)