Diffstat (limited to 'deps/v8/build/util')
-rw-r--r--  deps/v8/build/util/BUILD.gn                                  |  51
-rw-r--r--  deps/v8/build/util/LASTCHANGE.dummy                          |   1
-rw-r--r--  deps/v8/build/util/PRESUBMIT.py                              |  58
-rw-r--r--  deps/v8/build/util/android_chrome_version.py                 | 173
-rw-r--r--  deps/v8/build/util/android_chrome_version_test.py            | 293
-rw-r--r--  deps/v8/build/util/branding.gni                              |  46
-rw-r--r--  deps/v8/build/util/generate_wrapper.gni                      |  98
-rwxr-xr-x  deps/v8/build/util/generate_wrapper.py                       | 136
-rw-r--r--  deps/v8/build/util/java_action.gni                           | 103
-rwxr-xr-x  deps/v8/build/util/java_action.py                            |  82
-rw-r--r--  deps/v8/build/util/lastchange.gni                            |  16
-rwxr-xr-x  deps/v8/build/util/lastchange.py                             | 318
-rw-r--r--  deps/v8/build/util/lib/common/PRESUBMIT.py                   |  16
-rw-r--r--  deps/v8/build/util/lib/common/__init__.py                    |   0
-rw-r--r--  deps/v8/build/util/lib/common/chrome_test_server_spawner.py  | 480
-rw-r--r--  deps/v8/build/util/lib/common/perf_result_data_type.py       |  20
-rw-r--r--  deps/v8/build/util/lib/common/perf_tests_results_helper.py   | 200
-rw-r--r--  deps/v8/build/util/lib/common/unittest_util.py               | 155
-rwxr-xr-x  deps/v8/build/util/lib/common/unittest_util_test.py          |  65
-rw-r--r--  deps/v8/build/util/lib/common/util.py                        | 151
-rw-r--r--  deps/v8/build/util/process_version.gni                       | 126
-rw-r--r--  deps/v8/build/util/version.gni                               | 159
-rwxr-xr-x  deps/v8/build/util/version.py                                | 259
-rw-r--r--  deps/v8/build/util/version_test.py                           | 174
-rw-r--r--  deps/v8/build/util/webkit_version.h.in                       |   9
25 files changed, 3189 insertions, 0 deletions
diff --git a/deps/v8/build/util/BUILD.gn b/deps/v8/build/util/BUILD.gn
new file mode 100644
index 0000000000..54c23c91c4
--- /dev/null
+++ b/deps/v8/build/util/BUILD.gn
@@ -0,0 +1,51 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/util/lastchange.gni")
+
+action("webkit_version") {
+ script = "version.py"
+
+ template_file = "webkit_version.h.in"
+ inputs = [
+ lastchange_file,
+ template_file,
+ ]
+
+ output_file = "$target_gen_dir/webkit_version.h"
+ outputs = [
+ output_file,
+ ]
+
+ args = [
+ # LASTCHANGE contains "<build hash>-<ref>". The user agent only wants the
+ # "<build hash>" bit, so chop off everything after it.
+ "-e",
+ "LASTCHANGE=LASTCHANGE[:LASTCHANGE.find('-')]",
+ "-f",
+ rebase_path(lastchange_file, root_build_dir),
+ rebase_path(template_file, root_build_dir),
+ rebase_path(output_file, root_build_dir),
+ ]
+}
+
+action("chrome_version_json") {
+ script = "version.py"
+ _chrome_version_path = "//chrome/VERSION"
+ inputs = [
+ _chrome_version_path,
+ ]
+ _output_file = "$root_gen_dir/CHROME_VERSION.json"
+ outputs = [
+ _output_file,
+ ]
+ args = [
+ "--file",
+ rebase_path(_chrome_version_path, root_build_dir),
+ "--template",
+ "{\"full-quoted\": \"\\\"@MAJOR@.@MINOR@.@BUILD@.@PATCH@\\\"\"}",
+ "--output",
+ rebase_path(_output_file, root_build_dir),
+ ]
+}
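
The "-e" argument above hands version.py a small Python expression that trims the
LASTCHANGE value at the first '-'. A minimal sketch of that slicing in plain Python
(this is not version.py itself, and the hash/ref value below is hypothetical):

    # Trim "<build hash>-<ref>" down to "<build hash>", as the -e expression does.
    LASTCHANGE = 'c14d891d44f0afff64e56ed7c9702df1d807b1ee-refs/branch-heads/3729@{#948}'
    LASTCHANGE = LASTCHANGE[:LASTCHANGE.find('-')]
    print(LASTCHANGE)  # -> 'c14d891d44f0afff64e56ed7c9702df1d807b1ee'
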
diff --git a/deps/v8/build/util/LASTCHANGE.dummy b/deps/v8/build/util/LASTCHANGE.dummy
new file mode 100644
index 0000000000..21bb3c33c7
--- /dev/null
+++ b/deps/v8/build/util/LASTCHANGE.dummy
@@ -0,0 +1 @@
+LASTCHANGE=0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
diff --git a/deps/v8/build/util/PRESUBMIT.py b/deps/v8/build/util/PRESUBMIT.py
new file mode 100644
index 0000000000..271afbbb62
--- /dev/null
+++ b/deps/v8/build/util/PRESUBMIT.py
@@ -0,0 +1,58 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+"""Presubmit for build/util"""
+
+
+def _GetBlacklist(input_api):
+ blacklist = []
+ affected_files = input_api.change.AffectedFiles()
+ version_script_change = next(
+ (f for f in affected_files
+ if re.search('\\/version\\.py$|\\/version_test\\.py$', f.LocalPath())),
+ None)
+
+ if version_script_change is None:
+ blacklist.append('version_test\\.py$')
+
+ android_chrome_version_script_change = next(
+ (f for f in affected_files if re.search(
+ '\\/android_chrome_version\\.py$|'
+ '\\/android_chrome_version_test\\.py$', f.LocalPath())), None)
+
+ if android_chrome_version_script_change is None:
+ blacklist.append('android_chrome_version_test\\.py$')
+
+ return blacklist
+
+
+def _GetPythonUnitTests(input_api, output_api):
+ # No need to test if files are unchanged
+ blacklist = _GetBlacklist(input_api)
+
+ return input_api.canned_checks.GetUnitTestsRecursively(
+ input_api,
+ output_api,
+ input_api.PresubmitLocalPath(),
+ whitelist=['.*_test\\.py$'],
+ blacklist=blacklist)
+
+
+def CommonChecks(input_api, output_api):
+ """Presubmit checks run on both upload and commit.
+ """
+ checks = []
+ checks.extend(_GetPythonUnitTests(input_api, output_api))
+ return input_api.RunTests(checks, False)
+
+
+def CheckChangeOnUpload(input_api, output_api):
+ """Presubmit checks on CL upload."""
+ return CommonChecks(input_api, output_api)
+
+
+def CheckChangeOnCommit(input_api, output_api):
+ """Presubmit checks on commit."""
+ return CommonChecks(input_api, output_api)
diff --git a/deps/v8/build/util/android_chrome_version.py b/deps/v8/build/util/android_chrome_version.py
new file mode 100644
index 0000000000..5628f1a845
--- /dev/null
+++ b/deps/v8/build/util/android_chrome_version.py
@@ -0,0 +1,173 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Different build variants of chrome for android have different version codes.
+Reason: for targets that have the same package name (e.g. chrome, chrome
+modern, monochrome, trichrome), Play Store considers them the same app
+and will push the supported app with the highest version code to devices.
+(note Play Store does not support hosting two different apps with same
+version code and package name)
+
+Each key in this dict represents a unique version code that will be used for
+one or more android chrome apks.
+
+Webview channels must have unique version codes for a couple reasons:
+a) Play Store does not support having the same version code for different
+ versions of a package. Without unique codes, promoting a beta apk to stable
+ would require first removing the beta version.
+b) Firebase project support (used by official builders) requires unique
+ [version code + package name].
+ We cannot add new webview package names for new channels because webview
+ packages are whitelisted by Android as webview providers.
+
+WEBVIEW_STABLE, WEBVIEW_BETA, WEBVIEW_DEV are all used for standalone webview,
+whereas the others are used for various chrome apks.
+
+Note that a final digit of '3' for webview is reserved for Trichrome Webview.
+The same versionCode is used for both Trichrome Chrome and Trichrome Webview.
+"""
+ANDROID_CHROME_APK_VERSION_CODE_DIFFS = {
+ 'CHROME': 0,
+ 'CHROME_MODERN': 1,
+ 'MONOCHROME': 2,
+ 'TRICHROME': 3,
+ 'NOTOUCH_CHROME': 4,
+ 'WEBVIEW_STABLE': 0,
+ 'WEBVIEW_BETA': 1,
+ 'WEBVIEW_DEV': 2,
+}
+
+"""The architecture preference is encoded into the version_code for devices
+that support multiple architectures. (exploiting play store logic that pushes
+apk with highest version code)
+
+Detail:
+Many Android devices support multiple architectures, and can run applications
+built for any of them; the Play Store considers all of the supported
+architectures compatible and does not, itself, have any preference for which
+is "better". The common cases here:
+
+- All production arm64 devices can also run arm
+- All production x64 devices can also run x86
+- Pretty much all production x86/x64 devices can also run arm (via a binary
+ translator)
+
+Since the Play Store has no particular preferences, you have to encode your own
+preferences into the ordering of the version codes. There are a few relevant
+things here:
+
+- For any android app, it's theoretically preferable to ship a 64-bit version to
+ 64-bit devices if it exists, because the 64-bit architectures are supposed to
+ be "better" than their 32-bit predecessors (unfortunately this is not always
+ true due to the effect on memory usage, but we currently deal with this by
+ simply not shipping a 64-bit version *at all* on the configurations where we
+ want the 32-bit version to be used).
+- For any android app, it's definitely preferable to ship an x86 version to x86
+ devices if it exists instead of an arm version, because running things through
+ the binary translator is a performance hit.
+- For WebView, Monochrome, and Trichrome specifically, they are a special class
+ of APK called "multiarch" which means that they actually need to *use* more
+ than one architecture at runtime (rather than simply being compatible with
+ more than one). The 64-bit builds of these multiarch APKs contain both 32-bit
+ and 64-bit code, so that Webview is available for both ABIs. If you're
+ multiarch you *must* have a version that supports both 32-bit and 64-bit
+ code on a 64-bit device, otherwise it won't work properly. So, the 64-bit
+ version needs to be a higher versionCode, as otherwise a 64-bit device would
+ prefer the 32-bit version that does not include any 64-bit code, and fail.
+- The relative order of mips isn't important, but it needs to be a *distinct*
+ value from the other architectures because all builds need unique version codes.
+"""
+ARCH_VERSION_CODE_DIFF = {
+ 'arm': 0,
+ 'x86': 10,
+ 'mipsel': 20,
+ 'arm64': 30,
+ 'x64': 60
+}
+ARCH_CHOICES = ARCH_VERSION_CODE_DIFF.keys()
+
+""" "Next" builds get +5 last version code digit.
+
+We choose 5 because it won't conflict with values in
+ANDROID_CHROME_APK_VERSION_CODE_DIFFS
+
+We also increment BUILD (branch) number to ensure that the version code is
+higher for the next build than any build with the same BUILD value (even if the
+other builds have a higher PATCH value). This is needed for release logistics
+when working with unreleased Android versions: upgrading android will install
+the chrome build (the "next" build) that uses the new android sdk.
+"""
+NEXT_BUILD_VERSION_CODE_DIFF = 100005
+
+"""For 64-bit architectures, some packages have multiple targets with version
+codes that differ by the second-to-last digit (the architecture digit). This is
+for various combinations of 32-bit vs 64-bit chrome and webview. The
+default/traditional configuration is 32-bit chrome with 64-bit webview, but we
+are adding:
++ 64-bit chrome with 32-bit webview
++ 64-bit combined Chrome and Webview (only one library)
++ (maybe someday 32-bit chrome with 32-bit webview)
+
+The naming scheme followed here is <chrome>_<webview>,
+e.g. 64_32 is 64-bit chrome with 32-bit webview.
+"""
+ARCH64_APK_VARIANTS = {
+ '64_32': {
+ 'PACKAGES': frozenset(['MONOCHROME', 'TRICHROME']),
+ 'MODIFIER': 10
+ },
+ '64': {
+ 'PACKAGES': frozenset(['MONOCHROME', 'TRICHROME']),
+ 'MODIFIER': 20
+ }
+}
+
+
+def GenerateVersionCodes(version_values, arch, is_next_build):
+ """Get dict of version codes for chrome-for-android-related targets
+
+ e.g.
+ {
+ 'CHROME_VERSION_CODE': '378100010',
+ 'MONOCHROME_VERSION_CODE': '378100013',
+ ...
+ }
+
+ versionCode values are built like this:
+ {full BUILD int}{3 digits for PATCH}{1 digit for architecture}{final digit}.
+
+ MAJOR and MINOR values are not used for generating versionCode.
+ - MINOR is always 0. It was used for something long ago in Chrome's history
+ but has not been used since, and has never been nonzero on Android.
+ - MAJOR is cosmetic and controlled by the release managers. MAJOR and BUILD
+ always have reasonable sort ordering: for two version codes A and B, it's
+ always the case that (A.MAJOR < B.MAJOR) implies (A.BUILD < B.BUILD), and
+ that (A.MAJOR > B.MAJOR) implies (A.BUILD > B.BUILD). This property is just
+ maintained by the humans who set MAJOR.
+
+ Thus, this method is responsible for the final two digits of versionCode.
+ """
+
+ base_version_code = '%s%03d00' % (version_values['BUILD'],
+ int(version_values['PATCH']))
+ new_version_code = int(base_version_code)
+
+ new_version_code += ARCH_VERSION_CODE_DIFF[arch]
+ if is_next_build:
+ new_version_code += NEXT_BUILD_VERSION_CODE_DIFF
+
+ version_codes = {}
+ for apk, diff in ANDROID_CHROME_APK_VERSION_CODE_DIFFS.iteritems():
+ version_code_name = apk + '_VERSION_CODE'
+ version_code_val = new_version_code + diff
+ version_codes[version_code_name] = str(version_code_val)
+
+ if arch == 'arm64' or arch == 'x64':
+ for variant, config in ARCH64_APK_VARIANTS.iteritems():
+ if apk in config['PACKAGES']:
+ variant_name = apk + '_' + variant + '_VERSION_CODE'
+ variant_val = version_code_val + config['MODIFIER']
+ version_codes[variant_name] = str(variant_val)
+
+
+ return version_codes
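
The docstrings above describe the versionCode layout as {BUILD}{3-digit PATCH}{arch
digit}{final digit}. A small illustrative sketch (not part of the module) decomposing
one code implied by the tables above, for BUILD=3720, PATCH=0, arch='arm64', MONOCHROME:

    # 372000000 (base for BUILD=3720, PATCH=0) + 30 (arm64) + 2 (MONOCHROME) = 372000032
    code = '372000032'
    build, patch = code[:-5], code[-5:-2]        # '3720', '000'
    arch_digit, apk_digit = code[-2], code[-1]   # '3' (arm64 => +30), '2' (MONOCHROME)
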
diff --git a/deps/v8/build/util/android_chrome_version_test.py b/deps/v8/build/util/android_chrome_version_test.py
new file mode 100644
index 0000000000..5e743d34ba
--- /dev/null
+++ b/deps/v8/build/util/android_chrome_version_test.py
@@ -0,0 +1,293 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from android_chrome_version import GenerateVersionCodes
+
+
+class _VersionTest(unittest.TestCase):
+ """Unittests for the android_chrome_version module.
+ """
+
+ EXAMPLE_VERSION_VALUES = {
+ 'MAJOR': '74',
+ 'MINOR': '0',
+ 'BUILD': '3720',
+ 'PATCH': '0',
+ }
+
+ def testGenerateVersionCodesAndroidChrome(self):
+ """Assert it gives correct values for standard/example inputs"""
+ output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False)
+
+ chrome_version_code = output['CHROME_VERSION_CODE']
+
+ self.assertEqual(chrome_version_code, '372000000')
+
+ def testGenerateVersionCodesAndroidChromeModern(self):
+ """Assert it gives correct values for standard/example inputs"""
+ output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False)
+
+ chrome_modern_version_code = output['CHROME_MODERN_VERSION_CODE']
+
+ self.assertEqual(chrome_modern_version_code, '372000001')
+
+ def testGenerateVersionCodesAndroidMonochrome(self):
+ """Assert it gives correct values for standard/example inputs"""
+ output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False)
+
+ monochrome_version_code = output['MONOCHROME_VERSION_CODE']
+
+ self.assertEqual(monochrome_version_code, '372000002')
+
+ def testGenerateVersionCodesAndroidTrichrome(self):
+ """Assert it gives correct values for standard/example inputs"""
+ output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False)
+
+ trichrome_version_code = output['TRICHROME_VERSION_CODE']
+
+ self.assertEqual(trichrome_version_code, '372000003')
+
+ def testGenerateVersionCodesAndroidNoTouch(self):
+ """Assert it gives correct values for standard/example inputs"""
+ output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False)
+
+ notouch_chrome_version_code = output['NOTOUCH_CHROME_VERSION_CODE']
+
+ self.assertEqual(notouch_chrome_version_code, '372000004')
+
+ def testGenerateVersionCodesAndroidWebviewStable(self):
+ """Assert it gives correct values for standard/example inputs"""
+ output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False)
+
+ webview_stable_version_code = output['WEBVIEW_STABLE_VERSION_CODE']
+
+ self.assertEqual(webview_stable_version_code, '372000000')
+
+ def testGenerateVersionCodesAndroidWebviewBeta(self):
+ """Assert it gives correct values for standard/example inputs"""
+ output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False)
+
+ webview_beta_version_code = output['WEBVIEW_BETA_VERSION_CODE']
+
+ self.assertEqual(webview_beta_version_code, '372000001')
+
+ def testGenerateVersionCodesAndroidWebviewDev(self):
+ """Assert it gives correct values for standard/example inputs"""
+ output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False)
+
+ webview_dev_version_code = output['WEBVIEW_DEV_VERSION_CODE']
+
+ self.assertEqual(webview_dev_version_code, '372000002')
+
+ def testGenerateVersionCodesAndroidNextBuild(self):
+ """Assert it handles "next" builds correctly"""
+ output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=True)
+
+ # Get just a sample of values
+ chrome_version_code = output['CHROME_VERSION_CODE']
+ monochrome_version_code = output['MONOCHROME_VERSION_CODE']
+ webview_stable_version_code = output['WEBVIEW_STABLE_VERSION_CODE']
+ webview_beta_version_code = output['WEBVIEW_BETA_VERSION_CODE']
+
+ self.assertEqual(chrome_version_code, '372100005')
+ self.assertEqual(monochrome_version_code, '372100007')
+ self.assertEqual(webview_stable_version_code, '372100005')
+ self.assertEqual(webview_beta_version_code, '372100006')
+
+ def testGenerateVersionCodesAndroidArchArm(self):
+ """Assert it handles different architectures correctly.
+
+ Version codes for different builds need to be distinct and maintain a
+ certain ordering.
+ See docstring on android_chrome_version.ARCH_VERSION_CODE_DIFF for
+ reasoning.
+ """
+ output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False)
+ arch_chrome_version_code = output['CHROME_VERSION_CODE']
+
+ self.assertEqual(arch_chrome_version_code, '372000000')
+
+ def testGenerateVersionCodesAndroidArchX86(self):
+ """Assert it handles different architectures correctly.
+
+ Version codes for different builds need to be distinct and maintain a
+ certain ordering.
+ See docstring on android_chrome_version.ARCH_VERSION_CODE_DIFF for
+ reasoning.
+ """
+ output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='x86', is_next_build=False)
+ arch_chrome_version_code = output['CHROME_VERSION_CODE']
+
+ self.assertEqual(arch_chrome_version_code, '372000010')
+
+ def testGenerateVersionCodesAndroidArchMips(self):
+ """Assert it handles different architectures correctly.
+
+ Version codes for different builds need to be distinct and maintain a
+ certain ordering.
+ See docstring on android_chrome_version.ARCH_VERSION_CODE_DIFF for
+ reasoning.
+ """
+ output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='mipsel', is_next_build=False)
+ arch_chrome_version_code = output['CHROME_VERSION_CODE']
+
+ self.assertEqual(arch_chrome_version_code, '372000020')
+
+ def testGenerateVersionCodesAndroidArchArm64(self):
+ """Assert it handles different architectures correctly.
+
+ Version codes for different builds need to be distinct and maintain a
+ certain ordering.
+ See docstring on android_chrome_version.ARCH_VERSION_CODE_DIFF for
+ reasoning.
+ """
+ output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='arm64', is_next_build=False)
+ arch_chrome_version_code = output['CHROME_VERSION_CODE']
+
+ self.assertEqual(arch_chrome_version_code, '372000030')
+
+ def testGenerateVersionCodesAndroidArchArm64Variants(self):
+ """Assert it handles 64-bit-specific additional version codes correctly.
+
+ Some additional version codes are generated for 64-bit architectures.
+ See docstring on android_chrome_version.ARCH64_APK_VARIANTS for more info.
+ """
+ output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='arm64', is_next_build=False)
+ arch_monochrome_64_32_version_code = output['MONOCHROME_64_32_VERSION_CODE']
+ arch_monochrome_64_version_code = output['MONOCHROME_64_VERSION_CODE']
+ arch_trichrome_64_32_version_code = output['TRICHROME_64_32_VERSION_CODE']
+ arch_trichrome_64_version_code = output['TRICHROME_64_VERSION_CODE']
+
+ self.assertEqual(arch_monochrome_64_32_version_code, '372000042')
+ self.assertEqual(arch_monochrome_64_version_code, '372000052')
+ self.assertEqual(arch_trichrome_64_32_version_code, '372000043')
+ self.assertEqual(arch_trichrome_64_version_code, '372000053')
+
+ def testGenerateVersionCodesAndroidArchX64(self):
+ """Assert it handles different architectures correctly.
+
+ Version codes for different builds need to be distinct and maintain a
+ certain ordering.
+ See docstring on android_chrome_version.ARCH_VERSION_CODE_DIFF for
+ reasoning.
+ """
+ output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='x64', is_next_build=False)
+ arch_chrome_version_code = output['CHROME_VERSION_CODE']
+
+ self.assertEqual(arch_chrome_version_code, '372000060')
+
+ def testGenerateVersionCodesAndroidArchX64Variants(self):
+ """Assert it handles 64-bit-specific additional version codes correctly.
+
+ Some additional version codes are generated for 64-bit architectures.
+ See docstring on android_chrome_version.ARCH64_APK_VARIANTS for more info.
+ """
+ output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='x64', is_next_build=False)
+ arch_monochrome_64_32_version_code = output['MONOCHROME_64_32_VERSION_CODE']
+ arch_monochrome_64_version_code = output['MONOCHROME_64_VERSION_CODE']
+ arch_trichrome_64_32_version_code = output['TRICHROME_64_32_VERSION_CODE']
+ arch_trichrome_64_version_code = output['TRICHROME_64_VERSION_CODE']
+
+ self.assertEqual(arch_monochrome_64_32_version_code, '372000072')
+ self.assertEqual(arch_monochrome_64_version_code, '372000082')
+ self.assertEqual(arch_trichrome_64_32_version_code, '372000073')
+ self.assertEqual(arch_trichrome_64_version_code, '372000083')
+
+ def testGenerateVersionCodesAndroidArchOrderArm(self):
+ """Assert it handles different architectures correctly.
+
+ Version codes for different builds need to be distinct and maintain a
+ certain ordering.
+ See docstring on android_chrome_version.ARCH_VERSION_CODE_DIFF for
+ reasoning.
+
+ Test arm-related values.
+ """
+ arm_output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False)
+ arm64_output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='arm64', is_next_build=False)
+
+ arm_chrome_version_code = arm_output['CHROME_VERSION_CODE']
+ arm64_chrome_version_code = arm64_output['CHROME_VERSION_CODE']
+
+ self.assertLess(arm_chrome_version_code, arm64_chrome_version_code)
+
+ def testGenerateVersionCodesAndroidArchOrderX86(self):
+ """Assert it handles different architectures correctly.
+
+ Version codes for different builds need to be distinct and maintain a
+ certain ordering.
+ See docstring on android_chrome_version.ARCH_VERSION_CODE_DIFF for
+ reasoning.
+
+ Test x86-related values.
+ """
+ x86_output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='x86', is_next_build=False)
+ x64_output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='x64', is_next_build=False)
+
+ x86_chrome_version_code = x86_output['CHROME_VERSION_CODE']
+ x64_chrome_version_code = x64_output['CHROME_VERSION_CODE']
+
+ self.assertLess(x86_chrome_version_code, x64_chrome_version_code)
+
+ def testGenerateVersionCodesAndroidWebviewChannelOrderBeta(self):
+ """Assert webview beta channel is higher than stable.
+
+ The channel-specific version codes for standalone webview need to follow
+ the order stable < beta < dev.
+
+ This ensures that if a user opts into the beta track, they will always have the
+ beta apk, including any finch experiments targeted at beta users, even when
+ beta and stable channels are otherwise on the same version.
+ """
+ output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False)
+
+ webview_stable_version_code = output['WEBVIEW_STABLE_VERSION_CODE']
+ webview_beta_version_code = output['WEBVIEW_BETA_VERSION_CODE']
+
+ self.assertGreater(webview_beta_version_code, webview_stable_version_code)
+
+ def testGenerateVersionCodesAndroidWebviewChannelOrderDev(self):
+ """Assert webview dev channel is higher than beta.
+
+ The channel-specific version codes for standalone webview need to follow
+ the order stable < beta < dev.
+
+ This ensures that if a user opts into the dev track, they will always have the
+ dev apk, including any finch experiments targeted at dev users, even when
+ dev and beta channels are otherwise on the same version.
+ """
+ output = GenerateVersionCodes(
+ self.EXAMPLE_VERSION_VALUES, arch='arm', is_next_build=False)
+
+ webview_beta_version_code = output['WEBVIEW_BETA_VERSION_CODE']
+ webview_dev_version_code = output['WEBVIEW_DEV_VERSION_CODE']
+
+ self.assertGreater(webview_dev_version_code, webview_beta_version_code)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/build/util/branding.gni b/deps/v8/build/util/branding.gni
new file mode 100644
index 0000000000..2a229f1056
--- /dev/null
+++ b/deps/v8/build/util/branding.gni
@@ -0,0 +1,46 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This exposes the Chrome branding as GN variables for use in build files.
+#
+# PREFER NOT TO USE THESE. The GYP build uses this kind of thing extensively.
+# However, it is far better to write an action to generate a file at
+# build-time with the information you need. This allows better dependency
+# checking and GN will run faster.
+#
+# These values should only be used if you REALLY need to depend on them at
+# build-time, for example, in the computation of output file names.
+
+import("//build/config/chrome_build.gni")
+
+_branding_dictionary_template =
+ "full_name = \"@PRODUCT_FULLNAME@\" " +
+ "short_name = \"@PRODUCT_SHORTNAME@\" " +
+ "bundle_id = \"@MAC_BUNDLE_ID@\" " +
+ "creator_code = \"@MAC_CREATOR_CODE@\" " +
+ "installer_full_name = \"@PRODUCT_INSTALLER_FULLNAME@\" " +
+ "installer_short_name = \"@PRODUCT_INSTALLER_SHORTNAME@\" " +
+ "team_id = \"@MAC_TEAM_ID@\" "
+
+_branding_file = "//chrome/app/theme/$branding_path_component/BRANDING"
+_result = exec_script("version.py",
+ [
+ "-f",
+ rebase_path(_branding_file, root_build_dir),
+ "-t",
+ _branding_dictionary_template,
+ ],
+ "scope",
+ [ _branding_file ])
+
+chrome_product_full_name = _result.full_name
+chrome_product_short_name = _result.short_name
+chrome_product_installer_full_name = _result.installer_full_name
+chrome_product_installer_short_name = _result.installer_short_name
+
+if (is_mac) {
+ chrome_mac_bundle_id = _result.bundle_id
+ chrome_mac_creator_code = _result.creator_code
+ chrome_mac_team_id = _result.team_id
+}
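
The exec_script() call above relies on version.py (listed in the diffstat but not shown
in this section) to replace each @KEY@ token in the template with the matching value from
the BRANDING file, producing text GN then parses as a scope. A stand-in sketch of that
substitution in Python, with hypothetical values:

    import re

    # Hypothetical BRANDING values; version.py reads these from KEY=VALUE lines.
    values = {'PRODUCT_FULLNAME': 'Chromium', 'PRODUCT_SHORTNAME': 'Chromium'}
    template = 'full_name = "@PRODUCT_FULLNAME@" short_name = "@PRODUCT_SHORTNAME@" '
    print(re.sub(r'@(\w+)@', lambda m: values.get(m.group(1), m.group(0)), template))
    # -> full_name = "Chromium" short_name = "Chromium"
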
diff --git a/deps/v8/build/util/generate_wrapper.gni b/deps/v8/build/util/generate_wrapper.gni
new file mode 100644
index 0000000000..74d94330da
--- /dev/null
+++ b/deps/v8/build/util/generate_wrapper.gni
@@ -0,0 +1,98 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Wraps a target and any of its arguments into an executable script.
+#
+# Many executable targets have build-time-constant arguments. This
+# template allows those to be wrapped into a single, user- or bot-friendly
+# script at build time.
+#
+# Paths to be wrapped should be relative to root_build_dir and should be
+# wrapped in "@WrappedPath(...)"; see Example below.
+#
+# Variables:
+# generator_script: Path to the script to use to perform the wrapping.
+# Defaults to //build/util/generate_wrapper.py. Generally should only
+# be set by other templates.
+# wrapper_script: Output path.
+# executable: Path to the executable to wrap. Can be a script or a
+# build product. Paths can be relative to the containing gn file
+# or source-absolute.
+# executable_args: List of arguments to write into the wrapper.
+#
+# Example wrapping a checked-in script:
+# generate_wrapper("sample_wrapper") {
+# executable = "//for/bar/sample.py"
+# wrapper_script = "$root_build_dir/bin/run_sample"
+#
+# _sample_argument_path = "//sample/$target_cpu/lib/sample_lib.so"
+# _rebased_sample_argument_path = rebase_path(
+# _sample_argument_path,
+# root_build_dir)
+# executable_args = [
+# "--sample-lib", "@WrappedPath(${_rebased_sample_argument_path})",
+# ]
+# }
+#
+# Example wrapping a build product:
+# generate_wrapper("sample_wrapper") {
+# executable = "$root_build_dir/sample_build_product"
+# wrapper_script = "$root_build_dir/bin/run_sample_build_product"
+# }
+template("generate_wrapper") {
+ _generator_script = "//build/util/generate_wrapper.py"
+ if (defined(invoker.generator_script)) {
+ _generator_script = invoker.generator_script
+ }
+ _executable_to_wrap = invoker.executable
+ _wrapper_script = invoker.wrapper_script
+ if (is_win) {
+ _wrapper_script += ".bat"
+ }
+ if (defined(invoker.executable_args)) {
+ _wrapped_arguments = invoker.executable_args
+ } else {
+ _wrapped_arguments = []
+ }
+
+ action(target_name) {
+ forward_variables_from(invoker,
+ [
+ "data",
+ "data_deps",
+ "deps",
+ "sources",
+ "testonly",
+ ])
+ script = _generator_script
+ if (!defined(data)) {
+ data = []
+ }
+ data += [ _wrapper_script ]
+ outputs = [
+ _wrapper_script,
+ ]
+
+ _rebased_executable_to_wrap =
+ rebase_path(_executable_to_wrap, root_build_dir)
+ _rebased_wrapper_script = rebase_path(_wrapper_script, root_build_dir)
+ if (is_win) {
+ _script_language = "batch"
+ } else {
+ _script_language = "bash"
+ }
+ args = [
+ "--executable",
+ "@WrappedPath(${_rebased_executable_to_wrap})",
+ "--wrapper-script",
+ _rebased_wrapper_script,
+ "--output-directory",
+ rebase_path(root_build_dir, root_build_dir),
+ "--script-language",
+ _script_language,
+ "--",
+ ]
+ args += _wrapped_arguments
+ }
+}
diff --git a/deps/v8/build/util/generate_wrapper.py b/deps/v8/build/util/generate_wrapper.py
new file mode 100755
index 0000000000..5373e1ea2e
--- /dev/null
+++ b/deps/v8/build/util/generate_wrapper.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env vpython
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Wraps an executable and any provided arguments into an executable script."""
+
+import argparse
+import os
+import sys
+import textwrap
+
+
+# The bash template passes the python script into vpython via stdin.
+# The interpreter doesn't know about the script, so we have bash
+# inject the script location.
+BASH_TEMPLATE = textwrap.dedent(
+ """\
+ #!/usr/bin/env vpython
+ _SCRIPT_LOCATION = __file__
+ {script}
+ """)
+
+
+# The batch template reruns the batch script with vpython, with the -x
+# flag instructing the interpreter to ignore the first line. The interpreter
+# knows about the (batch) script in this case, so it can get the file location
+# directly.
+BATCH_TEMPLATE = textwrap.dedent(
+ """\
+ @SETLOCAL ENABLEDELAYEDEXPANSION \
+ & vpython.bat -x "%~f0" %* \
+ & EXIT /B !ERRORLEVEL!
+ _SCRIPT_LOCATION = __file__
+ {script}
+ """)
+
+
+SCRIPT_TEMPLATES = {
+ 'bash': BASH_TEMPLATE,
+ 'batch': BATCH_TEMPLATE,
+}
+
+
+PY_TEMPLATE = textwrap.dedent(
+ """\
+ import os
+ import re
+ import subprocess
+ import sys
+
+ _WRAPPED_PATH_RE = re.compile(r'@WrappedPath\(([^)]+)\)')
+ _PATH_TO_OUTPUT_DIR = '{path_to_output_dir}'
+ _SCRIPT_DIR = os.path.dirname(os.path.realpath(_SCRIPT_LOCATION))
+
+
+ def ExpandWrappedPath(arg):
+ m = _WRAPPED_PATH_RE.match(arg)
+ if m:
+ return os.path.join(
+ os.path.relpath(_SCRIPT_DIR), _PATH_TO_OUTPUT_DIR, m.group(1))
+ return arg
+
+
+ def ExpandWrappedPaths(args):
+ for i, arg in enumerate(args):
+ args[i] = ExpandWrappedPath(arg)
+ return args
+
+
+ def main(raw_args):
+ executable_path = ExpandWrappedPath('{executable_path}')
+ executable_args = ExpandWrappedPaths({executable_args})
+
+ return subprocess.call([executable_path] + executable_args + raw_args)
+
+
+ if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
+ """)
+
+
+def Wrap(args):
+ """Writes a wrapped script according to the provided arguments.
+
+ Arguments:
+ args: an argparse.Namespace object containing command-line arguments
+ as parsed by a parser returned by CreateArgumentParser.
+ """
+ path_to_output_dir = os.path.relpath(
+ args.output_directory,
+ os.path.dirname(args.wrapper_script))
+
+ with open(args.wrapper_script, 'w') as wrapper_script:
+ py_contents = PY_TEMPLATE.format(
+ path_to_output_dir=path_to_output_dir,
+ executable_path=str(args.executable),
+ executable_args=str(args.executable_args))
+ template = SCRIPT_TEMPLATES[args.script_language]
+ wrapper_script.write(template.format(
+ script=py_contents))
+ os.chmod(args.wrapper_script, 0750)
+
+ return 0
+
+
+def CreateArgumentParser():
+ """Creates an argparse.ArgumentParser instance."""
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ '--executable',
+ help='Executable to wrap.')
+ parser.add_argument(
+ '--wrapper-script',
+ help='Path to which the wrapper script will be written.')
+ parser.add_argument(
+ '--output-directory',
+ help='Path to the output directory.')
+ parser.add_argument(
+ '--script-language',
+ choices=SCRIPT_TEMPLATES.keys(),
+ help='Language in which the wrapper script will be written.')
+ parser.add_argument(
+ 'executable_args', nargs='*',
+ help='Arguments to wrap into the executable.')
+ return parser
+
+
+def main(raw_args):
+ parser = CreateArgumentParser()
+ args = parser.parse_args(raw_args)
+ return Wrap(args)
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
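
At run time, the wrapper generated from PY_TEMPLATE resolves each @WrappedPath(...)
argument relative to its own location (see ExpandWrappedPath above). A standalone sketch
of that expansion, with the two directory values chosen hypothetically:

    import os
    import re

    _WRAPPED_PATH_RE = re.compile(r'@WrappedPath\(([^)]+)\)')
    _PATH_TO_OUTPUT_DIR = '..'      # e.g. the wrapper lives in <out>/bin, output dir is <out>
    _SCRIPT_DIR = '/tmp/out/bin'    # directory containing the generated wrapper

    def expand_wrapped_path(arg):
      m = _WRAPPED_PATH_RE.match(arg)
      if m:
        return os.path.join(os.path.relpath(_SCRIPT_DIR), _PATH_TO_OUTPUT_DIR, m.group(1))
      return arg

    print(expand_wrapped_path('@WrappedPath(./sample_build_product)'))
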
diff --git a/deps/v8/build/util/java_action.gni b/deps/v8/build/util/java_action.gni
new file mode 100644
index 0000000000..646d5a4e7e
--- /dev/null
+++ b/deps/v8/build/util/java_action.gni
@@ -0,0 +1,103 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+jarrunner = "//build/util/java_action.py"
+
+# Declare a target that runs a java command a single time.
+#
+# This target type allows you to run a java command a single time to produce
+# one or more output files. If you want to run a java command for each of a
+# set of input files, see "java_action_foreach".
+#
+# See "gn help action" for more information on how to use this target. This
+# template is based on the "action" and supports the same variables.
+template("java_action") {
+ assert(defined(invoker.script),
+ "Need script in $target_name listing the .jar file to run.")
+ assert(defined(invoker.outputs),
+ "Need outputs in $target_name listing the generated outputs.")
+
+ jarscript = invoker.script
+ action(target_name) {
+ script = jarrunner
+
+ inputs = [
+ jarscript,
+ ]
+ if (defined(invoker.inputs)) {
+ inputs += invoker.inputs
+ }
+
+ args = [
+ "-jar",
+ rebase_path(jarscript, root_build_dir),
+ ]
+ if (defined(invoker.args)) {
+ args += invoker.args
+ }
+
+ forward_variables_from(invoker,
+ [
+ "console",
+ "data",
+ "data_deps",
+ "depfile",
+ "deps",
+ "outputs",
+ "sources",
+ "testonly",
+ "visibility",
+ ])
+ }
+}
+
+# Declare a target that runs a java command over a set of files.
+#
+# This target type allows you to run a java command once-per-file over a set of
+# sources. If you want to run a java command once that takes many files as
+# input, see "java_action".
+#
+# See "gn help action_foreach" for more information on how to use this target.
+# This template is based on the "action_foreach" and supports the same variables.
+template("java_action_foreach") {
+ assert(defined(invoker.script),
+ "Need script in $target_name listing the .jar file to run.")
+ assert(defined(invoker.outputs),
+ "Need outputs in $target_name listing the generated outputs.")
+ assert(defined(invoker.sources),
+ "Need sources in $target_name listing the target inputs.")
+
+ jarscript = invoker.script
+ action_foreach(target_name) {
+ script = jarrunner
+
+ inputs = [
+ jarscript,
+ ]
+ if (defined(invoker.inputs)) {
+ inputs += invoker.inputs
+ }
+
+ args = [
+ "-jar",
+ rebase_path(jarscript, root_build_dir),
+ ]
+ if (defined(invoker.args)) {
+ args += invoker.args
+ }
+
+ forward_variables_from(invoker,
+ [
+ "console",
+ "data",
+ "data_deps",
+ "depfile",
+ "deps",
+ "outputs",
+ "sources",
+ "testonly",
+ "visibility",
+ ])
+ }
+}
diff --git a/deps/v8/build/util/java_action.py b/deps/v8/build/util/java_action.py
new file mode 100755
index 0000000000..ed9bb601de
--- /dev/null
+++ b/deps/v8/build/util/java_action.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Wrapper script to run java command as action with gn."""
+
+import os
+import subprocess
+import sys
+
+EXIT_SUCCESS = 0
+EXIT_FAILURE = 1
+
+
+def IsExecutable(path):
+ """Returns whether file at |path| exists and is executable.
+
+ Args:
+ path: absolute or relative path to test.
+
+ Returns:
+ True if the file at |path| exists and is executable, False otherwise.
+ """
+ return os.path.isfile(path) and os.access(path, os.X_OK)
+
+
+def FindCommand(command):
+ """Looks up for |command| in PATH.
+
+ Args:
+ command: name of the command to lookup, if command is a relative or
+ absolute path (i.e. contains some path separator) then only that
+ path will be tested.
+
+ Returns:
+ Full path to command or None if the command was not found.
+
+ On Windows, this respects the PATHEXT environment variable when the
+ command name does not have an extension.
+ """
+ fpath, _ = os.path.split(command)
+ if fpath:
+ if IsExecutable(command):
+ return command
+
+ if sys.platform == 'win32':
+ # On Windows, if the command does not have an extension, cmd.exe will
+ # try all extensions from PATHEXT when resolving the full path.
+ command, ext = os.path.splitext(command)
+ if not ext:
+ exts = os.environ['PATHEXT'].split(os.path.pathsep)
+ else:
+ exts = [ext]
+ else:
+ exts = ['']
+
+ for path in os.environ['PATH'].split(os.path.pathsep):
+ for ext in exts:
+ path = os.path.join(path, command) + ext
+ if IsExecutable(path):
+ return path
+
+ return None
+
+
+def main():
+ java_path = FindCommand('java')
+ if not java_path:
+ sys.stderr.write('java: command not found\n')
+ sys.exit(EXIT_FAILURE)
+
+ args = sys.argv[1:]
+ if len(args) < 2 or args[0] != '-jar':
+ sys.stderr.write('usage: %s -jar JARPATH [java_args]...\n' % sys.argv[0])
+ sys.exit(EXIT_FAILURE)
+
+ return subprocess.check_call([java_path] + args)
+
+
+if __name__ == '__main__':
+ sys.exit(main())
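
FindCommand above is what resolves the JVM before the remaining arguments are forwarded
to it. A hedged usage sketch (the result depends on the local PATH; the jar path is
hypothetical):

    import java_action

    java_path = java_action.FindCommand('java')   # e.g. '/usr/bin/java', or None
    if java_path:
        print([java_path, '-jar', 'tools/some_tool.jar', '--flag'])
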
diff --git a/deps/v8/build/util/lastchange.gni b/deps/v8/build/util/lastchange.gni
new file mode 100644
index 0000000000..a13295900d
--- /dev/null
+++ b/deps/v8/build/util/lastchange.gni
@@ -0,0 +1,16 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is used to inject a fixed dummy commit for commit-independent
+# reproducible binaries.
+
+declare_args() {
+ use_dummy_lastchange = false
+}
+
+if (use_dummy_lastchange) {
+ lastchange_file = "//build/util/LASTCHANGE.dummy"
+} else {
+ lastchange_file = "//build/util/LASTCHANGE"
+}
diff --git a/deps/v8/build/util/lastchange.py b/deps/v8/build/util/lastchange.py
new file mode 100755
index 0000000000..6d704b7afa
--- /dev/null
+++ b/deps/v8/build/util/lastchange.py
@@ -0,0 +1,318 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+lastchange.py -- Chromium revision fetching utility.
+"""
+from __future__ import print_function
+
+import argparse
+import collections
+import logging
+import os
+import subprocess
+import sys
+
+VersionInfo = collections.namedtuple("VersionInfo",
+ ("revision_id", "revision", "timestamp"))
+
+class GitError(Exception):
+ pass
+
+# This function exists for compatibility with logic outside this
+# repository that uses this file as a library.
+# TODO(eliribble) remove this function after it has been ported into
+# the repositories that depend on it
+def RunGitCommand(directory, command):
+ """
+ Launches git subcommand.
+
+ Errors are swallowed.
+
+ Returns:
+ A process object or None.
+ """
+ command = ['git'] + command
+ # Force shell usage under cygwin. This is a workaround for
+ # mysterious loss of cwd while invoking cygwin's git.
+ # We can't just pass shell=True to Popen, as under win32 this will
+ # cause CMD to be used, while we explicitly want a cygwin shell.
+ if sys.platform == 'cygwin':
+ command = ['sh', '-c', ' '.join(command)]
+ try:
+ proc = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ cwd=directory,
+ shell=(sys.platform=='win32'))
+ return proc
+ except OSError as e:
+ logging.error('Command %r failed: %s' % (' '.join(command), e))
+ return None
+
+
+def _RunGitCommand(directory, command):
+ """Launches git subcommand.
+
+ Returns:
+ The stripped stdout of the git command.
+ Raises:
+ GitError on failure, including a nonzero return code.
+ """
+ command = ['git'] + command
+ # Force shell usage under cygwin. This is a workaround for
+ # mysterious loss of cwd while invoking cygwin's git.
+ # We can't just pass shell=True to Popen, as under win32 this will
+ # cause CMD to be used, while we explicitly want a cygwin shell.
+ if sys.platform == 'cygwin':
+ command = ['sh', '-c', ' '.join(command)]
+ try:
+ logging.info("Executing '%s' in %s", ' '.join(command), directory)
+ proc = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ cwd=directory,
+ shell=(sys.platform=='win32'))
+ stdout, stderr = proc.communicate()
+ stdout = stdout.strip()
+ logging.debug("returncode: %d", proc.returncode)
+ logging.debug("stdout: %s", stdout)
+ logging.debug("stderr: %s", stderr)
+ if proc.returncode != 0 or not stdout:
+ raise GitError((
+ "Git command '{}' in {} failed: "
+ "rc={}, stdout='{}' stderr='{}'").format(
+ " ".join(command), directory, proc.returncode, stdout, stderr))
+ return stdout
+ except OSError as e:
+ raise GitError("Git command 'git {}' in {} failed: {}".format(
+ " ".join(command), directory, e))
+
+
+def GetMergeBase(directory, ref):
+ """
+ Return the merge-base of HEAD and ref.
+
+ Args:
+ directory: The directory containing the .git directory.
+ ref: The ref to use to find the merge base.
+ Returns:
+ The git commit SHA of the merge-base as a string.
+ """
+ logging.debug("Calculating merge base between HEAD and %s in %s",
+ ref, directory)
+ command = ['merge-base', 'HEAD', ref]
+ return _RunGitCommand(directory, command)
+
+
+def FetchGitRevision(directory, commit_filter, start_commit="HEAD"):
+ """
+ Fetch the Git hash (and Cr-Commit-Position if any) for a given directory.
+
+ Args:
+ directory: The directory containing the .git directory.
+ commit_filter: A filter to supply to grep to filter commits
+ start_commit: A commit identifier. The result of this function
+ will be limited to only consider commits before the provided
+ commit.
+ Returns:
+ A VersionInfo object. On error all values will be 0.
+ """
+ hash_ = ''
+
+ git_args = ['log', '-1', '--format=%H %ct']
+ if commit_filter is not None:
+ git_args.append('--grep=' + commit_filter)
+
+ git_args.append(start_commit)
+
+ output = _RunGitCommand(directory, git_args)
+ hash_, commit_timestamp = output.split()
+ if not hash_:
+ return VersionInfo('0', '0', 0)
+
+ revision = hash_
+ output = _RunGitCommand(directory, ['cat-file', 'commit', hash_])
+ for line in reversed(output.splitlines()):
+ if line.startswith('Cr-Commit-Position:'):
+ pos = line.rsplit()[-1].strip()
+ logging.debug("Found Cr-Commit-Position '%s'", pos)
+ revision = "{}-{}".format(hash_, pos)
+ break
+ return VersionInfo(hash_, revision, int(commit_timestamp))
+
+
+def GetHeaderGuard(path):
+ """
+ Returns the header #define guard for the given file path.
+ This treats everything after the last instance of "src/" as being a
+ relevant part of the guard. If there is no "src/", then the entire path
+ is used.
+ """
+ src_index = path.rfind('src/')
+ if src_index != -1:
+ guard = path[src_index + 4:]
+ else:
+ guard = path
+ guard = guard.upper()
+ return guard.replace('/', '_').replace('.', '_').replace('\\', '_') + '_'
+
+
+def GetHeaderContents(path, define, version):
+ """
+ Returns the contents that the header file should have to indicate the given
+ revision.
+ """
+ header_guard = GetHeaderGuard(path)
+
+ header_contents = """/* Generated by lastchange.py, do not edit.*/
+
+#ifndef %(header_guard)s
+#define %(header_guard)s
+
+#define %(define)s "%(version)s"
+
+#endif // %(header_guard)s
+"""
+ header_contents = header_contents % { 'header_guard': header_guard,
+ 'define': define,
+ 'version': version }
+ return header_contents
+
+
+def GetGitTopDirectory(source_dir):
+ """Get the top git directory - the directory that contains the .git directory.
+
+ Args:
+ source_dir: The directory to search.
+ Returns:
+ The output of "git rev-parse --show-toplevel" as a string
+ """
+ return _RunGitCommand(source_dir, ['rev-parse', '--show-toplevel'])
+
+
+def WriteIfChanged(file_name, contents):
+ """
+ Writes the specified contents to the specified file_name
+ iff the contents are different from the current contents.
+ Returns True if new data was written, False otherwise.
+ """
+ try:
+ old_contents = open(file_name, 'r').read()
+ except EnvironmentError:
+ pass
+ else:
+ if contents == old_contents:
+ return False
+ os.unlink(file_name)
+ open(file_name, 'w').write(contents)
+ return True
+
+
+def main(argv=None):
+ if argv is None:
+ argv = sys.argv
+
+ parser = argparse.ArgumentParser(usage="lastchange.py [options]")
+ parser.add_argument("-m", "--version-macro",
+ help=("Name of C #define when using --header. Defaults to "
+ "LAST_CHANGE."))
+ parser.add_argument("-o", "--output", metavar="FILE",
+ help=("Write last change to FILE. "
+ "Can be combined with --header to write both files."))
+ parser.add_argument("--header", metavar="FILE",
+ help=("Write last change to FILE as a C/C++ header. "
+ "Can be combined with --output to write both files."))
+ parser.add_argument("--merge-base-ref",
+ default=None,
+ help=("Only consider changes since the merge "
+ "base between HEAD and the provided ref"))
+ parser.add_argument("--revision-id-only", action='store_true',
+ help=("Output the revision as a VCS revision ID only (in "
+ "Git, a 40-character commit hash, excluding the "
+ "Cr-Commit-Position)."))
+ parser.add_argument("--print-only", action="store_true",
+ help=("Just print the revision string. Overrides any "
+ "file-output-related options."))
+ parser.add_argument("-s", "--source-dir", metavar="DIR",
+ help="Use repository in the given directory.")
+ parser.add_argument("--filter", metavar="REGEX",
+ help=("Only use log entries where the commit message "
+ "matches the supplied filter regex. Defaults to "
+ "'^Change-Id:' to suppress local commits."),
+ default='^Change-Id:')
+
+ args, extras = parser.parse_known_args(argv[1:])
+
+ logging.basicConfig(level=logging.WARNING)
+
+ out_file = args.output
+ header = args.header
+ commit_filter=args.filter
+
+ while len(extras) and out_file is None:
+ if out_file is None:
+ out_file = extras.pop(0)
+ if extras:
+ sys.stderr.write('Unexpected arguments: %r\n\n' % extras)
+ parser.print_help()
+ sys.exit(2)
+
+ source_dir = args.source_dir or os.path.dirname(os.path.abspath(__file__))
+ try:
+ git_top_dir = GetGitTopDirectory(source_dir)
+ except GitError as e:
+ logging.error("Failed to get git top directory from '%s': %s",
+ source_dir, e)
+ return 2
+
+ if args.merge_base_ref:
+ try:
+ merge_base_sha = GetMergeBase(git_top_dir, args.merge_base_ref)
+ except GitError as e:
+ logging.error("You requested a --merge-base-ref value of '%s' but no "
+ "merge base could be found between it and HEAD. Git "
+ "reports: %s", args.merge_base_ref, e)
+ return 3
+ else:
+ merge_base_sha = 'HEAD'
+
+ try:
+ version_info = FetchGitRevision(git_top_dir, commit_filter, merge_base_sha)
+ except GitError as e:
+ logging.error("Failed to get version info: %s", e)
+ logging.info(("Falling back to a version of 0.0.0 to allow script to "
+ "finish. This is normal if you are bootstrapping a new environment "
+ "or do not have a git repository for any other reason. If not, this "
+ "could represent a serious error."))
+ version_info = VersionInfo('0', '0', 0)
+
+ revision_string = version_info.revision
+ if args.revision_id_only:
+ revision_string = version_info.revision_id
+
+ if args.print_only:
+ print(revision_string)
+ else:
+ contents = "LASTCHANGE=%s\n" % revision_string
+ if not out_file and not args.header:
+ sys.stdout.write(contents)
+ else:
+ if out_file:
+ committime_file = out_file + '.committime'
+ out_changed = WriteIfChanged(out_file, contents)
+ if out_changed or not os.path.exists(committime_file):
+ with open(committime_file, 'w') as timefile:
+ timefile.write(str(version_info.timestamp))
+ if header:
+ WriteIfChanged(header,
+ GetHeaderContents(header, args.version_macro,
+ revision_string))
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
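
For reference, a short sketch of the two artifacts this script writes: the one-line
LASTCHANGE file (via WriteIfChanged in main) and the header produced by GetHeaderContents.
The revision value below is hypothetical; LAST_CHANGE is the default --version-macro name:

    import lastchange

    revision = 'c14d891d44f0afff64e56ed7c9702df1d807b1ee-refs/heads/master@{#652342}'
    print('LASTCHANGE=%s' % revision)   # contents of the plain --output file
    print(lastchange.GetHeaderContents('src/build/util/LASTCHANGE.h',
                                       'LAST_CHANGE', revision))
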
diff --git a/deps/v8/build/util/lib/common/PRESUBMIT.py b/deps/v8/build/util/lib/common/PRESUBMIT.py
new file mode 100644
index 0000000000..fca962f1ca
--- /dev/null
+++ b/deps/v8/build/util/lib/common/PRESUBMIT.py
@@ -0,0 +1,16 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+def _RunTests(input_api, output_api):
+ return (input_api.canned_checks.RunUnitTestsInDirectory(
+ input_api, output_api, '.', whitelist=[r'.+_test.py$']))
+
+
+def CheckChangeOnUpload(input_api, output_api):
+ return _RunTests(input_api, output_api)
+
+
+def CheckChangeOnCommit(input_api, output_api):
+ return _RunTests(input_api, output_api)
diff --git a/deps/v8/build/util/lib/common/__init__.py b/deps/v8/build/util/lib/common/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/build/util/lib/common/__init__.py
diff --git a/deps/v8/build/util/lib/common/chrome_test_server_spawner.py b/deps/v8/build/util/lib/common/chrome_test_server_spawner.py
new file mode 100644
index 0000000000..b9844aa391
--- /dev/null
+++ b/deps/v8/build/util/lib/common/chrome_test_server_spawner.py
@@ -0,0 +1,480 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A "Test Server Spawner" that handles killing/stopping per-test test servers.
+
+It's used to accept requests from the device to spawn and kill instances of the
+chrome test server on the host.
+"""
+# pylint: disable=W0702
+
+import BaseHTTPServer
+import json
+import logging
+import os
+import select
+import struct
+import subprocess
+import sys
+import threading
+import time
+import urlparse
+
+
+SERVER_TYPES = {
+ 'http': '',
+ 'ftp': '-f',
+ 'sync': '', # Sync uses its own script, and doesn't take a server type arg.
+ 'tcpecho': '--tcp-echo',
+ 'udpecho': '--udp-echo',
+ 'ws': '--websocket',
+}
+
+
+_DIR_SOURCE_ROOT = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir,
+ os.pardir))
+
+
+_logger = logging.getLogger(__name__)
+
+
+# Paths that are needed to import necessary modules when launching a testserver.
+os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '') + (':%s:%s:%s:%s:%s'
+ % (os.path.join(_DIR_SOURCE_ROOT, 'third_party'),
+ os.path.join(_DIR_SOURCE_ROOT, 'third_party', 'tlslite'),
+ os.path.join(_DIR_SOURCE_ROOT, 'third_party', 'pyftpdlib', 'src'),
+ os.path.join(_DIR_SOURCE_ROOT, 'net', 'tools', 'testserver'),
+ os.path.join(_DIR_SOURCE_ROOT, 'components', 'sync', 'tools',
+ 'testserver')))
+
+
+# The timeout (in seconds) of starting up the Python test server.
+_TEST_SERVER_STARTUP_TIMEOUT = 10
+
+
+def _GetServerTypeCommandLine(server_type):
+ """Returns the command-line by the given server type.
+
+ Args:
+ server_type: the server type to be used (e.g. 'http').
+
+ Returns:
+ A string containing the command-line argument.
+ """
+ if server_type not in SERVER_TYPES:
+ raise NotImplementedError('Unknown server type: %s' % server_type)
+ if server_type == 'udpecho':
+ raise Exception('Please do not run UDP echo tests because we do not have '
+ 'a UDP forwarder tool.')
+ return SERVER_TYPES[server_type]
+
+
+class PortForwarder:
+ def Map(self, port_pairs):
+ pass
+
+ def GetDevicePortForHostPort(self, host_port):
+ """Returns the device port that corresponds to a given host port."""
+ return host_port
+
+ def WaitHostPortAvailable(self, port):
+ """Returns True if |port| is available."""
+ return True
+
+ def WaitPortNotAvailable(self, port):
+ """Returns True if |port| is not available."""
+ return True
+
+ def WaitDevicePortReady(self, port):
+ """Returns whether the provided port is used."""
+ return True
+
+ def Unmap(self, device_port):
+ """Unmaps specified port"""
+ pass
+
+
+class TestServerThread(threading.Thread):
+ """A thread to run the test server in a separate process."""
+
+ def __init__(self, ready_event, arguments, port_forwarder):
+ """Initialize TestServerThread with the following argument.
+
+ Args:
+ ready_event: event which will be set when the test server is ready.
+ arguments: dictionary of arguments to run the test server.
+ device: An instance of DeviceUtils.
+ tool: instance of runtime error detection tool.
+ """
+ threading.Thread.__init__(self)
+ self.wait_event = threading.Event()
+ self.stop_event = threading.Event()
+ self.ready_event = ready_event
+ self.ready_event.clear()
+ self.arguments = arguments
+ self.port_forwarder = port_forwarder
+ self.test_server_process = None
+ self.is_ready = False
+ self.host_port = self.arguments['port']
+ self.host_ocsp_port = 0
+ assert isinstance(self.host_port, int)
+ # The forwarder device port now is dynamically allocated.
+ self.forwarder_device_port = 0
+ self.forwarder_ocsp_device_port = 0
+ # Anonymous pipe in order to get port info from test server.
+ self.pipe_in = None
+ self.pipe_out = None
+ self.process = None
+ self.command_line = []
+
+ def _WaitToStartAndGetPortFromTestServer(self):
+ """Waits for the Python test server to start and gets the port it is using.
+
+ The port information is passed by the Python test server with a pipe given
+ by self.pipe_out. It is written as a result to |self.host_port|.
+
+ Returns:
+ Whether the port used by the test server was successfully fetched.
+ """
+ assert self.host_port == 0 and self.pipe_out and self.pipe_in
+ (in_fds, _, _) = select.select([self.pipe_in, ], [], [],
+ _TEST_SERVER_STARTUP_TIMEOUT)
+ if len(in_fds) == 0:
+ _logger.error('Timed out waiting for the Python test server to start.')
+ return False
+ # First read the data length as an unsigned 4-byte value. This
+ # is _not_ using network byte ordering since the Python test server packs
+ # size as native byte order and all Chromium platforms so far are
+ # configured to use little-endian.
+ # TODO(jnd): Change the Python test server and local_test_server_*.cc to
+ # use a unified byte order (either big-endian or little-endian).
+ data_length = os.read(self.pipe_in, struct.calcsize('=L'))
+ if data_length:
+ (data_length,) = struct.unpack('=L', data_length)
+ assert data_length
+ if not data_length:
+ _logger.error('Failed to get length of server data.')
+ return False
+ server_data_json = os.read(self.pipe_in, data_length)
+ if not server_data_json:
+ _logger.error('Failed to get server data.')
+ return False
+ _logger.info('Got port json data: %s', server_data_json)
+
+ parsed_server_data = None
+ try:
+ parsed_server_data = json.loads(server_data_json)
+ except ValueError:
+ pass
+
+ if not isinstance(parsed_server_data, dict):
+ _logger.error('Failed to parse server_data: %s' % server_data_json)
+ return False
+
+ if not isinstance(parsed_server_data.get('port'), int):
+ _logger.error('Failed to get port information from the server data.')
+ return False
+
+ self.host_port = parsed_server_data['port']
+ self.host_ocsp_port = parsed_server_data.get('ocsp_port', 0)
+
+ return self.port_forwarder.WaitPortNotAvailable(self.host_port)
+
+ def _GenerateCommandLineArguments(self):
+ """Generates the command line to run the test server.
+
+ Note that all options are processed by following the definitions in
+ testserver.py.
+ """
+ if self.command_line:
+ return
+
+ args_copy = dict(self.arguments)
+
+ # Translate the server type.
+ type_cmd = _GetServerTypeCommandLine(args_copy.pop('server-type'))
+ if type_cmd:
+ self.command_line.append(type_cmd)
+
+ # Use a pipe to get the port given by the instance of Python test server
+ # if the test does not specify the port.
+ assert self.host_port == args_copy['port']
+ if self.host_port == 0:
+ (self.pipe_in, self.pipe_out) = os.pipe()
+ self.command_line.append('--startup-pipe=%d' % self.pipe_out)
+
+ # Pass the remaining arguments as-is.
+ for key, values in args_copy.iteritems():
+ if not isinstance(values, list):
+ values = [values]
+ for value in values:
+ if value is None:
+ self.command_line.append('--%s' % key)
+ else:
+ self.command_line.append('--%s=%s' % (key, value))
+
+ def _CloseUnnecessaryFDsForTestServerProcess(self):
+ # This is required to avoid subtle deadlocks that could be caused by the
+ # test server child process inheriting undesirable file descriptors such as
+ # file lock file descriptors.
+ for fd in xrange(0, 1024):
+ if fd != self.pipe_out:
+ try:
+ os.close(fd)
+ except:
+ pass
+
+ def run(self):
+ _logger.info('Start running the thread!')
+ self.wait_event.clear()
+ self._GenerateCommandLineArguments()
+ command = _DIR_SOURCE_ROOT
+ if self.arguments['server-type'] == 'sync':
+ command = [os.path.join(command, 'components', 'sync', 'tools',
+ 'testserver',
+ 'sync_testserver.py')] + self.command_line
+ else:
+ command = [os.path.join(command, 'net', 'tools', 'testserver',
+ 'testserver.py')] + self.command_line
+ _logger.info('Running: %s', command)
+
+ # Disable PYTHONUNBUFFERED because it has a bad interaction with the
+ # testserver. Remove once this interaction is fixed.
+ unbuf = os.environ.pop('PYTHONUNBUFFERED', None)
+
+ # Pass _DIR_SOURCE_ROOT as the child's working directory so that relative
+ # paths in the arguments are resolved correctly.
+ self.process = subprocess.Popen(
+ command, preexec_fn=self._CloseUnnecessaryFDsForTestServerProcess,
+ cwd=_DIR_SOURCE_ROOT)
+ if unbuf:
+ os.environ['PYTHONUNBUFFERED'] = unbuf
+ if self.process:
+ if self.pipe_out:
+ self.is_ready = self._WaitToStartAndGetPortFromTestServer()
+ else:
+ self.is_ready = self.port_forwarder.WaitPortNotAvailable(self.host_port)
+
+ if self.is_ready:
+ port_map = [(0, self.host_port)]
+ if self.host_ocsp_port:
+ port_map.extend([(0, self.host_ocsp_port)])
+ self.port_forwarder.Map(port_map)
+
+ self.forwarder_device_port = \
+ self.port_forwarder.GetDevicePortForHostPort(self.host_port)
+ if self.host_ocsp_port:
+ self.forwarder_ocsp_device_port = \
+ self.port_forwarder.GetDevicePortForHostPort(self.host_ocsp_port)
+
+ # Check whether the forwarder is ready on the device.
+ self.is_ready = self.forwarder_device_port and \
+ self.port_forwarder.WaitDevicePortReady(self.forwarder_device_port)
+
+ # Wake up the request handler thread.
+ self.ready_event.set()
+ # Keep thread running until Stop() gets called.
+ self.stop_event.wait()
+ if self.process.poll() is None:
+ self.process.kill()
+ self.port_forwarder.Unmap(self.forwarder_device_port)
+ self.process = None
+ self.is_ready = False
+ if self.pipe_out:
+ os.close(self.pipe_in)
+ os.close(self.pipe_out)
+ self.pipe_in = None
+ self.pipe_out = None
+ _logger.info('Test-server has died.')
+ self.wait_event.set()
+
+ def Stop(self):
+ """Blocks until the loop has finished.
+
+ Note that this must be called in another thread.
+ """
+ if not self.process:
+ return
+ self.stop_event.set()
+ self.wait_event.wait()
+
+
+class SpawningServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+ """A handler used to process http GET/POST request."""
+
+ def _SendResponse(self, response_code, response_reason, additional_headers,
+ contents):
+ """Generates a response sent to the client from the provided parameters.
+
+ Args:
+ response_code: number of the response status.
+ response_reason: string of reason description of the response.
+ additional_headers: dict of additional headers. Each key is the name of
+ the header, each value is the content of the header.
+ contents: string of the contents we want to send to client.
+ """
+ self.send_response(response_code, response_reason)
+ self.send_header('Content-Type', 'text/html')
+    # Specify the content-length; without it the http(s) response is not
+    # completed properly (and the browser keeps expecting data).
+ self.send_header('Content-Length', len(contents))
+ for header_name in additional_headers:
+ self.send_header(header_name, additional_headers[header_name])
+ self.end_headers()
+ self.wfile.write(contents)
+ self.wfile.flush()
+
+ def _StartTestServer(self):
+ """Starts the test server thread."""
+ _logger.info('Handling request to spawn a test server.')
+ content_type = self.headers.getheader('content-type')
+ if content_type != 'application/json':
+ raise Exception('Bad content-type for start request.')
+ content_length = self.headers.getheader('content-length')
+ if not content_length:
+ content_length = 0
+ try:
+ content_length = int(content_length)
+ except:
+ raise Exception('Bad content-length for start request.')
+ _logger.info(content_length)
+ test_server_argument_json = self.rfile.read(content_length)
+ _logger.info(test_server_argument_json)
+
+ if len(self.server.test_servers) >= self.server.max_instances:
+ self._SendResponse(400, 'Invalid request', {},
+ 'Too many test servers running')
+ return
+
+ ready_event = threading.Event()
+ new_server = TestServerThread(ready_event,
+ json.loads(test_server_argument_json),
+ self.server.port_forwarder)
+ new_server.setDaemon(True)
+ new_server.start()
+ ready_event.wait()
+ if new_server.is_ready:
+ response = {'port': new_server.forwarder_device_port,
+ 'message': 'started'};
+ if new_server.forwarder_ocsp_device_port:
+ response['ocsp_port'] = new_server.forwarder_ocsp_device_port
+ self._SendResponse(200, 'OK', {}, json.dumps(response))
+ _logger.info('Test server is running on port %d forwarded to %d.' %
+ (new_server.forwarder_device_port, new_server.host_port))
+ port = new_server.forwarder_device_port
+ assert not self.server.test_servers.has_key(port)
+ self.server.test_servers[port] = new_server
+ else:
+ new_server.Stop()
+ self._SendResponse(500, 'Test Server Error.', {}, '')
+      _logger.info('Encountered a problem while starting a test server.')
+
+ def _KillTestServer(self, params):
+ """Stops the test server instance."""
+ try:
+ port = int(params['port'][0])
+    except (ValueError, KeyError):
+ port = None
+ if port == None or port <= 0:
+ self._SendResponse(400, 'Invalid request.', {}, 'port must be specified')
+ return
+
+ if not self.server.test_servers.has_key(port):
+ self._SendResponse(400, 'Invalid request.', {},
+ "testserver isn't running on port %d" % port)
+ return
+
+ server = self.server.test_servers.pop(port)
+
+ _logger.info('Handling request to kill a test server on port: %d.', port)
+ server.Stop()
+
+ # Make sure the status of test server is correct before sending response.
+ if self.server.port_forwarder.WaitHostPortAvailable(port):
+ self._SendResponse(200, 'OK', {}, 'killed')
+ _logger.info('Test server on port %d is killed', port)
+ else:
+ self._SendResponse(500, 'Test Server Error.', {}, '')
+      _logger.info('Encountered a problem while killing a test server.')
+
+ def log_message(self, format, *args):
+ # Suppress the default HTTP logging behavior if the logging level is higher
+ # than INFO.
+ if _logger.getEffectiveLevel() <= logging.INFO:
+ pass
+
+ def do_POST(self):
+ parsed_path = urlparse.urlparse(self.path)
+ action = parsed_path.path
+ _logger.info('Action for POST method is: %s.', action)
+ if action == '/start':
+ self._StartTestServer()
+ else:
+ self._SendResponse(400, 'Unknown request.', {}, '')
+      _logger.info('Encountered unknown request: %s.', action)
+
+ def do_GET(self):
+ parsed_path = urlparse.urlparse(self.path)
+ action = parsed_path.path
+ params = urlparse.parse_qs(parsed_path.query, keep_blank_values=1)
+ _logger.info('Action for GET method is: %s.', action)
+ for param in params:
+ _logger.info('%s=%s', param, params[param][0])
+ if action == '/kill':
+ self._KillTestServer(params)
+ elif action == '/ping':
+ # The ping handler is used to check whether the spawner server is ready
+ # to serve the requests. We don't need to test the status of the test
+ # server when handling ping request.
+ self._SendResponse(200, 'OK', {}, 'ready')
+ _logger.info('Handled ping request and sent response.')
+ else:
+ self._SendResponse(400, 'Unknown request', {}, '')
+      _logger.info('Encountered unknown request: %s.', action)
+
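+# Illustrative spawner protocol handled by SpawningServerRequestHandler above
+# (a sketch; the real clients live in the Android test runner, not here):
+#   POST /start with a JSON body such as
+#       {"server-type": "http", "port": 0, ...other testserver.py options...}
+#     -> 200 with {"port": <forwarded device port>, "message": "started"}
+#   GET /kill?port=<device port>  -> 200 with body "killed"
+#   GET /ping                     -> 200 with body "ready"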
+
+class SpawningServer(object):
+ """The class used to start/stop a http server."""
+
+ def __init__(self, test_server_spawner_port, port_forwarder, max_instances):
+ self.server = BaseHTTPServer.HTTPServer(('', test_server_spawner_port),
+ SpawningServerRequestHandler)
+ self.server_port = self.server.server_port
+ _logger.info('Started test server spawner on port: %d.', self.server_port)
+
+ self.server.port_forwarder = port_forwarder
+ self.server.test_servers = {}
+ self.server.max_instances = max_instances
+
+ def _Listen(self):
+ _logger.info('Starting test server spawner.')
+ self.server.serve_forever()
+
+ def Start(self):
+ """Starts the test server spawner."""
+ listener_thread = threading.Thread(target=self._Listen)
+ listener_thread.setDaemon(True)
+ listener_thread.start()
+
+ def Stop(self):
+ """Stops the test server spawner.
+
+ Also cleans the server state.
+ """
+ self.CleanupState()
+ self.server.shutdown()
+
+ def CleanupState(self):
+ """Cleans up the spawning server state.
+
+ This should be called if the test server spawner is reused,
+ to avoid sharing the test server instance.
+ """
+ if self.server.test_servers:
+ _logger.warning('Not all test servers were stopped.')
+ for port in self.server.test_servers:
+ _logger.warning('Stopping test server on port %d' % port)
+ self.server.test_servers[port].Stop()
+ self.server.test_servers = {}
diff --git a/deps/v8/build/util/lib/common/perf_result_data_type.py b/deps/v8/build/util/lib/common/perf_result_data_type.py
new file mode 100644
index 0000000000..67b550a46c
--- /dev/null
+++ b/deps/v8/build/util/lib/common/perf_result_data_type.py
@@ -0,0 +1,20 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+DEFAULT = 'default'
+UNIMPORTANT = 'unimportant'
+HISTOGRAM = 'histogram'
+UNIMPORTANT_HISTOGRAM = 'unimportant-histogram'
+INFORMATIONAL = 'informational'
+
+ALL_TYPES = [DEFAULT, UNIMPORTANT, HISTOGRAM, UNIMPORTANT_HISTOGRAM,
+ INFORMATIONAL]
+
+
+def IsValidType(datatype):
+ return datatype in ALL_TYPES
+
+
+def IsHistogram(datatype):
+ return (datatype == HISTOGRAM or datatype == UNIMPORTANT_HISTOGRAM)
diff --git a/deps/v8/build/util/lib/common/perf_tests_results_helper.py b/deps/v8/build/util/lib/common/perf_tests_results_helper.py
new file mode 100644
index 0000000000..59bb5e439d
--- /dev/null
+++ b/deps/v8/build/util/lib/common/perf_tests_results_helper.py
@@ -0,0 +1,200 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+import sys
+
+import json
+import logging
+import math
+
+import perf_result_data_type
+
+
+# Mapping from result type to test output
+RESULT_TYPES = {perf_result_data_type.UNIMPORTANT: 'RESULT ',
+ perf_result_data_type.DEFAULT: '*RESULT ',
+ perf_result_data_type.INFORMATIONAL: '',
+ perf_result_data_type.UNIMPORTANT_HISTOGRAM: 'HISTOGRAM ',
+ perf_result_data_type.HISTOGRAM: '*HISTOGRAM '}
+
+
+def _EscapePerfResult(s):
+ """Escapes |s| for use in a perf result."""
+ return re.sub('[\:|=/#&,]', '_', s)
+
+
+def FlattenList(values):
+ """Returns a simple list without sub-lists."""
+ ret = []
+ for entry in values:
+ if isinstance(entry, list):
+ ret.extend(FlattenList(entry))
+ else:
+ ret.append(entry)
+ return ret
+
+
+def GeomMeanAndStdDevFromHistogram(histogram_json):
+ histogram = json.loads(histogram_json)
+ # Handle empty histograms gracefully.
+ if not 'buckets' in histogram:
+ return 0.0, 0.0
+ count = 0
+ sum_of_logs = 0
+ for bucket in histogram['buckets']:
+ if 'high' in bucket:
+ bucket['mean'] = (bucket['low'] + bucket['high']) / 2.0
+ else:
+ bucket['mean'] = bucket['low']
+ if bucket['mean'] > 0:
+ sum_of_logs += math.log(bucket['mean']) * bucket['count']
+ count += bucket['count']
+
+ if count == 0:
+ return 0.0, 0.0
+
+ sum_of_squares = 0
+ geom_mean = math.exp(sum_of_logs / count)
+ for bucket in histogram['buckets']:
+ if bucket['mean'] > 0:
+ sum_of_squares += (bucket['mean'] - geom_mean) ** 2 * bucket['count']
+ return geom_mean, math.sqrt(sum_of_squares / count)
+
+
+def _ValueToString(v):
+ # Special case for floats so we don't print using scientific notation.
+ if isinstance(v, float):
+ return '%f' % v
+ else:
+ return str(v)
+
+
+def _MeanAndStdDevFromList(values):
+ avg = None
+ sd = None
+ if len(values) > 1:
+ try:
+ value = '[%s]' % ','.join([_ValueToString(v) for v in values])
+ avg = sum([float(v) for v in values]) / len(values)
+ sqdiffs = [(float(v) - avg) ** 2 for v in values]
+ variance = sum(sqdiffs) / (len(values) - 1)
+ sd = math.sqrt(variance)
+ except ValueError:
+ value = ', '.join(values)
+ else:
+ value = values[0]
+ return value, avg, sd
+
+
+def PrintPages(page_list):
+ """Prints list of pages to stdout in the format required by perf tests."""
+ print 'Pages: [%s]' % ','.join([_EscapePerfResult(p) for p in page_list])
+
+
+def PrintPerfResult(measurement, trace, values, units,
+ result_type=perf_result_data_type.DEFAULT,
+ print_to_stdout=True):
+ """Prints numerical data to stdout in the format required by perf tests.
+
+ The string args may be empty but they must not contain any colons (:) or
+ equals signs (=).
+ This is parsed by the buildbot using:
+ http://src.chromium.org/viewvc/chrome/trunk/tools/build/scripts/slave/process_log_utils.py
+
+ Args:
+ measurement: A description of the quantity being measured, e.g. "vm_peak".
+ On the dashboard, this maps to a particular graph. Mandatory.
+ trace: A description of the particular data point, e.g. "reference".
+ On the dashboard, this maps to a particular "line" in the graph.
+ Mandatory.
+ values: A list of numeric measured values. An N-dimensional list will be
+ flattened and treated as a simple list.
+ units: A description of the units of measure, e.g. "bytes".
+ result_type: Accepts values of perf_result_data_type.ALL_TYPES.
+ print_to_stdout: If True, prints the output in stdout instead of returning
+ the output to caller.
+
+ Returns:
+    String of the formatted perf result.
+ """
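+  # For example (illustrative values), PrintPerfResult('vm_peak', 'reference',
+  # [1024, 1040], 'bytes') prints a line of the form:
+  #   *RESULT vm_peak: reference= [1024,1040] bytes
+  # followed by "Avg vm_peak: ..." and "Sd vm_peak: ..." lines.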
+ assert perf_result_data_type.IsValidType(result_type), \
+ 'result type: %s is invalid' % result_type
+
+ trace_name = _EscapePerfResult(trace)
+
+ if (result_type == perf_result_data_type.UNIMPORTANT or
+ result_type == perf_result_data_type.DEFAULT or
+ result_type == perf_result_data_type.INFORMATIONAL):
+ assert isinstance(values, list)
+ assert '/' not in measurement
+ flattened_values = FlattenList(values)
+ assert len(flattened_values)
+ value, avg, sd = _MeanAndStdDevFromList(flattened_values)
+ output = '%s%s: %s%s%s %s' % (
+ RESULT_TYPES[result_type],
+ _EscapePerfResult(measurement),
+ trace_name,
+        # Do not show the equal sign if the trace is empty. This usually
+        # happens when the measurement alone describes the result clearly.
+ '= ' if trace_name else '',
+ value,
+ units)
+ else:
+ assert perf_result_data_type.IsHistogram(result_type)
+ assert isinstance(values, list)
+ # The histograms can only be printed individually, there's no computation
+ # across different histograms.
+ assert len(values) == 1
+ value = values[0]
+ output = '%s%s: %s= %s %s' % (
+ RESULT_TYPES[result_type],
+ _EscapePerfResult(measurement),
+ trace_name,
+ value,
+ units)
+ avg, sd = GeomMeanAndStdDevFromHistogram(value)
+
+ if avg:
+ output += '\nAvg %s: %f%s' % (measurement, avg, units)
+ if sd:
+ output += '\nSd %s: %f%s' % (measurement, sd, units)
+ if print_to_stdout:
+ print output
+ sys.stdout.flush()
+ return output
+
+
+def ReportPerfResult(chart_data, graph_title, trace_title, value, units,
+ improvement_direction='down', important=True):
+ """Outputs test results in correct format.
+
+  If chart_data is None, outputs data in the old format. If chart_data is a
+  dictionary, formats it in the chartjson format. Any other type falls back
+  to the old format.
+
+ Args:
+ chart_data: A dictionary corresponding to perf results in the chartjson
+ format.
+ graph_title: A string containing the name of the chart to add the result
+ to.
+ trace_title: A string containing the name of the trace within the chart
+ to add the result to.
+ value: The value of the result being reported.
+ units: The units of the value being reported.
+ improvement_direction: A string denoting whether higher or lower is
+ better for the result. Either 'up' or 'down'.
+ important: A boolean denoting whether the result is important or not.
+ """
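+  # Illustrative chartjson entry written by the code below (assuming a
+  # chart_data dict that already has a 'charts' key, as required here):
+  #   chart_data['charts']['page_load']['cold'] = {
+  #       'type': 'scalar', 'value': 123, 'units': 'ms',
+  #       'improvement_direction': 'down', 'important': True}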
+ if chart_data and isinstance(chart_data, dict):
+ chart_data['charts'].setdefault(graph_title, {})
+ chart_data['charts'][graph_title][trace_title] = {
+ 'type': 'scalar',
+ 'value': value,
+ 'units': units,
+ 'improvement_direction': improvement_direction,
+ 'important': important
+ }
+ else:
+ PrintPerfResult(graph_title, trace_title, [value], units)
diff --git a/deps/v8/build/util/lib/common/unittest_util.py b/deps/v8/build/util/lib/common/unittest_util.py
new file mode 100644
index 0000000000..9683ab717a
--- /dev/null
+++ b/deps/v8/build/util/lib/common/unittest_util.py
@@ -0,0 +1,155 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Utilities for dealing with the python unittest module."""
+
+import fnmatch
+import re
+import sys
+import unittest
+
+
+class _TextTestResult(unittest._TextTestResult):
+ """A test result class that can print formatted text results to a stream.
+
+ Results printed in conformance with gtest output format, like:
+ [ RUN ] autofill.AutofillTest.testAutofillInvalid: "test desc."
+ [ OK ] autofill.AutofillTest.testAutofillInvalid
+ [ RUN ] autofill.AutofillTest.testFillProfile: "test desc."
+ [ OK ] autofill.AutofillTest.testFillProfile
+ [ RUN ] autofill.AutofillTest.testFillProfileCrazyCharacters: "Test."
+ [ OK ] autofill.AutofillTest.testFillProfileCrazyCharacters
+ """
+ def __init__(self, stream, descriptions, verbosity):
+ unittest._TextTestResult.__init__(self, stream, descriptions, verbosity)
+ self._fails = set()
+
+ def _GetTestURI(self, test):
+ return '%s.%s.%s' % (test.__class__.__module__,
+ test.__class__.__name__,
+ test._testMethodName)
+
+ def getDescription(self, test):
+ return '%s: "%s"' % (self._GetTestURI(test), test.shortDescription())
+
+ def startTest(self, test):
+ unittest.TestResult.startTest(self, test)
+ self.stream.writeln('[ RUN ] %s' % self.getDescription(test))
+
+ def addSuccess(self, test):
+ unittest.TestResult.addSuccess(self, test)
+ self.stream.writeln('[ OK ] %s' % self._GetTestURI(test))
+
+ def addError(self, test, err):
+ unittest.TestResult.addError(self, test, err)
+ self.stream.writeln('[ ERROR ] %s' % self._GetTestURI(test))
+ self._fails.add(self._GetTestURI(test))
+
+ def addFailure(self, test, err):
+ unittest.TestResult.addFailure(self, test, err)
+ self.stream.writeln('[ FAILED ] %s' % self._GetTestURI(test))
+ self._fails.add(self._GetTestURI(test))
+
+ def getRetestFilter(self):
+ return ':'.join(self._fails)
+
+
+class TextTestRunner(unittest.TextTestRunner):
+ """Test Runner for displaying test results in textual format.
+
+ Results are displayed in conformance with google test output.
+ """
+
+ def __init__(self, verbosity=1):
+ unittest.TextTestRunner.__init__(self, stream=sys.stderr,
+ verbosity=verbosity)
+
+ def _makeResult(self):
+ return _TextTestResult(self.stream, self.descriptions, self.verbosity)
+
+
+def GetTestsFromSuite(suite):
+ """Returns all the tests from a given test suite."""
+ tests = []
+ for x in suite:
+ if isinstance(x, unittest.TestSuite):
+ tests += GetTestsFromSuite(x)
+ else:
+ tests += [x]
+ return tests
+
+
+def GetTestNamesFromSuite(suite):
+ """Returns a list of every test name in the given suite."""
+ return map(lambda x: GetTestName(x), GetTestsFromSuite(suite))
+
+
+def GetTestName(test):
+ """Gets the test name of the given unittest test."""
+ return '.'.join([test.__class__.__module__,
+ test.__class__.__name__,
+ test._testMethodName])
+
+
+def FilterTestSuite(suite, gtest_filter):
+ """Returns a new filtered tests suite based on the given gtest filter.
+
+ See https://github.com/google/googletest/blob/master/googletest/docs/AdvancedGuide.md
+ for gtest_filter specification.
+ """
+ return unittest.TestSuite(FilterTests(GetTestsFromSuite(suite), gtest_filter))
+
+
+def FilterTests(all_tests, gtest_filter):
+ """Filter a list of tests based on the given gtest filter.
+
+ Args:
+ all_tests: List of tests (unittest.TestSuite)
+ gtest_filter: Filter to apply.
+
+ Returns:
+ Filtered subset of the given list of tests.
+ """
+ test_names = [GetTestName(test) for test in all_tests]
+ filtered_names = FilterTestNames(test_names, gtest_filter)
+ return [test for test in all_tests if GetTestName(test) in filtered_names]
+
+
+def FilterTestNames(all_tests, gtest_filter):
+ """Filter a list of test names based on the given gtest filter.
+
+ See https://github.com/google/googletest/blob/master/googletest/docs/AdvancedGuide.md
+ for gtest_filter specification.
+
+ Args:
+ all_tests: List of test names.
+ gtest_filter: Filter to apply.
+
+ Returns:
+ Filtered subset of the given list of test names.
+ """
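+  # For example, the filter "Foo.*:Bar.*-*.Three" keeps every Foo.* and Bar.*
+  # test except those ending in ".Three" (see unittest_util_test.py).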
+ pattern_groups = gtest_filter.split('-')
+ positive_patterns = ['*']
+ if pattern_groups[0]:
+ positive_patterns = pattern_groups[0].split(':')
+ negative_patterns = []
+ if len(pattern_groups) > 1:
+ negative_patterns = pattern_groups[1].split(':')
+
+ neg_pats = None
+ if negative_patterns:
+ neg_pats = re.compile('|'.join(fnmatch.translate(p) for p in
+ negative_patterns))
+
+ tests = []
+ test_set = set()
+ for pattern in positive_patterns:
+ pattern_tests = [
+ test for test in all_tests
+ if (fnmatch.fnmatch(test, pattern)
+ and not (neg_pats and neg_pats.match(test))
+ and test not in test_set)]
+ tests.extend(pattern_tests)
+ test_set.update(pattern_tests)
+ return tests
diff --git a/deps/v8/build/util/lib/common/unittest_util_test.py b/deps/v8/build/util/lib/common/unittest_util_test.py
new file mode 100755
index 0000000000..1514c9b6d4
--- /dev/null
+++ b/deps/v8/build/util/lib/common/unittest_util_test.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=protected-access
+
+import logging
+import sys
+import unittest
+import unittest_util
+
+
+class FilterTestNamesTest(unittest.TestCase):
+
+ possible_list = ["Foo.One",
+ "Foo.Two",
+ "Foo.Three",
+ "Bar.One",
+ "Bar.Two",
+ "Bar.Three",
+ "Quux.One",
+ "Quux.Two",
+ "Quux.Three"]
+
+ def testMatchAll(self):
+ x = unittest_util.FilterTestNames(self.possible_list, "*")
+ self.assertEquals(x, self.possible_list)
+
+ def testMatchPartial(self):
+ x = unittest_util.FilterTestNames(self.possible_list, "Foo.*")
+ self.assertEquals(x, ["Foo.One", "Foo.Two", "Foo.Three"])
+
+ def testMatchFull(self):
+ x = unittest_util.FilterTestNames(self.possible_list, "Foo.Two")
+ self.assertEquals(x, ["Foo.Two"])
+
+ def testMatchTwo(self):
+ x = unittest_util.FilterTestNames(self.possible_list, "Bar.*:Foo.*")
+ self.assertEquals(x, ["Bar.One",
+ "Bar.Two",
+ "Bar.Three",
+ "Foo.One",
+ "Foo.Two",
+ "Foo.Three"])
+
+ def testMatchWithNegative(self):
+ x = unittest_util.FilterTestNames(self.possible_list, "Bar.*:Foo.*-*.Three")
+ self.assertEquals(x, ["Bar.One",
+ "Bar.Two",
+ "Foo.One",
+ "Foo.Two"])
+
+ def testMatchOverlapping(self):
+ x = unittest_util.FilterTestNames(self.possible_list, "Bar.*:*.Two")
+ self.assertEquals(x, ["Bar.One",
+ "Bar.Two",
+ "Bar.Three",
+ "Foo.Two",
+ "Quux.Two"])
+
+
+if __name__ == '__main__':
+ logging.getLogger().setLevel(logging.DEBUG)
+ unittest.main(verbosity=2)
diff --git a/deps/v8/build/util/lib/common/util.py b/deps/v8/build/util/lib/common/util.py
new file mode 100644
index 0000000000..a415b1f534
--- /dev/null
+++ b/deps/v8/build/util/lib/common/util.py
@@ -0,0 +1,151 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Generic utilities for all python scripts."""
+
+import atexit
+import httplib
+import os
+import signal
+import socket
+import stat
+import subprocess
+import sys
+import tempfile
+import urlparse
+
+
+def GetPlatformName():
+ """Return a string to be used in paths for the platform."""
+ if IsWindows():
+ return 'win'
+ if IsMac():
+ return 'mac'
+ if IsLinux():
+ return 'linux'
+ raise NotImplementedError('Unknown platform "%s".' % sys.platform)
+
+
+def IsWindows():
+ return sys.platform == 'cygwin' or sys.platform.startswith('win')
+
+
+def IsLinux():
+ return sys.platform.startswith('linux')
+
+
+def IsMac():
+ return sys.platform.startswith('darwin')
+
+
+def _DeleteDir(path):
+ """Deletes a directory recursively, which must exist."""
+ # Don't use shutil.rmtree because it can't delete read-only files on Win.
+ for root, dirs, files in os.walk(path, topdown=False):
+ for name in files:
+ filename = os.path.join(root, name)
+ os.chmod(filename, stat.S_IWRITE)
+ os.remove(filename)
+ for name in dirs:
+ os.rmdir(os.path.join(root, name))
+ os.rmdir(path)
+
+
+def Delete(path):
+ """Deletes the given file or directory (recursively), which must exist."""
+ if os.path.isdir(path):
+ _DeleteDir(path)
+ else:
+ os.remove(path)
+
+
+def MaybeDelete(path):
+  """Deletes the given file or directory (recursively), if it exists."""
+ if os.path.exists(path):
+ Delete(path)
+
+
+def MakeTempDir(parent_dir=None):
+ """Creates a temporary directory and returns an absolute path to it.
+
+ The temporary directory is automatically deleted when the python interpreter
+ exits normally.
+
+ Args:
+ parent_dir: the directory to create the temp dir in. If None, the system
+ temp dir is used.
+
+ Returns:
+ The absolute path to the temporary directory.
+ """
+ path = tempfile.mkdtemp(dir=parent_dir)
+ atexit.register(MaybeDelete, path)
+ return path
+
+
+def Unzip(zip_path, output_dir):
+ """Unzips the given zip file using a system installed unzip tool.
+
+ Args:
+ zip_path: zip file to unzip.
+ output_dir: directory to unzip the contents of the zip file. The directory
+ must exist.
+
+ Raises:
+ RuntimeError if the unzip operation fails.
+ """
+ if IsWindows():
+ unzip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']
+ else:
+ unzip_cmd = ['unzip', '-o']
+ unzip_cmd += [zip_path]
+ if RunCommand(unzip_cmd, output_dir) != 0:
+ raise RuntimeError('Unable to unzip %s to %s' % (zip_path, output_dir))
+
+
+def Kill(pid):
+ """Terminate the given pid."""
+ if IsWindows():
+ subprocess.call(['taskkill.exe', '/T', '/F', '/PID', str(pid)])
+ else:
+ os.kill(pid, signal.SIGTERM)
+
+
+def RunCommand(cmd, cwd=None):
+ """Runs the given command and returns the exit code.
+
+ Args:
+ cmd: list of command arguments.
+ cwd: working directory to execute the command, or None if the current
+ working directory should be used.
+
+ Returns:
+ The exit code of the command.
+ """
+ process = subprocess.Popen(cmd, cwd=cwd)
+ process.wait()
+ return process.returncode
+
+
+def DoesUrlExist(url):
+ """Determines whether a resource exists at the given URL.
+
+ Args:
+ url: URL to be verified.
+
+ Returns:
+ True if url exists, otherwise False.
+ """
+ parsed = urlparse.urlparse(url)
+ try:
+ conn = httplib.HTTPConnection(parsed.netloc)
+ conn.request('HEAD', parsed.path)
+ response = conn.getresponse()
+ except (socket.gaierror, socket.error):
+ return False
+ finally:
+ conn.close()
+ # Follow both permanent (301) and temporary (302) redirects.
+ if response.status == 302 or response.status == 301:
+ return DoesUrlExist(response.getheader('location'))
+ return response.status == 200
diff --git a/deps/v8/build/util/process_version.gni b/deps/v8/build/util/process_version.gni
new file mode 100644
index 0000000000..e27346e6f0
--- /dev/null
+++ b/deps/v8/build/util/process_version.gni
@@ -0,0 +1,126 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Runs the version processing script over the given template file to produce
+# an output file. This is used for generating various forms of files that
+# incorporate the product name and version.
+#
+# Unlike GYP, this will actually compile the resulting file, so you don't need
+# to add it separately to the sources, just depend on the target.
+#
+# In GYP this is a rule that runs once per ".ver" file. In GN this just
+# processes one file per invocation of the template so you may have to have
+# multiple targets.
+#
+# Parameters:
+# sources (optional):
+# List of file names to read. When converting a GYP target, this should
+# list the 'source' (see above) as well as any extra_variable_files.
+# The files will be passed to version.py in the order specified here.
+#
+# output:
+# File name of file to write. In GYP this is unspecified and it will
+# make up a file name for you based on the input name, and tack on
+# "_version.rc" to the end. But in GN you need to specify the full name.
+#
+# template_file (optional):
+# Template file to use (not a list). Most Windows users that want to use
+# this to process a .rc template should use process_version_rc_template(),
+# defined in //chrome/process_version_rc_template.gni, instead.
+#
+# extra_args (optional):
+# Extra arguments to pass to version.py. Any "-f <filename>" args should
+# use sources instead.
+#
+# process_only (optional, defaults to false)
+# Set to generate only one action that processes the version file and
+# doesn't attempt to link the result into a source set. This is for if
+# you are processing the version as data only.
+#
+# visibility (optional)
+#
+# Example:
+# process_version("myversion") {
+# sources = [
+#     "//chrome/VERSION",
+#     "myfile.h.in",
+# ]
+# output = "$target_gen_dir/myfile.h"
+# extra_args = [ "-e", "FOO=42" ]
+# }
+template("process_version") {
+ assert(defined(invoker.output), "Output must be defined for $target_name")
+
+ process_only = defined(invoker.process_only) && invoker.process_only
+
+ if (process_only) {
+ action_name = target_name
+ } else {
+ action_name = target_name + "_action"
+ source_set_name = target_name
+ }
+
+ action(action_name) {
+ script = "//build/util/version.py"
+
+ inputs = []
+ if (defined(invoker.inputs)) {
+ inputs += invoker.inputs
+ }
+ if (defined(invoker.template_file)) {
+ inputs += [ invoker.template_file ]
+ }
+
+ outputs = [
+ invoker.output,
+ ]
+
+ args = []
+
+ if (is_official_build) {
+ args += [ "--official" ]
+ }
+
+ if (defined(invoker.sources)) {
+ inputs += invoker.sources
+ foreach(i, invoker.sources) {
+ args += [
+ "-f",
+ rebase_path(i, root_build_dir),
+ ]
+ }
+ }
+
+ if (defined(invoker.extra_args)) {
+ args += invoker.extra_args
+ }
+ args += [
+ "-o",
+ rebase_path(invoker.output, root_build_dir),
+ ]
+ if (defined(invoker.template_file)) {
+ args += [ rebase_path(invoker.template_file, root_build_dir) ]
+ }
+
+ forward_variables_from(invoker, [ "deps" ])
+
+ if (process_only) {
+ # When processing only, visibility gets applied to this target.
+ forward_variables_from(invoker, [ "visibility" ])
+ } else {
+ # When linking the result, only the source set can depend on the action.
+ visibility = [ ":$source_set_name" ]
+ }
+ }
+
+ if (!process_only) {
+ source_set(source_set_name) {
+ forward_variables_from(invoker, [ "visibility" ])
+ sources = get_target_outputs(":$action_name")
+ public_deps = [
+ ":$action_name",
+ ]
+ }
+ }
+}
diff --git a/deps/v8/build/util/version.gni b/deps/v8/build/util/version.gni
new file mode 100644
index 0000000000..5bfceb52f9
--- /dev/null
+++ b/deps/v8/build/util/version.gni
@@ -0,0 +1,159 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This exposes the Chrome version as GN variables for use in build files.
+# This also generates the various version codes used for builds of chrome for
+# android.
+#
+# PREFER NOT TO USE THESE. The GYP build uses this kind of thing extensively.
+# However, it is far better to write an action (or use the process_version
+# wrapper in build/util/process_version.gni) to generate a file at build-time
+# with the information you need. This allows better dependency checking and GN
+# will run faster.
+#
+# These values should only be used if you REALLY need to depend on them at
+# build-time, for example, in the computation of output file names.
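+#
+# Illustrative use in a BUILD.gn file (a sketch only; the variables referenced
+# here are defined further down in this file):
+#   import("//build/util/version.gni")
+#   installer_name = "setup-$chrome_version_full.exe"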
+
+# Give version.py a pattern that will expand to a GN scope consisting of
+# all values we need at once.
+_version_dictionary_template = "full = \"@MAJOR@.@MINOR@.@BUILD@.@PATCH@\" " +
+ "major = \"@MAJOR@\" minor = \"@MINOR@\" " +
+ "build = \"@BUILD@\" patch = \"@PATCH@\" "
+
+# The file containing the Chrome version number.
+chrome_version_file = "//chrome/VERSION"
+
+_script_arguments = []
+
+if (target_os == "mac") {
+ _version_dictionary_template += "patch_hi = @PATCH_HI@ patch_lo = @PATCH_LO@ "
+
+ _script_arguments += [
+ "-e",
+ "PATCH_HI=int(PATCH)//256",
+ "-e",
+ "PATCH_LO=int(PATCH)%256",
+ ]
+} else if (target_os == "android") {
+ import("//build/config/android/config.gni")
+
+ _version_dictionary_template +=
+ "chrome_version_code = " + "\"@CHROME_VERSION_CODE@\" " +
+ "chrome_modern_version_code = \"@CHROME_MODERN_VERSION_CODE@\" " +
+ "monochrome_version_code = \"@MONOCHROME_VERSION_CODE@\" " +
+ "trichrome_version_code = \"@TRICHROME_VERSION_CODE@\" " +
+ "notouch_chrome_version_code = \"@NOTOUCH_CHROME_VERSION_CODE@\" " +
+ "webview_stable_version_code = \"@WEBVIEW_STABLE_VERSION_CODE@\" " +
+ "webview_beta_version_code = \"@WEBVIEW_BETA_VERSION_CODE@\" " +
+ "webview_dev_version_code = \"@WEBVIEW_DEV_VERSION_CODE@\" "
+
+ if (target_cpu == "arm64" || target_cpu == "x64") {
+ _version_dictionary_template +=
+ "monochrome_64_32_version_code = \"@MONOCHROME_64_32_VERSION_CODE@\" " +
+ "monochrome_64_version_code = \"@MONOCHROME_64_VERSION_CODE@\" " +
+ "trichrome_64_32_version_code = \"@TRICHROME_64_32_VERSION_CODE@\" " +
+ "trichrome_64_version_code = \"@TRICHROME_64_VERSION_CODE@\" "
+ }
+
+ _script_arguments += [
+ "-a",
+ target_cpu,
+ ]
+
+ if (!public_android_sdk) {
+ _script_arguments += [ "--next" ]
+ }
+}
+
+_script_arguments += [
+ "-f",
+ rebase_path(chrome_version_file, root_build_dir),
+ "-t",
+ _version_dictionary_template,
+ "--os",
+ target_os,
+]
+
+_result = exec_script("version.py",
+ _script_arguments,
+ "scope",
+ [ chrome_version_file ])
+
+# Full version. For example "45.0.12321.0"
+chrome_version_full = _result.full
+
+# The constituent parts of the full version.
+chrome_version_major = _result.major
+chrome_version_minor = _result.minor
+chrome_version_build = _result.build
+chrome_version_patch = _result.patch
+
+if (target_os == "mac") {
+ chrome_version_patch_hi = _result.patch_hi
+ chrome_version_patch_lo = _result.patch_lo
+
+ chrome_dylib_version = "$chrome_version_build.$chrome_version_patch_hi" +
+ ".$chrome_version_patch_lo"
+} else if (target_os == "android") {
+ forward_variables_from(_result,
+ [
+ "chrome_modern_version_code",
+ "chrome_version_code",
+ "monochrome_64_32_version_code",
+ "monochrome_64_version_code",
+ "monochrome_version_code",
+ "notouch_chrome_version_code",
+ "trichrome_64_32_version_code",
+ "trichrome_64_version_code",
+ "trichrome_version_code",
+ "webview_beta_version_code",
+ "webview_dev_version_code",
+ "webview_stable_version_code",
+ ])
+
+ chrome_version_name = chrome_version_full
+
+ lines_to_write__deprecated = [
+ "VersionName=$chrome_version_name",
+ "Chrome=$chrome_version_code",
+ "ChromeModern=$chrome_modern_version_code",
+ "Monochrome=$monochrome_version_code",
+ "TrichromeChrome=$trichrome_version_code",
+ "NoTouchChrome=$notouch_chrome_version_code",
+ "WebviewStable=$webview_stable_version_code",
+ "WebviewBeta=$webview_beta_version_code",
+ "WebviewDev=$webview_dev_version_code",
+ ]
+ lines_to_write = [
+ "VersionName: $chrome_version_name",
+ "Chrome: $chrome_version_code",
+ "ChromeModern: $chrome_modern_version_code",
+ "Monochrome: $monochrome_version_code",
+ "TrichromeChrome: $trichrome_version_code",
+ "MonochromeFP: $notouch_chrome_version_code",
+ "WebviewStable: $webview_stable_version_code",
+ "WebviewBeta: $webview_beta_version_code",
+ "WebviewDev: $webview_dev_version_code",
+ ]
+
+ if (target_cpu == "arm64" || target_cpu == "x64") {
+ lines_to_write__deprecated += [
+ "Monochrome_64_32=$monochrome_64_32_version_code",
+ "Monochrome_64=$monochrome_64_version_code",
+ "TrichromeChrome_64_32=$trichrome_64_32_version_code",
+ "TrichromeChrome_64=$trichrome_64_version_code",
+ ]
+ lines_to_write += [
+ "Monochrome6432: $monochrome_64_32_version_code",
+ "Monochrome64: $monochrome_64_version_code",
+ "TrichromeChrome6432: $trichrome_64_32_version_code",
+ "TrichromeChrome64: $trichrome_64_version_code",
+ ]
+ }
+
+ write_file("$root_out_dir/android_chrome_versions.txt", lines_to_write)
+
+ # TODO (stonebraker) For a 3-way patch; to be removed
+ write_file("$root_out_dir/chrome_versions.txt", lines_to_write__deprecated)
+}
diff --git a/deps/v8/build/util/version.py b/deps/v8/build/util/version.py
new file mode 100755
index 0000000000..4f440c4ee7
--- /dev/null
+++ b/deps/v8/build/util/version.py
@@ -0,0 +1,259 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+version.py -- Chromium version string substitution utility.
+"""
+
+from __future__ import print_function
+
+import argparse
+import os
+import sys
+
+import android_chrome_version
+
+
+def FetchValuesFromFile(values_dict, file_name):
+ """
+ Fetches KEYWORD=VALUE settings from the specified file.
+
+ Everything to the left of the first '=' is the keyword,
+ everything to the right is the value. No stripping of
+ white space, so beware.
+
+ The file must exist, otherwise you get the Python exception from open().
+ """
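+  # A typical input file (e.g. chrome/VERSION) looks like:
+  #   MAJOR=74
+  #   MINOR=0
+  #   BUILD=3720
+  #   PATCH=0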
+ for line in open(file_name, 'r').readlines():
+ key, val = line.rstrip('\r\n').split('=', 1)
+ values_dict[key] = val
+
+
+def FetchValues(file_list, is_official_build=None):
+ """
+ Returns a dictionary of values to be used for substitution.
+
+ Populates the dictionary with KEYWORD=VALUE settings from the files in
+ 'file_list'.
+
+ Explicitly adds the following value from internal calculations:
+
+ OFFICIAL_BUILD
+ """
+ CHROME_BUILD_TYPE = os.environ.get('CHROME_BUILD_TYPE')
+ if CHROME_BUILD_TYPE == '_official' or is_official_build:
+ official_build = '1'
+ else:
+ official_build = '0'
+
+ values = dict(
+ OFFICIAL_BUILD = official_build,
+ )
+
+ for file_name in file_list:
+ FetchValuesFromFile(values, file_name)
+
+ return values
+
+
+def SubstTemplate(contents, values):
+ """
+ Returns the template with substituted values from the specified dictionary.
+
+ Keywords to be substituted are surrounded by '@': @KEYWORD@.
+
+  No attempt is made to avoid recursive substitution. The order of evaluation
+  is arbitrary, following the order of the keywords returned by the Python
+  dictionary, so do NOT substitute a value that contains any @KEYWORD@ strings
+  and expect them to be recursively substituted.
+ """
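+  # For example, SubstTemplate('v = "@MAJOR@.@MINOR@"',
+  #                            {'MAJOR': '74', 'MINOR': '0'})
+  # returns 'v = "74.0"'.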
+ for key, val in values.items():
+ try:
+ contents = contents.replace('@' + key + '@', val)
+ except TypeError:
+ print(repr(key), repr(val))
+ return contents
+
+
+def SubstFile(file_name, values):
+ """
+ Returns the contents of the specified file_name with substituted values.
+
+ Substituted values come from the specified dictionary.
+
+ This is like SubstTemplate, except it operates on a file.
+ """
+ template = open(file_name, 'r').read()
+ return SubstTemplate(template, values)
+
+
+def WriteIfChanged(file_name, contents):
+ """
+ Writes the specified contents to the specified file_name.
+
+ Does nothing if the contents aren't different than the current contents.
+ """
+ try:
+ old_contents = open(file_name, 'r').read()
+ except EnvironmentError:
+ pass
+ else:
+ if contents == old_contents:
+ return
+ os.unlink(file_name)
+ open(file_name, 'w').write(contents)
+
+
+def BuildParser():
+ """Build argparse parser, with added arguments."""
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-f', '--file', action='append', default=[],
+ help='Read variables from FILE.')
+ parser.add_argument('-i', '--input', default=None,
+ help='Read strings to substitute from FILE.')
+ parser.add_argument('-o', '--output', default=None,
+ help='Write substituted strings to FILE.')
+ parser.add_argument('-t', '--template', default=None,
+ help='Use TEMPLATE as the strings to substitute.')
+ parser.add_argument(
+ '-e',
+ '--eval',
+ action='append',
+ default=[],
+ help='Evaluate VAL after reading variables. Can be used '
+      'to synthesize variables, e.g. -e \'PATCH_HI=int('
+      'PATCH)//256\'.')
+ parser.add_argument(
+ '-a',
+ '--arch',
+ default=None,
+ choices=android_chrome_version.ARCH_CHOICES,
+ help='Set which cpu architecture the build is for.')
+ parser.add_argument('--os', default=None, help='Set the target os.')
+ parser.add_argument('--official', action='store_true',
+ help='Whether the current build should be an official '
+ 'build, used in addition to the environment '
+ 'variable.')
+ parser.add_argument(
+ '--next',
+ action='store_true',
+ help='Whether the current build should be a "next" '
+ 'build, which targets pre-release versions of '
+ 'Android')
+ parser.add_argument('args', nargs=argparse.REMAINDER,
+ help='For compatibility: INPUT and OUTPUT can be '
+ 'passed as positional arguments.')
+ return parser
+
+
+def BuildEvals(options, parser):
+ """Construct a dict of passed '-e' arguments for evaluating."""
+ evals = {}
+ for expression in options.eval:
+ try:
+ evals.update(dict([expression.split('=', 1)]))
+ except ValueError:
+ parser.error('-e requires VAR=VAL')
+ return evals
+
+
+def ModifyOptionsCompat(options, parser):
+ """Support compatibility with old versions.
+
+ Specifically, for old versions that considered the first two
+ positional arguments shorthands for --input and --output.
+ """
+ while len(options.args) and (options.input is None or options.output is None):
+ if options.input is None:
+ options.input = options.args.pop(0)
+ elif options.output is None:
+ options.output = options.args.pop(0)
+ if options.args:
+ parser.error('Unexpected arguments: %r' % options.args)
+
+
+def GenerateValues(options, evals):
+ """Construct a dict of raw values used to generate output.
+
+ e.g. this could return a dict like
+ {
+ 'BUILD': 74,
+ }
+
+ which would be used to resolve a template like
+ 'build = "@BUILD@"' into 'build = "74"'
+
+ """
+ values = FetchValues(options.file, options.official)
+
+ for key, val in evals.items():
+ values[key] = str(eval(val, globals(), values))
+
+ if options.os == 'android':
+ android_chrome_version_codes = android_chrome_version.GenerateVersionCodes(
+ values, options.arch, options.next)
+ values.update(android_chrome_version_codes)
+
+ return values
+
+
+def GenerateOutputContents(options, values):
+ """Construct output string (e.g. from template).
+
+ Arguments:
+ options -- argparse parsed arguments
+ values -- dict with raw values used to resolve the keywords in a template
+ string
+ """
+
+ if options.template is not None:
+ return SubstTemplate(options.template, values)
+ elif options.input:
+ return SubstFile(options.input, values)
+ else:
+ # Generate a default set of version information.
+ return """MAJOR=%(MAJOR)s
+MINOR=%(MINOR)s
+BUILD=%(BUILD)s
+PATCH=%(PATCH)s
+LASTCHANGE=%(LASTCHANGE)s
+OFFICIAL_BUILD=%(OFFICIAL_BUILD)s
+""" % values
+
+
+def BuildOutput(args):
+ """Gets all input and output values needed for writing output."""
+ # Build argparse parser with arguments
+ parser = BuildParser()
+ options = parser.parse_args(args)
+
+ # Get dict of passed '-e' arguments for evaluating
+ evals = BuildEvals(options, parser)
+ # For compatibility with interface that considered first two positional
+ # arguments shorthands for --input and --output.
+ ModifyOptionsCompat(options, parser)
+
+ # Get the raw values that will be used the generate the output
+ values = GenerateValues(options, evals)
+ # Get the output string
+ contents = GenerateOutputContents(options, values)
+
+ return {'options': options, 'contents': contents}
+
+
+def main():
+ output = BuildOutput(sys.argv[1:])
+
+ if output['options'].output is not None:
+ WriteIfChanged(output['options'].output, output['contents'])
+ else:
+ print(output['contents'])
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/deps/v8/build/util/version_test.py b/deps/v8/build/util/version_test.py
new file mode 100644
index 0000000000..2a65ddc716
--- /dev/null
+++ b/deps/v8/build/util/version_test.py
@@ -0,0 +1,174 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import unittest
+
+import mock
+import version
+
+
+def _ReplaceArgs(args, *replacements):
+ new_args = args[:]
+ for flag, val in replacements:
+ flag_index = args.index(flag)
+ new_args[flag_index + 1] = val
+ return new_args
+
+
+class _VersionTest(unittest.TestCase):
+ """Unittests for the version module.
+ """
+
+ _CHROME_VERSION_FILE = os.path.join(
+ os.path.dirname(__file__), os.pardir, os.pardir, 'chrome', 'VERSION')
+
+ _SCRIPT = os.path.join(os.path.dirname(__file__), 'version.py')
+
+ _EXAMPLE_VERSION = {
+ 'MAJOR': '74',
+ 'MINOR': '0',
+ 'BUILD': '3720',
+ 'PATCH': '0',
+ }
+
+ _EXAMPLE_TEMPLATE = (
+ 'full = "@MAJOR@.@MINOR@.@BUILD@.@PATCH@" '
+ 'major = "@MAJOR@" minor = "@MINOR@" '
+ 'build = "@BUILD@" patch = "@PATCH@" version_id = @VERSION_ID@ ')
+
+ _ANDROID_CHROME_VARS = [
+ 'chrome_version_code',
+ 'chrome_modern_version_code',
+ 'monochrome_version_code',
+ 'trichrome_version_code',
+ 'webview_stable_version_code',
+ 'webview_beta_version_code',
+ 'webview_dev_version_code',
+ ]
+
+ _EXAMPLE_ANDROID_TEMPLATE = (
+ _EXAMPLE_TEMPLATE + ''.join(
+ ['%s = "@%s@" ' % (el, el.upper()) for el in _ANDROID_CHROME_VARS]))
+
+ _EXAMPLE_ARGS = [
+ '-f',
+ _CHROME_VERSION_FILE,
+ '-t',
+ _EXAMPLE_TEMPLATE,
+ ]
+
+ _EXAMPLE_ANDROID_ARGS = _ReplaceArgs(_EXAMPLE_ARGS,
+ ['-t', _EXAMPLE_ANDROID_TEMPLATE]) + [
+ '-a',
+ 'arm',
+ '--os',
+ 'android',
+ ]
+
+ @staticmethod
+ def _RunBuildOutput(new_version_values={},
+ get_new_args=lambda old_args: old_args):
+ """Parameterized helper method for running the main testable method in
+ version.py.
+
+ Keyword arguments:
+ new_version_values -- dict used to update _EXAMPLE_VERSION
+ get_new_args -- lambda for updating _EXAMPLE_ANDROID_ARGS
+ """
+
+ with mock.patch('version.FetchValuesFromFile') as \
+ fetch_values_from_file_mock:
+
+ fetch_values_from_file_mock.side_effect = (lambda values, file :
+ values.update(
+ dict(_VersionTest._EXAMPLE_VERSION, **new_version_values)))
+
+ new_args = get_new_args(_VersionTest._EXAMPLE_ARGS)
+ return version.BuildOutput(new_args)
+
+ def testFetchValuesFromFile(self):
+    """It returns a dict in the correct format - { <str>: <str> } - to verify
+    the assumption of other tests that mock this function.
+    """
+ result = {}
+ version.FetchValuesFromFile(result, self._CHROME_VERSION_FILE)
+
+ for key, val in result.iteritems():
+ self.assertIsInstance(key, str)
+ self.assertIsInstance(val, str)
+
+ def testBuildOutputAndroid(self):
+    """Assert the output includes assignments of the expected variables."""
+ output = self._RunBuildOutput(
+ get_new_args=lambda args: self._EXAMPLE_ANDROID_ARGS)
+ contents = output['contents']
+
+ self.assertRegexpMatches(contents, r'\bchrome_version_code = "\d+"\s')
+ self.assertRegexpMatches(contents,
+ r'\bchrome_modern_version_code = "\d+"\s')
+ self.assertRegexpMatches(contents, r'\bmonochrome_version_code = "\d+"\s')
+ self.assertRegexpMatches(contents, r'\btrichrome_version_code = "\d+"\s')
+ self.assertRegexpMatches(contents,
+ r'\bwebview_stable_version_code = "\d+"\s')
+ self.assertRegexpMatches(contents, r'\bwebview_beta_version_code = "\d+"\s')
+ self.assertRegexpMatches(contents, r'\bwebview_dev_version_code = "\d+"\s')
+
+ def testBuildOutputAndroidArchVariantsArm64(self):
+ """Assert 64-bit-specific version codes"""
+ new_template = (
+ self._EXAMPLE_ANDROID_TEMPLATE +
+ "monochrome_64_32_version_code = \"@MONOCHROME_64_32_VERSION_CODE@\" "
+ "monochrome_64_version_code = \"@MONOCHROME_64_VERSION_CODE@\" "
+ "trichrome_64_32_version_code = \"@TRICHROME_64_32_VERSION_CODE@\" "
+ "trichrome_64_version_code = \"@TRICHROME_64_VERSION_CODE@\" ")
+ args_with_template = _ReplaceArgs(self._EXAMPLE_ANDROID_ARGS,
+ ['-t', new_template])
+ new_args = _ReplaceArgs(args_with_template, ['-a', 'arm64'])
+ output = self._RunBuildOutput(get_new_args=lambda args: new_args)
+ contents = output['contents']
+
+ self.assertRegexpMatches(contents,
+ r'\bmonochrome_64_32_version_code = "\d+"\s')
+ self.assertRegexpMatches(contents,
+ r'\bmonochrome_64_version_code = "\d+"\s')
+ self.assertRegexpMatches(contents,
+ r'\btrichrome_64_32_version_code = "\d+"\s')
+ self.assertRegexpMatches(contents,
+ r'\btrichrome_64_version_code = "\d+"\s')
+
+ def testBuildOutputAndroidArchVariantsX64(self):
+ """Assert 64-bit-specific version codes"""
+ new_template = (
+ self._EXAMPLE_ANDROID_TEMPLATE +
+ "monochrome_64_32_version_code = \"@MONOCHROME_64_32_VERSION_CODE@\" "
+ "monochrome_64_version_code = \"@MONOCHROME_64_VERSION_CODE@\" "
+ "trichrome_64_32_version_code = \"@TRICHROME_64_32_VERSION_CODE@\" "
+ "trichrome_64_version_code = \"@TRICHROME_64_VERSION_CODE@\" ")
+ args_with_template = _ReplaceArgs(self._EXAMPLE_ANDROID_ARGS,
+ ['-t', new_template])
+ new_args = _ReplaceArgs(args_with_template, ['-a', 'x64'])
+ output = self._RunBuildOutput(get_new_args=lambda args: new_args)
+ contents = output['contents']
+
+ self.assertRegexpMatches(contents,
+ r'\bmonochrome_64_32_version_code = "\d+"\s')
+ self.assertRegexpMatches(contents,
+ r'\bmonochrome_64_version_code = "\d+"\s')
+ self.assertRegexpMatches(contents,
+ r'\btrichrome_64_32_version_code = "\d+"\s')
+ self.assertRegexpMatches(contents,
+ r'\btrichrome_64_version_code = "\d+"\s')
+
+ def testBuildOutputAndroidChromeArchInput(self):
+ """Assert it raises an exception when using an invalid architecture input"""
+ new_args = _ReplaceArgs(self._EXAMPLE_ANDROID_ARGS, ['-a', 'foobar'])
+ with self.assertRaises(SystemExit) as cm:
+ self._RunBuildOutput(get_new_args=lambda args: new_args)
+
+ self.assertEqual(cm.exception.code, 2)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/build/util/webkit_version.h.in b/deps/v8/build/util/webkit_version.h.in
new file mode 100644
index 0000000000..41960e7d88
--- /dev/null
+++ b/deps/v8/build/util/webkit_version.h.in
@@ -0,0 +1,9 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// webkit_version.h is generated from webkit_version.h.in. Edit the source!
+
+#define WEBKIT_VERSION_MAJOR 537
+#define WEBKIT_VERSION_MINOR 36
+#define WEBKIT_SVN_REVISION "@@LASTCHANGE@"