diff options
Diffstat (limited to 'deps/v8/build/toolchain')
44 files changed, 5862 insertions, 0 deletions
diff --git a/deps/v8/build/toolchain/BUILD.gn b/deps/v8/build/toolchain/BUILD.gn new file mode 100644 index 0000000000..75701ded70 --- /dev/null +++ b/deps/v8/build/toolchain/BUILD.gn @@ -0,0 +1,25 @@ +# Copyright 2016 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import("//build/toolchain/concurrent_links.gni") +import("//build/toolchain/goma.gni") + +declare_args() { + # Pool for non goma tasks. + action_pool_depth = -1 +} + +if (action_pool_depth == -1 || use_goma) { + action_pool_depth = exec_script("get_cpu_count.py", [], "value") +} + +if (current_toolchain == default_toolchain) { + pool("link_pool") { + depth = concurrent_links + } + + pool("action_pool") { + depth = action_pool_depth + } +} diff --git a/deps/v8/build/toolchain/OWNERS b/deps/v8/build/toolchain/OWNERS new file mode 100644 index 0000000000..b329d48bfb --- /dev/null +++ b/deps/v8/build/toolchain/OWNERS @@ -0,0 +1,8 @@ +dpranke@chromium.org +scottmg@chromium.org + +# Clang Static Analyzer. +per-file clang_static_analyzer*=mmoroz@chromium.org + +# Code Coverage. +per-file *code_coverage*=mmoroz@chromium.org diff --git a/deps/v8/build/toolchain/aix/BUILD.gn b/deps/v8/build/toolchain/aix/BUILD.gn new file mode 100644 index 0000000000..202e59e652 --- /dev/null +++ b/deps/v8/build/toolchain/aix/BUILD.gn @@ -0,0 +1,21 @@ +# Copyright 2017 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +import("//build/toolchain/gcc_toolchain.gni") + +gcc_toolchain("ppc64") { + cc = "gcc" + cxx = "g++" + + readelf = "readelf" + nm = "nm" + ar = "ar" + ld = cxx + + toolchain_args = { + current_cpu = "ppc64" + current_os = "aix" + is_clang = false + } +} diff --git a/deps/v8/build/toolchain/android/BUILD.gn b/deps/v8/build/toolchain/android/BUILD.gn new file mode 100644 index 0000000000..97dd12dc75 --- /dev/null +++ b/deps/v8/build/toolchain/android/BUILD.gn @@ -0,0 +1,141 @@ +# Copyright 2013 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import("//build/config/android/config.gni") +import("//build/config/clang/clang.gni") +import("//build/config/sysroot.gni") # Imports android/config.gni. +import("//build/toolchain/gcc_toolchain.gni") + +declare_args() { + # Whether unstripped binaries, i.e. compiled with debug symbols, should be + # considered runtime_deps rather than stripped ones. + android_unstripped_runtime_outputs = true +} + +# The Android clang toolchains share most of the same parameters, so we have this +# wrapper around gcc_toolchain to avoid duplication of logic. +# +# Parameters: +# - toolchain_root +# Path to cpu-specific toolchain within the ndk. +# - sysroot +# Sysroot for this architecture. +# - lib_dir +# Subdirectory inside of sysroot where libs go. +# - binary_prefix +# Prefix of compiler executables. +template("android_clang_toolchain") { + gcc_toolchain(target_name) { + assert(defined(invoker.toolchain_args), + "toolchain_args must be defined for android_clang_toolchain()") + toolchain_args = invoker.toolchain_args + toolchain_args.current_os = "android" + + # Output linker map files for binary size analysis. + enable_linker_map = true + + # Make our manually injected libs relative to the build dir. 
+ _ndk_lib = + rebase_path(invoker.sysroot + "/" + invoker.lib_dir, root_build_dir) + + libs_section_prefix = "$_ndk_lib/crtbegin_dynamic.o" + libs_section_postfix = "$_ndk_lib/crtend_android.o" + + solink_libs_section_prefix = "$_ndk_lib/crtbegin_so.o" + solink_libs_section_postfix = "$_ndk_lib/crtend_so.o" + + _android_tool_prefix = + "${invoker.toolchain_root}/bin/${invoker.binary_prefix}-" + + # The tools should be run relative to the build dir. + _tool_prefix = rebase_path("$_android_tool_prefix", root_build_dir) + + _prefix = rebase_path("$clang_base_path/bin", root_build_dir) + cc = "$_prefix/clang" + cxx = "$_prefix/clang++" + ar = "$_prefix/llvm-ar" + ld = cxx + readelf = _tool_prefix + "readelf" + nm = _tool_prefix + "nm" + strip = rebase_path("//buildtools/third_party/eu-strip/bin/eu-strip", + root_build_dir) + use_unstripped_as_runtime_outputs = android_unstripped_runtime_outputs + + # Don't use .cr.so for loadable_modules since they are always loaded via + # absolute path. + loadable_module_extension = ".so" + } +} + +android_clang_toolchain("android_clang_x86") { + toolchain_root = x86_android_toolchain_root + sysroot = "$android_ndk_root/$x86_android_sysroot_subdir" + lib_dir = "usr/lib" + binary_prefix = "i686-linux-android" + toolchain_args = { + current_cpu = "x86" + + # We lack the libclang_rt.profile library for x86 and x86_64, so we cannot + # link any binaries that are generated with coverage instrumentation. + # Therefore we need to turn off 'use_clang_coverage' for this toolchain. 
+ # TODO(crbug.com/865376) + use_clang_coverage = false + } +} + +android_clang_toolchain("android_clang_arm") { + toolchain_root = arm_android_toolchain_root + sysroot = "$android_ndk_root/$arm_android_sysroot_subdir" + lib_dir = "usr/lib" + binary_prefix = "arm-linux-androideabi" + toolchain_args = { + current_cpu = "arm" + } +} + +android_clang_toolchain("android_clang_mipsel") { + toolchain_root = mips_android_toolchain_root + sysroot = "$android_ndk_root/$mips_android_sysroot_subdir" + lib_dir = "usr/lib" + binary_prefix = "mipsel-linux-android" + toolchain_args = { + current_cpu = "mipsel" + } +} + +android_clang_toolchain("android_clang_x64") { + toolchain_root = x86_64_android_toolchain_root + sysroot = "$android_ndk_root/$x86_64_android_sysroot_subdir" + lib_dir = "usr/lib64" + binary_prefix = "x86_64-linux-android" + toolchain_args = { + current_cpu = "x64" + + # We lack the libclang_rt.profile library for x86 and x86_64, so we cannot + # link any binaries that are generated with coverage instrumentation. + # Therefore we need to turn off 'use_clang_coverage' for this toolchain. + # TODO(crbug.com/865376) + use_clang_coverage = false + } +} + +android_clang_toolchain("android_clang_arm64") { + toolchain_root = arm64_android_toolchain_root + sysroot = "$android_ndk_root/$arm64_android_sysroot_subdir" + lib_dir = "usr/lib" + binary_prefix = "aarch64-linux-android" + toolchain_args = { + current_cpu = "arm64" + } +} + +android_clang_toolchain("android_clang_mips64el") { + toolchain_root = mips64_android_toolchain_root + sysroot = "$android_ndk_root/$mips64_android_sysroot_subdir" + lib_dir = "usr/lib64" + binary_prefix = "mips64el-linux-android" + toolchain_args = { + current_cpu = "mips64el" + } +} diff --git a/deps/v8/build/toolchain/cc_wrapper.gni b/deps/v8/build/toolchain/cc_wrapper.gni new file mode 100644 index 0000000000..0a03dde8d6 --- /dev/null +++ b/deps/v8/build/toolchain/cc_wrapper.gni @@ -0,0 +1,40 @@ +# Copyright (c) 2014 The Chromium Authors. 
All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import("//build/toolchain/goma.gni") + +# Defines the configuration of cc wrapper +# ccache: a c/c++ compiler cache which can greatly reduce recompilation times. +# icecc, distcc: it takes compile jobs from a build and distributes them among +# remote machines allowing a parallel build. +# +# TIPS +# +# 1) ccache +# Set clang_use_chrome_plugins=false if using ccache 3.1.9 or earlier, since +# these versions don't support -Xclang. (3.1.10 and later will silently +# ignore -Xclang, so it doesn't matter if you disable clang_use_chrome_plugins +# or not). +# +# Use ccache 3.2 or later to avoid clang unused argument warnings: +# https://bugzilla.samba.org/show_bug.cgi?id=8118 +# +# To avoid -Wparentheses-equality clang warnings, at some cost in terms of +# speed, you can do: +# export CCACHE_CPP2=yes +# +# 2) icecc +# Set clang_use_chrome_plugins=false because icecc cannot distribute custom +# clang libraries. +# +# To use icecc and ccache together, set cc_wrapper = "ccache" with +# export CCACHE_PREFIX=icecc + +declare_args() { + # Set to "ccache", "icecc" or "distcc". Probably doesn't work on windows. + cc_wrapper = "" +} + +assert(!use_goma || cc_wrapper == "", + "use_goma and cc_wrapper can not be used together.") diff --git a/deps/v8/build/toolchain/clang_code_coverage_wrapper.py b/deps/v8/build/toolchain/clang_code_coverage_wrapper.py new file mode 100755 index 0000000000..9697805690 --- /dev/null +++ b/deps/v8/build/toolchain/clang_code_coverage_wrapper.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python +# Copyright 2018 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +"""Removes code coverage flags from invocations of the Clang C/C++ compiler. + +If the GN arg `use_clang_coverage=true`, this script will be invoked by default. 
+GN will add coverage instrumentation flags to almost all source files. + +This script is used to remove instrumentation flags from a subset of the source +files. By default, it will not remove flags from any files. If the option +--files-to-instrument is passed, this script will remove flags from all files +except the ones listed in --files-to-instrument. + +This script also contains hard-coded exclusion lists of files to never +instrument, indexed by target operating system. Files in these lists have their +flags removed in both modes. The OS can be selected with --target-os. + +The path to the coverage instrumentation input file should be relative to the +root build directory, and the file consists of multiple lines where each line +represents a path to a source file, and the specified paths must be relative to +the root build directory. e.g. ../../base/task/post_task.cc for build +directory 'out/Release'. + +One caveat with this compiler wrapper is that it may introduce unexpected +behaviors in incremental builds when the file path to the coverage +instrumentation input file changes between consecutive runs, so callers of this +script are strongly advised to always use the same path such as +"${root_build_dir}/coverage_instrumentation_input.txt". + +It's worth noting on try job builders, if the contents of the instrumentation +file changes so that a file doesn't need to be instrumented any longer, it will +be recompiled automatically because if try job B runs after try job A, the files +that were instrumented in A will be updated (i.e., reverted to the checked in +version) in B, and so they'll be considered out of date by ninja and recompiled. + +Example usage: + clang_code_coverage_wrapper.py \\ + --files-to-instrument=coverage_instrumentation_input.txt +""" + +import argparse +import os +import subprocess +import sys + +# Flags used to enable coverage instrumentation. 
+# Flags should be listed in the same order that they are added in +# build/config/coverage/BUILD.gn +_COVERAGE_FLAGS = [ + '-fprofile-instr-generate', '-fcoverage-mapping', + # Following experimental flags remove unused header functions from the + # coverage mapping data embedded in the test binaries, and the reduction + # of binary size enables building Chrome's large unit test targets on + # MacOS. Please refer to crbug.com/796290 for more details. + '-mllvm', '-limited-coverage-experimental=true' +] + +# Map of exclusion lists indexed by target OS. +# If no target OS is defined, or one is defined that doesn't have a specific +# entry, use the 'default' exclusion_list. Anything added to 'default' will +# apply to all platforms that don't have their own specific list. +_COVERAGE_EXCLUSION_LIST_MAP = { + 'default': [], + 'chromeos': [ + # These files caused clang to crash while compiling them. They are + # excluded pending an investigation into the underlying compiler bug. + '../../third_party/webrtc/p2p/base/p2p_transport_channel.cc', + '../../third_party/icu/source/common/uts46.cpp', + '../../third_party/icu/source/common/ucnvmbcs.cpp', + '../../base/android/android_image_reader_compat.cc', + ] +} + + +def _remove_flags_from_command(command): + # We need to remove the coverage flags for this file, but we only want to + # remove them if we see the exact sequence defined in _COVERAGE_FLAGS. + # That ensures that we only remove the flags added by GN when + # "use_clang_coverage" is true. Otherwise, we would remove flags set by + # other parts of the build system. + start_flag = _COVERAGE_FLAGS[0] + num_flags = len(_COVERAGE_FLAGS) + start_idx = 0 + try: + while True: + idx = command.index(start_flag, start_idx) + start_idx = idx + 1 + if command[idx:idx+num_flags] == _COVERAGE_FLAGS: + del command[idx:idx+num_flags] + break + except ValueError: + pass + +def main(): + # TODO(crbug.com/898695): Make this wrapper work on Windows platform. 
+ arg_parser = argparse.ArgumentParser() + arg_parser.usage = __doc__ + arg_parser.add_argument( + '--files-to-instrument', + type=str, + help='Path to a file that contains a list of file names to instrument.') + arg_parser.add_argument( + '--target-os', + required=False, + help='The OS to compile for.') + arg_parser.add_argument('args', nargs=argparse.REMAINDER) + parsed_args = arg_parser.parse_args() + + if (parsed_args.files_to_instrument and + not os.path.isfile(parsed_args.files_to_instrument)): + raise Exception('Path to the coverage instrumentation file: "%s" doesn\'t ' + 'exist.' % parsed_args.files_to_instrument) + + compile_command = parsed_args.args + if not any('clang' in s for s in compile_command): + return subprocess.call(compile_command) + + try: + # The command is assumed to use Clang as the compiler, and the path to the + # source file is behind the -c argument, and the path to the source path is + # relative to the root build directory. For example: + # clang++ -fvisibility=hidden -c ../../base/files/file_path.cc -o \ + # obj/base/base/file_path.o + index_dash_c = compile_command.index('-c') + except ValueError: + print '-c argument is not found in the compile command.' 
+ raise + + if index_dash_c + 1 >= len(compile_command): + raise Exception('Source file to be compiled is missing from the command.') + + compile_source_file = compile_command[index_dash_c + 1] + target_os = parsed_args.target_os + if target_os not in _COVERAGE_EXCLUSION_LIST_MAP: + target_os = 'default' + exclusion_list = _COVERAGE_EXCLUSION_LIST_MAP[target_os] + + if compile_source_file in exclusion_list: + _remove_flags_from_command(compile_command) + elif parsed_args.files_to_instrument: + with open(parsed_args.files_to_instrument) as f: + if compile_source_file not in f.read(): + _remove_flags_from_command(compile_command) + + return subprocess.call(compile_command) + +if __name__ == '__main__': + sys.exit(main()) diff --git a/deps/v8/build/toolchain/concurrent_links.gni b/deps/v8/build/toolchain/concurrent_links.gni new file mode 100644 index 0000000000..84607bc676 --- /dev/null +++ b/deps/v8/build/toolchain/concurrent_links.gni @@ -0,0 +1,60 @@ +# Copyright 2016 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# This file should only be imported from files that define toolchains. +# There's no way to enforce this exactly, but all toolchains are processed +# in the context of the default_toolchain, so we can at least check for that. +assert(current_toolchain == default_toolchain) + +import("//build/config/compiler/compiler.gni") +import("//build/config/sanitizers/sanitizers.gni") +import("//build/toolchain/toolchain.gni") + +declare_args() { + # Limit the number of concurrent links; we often want to run fewer + # links at once than we do compiles, because linking is memory-intensive. + # The default to use varies by platform and by the amount of memory + # available, so we call out to a script to get the right value. 
+ concurrent_links = -1 +} + +if (concurrent_links == -1) { + if (use_thin_lto) { + _args = [ + "--mem_per_link_gb=10", + "--reserve_mem_gb=10", + ] + } else if (use_sanitizer_coverage || use_fuzzing_engine) { + # Sanitizer coverage instrumentation increases linker memory consumption + # significantly. + _args = [ "--mem_per_link_gb=16" ] + } else if (is_win && symbol_level == 1 && !is_debug) { + _args = [ "--mem_per_link_gb=3" ] + } else if (is_win) { + _args = [ "--mem_per_link_gb=5" ] + } else if (is_mac) { + _args = [ "--mem_per_link_gb=4" ] + } else if (is_android && !is_component_build && symbol_level == 2) { + # Full debug symbols require large memory for link. + _args = [ "--mem_per_link_gb=25" ] + } else if (is_android && !is_debug && !using_sanitizer && symbol_level < 2) { + # Increase the number of concurrent links for release bots. Debug builds + # make heavier use of ProGuard, and so should not be raised. Sanitizers also + # increase the memory overhead. + if (symbol_level == 1) { + _args = [ "--mem_per_link_gb=6" ] + } else { + _args = [ "--mem_per_link_gb=4" ] + } + } else if (is_linux && !is_chromeos && symbol_level == 0) { + # Memory consumption on link without debug symbols is low on linux. + _args = [ "--mem_per_link_gb=3" ] + } else { + _args = [] + } + + # TODO(crbug.com/617429) Pass more build configuration info to the script + # so that we can compute better values. + concurrent_links = exec_script("get_concurrent_links.py", _args, "value") +} diff --git a/deps/v8/build/toolchain/cros/BUILD.gn b/deps/v8/build/toolchain/cros/BUILD.gn new file mode 100644 index 0000000000..5a9561f232 --- /dev/null +++ b/deps/v8/build/toolchain/cros/BUILD.gn @@ -0,0 +1,173 @@ +# Copyright 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +import("//build/config/compiler/compiler.gni") +import("//build/config/sysroot.gni") +import("//build/toolchain/gcc_toolchain.gni") +import("//build/toolchain/cros_toolchain.gni") + +# This is the normal toolchain for most targets. +gcc_toolchain("target") { + ar = cros_target_ar + cc = cros_target_cc + cxx = cros_target_cxx + + # Relativize path if compiler is specified such that not to lookup from $PATH + # and cc/cxx does not contain additional flags. + if (cc != get_path_info(cc, "file") && string_replace(cc, " ", "") == cc) { + cc = rebase_path(cc, root_build_dir) + } + if (cxx != get_path_info(cxx, "file") && string_replace(cxx, " ", "") == cxx) { + cxx = rebase_path(cxx, root_build_dir) + } + + ld = cxx + if (cros_target_ld != "") { + ld = cros_target_ld + } + if (cros_target_nm != "") { + nm = cros_target_nm + } + if (cros_target_readelf != "") { + readelf = cros_target_readelf + } + extra_cflags = cros_target_extra_cflags + extra_cppflags = cros_target_extra_cppflags + extra_cxxflags = cros_target_extra_cxxflags + extra_ldflags = cros_target_extra_ldflags + + toolchain_args = { + cc_wrapper = "" + current_cpu = target_cpu + current_os = "chromeos" + is_clang = is_clang + use_debug_fission = use_debug_fission + use_gold = use_gold + use_sysroot = use_sysroot + } +} + +# This is a special toolchain needed just for the nacl_bootstrap target in +# //native_client/src/trusted/service_runtime/linux. It is identical +# to ":target" except that it forces use_debug_fission, use_gold, and +# use_sysroot off, and allows the user to set different sets of extra flags. +gcc_toolchain("nacl_bootstrap") { + ar = cros_target_ar + cc = cros_target_cc + cxx = cros_target_cxx + + # Relativize path if compiler is specified such that not to lookup from $PATH + # and cc/cxx does not contain additional flags. 
+ if (cc != get_path_info(cc, "file") && string_replace(cc, " ", "") == cc) { + cc = rebase_path(cc, root_build_dir) + } + if (cxx != get_path_info(cxx, "file") && string_replace(cxx, " ", "") == cxx) { + cxx = rebase_path(cxx, root_build_dir) + } + ld = cxx + if (cros_target_ld != "") { + ld = cros_target_ld + } + if (cros_target_nm != "") { + nm = cros_target_nm + } + if (cros_target_readelf != "") { + readelf = cros_target_readelf + } + extra_cflags = cros_nacl_bootstrap_extra_cflags + extra_cppflags = cros_nacl_bootstrap_extra_cppflags + extra_cxxflags = cros_nacl_bootstrap_extra_cxxflags + extra_ldflags = cros_nacl_bootstrap_extra_ldflags + + toolchain_args = { + cc_wrapper = "" + current_cpu = target_cpu + current_os = "chromeos" + is_clang = is_clang + use_debug_fission = false + use_gold = false + use_sysroot = false + } +} + +gcc_toolchain("host") { + # These are args for the template. + ar = cros_host_ar + cc = cros_host_cc + cxx = cros_host_cxx + + # Relativize path if compiler is specified such that not to lookup from $PATH + # and cc/cxx does not contain additional flags. + if (cc != get_path_info(cc, "file") && string_replace(cc, " ", "") == cc) { + cc = rebase_path(cc, root_build_dir) + } + if (cxx != get_path_info(cxx, "file") && string_replace(cxx, " ", "") == cxx) { + cxx = rebase_path(cxx, root_build_dir) + } + ld = cxx + if (cros_host_ld != "") { + ld = cros_host_ld + } + if (cros_host_nm != "") { + nm = cros_host_nm + } + if (cros_host_readelf != "") { + readelf = cros_host_readelf + } + extra_cflags = cros_host_extra_cflags + extra_cppflags = cros_host_extra_cppflags + extra_cxxflags = cros_host_extra_cxxflags + extra_ldflags = cros_host_extra_ldflags + + toolchain_args = { + cc_wrapper = "" + is_clang = cros_host_is_clang + current_cpu = host_cpu + current_os = "linux" + use_sysroot = false + } +} + +gcc_toolchain("v8_snapshot") { + # These are args for the template. 
+ ar = cros_v8_snapshot_ar + cc = cros_v8_snapshot_cc + cxx = cros_v8_snapshot_cxx + + # Relativize path if compiler is specified such that not to lookup from $PATH + # and cc/cxx does not contain additional flags. + if (cc != get_path_info(cc, "file") && string_replace(cc, " ", "") == cc) { + cc = rebase_path(cc, root_build_dir) + } + if (cxx != get_path_info(cxx, "file") && string_replace(cxx, " ", "") == cxx) { + cxx = rebase_path(cxx, root_build_dir) + } + ld = cxx + if (cros_v8_snapshot_ld != "") { + ld = cros_v8_snapshot_ld + } + if (cros_v8_snapshot_nm != "") { + nm = cros_v8_snapshot_nm + } + if (cros_v8_snapshot_readelf != "") { + readelf = cros_v8_snapshot_readelf + } + extra_cflags = cros_v8_snapshot_extra_cflags + extra_cppflags = cros_v8_snapshot_extra_cppflags + extra_cxxflags = cros_v8_snapshot_extra_cxxflags + extra_ldflags = cros_v8_snapshot_extra_ldflags + + toolchain_args = { + cc_wrapper = "" + is_clang = cros_v8_snapshot_is_clang + if (target_cpu == "x86" || target_cpu == "arm" || target_cpu == "mipsel") { + current_cpu = "x86" + } else { + current_cpu = "x64" + } + v8_current_cpu = v8_target_cpu + current_os = "linux" + use_sysroot = false + } +} diff --git a/deps/v8/build/toolchain/cros_toolchain.gni b/deps/v8/build/toolchain/cros_toolchain.gni new file mode 100644 index 0000000000..fdfdb0704e --- /dev/null +++ b/deps/v8/build/toolchain/cros_toolchain.gni @@ -0,0 +1,81 @@ +# Copyright 2016 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# CrOS builds must cross-compile on a Linux host for the actual CrOS +# device target. There are many different CrOS devices so the build +# system provides configuration variables that permit a CrOS build to +# control the cross-compilation tool chain. However, requiring such +# fine-grain specification is tedious for build-bots and developers. 
+# Consequently, the CrOS build system defaults to a convenience +# compilation mode where the compilation host is also the build target. +# +# Chrome can be compiled in this way with the gn variable: +# +# target_os = "chromeos" +# +# To perform a board-specific build, first obtain the correct system +# root (http://goo.gl/aFB4XH) for the board. Then configure GN to use it +# by setting appropriate cross-compilation variables. +# +# For example, to compile a Chrome source tree in /g/src for an +# auron_paine CrOS device with the system root cached in /g/.cros_cache, +# the following GN arguments must be provided to configure +# cross-compilation with Goma acceleration. (NB: additional variables +# will be necessary to successfully compile a working CrOS Chrome. See +# the definition of GYP_DEFINES inside a sysroot shell.) +# +# goma_dir = "/g/.cros_cache/common/goma+2" +# target_sysroot= /g/.cros_cache/chrome-sdk/tarballs/auron_paine+7644.0.0+sysroot_chromeos-base_chromeos-chrome.tar.xz" +# cros_target_cc = "x86_64-cros-linux-gnu-gcc -B/g/.cros_cache/chrome-sdk/tarballs/auron_paine+7657.0.0+target_toolchain/usr/x86_64-pc-linux-gnu/x86_64-cros-linux-gnu/binutils-bin/2.25.51-gold" +# cros_target_cxx = "x86_64-cros-linux-gnu-g++ -B/g/.cros_cache/chrome-sdk/tarballs/auron_paine+7657.0.0+target_toolchain/usr/x86_64-pc-linux-gnu/x86_64-cros-linux-gnu/binutils-bin/2.25.51-gold" +# cros_target_ar = "x86_64-cros-linux-gnu-gcc-ar" +# target_cpu = "x64" + +declare_args() { + # These must be specified for a board-specific build. + cros_target_ar = "ar" + cros_target_cc = "gcc" + cros_target_cxx = "g++" + cros_target_ld = "" + cros_target_nm = "" + cros_target_readelf = "" + + # These can be optionally set. The "_cppflags" will be applied to *both* + # C and C++ files; use "_cxxflags" for C++-only flags. 
+ cros_target_extra_cflags = "" + cros_target_extra_cppflags = "" + cros_target_extra_cxxflags = "" + cros_target_extra_ldflags = "" + + # is_clang is used instead of cros_target_is_clang + + cros_host_ar = "ar" + cros_host_cc = "gcc" + cros_host_cxx = "g++" + cros_host_ld = "" + cros_host_nm = "" + cros_host_readelf = "" + cros_host_extra_cflags = "" + cros_host_extra_cppflags = "" + cros_host_extra_cxxflags = "" + cros_host_extra_ldflags = "" + cros_host_is_clang = false + + cros_v8_snapshot_ar = "ar" + cros_v8_snapshot_cc = "gcc" + cros_v8_snapshot_cxx = "g++" + cros_v8_snapshot_ld = "" + cros_v8_snapshot_nm = "" + cros_v8_snapshot_readelf = "" + cros_v8_snapshot_extra_cflags = "" + cros_v8_snapshot_extra_cppflags = "" + cros_v8_snapshot_extra_cxxflags = "" + cros_v8_snapshot_extra_ldflags = "" + cros_v8_snapshot_is_clang = false + + cros_nacl_bootstrap_extra_cflags = "" + cros_nacl_bootstrap_extra_cppflags = "" + cros_nacl_bootstrap_extra_cxxflags = "" + cros_nacl_bootstrap_extra_ldflags = "" +} diff --git a/deps/v8/build/toolchain/fuchsia/BUILD.gn b/deps/v8/build/toolchain/fuchsia/BUILD.gn new file mode 100644 index 0000000000..06ac5e5b74 --- /dev/null +++ b/deps/v8/build/toolchain/fuchsia/BUILD.gn @@ -0,0 +1,41 @@ +# Copyright 2017 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import("//build/config/fuchsia/config.gni") +import("//build/toolchain/gcc_toolchain.gni") + +# Fuchsia builds using the Clang toolchain, with most parameters common across +# the different target architectures. +template("fuchsia_clang_toolchain") { + clang_toolchain(target_name) { + assert(host_os == "linux" || host_os == "mac") + assert(defined(invoker.toolchain_args), + "toolchain_args must be defined for fuchsia_clang_toolchain()") + + # We want to build and strip binaries, but retain the unstripped binaries + # in runtime_deps to make them available for isolates. 
+ if (host_os == "linux") { + strip = rebase_path("//buildtools/third_party/eu-strip/bin/eu-strip", + root_build_dir) + use_unstripped_as_runtime_outputs = true + } + + default_shlib_subdir = "/lib" + + toolchain_args = invoker.toolchain_args + toolchain_args.current_os = "fuchsia" + } +} + +fuchsia_clang_toolchain("x64") { + toolchain_args = { + current_cpu = "x64" + } +} + +fuchsia_clang_toolchain("arm64") { + toolchain_args = { + current_cpu = "arm64" + } +} diff --git a/deps/v8/build/toolchain/fuchsia/OWNERS b/deps/v8/build/toolchain/fuchsia/OWNERS new file mode 100644 index 0000000000..3f809e82b1 --- /dev/null +++ b/deps/v8/build/toolchain/fuchsia/OWNERS @@ -0,0 +1 @@ +scottmg@chromium.org diff --git a/deps/v8/build/toolchain/gcc_link_wrapper.py b/deps/v8/build/toolchain/gcc_link_wrapper.py new file mode 100755 index 0000000000..8892f14bfe --- /dev/null +++ b/deps/v8/build/toolchain/gcc_link_wrapper.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python +# Copyright 2015 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Runs a linking command and optionally a strip command. + +This script exists to avoid using complex shell commands in +gcc_toolchain.gni's tool("link"), in case the host running the compiler +does not have a POSIX-like shell (e.g. Windows). +""" + +import argparse +import os +import subprocess +import sys + +import wrapper_utils + + +# When running on a Windows host and using a toolchain whose tools are +# actually wrapper scripts (i.e. .bat files on Windows) rather than binary +# executables, the "command" to run has to be prefixed with this magic. +# The GN toolchain definitions take care of that for when GN/Ninja is +# running the tool directly. When that command is passed in to this +# script, it appears as a unitary string but needs to be split up so that +# just 'cmd' is the actual command given to Python's subprocess module. 
+BAT_PREFIX = 'cmd /c call ' + +def CommandToRun(command): + if command[0].startswith(BAT_PREFIX): + command = command[0].split(None, 3) + command[1:] + return command + + +def main(): + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument('--strip', + help='The strip binary to run', + metavar='PATH') + parser.add_argument('--unstripped-file', + help='Executable file produced by linking command', + metavar='FILE') + parser.add_argument('--map-file', + help=('Use --Wl,-Map to generate a map file. Will be ' + 'gzipped if extension ends with .gz'), + metavar='FILE') + parser.add_argument('--output', + required=True, + help='Final output executable file', + metavar='FILE') + parser.add_argument('command', nargs='+', + help='Linking command') + args = parser.parse_args() + + # Work-around for gold being slow-by-default. http://crbug.com/632230 + fast_env = dict(os.environ) + fast_env['LC_ALL'] = 'C' + result = wrapper_utils.RunLinkWithOptionalMapFile(args.command, env=fast_env, + map_file=args.map_file) + if result != 0: + return result + + # Finally, strip the linked executable (if desired). + if args.strip: + result = subprocess.call(CommandToRun([ + args.strip, '-o', args.output, args.unstripped_file + ])) + + return result + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/deps/v8/build/toolchain/gcc_solink_wrapper.py b/deps/v8/build/toolchain/gcc_solink_wrapper.py new file mode 100755 index 0000000000..cb1c02d24e --- /dev/null +++ b/deps/v8/build/toolchain/gcc_solink_wrapper.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python +# Copyright 2015 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Runs 'ld -shared' and generates a .TOC file that's untouched when unchanged. 
"""Runs 'ld -shared' and generates a .TOC file that's untouched when unchanged.

This script exists to avoid using complex shell commands in
gcc_toolchain.gni's tool("solink"), in case the host running the compiler
does not have a POSIX-like shell (e.g. Windows).
"""

import argparse
import os
import subprocess
import sys

import wrapper_utils


def CollectSONAME(args):
  """Replaces: readelf -d $sofile | grep SONAME

  Returns a (return_code, toc_text) tuple; toc_text holds the SONAME lines.
  """
  toc = ''
  # universal_newlines=True makes the pipe a text stream so the
  # "'SONAME' in line" membership test below works on Python 3 as well
  # (where the default bytes pipe would raise TypeError).
  readelf = subprocess.Popen(
      wrapper_utils.CommandToRun([args.readelf, '-d', args.sofile]),
      stdout=subprocess.PIPE, bufsize=-1, universal_newlines=True)
  for line in readelf.stdout:
    if 'SONAME' in line:
      toc += line
  return readelf.wait(), toc


def CollectDynSym(args):
  """Replaces: nm --format=posix -g -D $sofile | cut -f1-2 -d' '

  Returns a (return_code, toc_text) tuple; toc_text has one
  "<symbol> <type>" entry per line.
  """
  toc = ''
  # Text mode for the same Python 3 compatibility reason as CollectSONAME.
  nm = subprocess.Popen(
      wrapper_utils.CommandToRun(
          [args.nm, '--format=posix', '-g', '-D', args.sofile]),
      stdout=subprocess.PIPE, bufsize=-1, universal_newlines=True)
  for line in nm.stdout:
    # Keep only the first two space-separated fields (symbol name and type).
    toc += ' '.join(line.split(' ', 2)[:2]) + '\n'
  return nm.wait(), toc


def CollectTOC(args):
  """Builds the full table of contents: SONAME lines, then dynamic symbols."""
  result, toc = CollectSONAME(args)
  if result == 0:
    result, dynsym = CollectDynSym(args)
    toc += dynsym
  return result, toc


def UpdateTOC(tocfile, toc):
  """Writes |toc| to |tocfile| only when the contents differ.

  Leaving an unchanged file untouched preserves its timestamp, which (with
  restat=true in GN) stops ninja from relinking downstream dependents.
  """
  if os.path.exists(tocfile):
    old_toc = open(tocfile, 'r').read()
  else:
    old_toc = None
  if toc != old_toc:
    open(tocfile, 'w').write(toc)


def main():
  """Links a shared object, regenerates its .TOC, and optionally strips it."""
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument('--readelf',
                      required=True,
                      help='The readelf binary to run',
                      metavar='PATH')
  parser.add_argument('--nm',
                      required=True,
                      help='The nm binary to run',
                      metavar='PATH')
  parser.add_argument('--strip',
                      help='The strip binary to run',
                      metavar='PATH')
  parser.add_argument('--sofile',
                      required=True,
                      help='Shared object file produced by linking command',
                      metavar='FILE')
  parser.add_argument('--tocfile',
                      required=True,
                      help='Output table-of-contents file',
                      metavar='FILE')
  parser.add_argument('--map-file',
                      help=('Use --Wl,-Map to generate a map file. Will be '
                            'gzipped if extension ends with .gz'),
                      metavar='FILE')
  parser.add_argument('--output',
                      required=True,
                      help='Final output shared object file',
                      metavar='FILE')
  parser.add_argument('command', nargs='+',
                      help='Linking command')
  args = parser.parse_args()

  # Work-around for gold being slow-by-default. http://crbug.com/632230
  fast_env = dict(os.environ)
  fast_env['LC_ALL'] = 'C'

  # First, run the actual link.
  command = wrapper_utils.CommandToRun(args.command)
  result = wrapper_utils.RunLinkWithOptionalMapFile(command, env=fast_env,
                                                    map_file=args.map_file)
  if result != 0:
    return result

  # Next, generate the contents of the TOC file.
  result, toc = CollectTOC(args)
  if result != 0:
    return result

  # If there is an existing TOC file with identical contents, leave it alone.
  # Otherwise, write out the TOC file.
  UpdateTOC(args.tocfile, toc)

  # Finally, strip the linked shared object file (if desired).
  if args.strip:
    result = subprocess.call(wrapper_utils.CommandToRun(
        [args.strip, '-o', args.output, args.sofile]))

  return result


if __name__ == "__main__":
  sys.exit(main())
+ +import("//build/config/clang/clang.gni") +import("//build/config/compiler/compiler.gni") +import("//build/config/coverage/coverage.gni") +import("//build/config/sanitizers/sanitizers.gni") +import("//build/config/v8_target_cpu.gni") +import("//build/toolchain/cc_wrapper.gni") +import("//build/toolchain/goma.gni") +import("//build/toolchain/toolchain.gni") + +if (is_nacl) { + # To keep NaCl variables out of builds that don't include NaCl, all + # variables defined in nacl/config.gni referenced here should be protected by + # is_nacl conditions. + import("//build/config/nacl/config.gni") +} + +declare_args() { + # Enables whitelist generation for IDR_ grit defines seen by the compiler. + # Currently works only on some platforms and enabled by default for release + # builds. + # Requires debug info, so disabled for symbol_level=0 & strip_debug_info=true. + enable_resource_whitelist_generation = + is_official_build && + # Don't enable for Android-on-Chrome OS so that they can build with + # symbol_level=0 without this failing (crbug.com/891164). + (target_os == "android" || target_os == "win") +} + +# When the arg is set via args.gn, it applies to all toolchains. In order to not +# hit the assert in grit_rule.gni, explicitly disable for host toolchains. +if (is_linux && target_os == "android") { + enable_resource_whitelist_generation = false +} + +# This template defines a toolchain for something that works like gcc +# (including clang). +# +# It requires the following variables specifying the executables to run: +# - ar +# - cc +# - cxx +# - ld +# +# Optional parameters that control the tools: +# +# - extra_cflags +# Extra flags to be appended when compiling C files (but not C++ files). +# - extra_cppflags +# Extra flags to be appended when compiling both C and C++ files. "CPP" +# stands for "C PreProcessor" in this context, although it can be +# used for non-preprocessor flags as well. Not to be confused with +# "CXX" (which follows). 
+# - extra_cxxflags +# Extra flags to be appended when compiling C++ files (but not C files). +# - extra_asmflags +# Extra flags to be appended when compiling assembly. +# - extra_ldflags +# Extra flags to be appended when linking +# +# - libs_section_prefix +# - libs_section_postfix +# The contents of these strings, if specified, will be placed around +# the libs section of the linker line. It allows one to inject libraries +# at the beginning and end for all targets in a toolchain. +# - solink_libs_section_prefix +# - solink_libs_section_postfix +# Same as libs_section_{pre,post}fix except used for solink instead of link. +# - link_outputs +# The content of this array, if specified, will be added to the list of +# outputs from the link command. This can be useful in conjunction with +# the post_link parameter. +# - use_unstripped_as_runtime_outputs +# When |strip| is set, mark unstripped executables as runtime deps rather +# than stripped ones. +# - post_link +# The content of this string, if specified, will be run as a separate +# command following the the link command. +# - deps +# Just forwarded to the toolchain definition. +# - executable_extension +# If this string is specified it will be used for the file extension +# for an executable, rather than using no extension; targets will +# still be able to override the extension using the output_extension +# variable. +# - rebuild_define +# The contents of this string, if specified, will be passed as a #define +# to the toolchain. It can be used to force recompiles whenever a +# toolchain is updated. +# - shlib_extension +# If this string is specified it will be used for the file extension +# for a shared library, rather than default value specified in +# toolchain.gni +# - strip +# Location of the strip executable. When specified, strip will be run on +# all shared libraries and executables as they are built. The pre-stripped +# artifacts will be put in lib.unstripped/ and exe.unstripped/. 
+template("gcc_toolchain") { + toolchain(target_name) { + assert(defined(invoker.ar), "gcc_toolchain() must specify a \"ar\" value") + assert(defined(invoker.cc), "gcc_toolchain() must specify a \"cc\" value") + assert(defined(invoker.cxx), "gcc_toolchain() must specify a \"cxx\" value") + assert(defined(invoker.ld), "gcc_toolchain() must specify a \"ld\" value") + + # This define changes when the toolchain changes, forcing a rebuild. + # Nothing should ever use this define. + if (defined(invoker.rebuild_define)) { + rebuild_string = "-D" + invoker.rebuild_define + " " + } else { + rebuild_string = "" + } + + # GN's syntax can't handle more than one scope dereference at once, like + # "invoker.toolchain_args.foo", so make a temporary to hold the toolchain + # args so we can do "invoker_toolchain_args.foo". + assert(defined(invoker.toolchain_args), + "Toolchains must specify toolchain_args") + invoker_toolchain_args = invoker.toolchain_args + assert(defined(invoker_toolchain_args.current_cpu), + "toolchain_args must specify a current_cpu") + assert(defined(invoker_toolchain_args.current_os), + "toolchain_args must specify a current_os") + + # When invoking this toolchain not as the default one, these args will be + # passed to the build. They are ignored when this is the default toolchain. + toolchain_args = { + # Populate toolchain args from the invoker. + forward_variables_from(invoker_toolchain_args, "*") + + # The host toolchain value computed by the default toolchain's setup + # needs to be passed through unchanged to all secondary toolchains to + # ensure that it's always the same, regardless of the values that may be + # set on those toolchains. + host_toolchain = host_toolchain + + if (!defined(invoker_toolchain_args.v8_current_cpu)) { + v8_current_cpu = invoker_toolchain_args.current_cpu + } + } + + # When the invoker has explicitly overridden use_goma or cc_wrapper in the + # toolchain args, use those values, otherwise default to the global one. 
+ # This works because the only reasonable override that toolchains might + # supply for these values are to force-disable them. + if (defined(toolchain_args.use_goma)) { + toolchain_uses_goma = toolchain_args.use_goma + } else { + toolchain_uses_goma = use_goma + } + if (defined(toolchain_args.cc_wrapper)) { + toolchain_cc_wrapper = toolchain_args.cc_wrapper + } else { + toolchain_cc_wrapper = cc_wrapper + } + assert(!(toolchain_cc_wrapper != "" && toolchain_uses_goma), + "Goma and cc_wrapper can't be used together.") + + # When the invoker has explicitly overridden use_goma or cc_wrapper in the + # toolchain args, use those values, otherwise default to the global one. + # This works because the only reasonable override that toolchains might + # supply for these values are to force-disable them. + # But if has_gomacc_path is set in simple chrome build, we assumes that + # *chromeos* compiler wrapper find gomacc from GOMACC_PATH envvar. + # Note: In this case, we use gomacc for host toolchain compiling. + if (toolchain_uses_goma && + (!has_gomacc_path || invoker_toolchain_args.current_os != "chromeos")) { + goma_path = "$goma_dir/gomacc" + compiler_prefix = "${goma_path} " + } else { + compiler_prefix = "${toolchain_cc_wrapper} " + } + + # Create a distinct variable for "asm", since coverage runs pass a bunch of + # flags to clang/clang++ that are nonsensical on assembler runs. + asm_prefix = compiler_prefix + + # A specific toolchain may wish to avoid coverage instrumentation, so we + # allow the global "use_clang_coverage" arg to be overridden. + if (defined(toolchain_args.use_clang_coverage)) { + toolchain_use_clang_coverage = toolchain_args.use_clang_coverage + } else { + toolchain_use_clang_coverage = use_clang_coverage + } + + # For a coverage build, we use the wrapper script globally so that it can + # remove coverage cflags from files that should not have them. 
+ if (toolchain_use_clang_coverage) { + # "coverage_instrumentation_input_file" is set in args.gn, but it can be + # overridden by a toolchain config. + if (defined(toolchain_args.coverage_instrumentation_input_file)) { + toolchain_coverage_instrumentation_input_file = + toolchain_args.coverage_instrumentation_input_file + } else { + toolchain_coverage_instrumentation_input_file = + coverage_instrumentation_input_file + } + + _coverage_wrapper = + rebase_path("//build/toolchain/clang_code_coverage_wrapper.py", + root_build_dir) + + # The wrapper needs to know what OS we target because it uses that to + # select a list of files that should not be instrumented. + _coverage_wrapper = _coverage_wrapper + " --target-os=" + target_os + + # We want to instrument everything if there is no input file set. + # If there is a file we need to give it to the wrapper script so it can + # instrument only those files. + if (toolchain_coverage_instrumentation_input_file != "") { + _coverage_wrapper = + _coverage_wrapper + " --files-to-instrument=" + + rebase_path(toolchain_coverage_instrumentation_input_file, + root_build_dir) + } + compiler_prefix = "${_coverage_wrapper} " + compiler_prefix + } + + cc = compiler_prefix + invoker.cc + cxx = compiler_prefix + invoker.cxx + asm = asm_prefix + invoker.cc + ar = invoker.ar + ld = invoker.ld + if (defined(invoker.readelf)) { + readelf = invoker.readelf + } else { + readelf = "readelf" + } + if (defined(invoker.nm)) { + nm = invoker.nm + } else { + nm = "nm" + } + + if (defined(invoker.shlib_extension)) { + default_shlib_extension = invoker.shlib_extension + } else { + default_shlib_extension = shlib_extension + } + + if (defined(invoker.default_shlib_subdir)) { + default_shlib_subdir = invoker.default_shlib_subdir + } else { + default_shlib_subdir = "" + } + + if (defined(invoker.executable_extension)) { + default_executable_extension = invoker.executable_extension + } else { + default_executable_extension = "" + } + + # Bring these into 
our scope for string interpolation with default values. + if (defined(invoker.libs_section_prefix)) { + libs_section_prefix = invoker.libs_section_prefix + } else { + libs_section_prefix = "" + } + + if (defined(invoker.libs_section_postfix)) { + libs_section_postfix = invoker.libs_section_postfix + } else { + libs_section_postfix = "" + } + + if (defined(invoker.solink_libs_section_prefix)) { + solink_libs_section_prefix = invoker.solink_libs_section_prefix + } else { + solink_libs_section_prefix = "" + } + + if (defined(invoker.solink_libs_section_postfix)) { + solink_libs_section_postfix = invoker.solink_libs_section_postfix + } else { + solink_libs_section_postfix = "" + } + + if (defined(invoker.extra_cflags) && invoker.extra_cflags != "") { + extra_cflags = " " + invoker.extra_cflags + } else { + extra_cflags = "" + } + + if (defined(invoker.extra_cppflags) && invoker.extra_cppflags != "") { + extra_cppflags = " " + invoker.extra_cppflags + } else { + extra_cppflags = "" + } + + if (defined(invoker.extra_cxxflags) && invoker.extra_cxxflags != "") { + extra_cxxflags = " " + invoker.extra_cxxflags + } else { + extra_cxxflags = "" + } + + if (defined(invoker.extra_asmflags) && invoker.extra_asmflags != "") { + extra_asmflags = " " + invoker.extra_asmflags + } else { + extra_asmflags = "" + } + + if (defined(invoker.extra_ldflags) && invoker.extra_ldflags != "") { + extra_ldflags = " " + invoker.extra_ldflags + } else { + extra_ldflags = "" + } + + enable_linker_map = defined(invoker.enable_linker_map) && + invoker.enable_linker_map && generate_linker_map + + # These library switches can apply to all tools below. + lib_switch = "-l" + lib_dir_switch = "-L" + + # Object files go in this directory. 
+ object_subdir = "{{target_out_dir}}/{{label_name}}" + + tool("cc") { + depfile = "{{output}}.d" + precompiled_header_type = "gcc" + command = "$cc -MMD -MF $depfile ${rebuild_string}{{defines}} {{include_dirs}} {{cflags}} {{cflags_c}}${extra_cppflags}${extra_cflags} -c {{source}} -o {{output}}" + depsformat = "gcc" + description = "CC {{output}}" + outputs = [ + "$object_subdir/{{source_name_part}}.o", + ] + } + + tool("cxx") { + depfile = "{{output}}.d" + precompiled_header_type = "gcc" + command = "$cxx -MMD -MF $depfile ${rebuild_string}{{defines}} {{include_dirs}} {{cflags}} {{cflags_cc}}${extra_cppflags}${extra_cxxflags} -c {{source}} -o {{output}}" + depsformat = "gcc" + description = "CXX {{output}}" + outputs = [ + "$object_subdir/{{source_name_part}}.o", + ] + } + + tool("asm") { + # For GCC we can just use the C compiler to compile assembly. + depfile = "{{output}}.d" + command = "$asm -MMD -MF $depfile ${rebuild_string}{{defines}} {{include_dirs}} {{asmflags}}${extra_asmflags} -c {{source}} -o {{output}}" + depsformat = "gcc" + description = "ASM {{output}}" + outputs = [ + "$object_subdir/{{source_name_part}}.o", + ] + } + + tool("alink") { + if (current_os == "aix") { + # AIX does not support either -D (deterministic output) or response + # files. + command = "$ar -X64 {{arflags}} -r -c -s {{output}} {{inputs}}" + } else { + rspfile = "{{output}}.rsp" + rspfile_content = "{{inputs}}" + command = "\"$ar\" {{arflags}} -r -c -s -D {{output}} @\"$rspfile\"" + } + + # Remove the output file first so that ar doesn't try to modify the + # existing file. + if (host_os == "win") { + tool_wrapper_path = + rebase_path("//build/toolchain/win/tool_wrapper.py", root_build_dir) + command = "cmd /c $python_path $tool_wrapper_path delete-file {{output}} && $command" + } else { + command = "rm -f {{output}} && $command" + } + + # Almost all targets build with //build/config/compiler:thin_archive which + # adds -T to arflags. 
+ description = "AR {{output}}" + outputs = [ + "{{output_dir}}/{{target_output_name}}{{output_extension}}", + ] + + # Shared libraries go in the target out directory by default so we can + # generate different targets with the same name and not have them collide. + default_output_dir = "{{target_out_dir}}" + default_output_extension = ".a" + output_prefix = "lib" + } + + tool("solink") { + soname = "{{target_output_name}}{{output_extension}}" # e.g. "libfoo.so". + sofile = "{{output_dir}}/$soname" # Possibly including toolchain dir. + rspfile = sofile + ".rsp" + pool = "//build/toolchain:link_pool($default_toolchain)" + + if (defined(invoker.strip)) { + unstripped_sofile = "{{root_out_dir}}/lib.unstripped/$soname" + } else { + unstripped_sofile = sofile + } + + # These variables are not built into GN but are helpers that + # implement (1) linking to produce a .so, (2) extracting the symbols + # from that file (3) if the extracted list differs from the existing + # .TOC file, overwrite it, otherwise, don't change it. + tocfile = sofile + ".TOC" + + link_command = "$ld -shared {{ldflags}}${extra_ldflags} -o \"$unstripped_sofile\" -Wl,-soname=\"$soname\" @\"$rspfile\"" + + # Generate a map file to be used for binary size analysis. + # Map file adds ~10% to the link time on a z620. + # With target_os="android", libchrome.so.map.gz is ~20MB. + map_switch = "" + if (enable_linker_map && is_official_build) { + map_file = "$unstripped_sofile.map.gz" + map_switch = " --map-file \"$map_file\"" + } + + assert(defined(readelf), "to solink you must have a readelf") + assert(defined(nm), "to solink you must have an nm") + strip_switch = "" + if (defined(invoker.strip)) { + strip_switch = "--strip=${invoker.strip} " + } + + # This needs a Python script to avoid using a complex shell command + # requiring sh control structures, pipelines, and POSIX utilities. + # The host might not have a POSIX shell and utilities (e.g. Windows). 
+ solink_wrapper = + rebase_path("//build/toolchain/gcc_solink_wrapper.py", root_build_dir) + command = "$python_path \"$solink_wrapper\" --readelf=\"$readelf\" --nm=\"$nm\" $strip_switch--sofile=\"$unstripped_sofile\" --tocfile=\"$tocfile\"$map_switch --output=\"$sofile\" -- $link_command" + + if (target_cpu == "mipsel" && is_component_build && is_android) { + rspfile_content = "-Wl,--start-group -Wl,--whole-archive {{inputs}} {{solibs}} -Wl,--no-whole-archive $solink_libs_section_prefix {{libs}} $solink_libs_section_postfix -Wl,--end-group" + } else { + rspfile_content = "-Wl,--whole-archive {{inputs}} {{solibs}} -Wl,--no-whole-archive $solink_libs_section_prefix {{libs}} $solink_libs_section_postfix" + } + + description = "SOLINK $sofile" + + # Use this for {{output_extension}} expansions unless a target manually + # overrides it (in which case {{output_extension}} will be what the target + # specifies). + default_output_extension = default_shlib_extension + + default_output_dir = "{{root_out_dir}}${default_shlib_subdir}" + + output_prefix = "lib" + + # Since the above commands only updates the .TOC file when it changes, ask + # Ninja to check if the timestamp actually changed to know if downstream + # dependencies should be recompiled. + restat = true + + # Tell GN about the output files. It will link to the sofile but use the + # tocfile for dependency management. + outputs = [ + sofile, + tocfile, + ] + if (sofile != unstripped_sofile) { + outputs += [ unstripped_sofile ] + if (defined(invoker.use_unstripped_as_runtime_outputs) && + invoker.use_unstripped_as_runtime_outputs) { + runtime_outputs = [ unstripped_sofile ] + } + } + if (defined(map_file)) { + outputs += [ map_file ] + } + link_output = sofile + depend_output = tocfile + } + + tool("solink_module") { + soname = "{{target_output_name}}{{output_extension}}" # e.g. "libfoo.so". 
+ sofile = "{{output_dir}}/$soname" + rspfile = sofile + ".rsp" + pool = "//build/toolchain:link_pool($default_toolchain)" + + if (defined(invoker.strip)) { + unstripped_sofile = "{{root_out_dir}}/lib.unstripped/$soname" + } else { + unstripped_sofile = sofile + } + + command = "$ld -shared {{ldflags}}${extra_ldflags} -o \"$unstripped_sofile\" -Wl,-soname=\"$soname\" @\"$rspfile\"" + + if (defined(invoker.strip)) { + strip_command = "${invoker.strip} -o \"$sofile\" \"$unstripped_sofile\"" + command += " && " + strip_command + } + rspfile_content = "-Wl,--whole-archive {{inputs}} {{solibs}} -Wl,--no-whole-archive $solink_libs_section_prefix {{libs}} $solink_libs_section_postfix" + + description = "SOLINK_MODULE $sofile" + + # Use this for {{output_extension}} expansions unless a target manually + # overrides it (in which case {{output_extension}} will be what the target + # specifies). + if (defined(invoker.loadable_module_extension)) { + default_output_extension = invoker.loadable_module_extension + } else { + default_output_extension = default_shlib_extension + } + + default_output_dir = "{{root_out_dir}}${default_shlib_subdir}" + + output_prefix = "lib" + + outputs = [ + sofile, + ] + if (sofile != unstripped_sofile) { + outputs += [ unstripped_sofile ] + if (defined(invoker.use_unstripped_as_runtime_outputs) && + invoker.use_unstripped_as_runtime_outputs) { + runtime_outputs = [ unstripped_sofile ] + } + } + } + + tool("link") { + exename = "{{target_output_name}}{{output_extension}}" + outfile = "{{output_dir}}/$exename" + rspfile = "$outfile.rsp" + unstripped_outfile = outfile + pool = "//build/toolchain:link_pool($default_toolchain)" + + # Use this for {{output_extension}} expansions unless a target manually + # overrides it (in which case {{output_extension}} will be what the target + # specifies). 
+ default_output_extension = default_executable_extension + + default_output_dir = "{{root_out_dir}}" + + if (defined(invoker.strip)) { + unstripped_outfile = "{{root_out_dir}}/exe.unstripped/$exename" + } + + # Generate a map file to be used for binary size analysis. + # Map file adds ~10% to the link time on a z620. + # With target_os="android", libchrome.so.map.gz is ~20MB. + map_switch = "" + if (enable_linker_map && is_official_build) { + map_file = "$unstripped_outfile.map.gz" + map_switch = " --map-file \"$map_file\"" + } + + start_group_flag = "" + end_group_flag = "" + if (current_os != "aix") { + # the "--start-group .. --end-group" feature isn't available on the aix ld. + start_group_flag = "-Wl,--start-group" + end_group_flag = "-Wl,--end-group " + } + link_command = "$ld {{ldflags}}${extra_ldflags} -o \"$unstripped_outfile\" $start_group_flag @\"$rspfile\" {{solibs}} $end_group_flag $libs_section_prefix {{libs}} $libs_section_postfix" + + strip_switch = "" + + if (defined(invoker.strip)) { + strip_switch = " --strip=\"${invoker.strip}\" --unstripped-file=\"$unstripped_outfile\"" + } + + link_wrapper = + rebase_path("//build/toolchain/gcc_link_wrapper.py", root_build_dir) + command = "$python_path \"$link_wrapper\" --output=\"$outfile\"$strip_switch$map_switch -- $link_command" + description = "LINK $outfile" + rspfile_content = "{{inputs}}" + outputs = [ + outfile, + ] + if (outfile != unstripped_outfile) { + outputs += [ unstripped_outfile ] + if (defined(invoker.use_unstripped_as_runtime_outputs) && + invoker.use_unstripped_as_runtime_outputs) { + runtime_outputs = [ unstripped_outfile ] + } + } + if (defined(invoker.link_outputs)) { + outputs += invoker.link_outputs + } + if (defined(map_file)) { + outputs += [ map_file ] + } + } + + # These two are really entirely generic, but have to be repeated in + # each toolchain because GN doesn't allow a template to be used here. + # See //build/toolchain/toolchain.gni for details. 
+ tool("stamp") { + command = stamp_command + description = stamp_description + } + tool("copy") { + command = copy_command + description = copy_description + } + + tool("action") { + pool = "//build/toolchain:action_pool($default_toolchain)" + } + + forward_variables_from(invoker, [ "deps" ]) + } +} + +# This is a shorthand for gcc_toolchain instances based on the Chromium-built +# version of Clang. Only the toolchain_cpu and toolchain_os variables need to +# be specified by the invoker, and optionally toolprefix if it's a +# cross-compile case. Note that for a cross-compile case this toolchain +# requires a config to pass the appropriate -target option, or else it will +# actually just be doing a native compile. The invoker can optionally override +# use_gold too. +template("clang_toolchain") { + if (defined(invoker.toolprefix)) { + toolprefix = invoker.toolprefix + } else { + toolprefix = "" + } + + gcc_toolchain(target_name) { + prefix = rebase_path("$clang_base_path/bin", root_build_dir) + cc = "$prefix/clang" + cxx = "$prefix/clang++" + ld = cxx + readelf = "${toolprefix}readelf" + ar = "${prefix}/llvm-ar" + nm = "${toolprefix}nm" + + forward_variables_from(invoker, + [ + "strip", + "default_shlib_subdir", + "enable_linker_map", + "use_unstripped_as_runtime_outputs", + ]) + + toolchain_args = { + if (defined(invoker.toolchain_args)) { + forward_variables_from(invoker.toolchain_args, "*") + } + is_clang = true + } + } +} diff --git a/deps/v8/build/toolchain/get_concurrent_links.py b/deps/v8/build/toolchain/get_concurrent_links.py new file mode 100644 index 0000000000..e5121c77a9 --- /dev/null +++ b/deps/v8/build/toolchain/get_concurrent_links.py @@ -0,0 +1,86 @@ +# Copyright 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# This script computs the number of concurrent links we want to run in the build +# as a function of machine spec. 
It's based on GetDefaultConcurrentLinks in GYP. + +from __future__ import print_function + +import multiprocessing +import optparse +import os +import re +import subprocess +import sys + +def _GetTotalMemoryInBytes(): + if sys.platform in ('win32', 'cygwin'): + import ctypes + + class MEMORYSTATUSEX(ctypes.Structure): + _fields_ = [ + ("dwLength", ctypes.c_ulong), + ("dwMemoryLoad", ctypes.c_ulong), + ("ullTotalPhys", ctypes.c_ulonglong), + ("ullAvailPhys", ctypes.c_ulonglong), + ("ullTotalPageFile", ctypes.c_ulonglong), + ("ullAvailPageFile", ctypes.c_ulonglong), + ("ullTotalVirtual", ctypes.c_ulonglong), + ("ullAvailVirtual", ctypes.c_ulonglong), + ("sullAvailExtendedVirtual", ctypes.c_ulonglong), + ] + + stat = MEMORYSTATUSEX(dwLength=ctypes.sizeof(MEMORYSTATUSEX)) + ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat)) + return stat.ullTotalPhys + elif sys.platform.startswith('linux'): + if os.path.exists("/proc/meminfo"): + with open("/proc/meminfo") as meminfo: + memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB') + for line in meminfo: + match = memtotal_re.match(line) + if not match: + continue + return float(match.group(1)) * 2**10 + elif sys.platform == 'darwin': + try: + return int(subprocess.check_output(['sysctl', '-n', 'hw.memsize'])) + except Exception: + return 0 + # TODO(scottmg): Implement this for other platforms. + return 0 + + +def _GetDefaultConcurrentLinks(mem_per_link_gb, reserve_mem_gb): + # Inherit the legacy environment variable for people that have set it in GYP. 
+ pool_size = int(os.getenv('GYP_LINK_CONCURRENCY', 0)) + if pool_size: + return pool_size + + mem_total_bytes = _GetTotalMemoryInBytes() + mem_total_bytes = max(0, mem_total_bytes - reserve_mem_gb * 2**30) + num_concurrent_links = int(max(1, mem_total_bytes / mem_per_link_gb / 2**30)) + hard_cap = max(1, int(os.getenv('GYP_LINK_CONCURRENCY_MAX', 2**32))) + + try: + cpu_cap = multiprocessing.cpu_count() + except: + cpu_cap = 1 + + return min(num_concurrent_links, hard_cap, cpu_cap) + + +def main(): + parser = optparse.OptionParser() + parser.add_option('--mem_per_link_gb', action="store", type="int", default=8) + parser.add_option('--reserve_mem_gb', action="store", type="int", default=0) + parser.disable_interspersed_args() + options, _ = parser.parse_args() + + print(_GetDefaultConcurrentLinks(options.mem_per_link_gb, + options.reserve_mem_gb)) + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/deps/v8/build/toolchain/get_cpu_count.py b/deps/v8/build/toolchain/get_cpu_count.py new file mode 100644 index 0000000000..765c7c78f6 --- /dev/null +++ b/deps/v8/build/toolchain/get_cpu_count.py @@ -0,0 +1,23 @@ +# Copyright 2018 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# This script shows cpu count to specify capacity of action pool. + +from __future__ import print_function + +import multiprocessing +import sys + +def main(): + try: + cpu_count = multiprocessing.cpu_count() + except: + cpu_count = 1 + + print(cpu_count) + return 0 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/deps/v8/build/toolchain/goma.gni b/deps/v8/build/toolchain/goma.gni new file mode 100644 index 0000000000..2fbf572389 --- /dev/null +++ b/deps/v8/build/toolchain/goma.gni @@ -0,0 +1,29 @@ +# Copyright (c) 2013 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
# Defines the configuration of Goma.

declare_args() {
  # Set to true to enable distributed compilation using Goma.
  use_goma = false

  # This flag is for ChromeOS compiler wrapper.
  # By passing gomacc path via GOMACC_PATH environment variable, ChromeOS'
  # compiler wrapper invokes gomacc inside it.
  has_gomacc_path = false

  # Set the default value based on the platform.
  # NOTE: getenv() is evaluated at GN-generation time, so changing GOMA_DIR
  # or HOME after generation requires re-running `gn gen` to take effect.
  if (host_os == "win") {
    # Absolute directory containing the gomacc.exe binary.
    goma_dir = "C:\src\goma\goma-win64"
  } else {
    # A GOMA_DIR environment variable, when set, takes precedence over the
    # $HOME/goma default below.
    if (getenv("GOMA_DIR") != "") {
      # Absolute directory containing the gomacc binary.
      goma_dir = getenv("GOMA_DIR")
    } else {
      # Absolute directory containing the gomacc binary.
      goma_dir = getenv("HOME") + "/goma"
    }
  }
}
import("//build/config/sysroot.gni")
import("//build/toolchain/gcc_toolchain.gni")

clang_toolchain("clang_ppc64") {
  enable_linker_map = true
  toolchain_args = {
    current_cpu = "ppc64"
    current_os = "linux"
  }
}

clang_toolchain("clang_arm") {
  toolprefix = "arm-linux-gnueabihf-"
  toolchain_args = {
    current_cpu = "arm"
    current_os = "linux"
  }
}

clang_toolchain("clang_arm64") {
  toolprefix = "aarch64-linux-gnu-"
  toolchain_args = {
    current_cpu = "arm64"
    current_os = "linux"
  }
}

gcc_toolchain("arm64") {
  toolprefix = "aarch64-linux-gnu-"

  cc = "${toolprefix}gcc"
  cxx = "${toolprefix}g++"

  ar = "${toolprefix}ar"
  ld = cxx
  readelf = "${toolprefix}readelf"
  nm = "${toolprefix}nm"

  toolchain_args = {
    current_cpu = "arm64"
    current_os = "linux"
    is_clang = false
  }
}

gcc_toolchain("arm") {
  toolprefix = "arm-linux-gnueabihf-"

  cc = "${toolprefix}gcc"
  cxx = "${toolprefix}g++"

  ar = "${toolprefix}ar"
  ld = cxx
  readelf = "${toolprefix}readelf"
  nm = "${toolprefix}nm"

  toolchain_args = {
    current_cpu = "arm"
    current_os = "linux"
    is_clang = false
  }
}

clang_toolchain("clang_x86") {
  # Output linker map files for binary size analysis.
  enable_linker_map = true

  toolchain_args = {
    current_cpu = "x86"
    current_os = "linux"
  }
}

clang_toolchain("clang_x86_v8_arm") {
  toolchain_args = {
    current_cpu = "x86"
    v8_current_cpu = "arm"
    current_os = "linux"
  }
}

clang_toolchain("clang_x86_v8_mipsel") {
  toolchain_args = {
    current_cpu = "x86"
    v8_current_cpu = "mipsel"
    current_os = "linux"
  }
}

clang_toolchain("clang_x86_v8_mips") {
  toolchain_args = {
    current_cpu = "x86"
    v8_current_cpu = "mips"
    current_os = "linux"
  }
}

gcc_toolchain("x86") {
  cc = "gcc"
  cxx = "g++"

  readelf = "readelf"
  nm = "nm"
  ar = "ar"
  ld = cxx

  # Output linker map files for binary size analysis.
  enable_linker_map = true

  toolchain_args = {
    current_cpu = "x86"
    current_os = "linux"
    is_clang = false
  }
}

clang_toolchain("clang_x64") {
  # Output linker map files for binary size analysis.
  enable_linker_map = true

  toolchain_args = {
    current_cpu = "x64"
    current_os = "linux"
  }
}

clang_toolchain("clang_x64_v8_arm64") {
  toolchain_args = {
    current_cpu = "x64"
    v8_current_cpu = "arm64"
    current_os = "linux"
  }
}

clang_toolchain("clang_x64_v8_mips64el") {
  toolchain_args = {
    current_cpu = "x64"
    v8_current_cpu = "mips64el"
    current_os = "linux"
  }
}

clang_toolchain("clang_x64_v8_mips64") {
  toolchain_args = {
    current_cpu = "x64"
    v8_current_cpu = "mips64"
    current_os = "linux"
  }
}

gcc_toolchain("x64") {
  cc = "gcc"
  cxx = "g++"

  readelf = "readelf"
  nm = "nm"
  ar = "ar"
  ld = cxx

  # Output linker map files for binary size analysis.
  enable_linker_map = true

  toolchain_args = {
    current_cpu = "x64"
    current_os = "linux"
    is_clang = false
  }
}

clang_toolchain("clang_mipsel") {
  toolchain_args = {
    current_cpu = "mipsel"
    current_os = "linux"
  }
}

clang_toolchain("clang_mips64el") {
  toolchain_args = {
    current_cpu = "mips64el"
    current_os = "linux"
  }
}

gcc_toolchain("mipsel") {
  toolprefix = "mipsel-linux-gnu-"

  cc = "${toolprefix}gcc"

  # Fix: this value previously had a stray leading space inside the string
  # (" ${toolprefix}g++"), which corrupted every compile/link command line
  # built from $cxx.
  cxx = "${toolprefix}g++"
  ar = "${toolprefix}ar"
  ld = cxx
  readelf = "${toolprefix}readelf"
  nm = "${toolprefix}nm"

  toolchain_args = {
    cc_wrapper = ""
    current_cpu = "mipsel"
    current_os = "linux"
    is_clang = false
    use_goma = false
  }
}

gcc_toolchain("mips64el") {
  toolprefix = "mips64el-linux-gnuabi64-"

  cc = "${toolprefix}gcc"
  cxx = "${toolprefix}g++"
  ar = "${toolprefix}ar"
  ld = cxx
  readelf = "${toolprefix}readelf"
  nm = "${toolprefix}nm"

  toolchain_args = {
    cc_wrapper = ""
    current_cpu = "mips64el"
    current_os = "linux"
    is_clang = false
    use_goma = false
  }
}
+clang_toolchain("clang_s390x") { + toolchain_args = { + current_cpu = "s390x" + current_os = "linux" + is_clang = true + } +} + +gcc_toolchain("s390x") { + cc = "gcc" + cxx = "g++" + + readelf = "readelf" + nm = "nm" + ar = "ar" + ld = cxx + + toolchain_args = { + current_cpu = "s390x" + current_os = "linux" + is_clang = false + } +} + +gcc_toolchain("ppc64") { + cc = "gcc" + cxx = "g++" + + readelf = "readelf" + nm = "nm" + ar = "ar" + ld = cxx + + toolchain_args = { + current_cpu = "ppc64" + current_os = "linux" + is_clang = false + } +} + +gcc_toolchain("mips") { + toolprefix = "mips-linux-gnu-" + + cc = "${toolprefix}gcc" + cxx = "${toolprefix}g++" + + readelf = "${toolprefix}readelf" + nm = "${toolprefix}nm" + ar = "${toolprefix}ar" + ld = cxx + + toolchain_args = { + current_cpu = "mips" + current_os = "linux" + is_clang = false + } +} + +gcc_toolchain("mips64") { + toolprefix = "mips64-linux-gnuabi64-" + + cc = "${toolprefix}gcc" + cxx = "${toolprefix}g++" + + readelf = "${toolprefix}readelf" + nm = "${toolprefix}nm" + ar = "${toolprefix}ar" + ld = cxx + + toolchain_args = { + current_cpu = "mips64" + current_os = "linux" + is_clang = false + } +} diff --git a/deps/v8/build/toolchain/linux/unbundle/BUILD.gn b/deps/v8/build/toolchain/linux/unbundle/BUILD.gn new file mode 100644 index 0000000000..4719d540b0 --- /dev/null +++ b/deps/v8/build/toolchain/linux/unbundle/BUILD.gn @@ -0,0 +1,41 @@ +# Copyright 2017 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +import("//build/toolchain/gcc_toolchain.gni") + +gcc_toolchain("default") { + cc = getenv("CC") + cxx = getenv("CXX") + ar = getenv("AR") + nm = getenv("NM") + ld = cxx + + extra_cflags = getenv("CFLAGS") + extra_cppflags = getenv("CPPFLAGS") + extra_cxxflags = getenv("CXXFLAGS") + extra_ldflags = getenv("LDFLAGS") + + toolchain_args = { + current_cpu = current_cpu + current_os = current_os + } +} + +gcc_toolchain("host") { + cc = getenv("BUILD_CC") + cxx = getenv("BUILD_CXX") + ar = getenv("BUILD_AR") + nm = getenv("BUILD_NM") + ld = cxx + + extra_cflags = getenv("BUILD_CFLAGS") + extra_cppflags = getenv("BUILD_CPPFLAGS") + extra_cxxflags = getenv("BUILD_CXXFLAGS") + extra_ldflags = getenv("BUILD_LDFLAGS") + + toolchain_args = { + current_cpu = current_cpu + current_os = current_os + } +} diff --git a/deps/v8/build/toolchain/linux/unbundle/README.md b/deps/v8/build/toolchain/linux/unbundle/README.md new file mode 100644 index 0000000000..17b93c9fde --- /dev/null +++ b/deps/v8/build/toolchain/linux/unbundle/README.md @@ -0,0 +1,41 @@ +# Overview + +This directory contains files that make it possible for Linux +distributions to build Chromium using system toolchain. + +For more info on established way such builds are configured, +please read the following: + + - https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html + +Why do distros want CFLAGS, LDFLAGS, etc? Please read the following +for some examples. This is not an exhaustive list. 
+ + - https://wiki.debian.org/Hardening + - https://wiki.ubuntu.com/DistCompilerFlags + - https://fedoraproject.org/wiki/Changes/Harden_All_Packages + - https://fedoraproject.org/wiki/Changes/Modernise_GCC_Flags + - https://fedoraproject.org/wiki/Packaging:Guidelines#Compiler_flags + - https://blog.flameeyes.eu/2010/09/are-we-done-with-ldflags/ + - https://blog.flameeyes.eu/2008/08/flags-and-flags/ + +# Usage + +Add the following to GN args: + +``` +custom_toolchain="//build/toolchain/linux/unbundle:default" +host_toolchain="//build/toolchain/linux/unbundle:default" +``` + +See [more docs on GN](https://gn.googlesource.com/gn/+/master/docs/quick_start.md). + +To cross-compile (not fully tested), add the following: + +``` +host_toolchain="//build/toolchain/linux/unbundle:host" +v8_snapshot_toolchain="//build/toolchain/linux/unbundle:host" +``` + +Note: when cross-compiling for a 32-bit target, a matching 32-bit toolchain +may be needed. diff --git a/deps/v8/build/toolchain/mac/BUILD.gn b/deps/v8/build/toolchain/mac/BUILD.gn new file mode 100644 index 0000000000..22316c9f1e --- /dev/null +++ b/deps/v8/build/toolchain/mac/BUILD.gn @@ -0,0 +1,572 @@ +# Copyright (c) 2013 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# TODO(brettw) Use "gcc_toolchain.gni" like the Linux toolchains. This requires +# some enhancements since the commands on Mac are slightly different than on +# Linux. 
+ +import("//build/config/clang/clang.gni") +import("//build/config/coverage/coverage.gni") +import("../goma.gni") +if (is_ios) { + import("//build/config/ios/ios_sdk.gni") +} +import("//build/config/mac/mac_sdk.gni") +import("//build/config/mac/symbols.gni") + +assert(host_os == "mac") + +import("//build/toolchain/cc_wrapper.gni") +import("//build/toolchain/concurrent_links.gni") +import("//build/toolchain/goma.gni") +import("//build/toolchain/toolchain.gni") + +declare_args() { + # Reduce the number of tasks using the copy_bundle_data and compile_xcassets + # tools as they can cause lots of I/O contention when invoking ninja with a + # large number of parallel jobs (e.g. when using distributed build like goma). + bundle_pool_depth = -1 +} + +if (current_toolchain == default_toolchain) { + pool("bundle_pool") { + if (bundle_pool_depth == -1) { + depth = concurrent_links + } else { + depth = bundle_pool_depth + } + } +} + +# When implementing tools using Python scripts, a TOOL_VERSION=N env +# variable is placed in front of the command. The N should be incremented +# whenever the script is changed, so that the build system rebuilds all +# edges that utilize the script. Ideally this should be changed to use +# proper input-dirty checking, but that could be expensive. Instead, use a +# script to get the tool scripts' modification time to use as the version. +# This won't cause a re-generation of GN files when the tool script changes +# but it will cause edges to be marked as dirty if the ninja files are +# regenerated. See https://crbug.com/619083 for details. A proper fix +# would be to have inputs to tools (https://crbug.com/621119). +tool_versions = + exec_script("get_tool_mtime.py", + rebase_path([ + "//build/toolchain/mac/compile_xcassets.py", + "//build/toolchain/mac/filter_libtool.py", + "//build/toolchain/mac/linker_driver.py", + ], + root_build_dir), + "trim scope") + +# Shared toolchain definition. 
Invocations should set current_os to set the +# build args in this definition. +template("mac_toolchain") { + toolchain(target_name) { + if (use_system_xcode) { + env_wrapper = "" + } else { + env_wrapper = "export DEVELOPER_DIR=$hermetic_xcode_path; " + } + + # When invoking this toolchain not as the default one, these args will be + # passed to the build. They are ignored when this is the default toolchain. + assert(defined(invoker.toolchain_args), + "Toolchains must declare toolchain_args") + toolchain_args = { + # Populate toolchain args from the invoker. + forward_variables_from(invoker.toolchain_args, "*") + + # The host toolchain value computed by the default toolchain's setup + # needs to be passed through unchanged to all secondary toolchains to + # ensure that it's always the same, regardless of the values that may be + # set on those toolchains. + host_toolchain = host_toolchain + } + + # Supports building with the version of clang shipped with Xcode when + # targeting iOS by not respecting clang_base_path. + if (toolchain_args.current_os == "ios" && use_xcode_clang) { + prefix = "" + } else { + prefix = rebase_path("$clang_base_path/bin/", root_build_dir) + } + + _cc = "${prefix}clang" + _cxx = "${prefix}clang++" + + # When the invoker has explicitly overridden use_goma or cc_wrapper in the + # toolchain args, use those values, otherwise default to the global one. + # This works because the only reasonable override that toolchains might + # supply for these values are to force-disable them. + if (defined(toolchain_args.use_goma)) { + toolchain_uses_goma = toolchain_args.use_goma + } else { + toolchain_uses_goma = use_goma + } + if (defined(toolchain_args.cc_wrapper)) { + toolchain_cc_wrapper = toolchain_args.cc_wrapper + } else { + toolchain_cc_wrapper = cc_wrapper + } + + # Compute the compiler prefix. 
+ if (toolchain_uses_goma) { + assert(toolchain_cc_wrapper == "", + "Goma and cc_wrapper can't be used together.") + compiler_prefix = "$goma_dir/gomacc " + } else if (toolchain_cc_wrapper != "") { + compiler_prefix = toolchain_cc_wrapper + " " + } else { + compiler_prefix = "" + } + + cc = compiler_prefix + _cc + cxx = compiler_prefix + _cxx + ld = _cxx + + if (defined(toolchain_args.coverage_instrumentation_input_file)) { + toolchain_coverage_instrumentation_input_file = + toolchain_args.coverage_instrumentation_input_file + } else { + toolchain_coverage_instrumentation_input_file = + coverage_instrumentation_input_file + } + _use_clang_coverage_wrapper = + toolchain_coverage_instrumentation_input_file != "" + if (_use_clang_coverage_wrapper) { + _coverage_wrapper = + rebase_path("//build/toolchain/clang_code_coverage_wrapper.py", + root_build_dir) + " --files-to-instrument=" + + rebase_path(toolchain_coverage_instrumentation_input_file, + root_build_dir) + cc = _coverage_wrapper + " ${cc}" + cxx = _coverage_wrapper + " ${cxx}" + } + + linker_driver = + "TOOL_VERSION=${tool_versions.linker_driver} " + + rebase_path("//build/toolchain/mac/linker_driver.py", root_build_dir) + + # On iOS, the final applications are assembled using lipo (to support fat + # builds). The correct flags are passed to the linker_driver.py script + # directly during the lipo call. + if (toolchain_args.current_os != "ios") { + _enable_dsyms = enable_dsyms + _save_unstripped_output = save_unstripped_output + } else { + _enable_dsyms = false + _save_unstripped_output = false + } + + # Make these apply to all tools below. + lib_switch = "-l" + lib_dir_switch = "-L" + + # Object files go in this directory. Use label_name instead of + # target_output_name since labels will generally have no spaces and will be + # unique in the directory. + object_subdir = "{{target_out_dir}}/{{label_name}}" + + # If dSYMs are enabled, this flag will be added to the link tools. 
+ if (_enable_dsyms) { + dsym_switch = " -Wcrl,dsym,{{root_out_dir}} " + if (is_mac) { + dsym_switch += "-Wcrl,dsymutilpath," + + rebase_path("//tools/clang/dsymutil/bin/dsymutil", + root_build_dir) + " " + } + + dsym_output_dir = + "{{root_out_dir}}/{{target_output_name}}{{output_extension}}.dSYM" + dsym_output = [ + "$dsym_output_dir/", + "$dsym_output_dir/Contents/Info.plist", + "$dsym_output_dir/Contents/Resources/DWARF/" + + "{{target_output_name}}{{output_extension}}", + ] + } else { + dsym_switch = "" + } + + if (_save_unstripped_output) { + _unstripped_output = "{{root_out_dir}}/{{target_output_name}}{{output_extension}}.unstripped" + } + + tool("cc") { + depfile = "{{output}}.d" + precompiled_header_type = "gcc" + command = "$cc -MMD -MF $depfile {{defines}} {{include_dirs}} {{cflags}} {{cflags_c}} -c {{source}} -o {{output}}" + depsformat = "gcc" + description = "CC {{output}}" + outputs = [ + "$object_subdir/{{source_name_part}}.o", + ] + } + + tool("cxx") { + depfile = "{{output}}.d" + precompiled_header_type = "gcc" + command = "$cxx -MMD -MF $depfile {{defines}} {{include_dirs}} {{cflags}} {{cflags_cc}} -c {{source}} -o {{output}}" + depsformat = "gcc" + description = "CXX {{output}}" + outputs = [ + "$object_subdir/{{source_name_part}}.o", + ] + } + + tool("asm") { + # For GCC we can just use the C compiler to compile assembly. 
+ depfile = "{{output}}.d" + command = "$cc -MMD -MF $depfile {{defines}} {{include_dirs}} {{asmflags}} -c {{source}} -o {{output}}" + depsformat = "gcc" + description = "ASM {{output}}" + outputs = [ + "$object_subdir/{{source_name_part}}.o", + ] + } + + tool("objc") { + depfile = "{{output}}.d" + precompiled_header_type = "gcc" + command = "$cc -MMD -MF $depfile {{defines}} {{include_dirs}} {{cflags}} {{cflags_objc}} -c {{source}} -o {{output}}" + depsformat = "gcc" + description = "OBJC {{output}}" + outputs = [ + "$object_subdir/{{source_name_part}}.o", + ] + } + + tool("objcxx") { + depfile = "{{output}}.d" + precompiled_header_type = "gcc" + command = "$cxx -MMD -MF $depfile {{defines}} {{include_dirs}} {{cflags}} {{cflags_objcc}} -c {{source}} -o {{output}}" + depsformat = "gcc" + description = "OBJCXX {{output}}" + outputs = [ + "$object_subdir/{{source_name_part}}.o", + ] + } + + tool("alink") { + script = + rebase_path("//build/toolchain/mac/filter_libtool.py", root_build_dir) + + # Note about -filelist: Apple's linker reads the file list file and + # interprets each newline-separated chunk of text as a file name. It + # doesn't do the things one would expect from the shell like unescaping + # or handling quotes. In contrast, when Ninja finds a file name with + # spaces, it single-quotes them in $inputs_newline as it would normally + # do for command-line arguments. Thus any source names with spaces, or + # label names with spaces (which GN bases the output paths on) will be + # corrupted by this process. Don't use spaces for source files or labels. 
+ rspfile = "{{output}}.rsp" + rspfile_content = "{{inputs_newline}}" + command = "$env_wrapper rm -f {{output}} && TOOL_VERSION=${tool_versions.filter_libtool} python $script libtool -static {{arflags}} -o {{output}} -filelist $rspfile" + description = "LIBTOOL-STATIC {{output}}" + outputs = [ + "{{output_dir}}/{{target_output_name}}{{output_extension}}", + ] + default_output_dir = "{{target_out_dir}}" + default_output_extension = ".a" + output_prefix = "lib" + } + + tool("solink") { + dylib = "{{output_dir}}/{{target_output_name}}{{output_extension}}" # eg "./libfoo.dylib" + rspfile = dylib + ".rsp" + pool = "//build/toolchain:link_pool($default_toolchain)" + + # These variables are not built into GN but are helpers that implement + # (1) linking to produce a .dylib, (2) extracting the symbols from that + # file to a temporary file, (3) if the temporary file has differences from + # the existing .TOC file, overwrite it, otherwise, don't change it. + # + # As a special case, if the library reexports symbols from other dynamic + # libraries, we always update the .TOC and skip the temporary file and + # diffing steps, since that library always needs to be re-linked. + tocname = dylib + ".TOC" + temporary_tocname = dylib + ".tmp" + + does_reexport_command = "[ ! -e \"$dylib\" -o ! -e \"$tocname\" ] || otool -l \"$dylib\" | grep -q LC_REEXPORT_DYLIB" + + link_command = "$linker_driver $ld -shared " + if (is_component_build) { + link_command += " -Wl,-install_name,@rpath/\"{{target_output_name}}{{output_extension}}\" " + } + link_command += dsym_switch + link_command += "{{ldflags}} -o \"$dylib\" -Wl,-filelist,\"$rspfile\" {{libs}} {{solibs}}" + + replace_command = "if ! 
cmp -s \"$temporary_tocname\" \"$tocname\"; then mv \"$temporary_tocname\" \"$tocname\"" + extract_toc_command = "{ otool -l \"$dylib\" | grep LC_ID_DYLIB -A 5; nm -gP \"$dylib\" | cut -f1-2 -d' ' | grep -v U\$\$; true; }" + + command = "$env_wrapper if $does_reexport_command ; then $link_command && $extract_toc_command > \"$tocname\"; else $link_command && $extract_toc_command > \"$temporary_tocname\" && $replace_command ; fi; fi" + + rspfile_content = "{{inputs_newline}}" + + description = "SOLINK {{output}}" + + # Use this for {{output_extension}} expansions unless a target manually + # overrides it (in which case {{output_extension}} will be what the target + # specifies). + default_output_dir = "{{root_out_dir}}" + default_output_extension = ".dylib" + + output_prefix = "lib" + + # Since the above commands only updates the .TOC file when it changes, ask + # Ninja to check if the timestamp actually changed to know if downstream + # dependencies should be recompiled. + restat = true + + # Tell GN about the output files. It will link to the dylib but use the + # tocname for dependency management. 
+ outputs = [ + dylib, + tocname, + ] + link_output = dylib + depend_output = tocname + + if (_enable_dsyms) { + outputs += dsym_output + } + if (_save_unstripped_output) { + outputs += [ _unstripped_output ] + } + } + + tool("solink_module") { + sofile = "{{output_dir}}/{{target_output_name}}{{output_extension}}" # eg "./libfoo.so" + rspfile = sofile + ".rsp" + pool = "//build/toolchain:link_pool($default_toolchain)" + + link_command = "$env_wrapper $linker_driver $ld -bundle {{ldflags}} -o \"$sofile\" -Wl,-filelist,\"$rspfile\"" + if (is_component_build) { + link_command += " -Wl,-install_name,@rpath/{{target_output_name}}{{output_extension}}" + } + link_command += dsym_switch + link_command += " {{solibs}} {{libs}}" + command = link_command + + rspfile_content = "{{inputs_newline}}" + + description = "SOLINK_MODULE {{output}}" + + # Use this for {{output_extension}} expansions unless a target manually + # overrides it (in which case {{output_extension}} will be what the target + # specifies). + default_output_dir = "{{root_out_dir}}" + default_output_extension = ".so" + + outputs = [ + sofile, + ] + + if (_enable_dsyms) { + outputs += dsym_output + } + if (_save_unstripped_output) { + outputs += [ _unstripped_output ] + } + } + + tool("link") { + outfile = "{{output_dir}}/{{target_output_name}}{{output_extension}}" + rspfile = "$outfile.rsp" + pool = "//build/toolchain:link_pool($default_toolchain)" + + # Note about -filelist: Apple's linker reads the file list file and + # interprets each newline-separated chunk of text as a file name. It + # doesn't do the things one would expect from the shell like unescaping + # or handling quotes. In contrast, when Ninja finds a file name with + # spaces, it single-quotes them in $inputs_newline as it would normally + # do for command-line arguments. Thus any source names with spaces, or + # label names with spaces (which GN bases the output paths on) will be + # corrupted by this process. 
Don't use spaces for source files or labels. + command = "$env_wrapper $linker_driver $ld $dsym_switch {{ldflags}} -o \"$outfile\" -Wl,-filelist,\"$rspfile\" {{solibs}} {{libs}}" + description = "LINK $outfile" + rspfile_content = "{{inputs_newline}}" + outputs = [ + outfile, + ] + + if (_enable_dsyms) { + outputs += dsym_output + } + if (_save_unstripped_output) { + outputs += [ _unstripped_output ] + } + + default_output_dir = "{{root_out_dir}}" + } + + # These two are really entirely generic, but have to be repeated in + # each toolchain because GN doesn't allow a template to be used here. + # See //build/toolchain/toolchain.gni for details. + tool("stamp") { + command = stamp_command + description = stamp_description + } + tool("copy") { + command = copy_command + description = copy_description + } + + tool("copy_bundle_data") { + # copy_command use hardlink if possible but this does not work with + # directories. If source is a directory, instead use "pax" to create + # the same tree structure using hardlinks to individual files (this + # preserve symbolic links too) as recommended in the replies to the + # question at http://serverfault.com/q/209888/43689 ("cp -al" isn't + # available on macOS). + # + # According to the man page for pax, the commands to use to clone + # olddir to newdir using pax are the following: + # + # $ mkdir newdir + # $ cd olddir + # $ pax -rwl . ../newdir + # + # The _copydir command does exactly that but use an absolute path + # constructed using shell variable $OLDPWD (automatically set when + # cd is used) as computing the relative path is a bit complex and + # using pwd would requires a sub-shell to be created. + _copydir = "mkdir -p {{output}} && cd {{source}} && " + + "pax -rwl . 
\"\$OLDPWD\"/{{output}}" + command = "rm -rf {{output}} && if [[ -d {{source}} ]]; then " + + _copydir + "; else " + copy_command + "; fi" + + description = "COPY_BUNDLE_DATA {{source}} {{output}}" + pool = ":bundle_pool($default_toolchain)" + } + tool("compile_xcassets") { + _tool = rebase_path("//build/toolchain/mac/compile_xcassets.py", + root_build_dir) + if (is_ios) { + _sdk_name = ios_sdk_name + _min_deployment_target = ios_deployment_target + _compress_pngs = "" + } else { + _sdk_name = mac_sdk_name + _min_deployment_target = mac_deployment_target + _compress_pngs = " -c " + } + command = + "$env_wrapper rm -f \"{{output}}\" && " + + "TOOL_VERSION=${tool_versions.compile_xcassets} " + + "python $_tool$_compress_pngs -p \"$_sdk_name\" " + + "-t \"$_min_deployment_target\" -T \"{{bundle_product_type}}\" " + + "-P \"{{bundle_partial_info_plist}}\" -o {{output}} {{inputs}}" + + description = "COMPILE_XCASSETS {{output}}" + pool = ":bundle_pool($default_toolchain)" + } + + tool("action") { + pool = "//build/toolchain:action_pool($default_toolchain)" + } + } +} + +mac_toolchain("clang_arm") { + toolchain_args = { + current_cpu = "arm" + current_os = "mac" + } +} + +mac_toolchain("clang_x64") { + toolchain_args = { + current_cpu = "x64" + current_os = "mac" + + if (target_os == "ios") { + # TODO(crbug.com/753445): the use_sanitizer_coverage arg is currently + # not supported by the Chromium mac_clang_x64 toolchain on iOS + # distribution. 
+ use_sanitizer_coverage = false + } + } +} + +mac_toolchain("clang_x86") { + toolchain_args = { + current_cpu = "x86" + current_os = "mac" + } +} + +mac_toolchain("clang_x86_v8_arm") { + toolchain_args = { + current_cpu = "x86" + current_os = "mac" + + if (defined(v8_current_cpu)) { + v8_current_cpu = "arm" + } + } +} + +mac_toolchain("clang_x86_v8_mipsel") { + toolchain_args = { + current_cpu = "x86" + current_os = "mac" + + if (defined(v8_current_cpu)) { + v8_current_cpu = "mipsel" + } + } +} + +mac_toolchain("clang_x64_v8_arm64") { + toolchain_args = { + current_cpu = "x64" + current_os = "mac" + + if (defined(v8_current_cpu)) { + v8_current_cpu = "arm64" + } + } +} + +mac_toolchain("clang_x64_v8_mips64el") { + toolchain_args = { + current_cpu = "x64" + current_os = "mac" + + if (defined(v8_current_cpu)) { + v8_current_cpu = "mips64el" + } + } +} + +if (is_ios) { + mac_toolchain("ios_clang_arm") { + toolchain_args = { + current_cpu = "arm" + current_os = "ios" + } + } + + mac_toolchain("ios_clang_arm64") { + toolchain_args = { + current_cpu = "arm64" + current_os = "ios" + } + } + + mac_toolchain("ios_clang_x86") { + toolchain_args = { + current_cpu = "x86" + current_os = "ios" + } + } + + mac_toolchain("ios_clang_x64") { + toolchain_args = { + current_cpu = "x64" + current_os = "ios" + } + } +} diff --git a/deps/v8/build/toolchain/mac/OWNERS b/deps/v8/build/toolchain/mac/OWNERS new file mode 100644 index 0000000000..0ed2e154d8 --- /dev/null +++ b/deps/v8/build/toolchain/mac/OWNERS @@ -0,0 +1,2 @@ +rsesek@chromium.org +sdefresne@chromium.org diff --git a/deps/v8/build/toolchain/mac/compile_xcassets.py b/deps/v8/build/toolchain/mac/compile_xcassets.py new file mode 100644 index 0000000000..c1f4680b7c --- /dev/null +++ b/deps/v8/build/toolchain/mac/compile_xcassets.py @@ -0,0 +1,251 @@ +# Copyright 2016 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +import argparse +import os +import re +import subprocess +import sys +import tempfile + +"""Wrapper around actool to compile assets catalog. + +The script compile_xcassets.py is a wrapper around actool to compile +assets catalog to Assets.car that turns warning into errors. It also +fixes some quirks of actool to make it work from ninja (mostly that +actool seems to require absolute path but gn generates command-line +with relative paths). + +The wrapper filter out any message that is not a section header and +not a warning or error message, and fails if filtered output is not +empty. This should to treat all warnings as error until actool has +an option to fail with non-zero error code when there are warnings. +""" + +# Pattern matching a section header in the output of actool. +SECTION_HEADER = re.compile('^/\\* ([^ ]*) \\*/$') + +# Name of the section containing informational messages that can be ignored. +NOTICE_SECTION = 'com.apple.actool.compilation-results' + +# Regular expressions matching spurious messages from actool that should be +# ignored (as they are bogus). Generally a bug should be filed with Apple +# when adding a pattern here. +SPURIOUS_PATTERNS = map(re.compile, [ + # crbug.com/770634, likely a bug in Xcode 9.1 beta, remove once build + # requires a version of Xcode with a fix. + r'\[\]\[ipad\]\[76x76\]\[\]\[\]\[1x\]\[\]\[\]: notice: \(null\)', + + # crbug.com/770634, likely a bug in Xcode 9.2 beta, remove once build + # requires a version of Xcode with a fix. + r'\[\]\[ipad\]\[76x76\]\[\]\[\]\[1x\]\[\]\[\]: notice: 76x76@1x app icons' + ' only apply to iPad apps targeting releases of iOS prior to 10.0.', +]) + +# Map special type of asset catalog to the corresponding command-line +# parameter that need to be passed to actool. 
+ACTOOL_FLAG_FOR_ASSET_TYPE = {
+  '.appiconset': '--app-icon',
+  '.launchimage': '--launch-image',
+}
+
+
+def IsSpuriousMessage(line):
+  """Returns whether line contains a spurious message that should be ignored."""
+  for pattern in SPURIOUS_PATTERNS:
+    match = pattern.search(line)
+    if match is not None:
+      return True
+  return False
+
+
+def FilterCompilerOutput(compiler_output, relative_paths):
+  """Filters actool compilation output.
+
+  The compiler output is composed of multiple sections for each different
+  level of output (error, warning, notices, ...). Each section starts with
+  the section name on a single line, followed by all the messages from the
+  section.
+
+  The function filters any lines that are not in com.apple.actool.errors or
+  com.apple.actool.document.warnings sections (as spurious messages come
+  before any section of the output).
+
+  See crbug.com/730054, crbug.com/739163 and crbug.com/770634 for some example
+  messages that pollute the output of actool and cause flaky builds.
+
+  Args:
+    compiler_output: string containing the output generated by the
+      compiler (contains both stdout and stderr)
+    relative_paths: mapping from absolute to relative paths used to
+      convert paths in the warning and error messages (unknown paths
+      will be left unaltered)
+
+  Returns:
+    The filtered output of the compiler. If the compilation was a
+    success, then the output will be empty, otherwise it will use
+    relative path and omit any irrelevant output.
+ """ + + filtered_output = [] + current_section = None + data_in_section = False + for line in compiler_output.splitlines(): + match = SECTION_HEADER.search(line) + if match is not None: + data_in_section = False + current_section = match.group(1) + continue + if current_section and current_section != NOTICE_SECTION: + if IsSpuriousMessage(line): + continue + absolute_path = line.split(':')[0] + relative_path = relative_paths.get(absolute_path, absolute_path) + if absolute_path != relative_path: + line = relative_path + line[len(absolute_path):] + if not data_in_section: + data_in_section = True + filtered_output.append('/* %s */\n' % current_section) + filtered_output.append(line + '\n') + + return ''.join(filtered_output) + + +def CompileAssetCatalog(output, platform, product_type, min_deployment_target, + inputs, compress_pngs, partial_info_plist): + """Compile the .xcassets bundles to an asset catalog using actool. + + Args: + output: absolute path to the containing bundle + platform: the targeted platform + product_type: the bundle type + min_deployment_target: minimum deployment target + inputs: list of absolute paths to .xcassets bundles + compress_pngs: whether to enable compression of pngs + partial_info_plist: path to partial Info.plist to generate + """ + command = [ + 'xcrun', 'actool', '--output-format=human-readable-text', + '--notices', '--warnings', '--errors', '--platform', platform, + '--minimum-deployment-target', min_deployment_target, + ] + + if compress_pngs: + command.extend(['--compress-pngs']) + + if product_type != '': + command.extend(['--product-type', product_type]) + + if platform == 'macosx': + command.extend(['--target-device', 'mac']) + else: + command.extend(['--target-device', 'iphone', '--target-device', 'ipad']) + + # Scan the input directories for the presence of asset catalog types that + # require special treatment, and if so, add them to the actool command-line. 
+ for relative_path in inputs: + + if not os.path.isdir(relative_path): + continue + + for file_or_dir_name in os.listdir(relative_path): + if not os.path.isdir(os.path.join(relative_path, file_or_dir_name)): + continue + + asset_name, asset_type = os.path.splitext(file_or_dir_name) + if asset_type not in ACTOOL_FLAG_FOR_ASSET_TYPE: + continue + + command.extend([ACTOOL_FLAG_FOR_ASSET_TYPE[asset_type], asset_name]) + + # Always ask actool to generate a partial Info.plist file. If not path + # has been given by the caller, use a temporary file name. + temporary_file = None + if not partial_info_plist: + temporary_file = tempfile.NamedTemporaryFile(suffix='.plist') + partial_info_plist = temporary_file.name + + command.extend(['--output-partial-info-plist', partial_info_plist]) + + # Dictionary used to convert absolute paths back to their relative form + # in the output of actool. + relative_paths = {} + + # actool crashes if paths are relative, so convert input and output paths + # to absolute paths, and record the relative paths to fix them back when + # filtering the output. + absolute_output = os.path.abspath(output) + relative_paths[output] = absolute_output + relative_paths[os.path.dirname(output)] = os.path.dirname(absolute_output) + command.extend(['--compile', os.path.dirname(os.path.abspath(output))]) + + for relative_path in inputs: + absolute_path = os.path.abspath(relative_path) + relative_paths[absolute_path] = relative_path + command.append(absolute_path) + + try: + # Run actool and redirect stdout and stderr to the same pipe (as actool + # is confused about what should go to stderr/stdout). + process = subprocess.Popen( + command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + stdout, _ = process.communicate() + + # Filter the output to remove all garbarge and to fix the paths. 
+ stdout = FilterCompilerOutput(stdout, relative_paths) + + if process.returncode or stdout: + sys.stderr.write(stdout) + sys.exit(1) + + finally: + if temporary_file: + temporary_file.close() + + +def Main(): + parser = argparse.ArgumentParser( + description='compile assets catalog for a bundle') + parser.add_argument( + '--platform', '-p', required=True, + choices=('macosx', 'iphoneos', 'iphonesimulator'), + help='target platform for the compiled assets catalog') + parser.add_argument( + '--minimum-deployment-target', '-t', required=True, + help='minimum deployment target for the compiled assets catalog') + parser.add_argument( + '--output', '-o', required=True, + help='path to the compiled assets catalog') + parser.add_argument( + '--compress-pngs', '-c', action='store_true', default=False, + help='recompress PNGs while compiling assets catalog') + parser.add_argument( + '--product-type', '-T', + help='type of the containing bundle') + parser.add_argument( + '--partial-info-plist', '-P', + help='path to partial info plist to create') + parser.add_argument( + 'inputs', nargs='+', + help='path to input assets catalog sources') + args = parser.parse_args() + + if os.path.basename(args.output) != 'Assets.car': + sys.stderr.write( + 'output should be path to compiled asset catalog, not ' + 'to the containing bundle: %s\n' % (args.output,)) + sys.exit(1) + + CompileAssetCatalog( + args.output, + args.platform, + args.product_type, + args.minimum_deployment_target, + args.inputs, + args.compress_pngs, + args.partial_info_plist) + + +if __name__ == '__main__': + sys.exit(Main()) diff --git a/deps/v8/build/toolchain/mac/compile_xcassets_unittests.py b/deps/v8/build/toolchain/mac/compile_xcassets_unittests.py new file mode 100644 index 0000000000..7655df8c05 --- /dev/null +++ b/deps/v8/build/toolchain/mac/compile_xcassets_unittests.py @@ -0,0 +1,141 @@ +# Copyright 2017 The Chromium Authors. All rights reserved. 
+# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import unittest +import compile_xcassets + + +class TestFilterCompilerOutput(unittest.TestCase): + + relative_paths = { + '/Users/janedoe/chromium/src/Chromium.xcassets': + '../../Chromium.xcassets', + '/Users/janedoe/chromium/src/out/Default/Chromium.app/Assets.car': + 'Chromium.app/Assets.car', + } + + def testNoError(self): + self.assertEquals( + '', + compile_xcassets.FilterCompilerOutput( + '/* com.apple.actool.compilation-results */\n' + '/Users/janedoe/chromium/src/out/Default/Chromium.app/Assets.car\n', + self.relative_paths)) + + def testNoErrorRandomMessages(self): + self.assertEquals( + '', + compile_xcassets.FilterCompilerOutput( + '2017-07-04 04:59:19.460 ibtoold[23487:41214] CoreSimulator is att' + 'empting to unload a stale CoreSimulatorService job. Existing' + ' job (com.apple.CoreSimulator.CoreSimulatorService.179.1.E8tt' + 'yeDeVgWK) is from an older version and is being removed to pr' + 'event problems.\n' + '/* com.apple.actool.compilation-results */\n' + '/Users/janedoe/chromium/src/out/Default/Chromium.app/Assets.car\n', + self.relative_paths)) + + def testWarning(self): + self.assertEquals( + '/* com.apple.actool.document.warnings */\n' + '../../Chromium.xcassets:./image1.imageset/[universal][][][1x][][][][' + '][][]: warning: The file "image1.png" for the image set "image1"' + ' does not exist.\n', + compile_xcassets.FilterCompilerOutput( + '/* com.apple.actool.document.warnings */\n' + '/Users/janedoe/chromium/src/Chromium.xcassets:./image1.imageset/[' + 'universal][][][1x][][][][][][]: warning: The file "image1.png' + '" for the image set "image1" does not exist.\n' + '/* com.apple.actool.compilation-results */\n' + '/Users/janedoe/chromium/src/out/Default/Chromium.app/Assets.car\n', + self.relative_paths)) + + def testError(self): + self.assertEquals( + '/* com.apple.actool.errors */\n' + '../../Chromium.xcassets: error: The output 
directory "/Users/janedoe/' + 'chromium/src/out/Default/Chromium.app" does not exist.\n', + compile_xcassets.FilterCompilerOutput( + '/* com.apple.actool.errors */\n' + '/Users/janedoe/chromium/src/Chromium.xcassets: error: The output ' + 'directory "/Users/janedoe/chromium/src/out/Default/Chromium.a' + 'pp" does not exist.\n' + '/* com.apple.actool.compilation-results */\n', + self.relative_paths)) + + def testSpurious(self): + self.assertEquals( + '/* com.apple.actool.document.warnings */\n' + '../../Chromium.xcassets:./AppIcon.appiconset: warning: A 1024x1024 ap' + 'p store icon is required for iOS apps\n', + compile_xcassets.FilterCompilerOutput( + '/* com.apple.actool.document.warnings */\n' + '/Users/janedoe/chromium/src/Chromium.xcassets:./AppIcon.appiconse' + 't: warning: A 1024x1024 app store icon is required for iOS ap' + 'ps\n' + '/* com.apple.actool.document.notices */\n' + '/Users/janedoe/chromium/src/Chromium.xcassets:./AppIcon.appiconse' + 't/[][ipad][76x76][][][1x][][]: notice: (null)\n', + self.relative_paths)) + + def testComplexError(self): + self.assertEquals( + '/* com.apple.actool.errors */\n' + ': error: Failed to find a suitable device for the type SimDeviceType ' + ': com.apple.dt.Xcode.IBSimDeviceType.iPad-2x with runtime SimRunt' + 'ime : 10.3.1 (14E8301) - com.apple.CoreSimulator.SimRuntime.iOS-1' + '0-3\n' + ' Failure Reason: Failed to create SimDeviceSet at path /Users/jane' + 'doe/Library/Developer/Xcode/UserData/IB Support/Simulator Devices' + '. 
You\'ll want to check the logs in ~/Library/Logs/CoreSimulator ' + 'to see why creating the SimDeviceSet failed.\n' + ' Underlying Errors:\n' + ' Description: Failed to initialize simulator device set.\n' + ' Failure Reason: Failed to subscribe to notifications from Cor' + 'eSimulatorService.\n' + ' Underlying Errors:\n' + ' Description: Error returned in reply to notification requ' + 'est: Connection invalid\n' + ' Failure Reason: Software caused connection abort\n', + compile_xcassets.FilterCompilerOutput( + '2017-07-07 10:37:27.367 ibtoold[88538:12553239] CoreSimulator det' + 'ected Xcode.app relocation or CoreSimulatorService version ch' + 'ange. Framework path (/Applications/Xcode.app/Contents/Devel' + 'oper/Library/PrivateFrameworks/CoreSimulator.framework) and v' + 'ersion (375.21) does not match existing job path (/Library/De' + 'veloper/PrivateFrameworks/CoreSimulator.framework/Versions/A/' + 'XPCServices/com.apple.CoreSimulator.CoreSimulatorService.xpc)' + ' and version (459.13). Attempting to remove the stale servic' + 'e in order to add the expected version.\n' + '2017-07-07 10:37:27.625 ibtoold[88538:12553256] CoreSimulatorServ' + 'ice connection interrupted. Resubscribing to notifications.\n' + '2017-07-07 10:37:27.632 ibtoold[88538:12553264] CoreSimulatorServ' + 'ice connection became invalid. Simulator services will no lo' + 'nger be available.\n' + '2017-07-07 10:37:27.642 ibtoold[88538:12553274] CoreSimulatorServ' + 'ice connection became invalid. Simulator services will no lo' + 'nger be available.\n' + '/* com.apple.actool.errors */\n' + ': error: Failed to find a suitable device for the type SimDeviceT' + 'ype : com.apple.dt.Xcode.IBSimDeviceType.iPad-2x with runtime' + ' SimRuntime : 10.3.1 (14E8301) - com.apple.CoreSimulator.SimR' + 'untime.iOS-10-3\n' + ' Failure Reason: Failed to create SimDeviceSet at path /Users/' + 'janedoe/Library/Developer/Xcode/UserData/IB Support/Simulator' + ' Devices. 
You\'ll want to check the logs in ~/Library/Logs/Co' + 'reSimulator to see why creating the SimDeviceSet failed.\n' + ' Underlying Errors:\n' + ' Description: Failed to initialize simulator device set.\n' + ' Failure Reason: Failed to subscribe to notifications from' + ' CoreSimulatorService.\n' + ' Underlying Errors:\n' + ' Description: Error returned in reply to notification ' + 'request: Connection invalid\n' + ' Failure Reason: Software caused connection abort\n' + '/* com.apple.actool.compilation-results */\n', + self.relative_paths)) + + +if __name__ == '__main__': + unittest.main() diff --git a/deps/v8/build/toolchain/mac/filter_libtool.py b/deps/v8/build/toolchain/mac/filter_libtool.py new file mode 100644 index 0000000000..3b16151840 --- /dev/null +++ b/deps/v8/build/toolchain/mac/filter_libtool.py @@ -0,0 +1,54 @@ +# Copyright 2016 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import os +import re +import subprocess +import sys + +# This script executes libool and filters out logspam lines like: +# '/path/to/libtool: file: foo.o has no symbols' + +BLACKLIST_PATTERNS = map(re.compile, [ + r'^.*libtool: (?:for architecture: \S* )?file: .* has no symbols$', + r'^.*libtool: warning for library: .* the table of contents is empty ' + r'\(no object file members in the library define global symbols\)$', + r'^.*libtool: warning same member name \(\S*\) in output file used for ' + r'input files: \S* and: \S* \(due to use of basename, truncation, ' + r'blank padding or duplicate input files\)$', +]) + + +def IsBlacklistedLine(line): + """Returns whether the line should be filtered out.""" + for pattern in BLACKLIST_PATTERNS: + if pattern.match(line): + return True + return False + + +def Main(cmd_list): + env = os.environ.copy() + # Ref: + # http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c + # The problem with this flag is that it resets 
the file mtime on the file to + # epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone. + env['ZERO_AR_DATE'] = '1' + libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env) + _, err = libtoolout.communicate() + for line in err.splitlines(): + if not IsBlacklistedLine(line): + print >>sys.stderr, line + # Unconditionally touch the output .a file on the command line if present + # and the command succeeded. A bit hacky. + if not libtoolout.returncode: + for i in range(len(cmd_list) - 1): + if cmd_list[i] == '-o' and cmd_list[i+1].endswith('.a'): + os.utime(cmd_list[i+1], None) + break + return libtoolout.returncode + + +if __name__ == '__main__': + sys.exit(Main(sys.argv[1:])) diff --git a/deps/v8/build/toolchain/mac/get_tool_mtime.py b/deps/v8/build/toolchain/mac/get_tool_mtime.py new file mode 100644 index 0000000000..4106344b82 --- /dev/null +++ b/deps/v8/build/toolchain/mac/get_tool_mtime.py @@ -0,0 +1,17 @@ +# Copyright 2016 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import os +import sys + +# Usage: python get_tool_mtime.py path/to/file1.py path/to/file2.py +# +# Prints a GN scope with the variable name being the basename sans-extension +# and the value being the file modification time. A variable is emitted for +# each file argument on the command line. + +if __name__ == '__main__': + for f in sys.argv[1:]: + variable = os.path.splitext(os.path.basename(f))[0] + print '%s = %d' % (variable, os.path.getmtime(f)) diff --git a/deps/v8/build/toolchain/mac/linker_driver.py b/deps/v8/build/toolchain/mac/linker_driver.py new file mode 100755 index 0000000000..10bbda02ac --- /dev/null +++ b/deps/v8/build/toolchain/mac/linker_driver.py @@ -0,0 +1,256 @@ +#!/usr/bin/env python + +# Copyright 2016 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +import os +import os.path +import shutil +import subprocess +import sys + +DSYMUTIL_INVOKE = ['xcrun', 'dsymutil'] + +# The linker_driver.py is responsible for forwarding a linker invocation to +# the compiler driver, while processing special arguments itself. +# +# Usage: linker_driver.py clang++ main.o -L. -llib -o prog -Wcrl,dsym,out +# +# On Mac, the logical step of linking is handled by three discrete tools to +# perform the image link, debug info link, and strip. The linker_driver.py +# combines these three steps into a single tool. +# +# The command passed to the linker_driver.py should be the compiler driver +# invocation for the linker. It is first invoked unaltered (except for the +# removal of the special driver arguments, described below). Then the driver +# performs additional actions, based on these arguments: +# +# -Wcrl,dsym,<dsym_path_prefix> +# After invoking the linker, this will run `dsymutil` on the linker's +# output, producing a dSYM bundle, stored at dsym_path_prefix. As an +# example, if the linker driver were invoked with: +# "... -o out/gn/obj/foo/libbar.dylib ... -Wcrl,dsym,out/gn ..." +# The resulting dSYM would be out/gn/libbar.dylib.dSYM/. +# +# -Wcrl,dsymutilpath,<dsymutil_path> +# Sets the path to the dsymutil to run with -Wcrl,dsym, in which case +# `xcrun` is not used to invoke it. +# +# -Wcrl,unstripped,<unstripped_path_prefix> +# After invoking the linker, and before strip, this will save a copy of +# the unstripped linker output in the directory unstripped_path_prefix. +# +# -Wcrl,strip,<strip_arguments> +# After invoking the linker, and optionally dsymutil, this will run +# the strip command on the linker's output. strip_arguments are +# comma-separated arguments to be passed to the strip command. + +def Main(args): + """Main function for the linker driver. Separates out the arguments for + the main compiler driver and the linker driver, then invokes all the + required tools. 
+ + Args: + args: list of string, Arguments to the script. + """ + + if len(args) < 2: + raise RuntimeError("Usage: linker_driver.py [linker-invocation]") + + for i in xrange(len(args)): + if args[i] != '--developer_dir': + continue + os.environ['DEVELOPER_DIR'] = args[i + 1] + del args[i:i+2] + break + + # Collect arguments to the linker driver (this script) and remove them from + # the arguments being passed to the compiler driver. + linker_driver_actions = {} + compiler_driver_args = [] + for arg in args[1:]: + if arg.startswith(_LINKER_DRIVER_ARG_PREFIX): + # Convert driver actions into a map of name => lambda to invoke. + driver_action = ProcessLinkerDriverArg(arg) + assert driver_action[0] not in linker_driver_actions + linker_driver_actions[driver_action[0]] = driver_action[1] + else: + compiler_driver_args.append(arg) + + linker_driver_outputs = [_FindLinkerOutput(compiler_driver_args)] + + try: + # Run the linker by invoking the compiler driver. + subprocess.check_call(compiler_driver_args) + + # Run the linker driver actions, in the order specified by the actions list. + for action in _LINKER_DRIVER_ACTIONS: + name = action[0] + if name in linker_driver_actions: + linker_driver_outputs += linker_driver_actions[name](args) + except: + # If a linker driver action failed, remove all the outputs to make the + # build step atomic. + map(_RemovePath, linker_driver_outputs) + + # Re-report the original failure. + raise + + +def ProcessLinkerDriverArg(arg): + """Processes a linker driver argument and returns a tuple containing the + name and unary lambda to invoke for that linker driver action. + + Args: + arg: string, The linker driver argument. + + Returns: + A 2-tuple: + 0: The driver action name, as in _LINKER_DRIVER_ACTIONS. + 1: An 1-ary lambda that takes the full list of arguments passed to + Main(). The lambda should call the linker driver action that + corresponds to the argument and return a list of outputs from the + action. 
+ """ + if not arg.startswith(_LINKER_DRIVER_ARG_PREFIX): + raise ValueError('%s is not a linker driver argument' % (arg,)) + + sub_arg = arg[len(_LINKER_DRIVER_ARG_PREFIX):] + + for driver_action in _LINKER_DRIVER_ACTIONS: + (name, action) = driver_action + if sub_arg.startswith(name): + return (name, + lambda full_args: action(sub_arg[len(name):], full_args)) + + raise ValueError('Unknown linker driver argument: %s' % (arg,)) + + +def RunDsymUtil(dsym_path_prefix, full_args): + """Linker driver action for -Wcrl,dsym,<dsym-path-prefix>. Invokes dsymutil + on the linker's output and produces a dsym file at |dsym_file| path. + + Args: + dsym_path_prefix: string, The path at which the dsymutil output should be + located. + full_args: list of string, Full argument list for the linker driver. + + Returns: + list of string, Build step outputs. + """ + if not len(dsym_path_prefix): + raise ValueError('Unspecified dSYM output file') + + linker_out = _FindLinkerOutput(full_args) + base = os.path.basename(linker_out) + dsym_out = os.path.join(dsym_path_prefix, base + '.dSYM') + + # Remove old dSYMs before invoking dsymutil. + _RemovePath(dsym_out) + subprocess.check_call(DSYMUTIL_INVOKE + ['-o', dsym_out, linker_out]) + return [dsym_out] + + +def SetDsymutilPath(dsymutil_path, full_args): + """Linker driver action for -Wcrl,dsymutilpath,<dsymutil_path>. + + Sets the invocation command for dsymutil, which allows the caller to specify + an alternate dsymutil. This action is always processed before the RunDsymUtil + action. + + Args: + dsymutil_path: string, The path to the dsymutil binary to run + full_args: list of string, Full argument list for the linker driver. + + Returns: + No output - this step is run purely for its side-effect. + """ + global DSYMUTIL_INVOKE + DSYMUTIL_INVOKE = [dsymutil_path] + return [] + + +def RunSaveUnstripped(unstripped_path_prefix, full_args): + """Linker driver action for -Wcrl,unstripped,<unstripped_path_prefix>. 
Copies + the linker output to |unstripped_path_prefix| before stripping. + + Args: + unstripped_path_prefix: string, The path at which the unstripped output + should be located. + full_args: list of string, Full argument list for the linker driver. + + Returns: + list of string, Build step outputs. + """ + if not len(unstripped_path_prefix): + raise ValueError('Unspecified unstripped output file') + + linker_out = _FindLinkerOutput(full_args) + base = os.path.basename(linker_out) + unstripped_out = os.path.join(unstripped_path_prefix, base + '.unstripped') + + shutil.copyfile(linker_out, unstripped_out) + return [unstripped_out] + + +def RunStrip(strip_args_string, full_args): + """Linker driver action for -Wcrl,strip,<strip_arguments>. + + Args: + strip_args_string: string, Comma-separated arguments for `strip`. + full_args: list of string, Full arguments for the linker driver. + + Returns: + list of string, Build step outputs. + """ + strip_command = ['xcrun', 'strip'] + if len(strip_args_string) > 0: + strip_command += strip_args_string.split(',') + strip_command.append(_FindLinkerOutput(full_args)) + subprocess.check_call(strip_command) + return [] + + +def _FindLinkerOutput(full_args): + """Finds the output of the linker by looking for the output flag in its + argument list. As this is a required linker argument, raises an error if it + cannot be found. + """ + # The linker_driver.py script may be used to wrap either the compiler linker + # (uses -o to configure the output) or lipo (uses -output to configure the + # output). Since wrapping the compiler linker is the most likely possibility + # use try/except and fallback to checking for -output if -o is not found. 
+ try: + output_flag_index = full_args.index('-o') + except ValueError: + output_flag_index = full_args.index('-output') + return full_args[output_flag_index + 1] + + +def _RemovePath(path): + """Removes the file or directory at |path| if it exists.""" + if os.path.exists(path): + if os.path.isdir(path): + shutil.rmtree(path) + else: + os.unlink(path) + + +_LINKER_DRIVER_ARG_PREFIX = '-Wcrl,' + +"""List of linker driver actions. The sort order of this list affects the +order in which the actions are invoked. The first item in the tuple is the +argument's -Wcrl,<sub_argument> and the second is the function to invoke. +""" +_LINKER_DRIVER_ACTIONS = [ + ('dsymutilpath,', SetDsymutilPath), + ('dsym,', RunDsymUtil), + ('unstripped,', RunSaveUnstripped), + ('strip,', RunStrip), +] + + +if __name__ == '__main__': + Main(sys.argv) + sys.exit(0) diff --git a/deps/v8/build/toolchain/nacl/BUILD.gn b/deps/v8/build/toolchain/nacl/BUILD.gn new file mode 100644 index 0000000000..85e284948c --- /dev/null +++ b/deps/v8/build/toolchain/nacl/BUILD.gn @@ -0,0 +1,266 @@ +# Copyright (c) 2014 The Native Client Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import("//build/config/nacl/config.gni") +import("//build/config/sysroot.gni") +import("//build/toolchain/nacl_toolchain.gni") + +# Add the toolchain revision as a preprocessor define so that sources are +# rebuilt when a toolchain is updated. +# Idea we could use the toolchain deps feature, but currently that feature is +# bugged and does not trigger a rebuild. +# https://code.google.com/p/chromium/issues/detail?id=431880 +# Calls to get the toolchain revision are relatively slow, so do them all in a +# single batch to amortize python startup, etc. 
+revisions = exec_script("//native_client/build/get_toolchain_revision.py", + [ + "nacl_x86_glibc", + "nacl_arm_glibc", + "pnacl_newlib", + ], + "trim list lines") +nacl_x86_glibc_rev = revisions[0] +nacl_arm_glibc_rev = revisions[1] + +pnacl_newlib_rev = revisions[2] + +if (host_os == "win") { + toolsuffix = ".exe" +} else { + toolsuffix = "" +} + +# The PNaCl toolchain tools are all wrapper scripts rather than binary +# executables. On POSIX systems, nobody cares what kind of executable +# file you are. But on Windows, scripts (.bat files) cannot be run +# directly and need the Windows shell (cmd.exe) specified explicily. +if (host_os == "win") { + # NOTE! The //build/toolchain/gcc_*_wrapper.py scripts recognize + # this exact prefix string, so they must be updated if this string + # is changed in any way. + scriptprefix = "cmd /c call " + scriptsuffix = ".bat" +} else { + scriptprefix = "" + scriptsuffix = "" +} + +# When the compilers are run via goma or ccache rather than directly by +# GN/Ninja, the goma/ccache wrapper handles .bat files but gets confused +# by being given the scriptprefix. 
+if (host_os == "win" && !use_goma && cc_wrapper == "") { + compiler_scriptprefix = scriptprefix +} else { + compiler_scriptprefix = "" +} + +template("pnacl_toolchain") { + assert(defined(invoker.executable_extension), + "Must define executable_extension") + + nacl_toolchain(target_name) { + toolchain_package = "pnacl_newlib" + toolchain_revision = pnacl_newlib_rev + toolprefix = + rebase_path("${nacl_toolchain_dir}/${toolchain_package}/bin/pnacl-", + root_build_dir) + + cc = compiler_scriptprefix + toolprefix + "clang" + scriptsuffix + cxx = compiler_scriptprefix + toolprefix + "clang++" + scriptsuffix + ar = toolprefix + "ar" + scriptsuffix + readelf = scriptprefix + toolprefix + "readelf" + scriptsuffix + nm = scriptprefix + toolprefix + "nm" + scriptsuffix + if (defined(invoker.strip)) { + strip = scriptprefix + toolprefix + invoker.strip + scriptsuffix + } + forward_variables_from(invoker, + [ + "executable_extension", + "is_clang_analysis_supported", + ]) + + # Note this is not the usual "ld = cxx" because "ld" uses are + # never run via goma, so this needs scriptprefix. + ld = scriptprefix + toolprefix + "clang++" + scriptsuffix + + toolchain_args = { + is_clang = true + current_cpu = "pnacl" + use_lld = false + } + } +} + +pnacl_toolchain("newlib_pnacl") { + executable_extension = ".pexe" + + # The pnacl-finalize tool turns a .pexe.debug file into a .pexe file. + # It's very similar in purpose to the traditional "strip" utility: it + # turns what comes out of the linker into what you actually want to + # distribute and run. PNaCl doesn't have a "strip"-like utility that + # you ever actually want to use other than pnacl-finalize, so just + # make pnacl-finalize the strip tool rather than adding an additional + # step like "postlink" to run pnacl-finalize. 
+ strip = "finalize" +} + +pnacl_toolchain("newlib_pnacl_nonsfi") { + executable_extension = "" + strip = "strip" +} + +template("nacl_glibc_toolchain") { + toolchain_cpu = target_name + assert(defined(invoker.toolchain_tuple), "Must define toolchain_tuple") + assert(defined(invoker.toolchain_package), "Must define toolchain_package") + assert(defined(invoker.toolchain_revision), "Must define toolchain_revision") + forward_variables_from(invoker, + [ + "toolchain_package", + "toolchain_revision", + ]) + + toolprefix = rebase_path("${nacl_toolchain_dir}/${toolchain_package}/bin/" + + invoker.toolchain_tuple + "-", + root_build_dir) + + nacl_toolchain("glibc_" + toolchain_cpu) { + cc = toolprefix + "gcc" + toolsuffix + cxx = toolprefix + "g++" + toolsuffix + ar = toolprefix + "ar" + toolsuffix + ld = cxx + readelf = toolprefix + "readelf" + toolsuffix + nm = toolprefix + "nm" + toolsuffix + strip = toolprefix + "strip" + toolsuffix + + toolchain_args = { + current_cpu = toolchain_cpu + is_clang = false + is_nacl_glibc = true + use_lld = false + } + } +} + +nacl_glibc_toolchain("x86") { + toolchain_package = "nacl_x86_glibc" + toolchain_revision = nacl_x86_glibc_rev + + # Rely on the :compiler_cpu_abi config adding the -m32 flag here rather + # than using the i686-nacl binary directly. This is a because i686-nacl-gcc + # is a shell script wrapper around x86_64-nacl-gcc and goma has trouble with + # compiler executables that are shell scripts (so the i686 'compiler' is not + # currently in goma). 
+ toolchain_tuple = "x86_64-nacl" +} + +nacl_glibc_toolchain("x64") { + toolchain_package = "nacl_x86_glibc" + toolchain_revision = nacl_x86_glibc_rev + toolchain_tuple = "x86_64-nacl" +} + +nacl_glibc_toolchain("arm") { + toolchain_package = "nacl_arm_glibc" + toolchain_revision = nacl_arm_glibc_rev + toolchain_tuple = "arm-nacl" +} + +template("nacl_clang_toolchain") { + toolchain_cpu = target_name + assert(defined(invoker.toolchain_tuple), "Must define toolchain_tuple") + + toolchain_package = "pnacl_newlib" + toolchain_revision = pnacl_newlib_rev + toolprefix = rebase_path("${nacl_toolchain_dir}/${toolchain_package}/bin/" + + invoker.toolchain_tuple + "-", + root_build_dir) + + nacl_toolchain("clang_newlib_" + toolchain_cpu) { + cc = toolprefix + "clang" + toolsuffix + cxx = toolprefix + "clang++" + toolsuffix + ar = toolprefix + "ar" + toolsuffix + ld = cxx + readelf = toolprefix + "readelf" + toolsuffix + nm = toolprefix + "nm" + toolsuffix + strip = toolprefix + "strip" + toolsuffix + + toolchain_args = { + current_cpu = toolchain_cpu + is_clang = true + use_lld = false + } + } +} + +template("nacl_irt_toolchain") { + toolchain_cpu = target_name + assert(defined(invoker.toolchain_tuple), "Must define toolchain_tuple") + + toolchain_package = "pnacl_newlib" + toolchain_revision = pnacl_newlib_rev + toolprefix = rebase_path("${nacl_toolchain_dir}/${toolchain_package}/bin/" + + invoker.toolchain_tuple + "-", + root_build_dir) + + link_irt = rebase_path("//native_client/build/link_irt.py", root_build_dir) + + tls_edit_label = + "//native_client/src/tools/tls_edit:tls_edit($host_toolchain)" + host_toolchain_out_dir = + rebase_path(get_label_info(tls_edit_label, "root_out_dir"), + root_build_dir) + tls_edit = "${host_toolchain_out_dir}/tls_edit" + + nacl_toolchain("irt_" + toolchain_cpu) { + cc = toolprefix + "clang" + toolsuffix + cxx = toolprefix + "clang++" + toolsuffix + ar = toolprefix + "ar" + toolsuffix + readelf = toolprefix + "readelf" + toolsuffix + nm = 
toolprefix + "nm" + toolsuffix + strip = toolprefix + "strip" + toolsuffix + + # Some IRT implementations (notably, Chromium's) contain C++ code, + # so we need to link w/ the C++ linker. + ld = "${python_path} ${link_irt} --tls-edit=${tls_edit} --link-cmd=${cxx} --readelf-cmd=${readelf}" + + toolchain_args = { + current_cpu = toolchain_cpu + is_clang = true + use_lld = false + } + + # TODO(ncbray): depend on link script + deps = [ + tls_edit_label, + ] + } +} + +template("nacl_clang_toolchains") { + assert(defined(invoker.toolchain_tuple), "Must define toolchain_tuple") + nacl_clang_toolchain(target_name) { + toolchain_tuple = invoker.toolchain_tuple + } + nacl_irt_toolchain(target_name) { + toolchain_tuple = invoker.toolchain_tuple + } +} + +nacl_clang_toolchains("x86") { + # Rely on :compiler_cpu_abi adding -m32. See nacl_x86_glibc above. + toolchain_tuple = "x86_64-nacl" +} + +nacl_clang_toolchains("x64") { + toolchain_tuple = "x86_64-nacl" +} + +nacl_clang_toolchains("arm") { + toolchain_tuple = "arm-nacl" +} + +nacl_clang_toolchains("mipsel") { + toolchain_tuple = "mipsel-nacl" +} diff --git a/deps/v8/build/toolchain/nacl_toolchain.gni b/deps/v8/build/toolchain/nacl_toolchain.gni new file mode 100644 index 0000000000..11404e1e20 --- /dev/null +++ b/deps/v8/build/toolchain/nacl_toolchain.gni @@ -0,0 +1,59 @@ +# Copyright (c) 2014 The Native Client Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import("//build/config/nacl/config.gni") +import("//build/toolchain/gcc_toolchain.gni") + +# This template defines a NaCl toolchain. 
+# +# It requires the following variables specifying the executables to run: +# - cc +# - cxx +# - ar +# - ld + +template("nacl_toolchain") { + assert(defined(invoker.cc), "nacl_toolchain() must specify a \"cc\" value") + assert(defined(invoker.cxx), "nacl_toolchain() must specify a \"cxx\" value") + assert(defined(invoker.ar), "nacl_toolchain() must specify a \"ar\" value") + assert(defined(invoker.ld), "nacl_toolchain() must specify a \"ld\" value") + gcc_toolchain(target_name) { + if (defined(invoker.executable_extension)) { + executable_extension = invoker.executable_extension + } else { + executable_extension = ".nexe" + } + rebuild_define = "NACL_TC_REV=" + invoker.toolchain_revision + + forward_variables_from(invoker, + [ + "ar", + "cc", + "cxx", + "deps", + "ld", + "link_outputs", + "nm", + "readelf", + "strip", + ]) + + toolchain_args = { + # Use all values set on the invoker's toolchain_args. + forward_variables_from(invoker.toolchain_args, "*") + + current_os = "nacl" + + # We do not support component builds with the NaCl toolchains. + is_component_build = false + + # We do not support tcmalloc in the NaCl toolchains. + use_allocator = "none" + + # We do not support clang code coverage in the NaCl toolchains. + use_clang_coverage = false + coverage_instrumentation_input_file = "" + } + } +} diff --git a/deps/v8/build/toolchain/toolchain.gni b/deps/v8/build/toolchain/toolchain.gni new file mode 100644 index 0000000000..9a13d296fa --- /dev/null +++ b/deps/v8/build/toolchain/toolchain.gni @@ -0,0 +1,102 @@ +# Copyright 2015 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# Toolchain-related configuration that may be needed outside the context of the +# toolchain() rules themselves. 
+ +import("//build/config/chrome_build.gni") +import("//build_overrides/build.gni") + +declare_args() { + # If this is set to true, or if LLVM_FORCE_HEAD_REVISION is set to 1 + # in the environment, we use the revision in the llvm repo to determine + # the CLANG_REVISION to use, instead of the version hard-coded into + # //tools/clang/scripts/update.py. This should only be used in + # conjunction with setting LLVM_FORCE_HEAD_REVISION in the + # environment when `gclient runhooks` is run as well. + llvm_force_head_revision = false + + # Compile with Xcode version of clang instead of hermetic version shipped + # with the build. Used on iOS to ship official builds (as they are built + # with the version of clang shipped with Xcode). + use_xcode_clang = is_ios && is_official_build + + # Used for binary size analysis. + generate_linker_map = is_android && is_official_build +} + +if (generate_linker_map) { + assert( + is_official_build, + "Linker map files should only be generated when is_official_build = true") + assert(current_os == "android" || target_os == "linux", + "Linker map files should only be generated for Android and Linux") +} + +# The path to the hermetic install of Xcode. Only relevant when +# use_system_xcode = false. +hermetic_xcode_path = + rebase_path("//build/${target_os}_files/Xcode.app", "", root_build_dir) + +declare_args() { + if (is_clang) { + # Clang compiler version. Clang files are placed at version-dependent paths. + clang_version = "9.0.0" + } +} + +# Check target_os here instead of is_ios as this file is loaded for secondary +# toolchain (host toolchain in particular) but the argument is the same for +# all toolchains. +assert(!use_xcode_clang || target_os == "ios", + "Using Xcode's clang is only supported in iOS builds") + +# Extension for shared library files (including leading dot). 
+if (is_mac || is_ios) { + shlib_extension = ".dylib" +} else if (is_android && is_component_build) { + # By appending .cr, we prevent name collisions with libraries already + # loaded by the Android zygote. + shlib_extension = ".cr.so" +} else if (is_posix || is_fuchsia) { + shlib_extension = ".so" +} else if (is_win) { + shlib_extension = ".dll" +} else { + assert(false, "Platform not supported") +} + +# Prefix for shared library files. +if (is_posix || is_fuchsia) { + shlib_prefix = "lib" +} else { + shlib_prefix = "" +} + +# Directory for shared library files. +if (is_fuchsia) { + shlib_subdir = "/lib" +} else { + shlib_subdir = "" +} + +# While other "tool"s in a toolchain are specific to the target of that +# toolchain, the "stamp" and "copy" tools are really generic to the host; +# but each toolchain must define them separately. GN doesn't allow a +# template instantiation inside a toolchain definition, so some boilerplate +# has to be repeated in each toolchain to define these two tools. These +# four variables reduce the duplication in that boilerplate. +stamp_description = "STAMP {{output}}" +copy_description = "COPY {{source}} {{output}}" +if (host_os == "win") { + _tool_wrapper_path = + rebase_path("//build/toolchain/win/tool_wrapper.py", root_build_dir) + + stamp_command = "cmd /c type nul > \"{{output}}\"" + copy_command = + "$python_path $_tool_wrapper_path recursive-mirror {{source}} {{output}}" +} else { + stamp_command = "touch {{output}}" + copy_command = "ln -f {{source}} {{output}} 2>/dev/null || (rm -rf {{output}} && cp -af {{source}} {{output}})" +} diff --git a/deps/v8/build/toolchain/win/BUILD.gn b/deps/v8/build/toolchain/win/BUILD.gn new file mode 100644 index 0000000000..478a98774b --- /dev/null +++ b/deps/v8/build/toolchain/win/BUILD.gn @@ -0,0 +1,496 @@ +# Copyright (c) 2013 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+
+import("//build/config/clang/clang.gni")
+import("//build/config/compiler/compiler.gni")
+import("//build/config/sanitizers/sanitizers.gni")
+import("//build/config/win/visual_studio_version.gni")
+import("//build/toolchain/cc_wrapper.gni")
+import("//build/toolchain/goma.gni")
+import("//build/toolchain/toolchain.gni")
+
+# Should only be running on Windows.
+assert(is_win)
+
+# Setup the Visual Studio state.
+#
+# Its arguments are the VS path and the compiler wrapper tool. It will write
+# "environment.x86" and "environment.x64" to the build directory and return a
+# list to us.
+
+# This tool is used as a wrapper for various commands below.
+tool_wrapper_path = rebase_path("tool_wrapper.py", root_build_dir)
+
+if (use_goma) {
+  if (host_os == "win") {
+    goma_prefix = "$goma_dir/gomacc.exe "
+  } else {
+    goma_prefix = "$goma_dir/gomacc "
+  }
+  clang_prefix = goma_prefix
+} else {
+  goma_prefix = ""
+  if (cc_wrapper != "") {
+    clang_prefix = cc_wrapper + " "
+  } else {
+    clang_prefix = ""
+  }
+}
+
+# Copy the VS runtime DLL for the default toolchain to the root build directory
+# so things will run.
+if (current_toolchain == default_toolchain) {
+  if (is_debug) {
+    configuration_name = "Debug"
+  } else {
+    configuration_name = "Release"
+  }
+  exec_script("../../vs_toolchain.py",
+              [
+                "copy_dlls",
+                rebase_path(root_build_dir),
+                configuration_name,
+                target_cpu,
+              ])
+}
+
+if (host_os == "win") {
+  clang_cl = "clang-cl.exe"
+} else {
+  clang_cl = "clang-cl"
+}
+
+# Parameters:
+# environment: File name of environment file.
+#
+# You would also define a toolchain_args variable with at least these set:
+# current_cpu: current_cpu to pass as a build arg
+# current_os: current_os to pass as a build arg
+template("msvc_toolchain") {
+  toolchain(target_name) {
+    # When invoking this toolchain not as the default one, these args will be
+    # passed to the build. They are ignored when this is the default toolchain. 
+ assert(defined(invoker.toolchain_args)) + toolchain_args = { + if (defined(invoker.toolchain_args)) { + forward_variables_from(invoker.toolchain_args, "*") + } + + # This value needs to be passed through unchanged. + host_toolchain = host_toolchain + } + + # Make these apply to all tools below. + lib_switch = "" + lib_dir_switch = "/LIBPATH:" + + # Object files go in this directory. + object_subdir = "{{target_out_dir}}/{{label_name}}" + + env = invoker.environment + + cl = invoker.cl + + if (use_lld) { + if (host_os == "win") { + lld_link = "lld-link.exe" + } else { + lld_link = "lld-link" + } + prefix = rebase_path("$clang_base_path/bin", root_build_dir) + + # lld-link includes a replacement for lib.exe that can produce thin + # archives and understands bitcode (for lto builds). + lib = "$prefix/$lld_link /lib" + link = "$prefix/$lld_link" + if (host_os != "win") { + # See comment adding --rsp-quoting to $cl above for more information. + link = "$link --rsp-quoting=posix" + } + } else { + lib = "lib.exe" + link = "link.exe" + } + + # If possible, pass system includes as flags to the compiler. When that's + # not possible, load a full environment file (containing %INCLUDE% and + # %PATH%) -- e.g. 32-bit MSVS builds require %PATH% to be set and just + # passing in a list of include directories isn't enough. + if (defined(invoker.sys_include_flags)) { + env_wrapper = "" + sys_include_flags = "${invoker.sys_include_flags} " # Note trailing space. + } else { + # clang-cl doesn't need this env hoop, so omit it there. + assert((defined(toolchain_args.is_clang) && !toolchain_args.is_clang) || + !is_clang) + env_wrapper = "ninja -t msvc -e $env -- " # Note trailing space. + sys_include_flags = "" + } + + # ninja does not have -t msvc other than windows, and lld doesn't depend on + # mt.exe in PATH on non-Windows, so it's not needed there anyways. 
+ if (defined(invoker.sys_lib_flags)) { + linker_wrapper = "" + sys_lib_flags = "${invoker.sys_lib_flags} " # Note trailing space + } else if (use_lld) { + # Invoke ninja as wrapper instead of tool wrapper, because python + # invocation requires higher cpu usage compared to ninja invocation, and + # the python wrapper is only needed to work around link.exe problems. + # TODO(thakis): Remove wrapper once lld-link can merge manifests without + # relying on mt.exe being in %PATH% on Windows, https://crbug.com/872740 + linker_wrapper = "ninja -t msvc -e $env -- " # Note trailing space. + sys_lib_flags = "" + } else { + linker_wrapper = + "$python_path $tool_wrapper_path link-wrapper $env False " # Note trailing space. + sys_lib_flags = "" + } + + tool("cc") { + precompiled_header_type = "msvc" + pdbname = "{{target_out_dir}}/{{label_name}}_c.pdb" + + # Label names may have spaces in them so the pdbname must be quoted. The + # source and output don't need to be quoted because GN knows they're a + # full file name and will quote automatically when necessary. + depsformat = "msvc" + description = "CC {{output}}" + outputs = [ + "$object_subdir/{{source_name_part}}.obj", + ] + + command = "$env_wrapper$cl /nologo /showIncludes $sys_include_flags{{defines}} {{include_dirs}} {{cflags}} {{cflags_c}} /c {{source}} /Fo{{output}} /Fd\"$pdbname\"" + } + + tool("cxx") { + precompiled_header_type = "msvc" + + # The PDB name needs to be different between C and C++ compiled files. + pdbname = "{{target_out_dir}}/{{label_name}}_cc.pdb" + + # See comment in CC tool about quoting. 
+ depsformat = "msvc" + description = "CXX {{output}}" + outputs = [ + "$object_subdir/{{source_name_part}}.obj", + ] + + command = "$env_wrapper$cl /nologo /showIncludes $sys_include_flags{{defines}} {{include_dirs}} {{cflags}} {{cflags_cc}} /c {{source}} /Fo{{output}} /Fd\"$pdbname\"" + } + + tool("rc") { + command = "$python_path $tool_wrapper_path rc-wrapper $env rc.exe /nologo {{defines}} {{include_dirs}} /fo{{output}} {{source}}" + depsformat = "msvc" + outputs = [ + "$object_subdir/{{source_name_part}}.res", + ] + description = "RC {{output}}" + } + + tool("asm") { + if (toolchain_args.current_cpu == "arm64") { + prefix = rebase_path("$clang_base_path/bin", root_build_dir) + ml = "${clang_prefix}${prefix}/${clang_cl} --target=arm64-windows" + ml += " -c -o{{output}}" + } else { + if (toolchain_args.current_cpu == "x64") { + ml = "ml64.exe" + } else { + ml = "ml.exe" + } + ml += " /nologo /c /Fo{{output}}" + if (use_lld) { + # Wrap ml(64).exe with a script that makes its output deterministic. + # It's lld only because the script zaps obj Timestamp which + # link.exe /incremental looks at. + # TODO(https://crbug.com/762167): If we end up writing an llvm-ml64, + # make sure it has deterministic output (maybe with /Brepro or + # something) and remove this wrapper. + ml_py = rebase_path("ml.py", root_build_dir) + ml = "$python_path $ml_py $ml" + } + } + command = "$python_path $tool_wrapper_path asm-wrapper $env $ml {{defines}} {{include_dirs}} {{asmflags}} {{source}}" + description = "ASM {{output}}" + outputs = [ + "$object_subdir/{{source_name_part}}.obj", + ] + } + + tool("alink") { + rspfile = "{{output}}.rsp" + command = "$linker_wrapper$lib /nologo ${sys_lib_flags}{{arflags}} /OUT:{{output}} @$rspfile" + description = "LIB {{output}}" + outputs = [ + # Ignore {{output_extension}} and always use .lib, there's no reason to + # allow targets to override this extension on Windows. 
+ "{{output_dir}}/{{target_output_name}}.lib", + ] + default_output_extension = ".lib" + default_output_dir = "{{target_out_dir}}" + + # The use of inputs_newline is to work around a fixed per-line buffer + # size in the linker. + rspfile_content = "{{inputs_newline}}" + } + + tool("solink") { + dllname = "{{output_dir}}/{{target_output_name}}{{output_extension}}" # e.g. foo.dll + libname = "${dllname}.lib" # e.g. foo.dll.lib + pdbname = "${dllname}.pdb" + rspfile = "${dllname}.rsp" + pool = "//build/toolchain:link_pool($default_toolchain)" + + command = "$linker_wrapper$link /nologo ${sys_lib_flags}/IMPLIB:$libname /DLL /OUT:$dllname /PDB:$pdbname @$rspfile" + + default_output_extension = ".dll" + default_output_dir = "{{root_out_dir}}" + description = "LINK(DLL) {{output}}" + outputs = [ + dllname, + libname, + ] + link_output = libname + depend_output = libname + runtime_outputs = [ dllname ] + if (symbol_level != 0) { + outputs += [ pdbname ] + runtime_outputs += [ pdbname ] + } + + # Since the above commands only updates the .lib file when it changes, ask + # Ninja to check if the timestamp actually changed to know if downstream + # dependencies should be recompiled. + restat = true + + # The use of inputs_newline is to work around a fixed per-line buffer + # size in the linker. + rspfile_content = "{{libs}} {{solibs}} {{inputs_newline}} {{ldflags}}" + } + + tool("solink_module") { + dllname = "{{output_dir}}/{{target_output_name}}{{output_extension}}" # e.g. 
foo.dll + pdbname = "${dllname}.pdb" + rspfile = "${dllname}.rsp" + pool = "//build/toolchain:link_pool($default_toolchain)" + + command = "$linker_wrapper$link /nologo ${sys_lib_flags}/DLL /OUT:$dllname /PDB:$pdbname @$rspfile" + + default_output_extension = ".dll" + default_output_dir = "{{root_out_dir}}" + description = "LINK_MODULE(DLL) {{output}}" + outputs = [ + dllname, + ] + if (symbol_level != 0) { + outputs += [ pdbname ] + } + runtime_outputs = outputs + + # The use of inputs_newline is to work around a fixed per-line buffer + # size in the linker. + rspfile_content = "{{libs}} {{solibs}} {{inputs_newline}} {{ldflags}}" + } + + tool("link") { + exename = "{{output_dir}}/{{target_output_name}}{{output_extension}}" + pdbname = "$exename.pdb" + rspfile = "$exename.rsp" + pool = "//build/toolchain:link_pool($default_toolchain)" + + command = "$linker_wrapper$link /nologo ${sys_lib_flags}/OUT:$exename /PDB:$pdbname @$rspfile" + + default_output_extension = ".exe" + default_output_dir = "{{root_out_dir}}" + description = "LINK {{output}}" + outputs = [ + exename, + ] + if (symbol_level != 0) { + outputs += [ pdbname ] + } + runtime_outputs = outputs + + # The use of inputs_newline is to work around a fixed per-line buffer + # size in the linker. + rspfile_content = "{{inputs_newline}} {{libs}} {{solibs}} {{ldflags}}" + } + + # These two are really entirely generic, but have to be repeated in + # each toolchain because GN doesn't allow a template to be used here. + # See //build/toolchain/toolchain.gni for details. 
+ tool("stamp") { + command = stamp_command + description = stamp_description + pool = "//build/toolchain:action_pool($default_toolchain)" + } + tool("copy") { + command = copy_command + description = copy_description + pool = "//build/toolchain:action_pool($default_toolchain)" + } + + tool("action") { + pool = "//build/toolchain:action_pool($default_toolchain)" + } + } +} + +if (target_cpu == "x86" || target_cpu == "x64") { + win_build_host_cpu = target_cpu +} else { + win_build_host_cpu = host_cpu +} + +# x86, arm and arm64 build cpu toolchains for Windows (not WinUWP). Only +# define when the build cpu is one of these architectures since we don't +# do any cross compiles when targeting x64-bit (the build does generate +# some 64-bit stuff from x86/arm/arm64 target builds). +if (win_build_host_cpu != "x64") { + build_cpu_toolchain_data = exec_script("setup_toolchain.py", + [ + visual_studio_path, + windows_sdk_path, + visual_studio_runtime_dirs, + host_os, + win_build_host_cpu, + "environment." + win_build_host_cpu, + ], + "scope") + + msvc_toolchain(win_build_host_cpu) { + environment = "environment." + win_build_host_cpu + cl = "${goma_prefix}\"${build_cpu_toolchain_data.vc_bin_dir}/cl.exe\"" + if (host_os != "win") { + # For win cross build. + sys_lib_flags = "${build_cpu_toolchain_data.libpath_flags}" + } + toolchain_args = { + current_os = "win" + current_cpu = win_build_host_cpu + is_clang = false + } + } + + msvc_toolchain("win_clang_" + win_build_host_cpu) { + environment = "environment." + win_build_host_cpu + prefix = rebase_path("$clang_base_path/bin", root_build_dir) + cl = "${clang_prefix}$prefix/${clang_cl}" + sys_include_flags = "${build_cpu_toolchain_data.include_flags_imsvc}" + if (host_os != "win") { + # For win cross build. + sys_lib_flags = "${build_cpu_toolchain_data.libpath_flags}" + } + + toolchain_args = { + current_os = "win" + current_cpu = win_build_host_cpu + is_clang = true + } + } +} + +# 64-bit toolchains, including x64 and arm64. 
+template("win_64bit_toolchains") { + assert(defined(invoker.toolchain_arch)) + toolchain_arch = invoker.toolchain_arch + + win_64bit_toolchain_data = exec_script("setup_toolchain.py", + [ + visual_studio_path, + windows_sdk_path, + visual_studio_runtime_dirs, + "win", + toolchain_arch, + "environment." + toolchain_arch, + ], + "scope") + + msvc_toolchain(target_name) { + environment = "environment." + toolchain_arch + cl = "${goma_prefix}\"${win_64bit_toolchain_data.vc_bin_dir}/cl.exe\"" + if (host_os != "win") { + # For win cross build + sys_lib_flags = "${win_64bit_toolchain_data.libpath_flags}" + } + + toolchain_args = { + if (defined(invoker.toolchain_args)) { + forward_variables_from(invoker.toolchain_args, "*") + } + is_clang = false + current_os = "win" + current_cpu = toolchain_arch + } + } + + msvc_toolchain("win_clang_" + target_name) { + environment = "environment." + toolchain_arch + prefix = rebase_path("$clang_base_path/bin", root_build_dir) + cl = "${clang_prefix}$prefix/${clang_cl}" + sys_include_flags = "${win_64bit_toolchain_data.include_flags_imsvc}" + if (host_os != "win") { + # For win cross build + sys_lib_flags = "${win_64bit_toolchain_data.libpath_flags}" + } + + toolchain_args = { + if (defined(invoker.toolchain_args)) { + forward_variables_from(invoker.toolchain_args, "*") + } + is_clang = true + current_os = "win" + current_cpu = toolchain_arch + } + } +} + +win_64bit_toolchains("x64") { + toolchain_arch = "x64" +} + +if (target_cpu == "arm64") { + win_64bit_toolchains("arm64") { + toolchain_arch = "arm64" + } +} + +# The nacl_win64 toolchain is nearly identical to the plain x64 toolchain. +# It's used solely for building nacl64.exe (//components/nacl/broker:nacl64). +# The only reason it's a separate toolchain is so that it can force +# is_component_build to false in the toolchain_args() block, because +# building nacl64.exe in component style does not work. 
+win_64bit_toolchains("nacl_win64") { + toolchain_arch = "x64" + toolchain_args = { + is_component_build = false + } +} + +# WinUWP toolchains. Only define these when targeting them. + +if (target_os == "winuwp") { + assert(target_cpu == "x64" || target_cpu == "x86" || target_cpu == "arm" || + target_cpu == "arm64") + store_cpu_toolchain_data = exec_script("setup_toolchain.py", + [ + visual_studio_path, + windows_sdk_path, + visual_studio_runtime_dirs, + target_os, + target_cpu, + "environment.store_" + target_cpu, + ], + "scope") + + msvc_toolchain("uwp_" + target_cpu) { + environment = "environment.store_" + target_cpu + cl = "${goma_prefix}\"${store_cpu_toolchain_data.vc_bin_dir}/cl.exe\"" + toolchain_args = { + current_os = "winuwp" + current_cpu = target_cpu + is_clang = false + } + } +} diff --git a/deps/v8/build/toolchain/win/midl.gni b/deps/v8/build/toolchain/win/midl.gni new file mode 100644 index 0000000000..b46f4cd538 --- /dev/null +++ b/deps/v8/build/toolchain/win/midl.gni @@ -0,0 +1,118 @@ +# Copyright 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +assert(is_win) + +import("//build/config/win/visual_studio_version.gni") + +# This template defines a rule to invoke the MS IDL compiler. The generated +# source code will be compiled and linked into targets that depend on this. +# +# Parameters +# +# sources +# List of .idl file to process. +# +# header_file (optional) +# File name of generated header file. Defaults to the basename of the +# source idl file with a .h extension. +# +# out_dir (optional) +# Directory to write the generated files to. Defaults to target_gen_dir. +# +# dynamic_guid (optional) +# If the GUID is not constant across builds, the current GUID. 
+# +# deps (optional) +# visibility (optional) + +template("midl") { + action_name = "${target_name}_idl_action" + source_set_name = target_name + + assert(defined(invoker.sources), "Source must be defined for $target_name") + + if (defined(invoker.out_dir)) { + out_dir = invoker.out_dir + } else { + out_dir = target_gen_dir + } + + if (defined(invoker.dynamic_guid)) { + dynamic_guid = invoker.dynamic_guid + } else { + dynamic_guid = "none" + } + + if (defined(invoker.header_file)) { + header_file = invoker.header_file + } else { + header_file = "{{source_name_part}}.h" + } + + dlldata_file = "{{source_name_part}}.dlldata.c" + interface_identifier_file = "{{source_name_part}}_i.c" + proxy_file = "{{source_name_part}}_p.c" + type_library_file = "{{source_name_part}}.tlb" + + action_foreach(action_name) { + visibility = [ ":$source_set_name" ] + script = "//build/toolchain/win/midl.py" + + sources = invoker.sources + + # Note that .tlb is not included in the outputs as it is not always + # generated depending on the content of the input idl file. 
+ outputs = [ + "$out_dir/$header_file", + "$out_dir/$dlldata_file", + "$out_dir/$interface_identifier_file", + "$out_dir/$proxy_file", + ] + + if (current_cpu == "x86") { + win_tool_arch = "environment.x86" + idl_target_platform = "win32" + } else if (current_cpu == "x64") { + win_tool_arch = "environment.x64" + idl_target_platform = "x64" + } else if (current_cpu == "arm64") { + win_tool_arch = "environment.arm64" + idl_target_platform = "arm64" + } else { + assert(false, "Need environment for this arch") + } + + args = [ + win_tool_arch, + rebase_path(out_dir, root_build_dir), + dynamic_guid, + type_library_file, + header_file, + dlldata_file, + interface_identifier_file, + proxy_file, + "{{source}}", + "/char", + "signed", + "/env", + idl_target_platform, + "/Oicf", + ] + + forward_variables_from(invoker, [ "deps" ]) + } + + source_set(target_name) { + forward_variables_from(invoker, [ "visibility" ]) + + # We only compile the IID files from the IDL tool rather than all outputs. + sources = process_file_template(invoker.sources, + [ "$out_dir/$interface_identifier_file" ]) + + public_deps = [ + ":$action_name", + ] + } +} diff --git a/deps/v8/build/toolchain/win/midl.py b/deps/v8/build/toolchain/win/midl.py new file mode 100644 index 0000000000..09fec0b8cf --- /dev/null +++ b/deps/v8/build/toolchain/win/midl.py @@ -0,0 +1,238 @@ +# Copyright 2017 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import array +import difflib +import distutils.dir_util +import filecmp +import operator +import os +import re +import shutil +import struct +import subprocess +import sys +import tempfile +import uuid + + +def ZapTimestamp(filename): + contents = open(filename, 'rb').read() + # midl.exe writes timestamp 2147483647 (2^31 - 1) as creation date into its + # outputs, but using the local timezone. 
To make the output timezone- + # independent, replace that date with a fixed string of the same length. + # Also blank out the minor version number. + if filename.endswith('.tlb'): + # See https://chromium-review.googlesource.com/c/chromium/src/+/693223 for + # a fairly complete description of the .tlb binary format. + # TLB files start with a 54 byte header. Offset 0x20 stores how many types + # are defined in the file, and the header is followed by that many uint32s. + # After that, 15 section headers appear. Each section header is 16 bytes, + # starting with offset and length uint32s. + # Section 12 in the file contains custom() data. custom() data has a type + # (int, string, etc). Each custom data chunk starts with a uint16_t + # describing its type. Type 8 is string data, consisting of a uint32_t + # len, followed by that many data bytes, followed by 'W' bytes to pad to a + # 4 byte boundary. Type 0x13 is uint32 data, followed by 4 data bytes, + # followed by two 'W' to pad to a 4 byte boundary. + # The custom block always starts with one string containing "Created by + # MIDL version 8...", followed by one uint32 containing 0x7fffffff, + # followed by another uint32 containing the MIDL compiler version (e.g. + # 0x0801026e for v8.1.622 -- 0x26e == 622). These 3 fields take 0x54 bytes. + # There might be more custom data after that, but these 3 blocks are always + # there for file-level metadata. + # All data is little-endian in the file. + assert contents[0:8] == 'MSFT\x02\x00\x01\x00' + ntypes, = struct.unpack_from('<I', contents, 0x20) + custom_off, custom_len = struct.unpack_from( + '<II', contents, 0x54 + 4*ntypes + 11*16) + assert custom_len >= 0x54 + # First: Type string (0x8), followed by 0x3e characters. + assert contents[custom_off:custom_off+6] == '\x08\x00\x3e\x00\x00\x00' + assert re.match( + 'Created by MIDL version 8\.\d\d\.\d{4} at ... Jan 1. ..:..:.. 
2038\n', + contents[custom_off+6:custom_off+6+0x3e]) + # Second: Type uint32 (0x13) storing 0x7fffffff (followed by WW / 0x57 pad) + assert contents[custom_off+6+0x3e:custom_off+6+0x3e+8] == \ + '\x13\x00\xff\xff\xff\x7f\x57\x57' + # Third: Type uint32 (0x13) storing MIDL compiler version. + assert contents[custom_off+6+0x3e+8:custom_off+6+0x3e+8+2] == '\x13\x00' + # Replace "Created by" string with fixed string, and fixed MIDL version with + # 8.1.622 always. + contents = (contents[0:custom_off+6] + + 'Created by MIDL version 8.xx.xxxx at a redacted point in time\n' + + # uint32 (0x13) val 0x7fffffff, WW, uint32 (0x13), val 0x0801026e, WW + '\x13\x00\xff\xff\xff\x7f\x57\x57\x13\x00\x6e\x02\x01\x08\x57\x57' + + contents[custom_off + 0x54:]) + else: + contents = re.sub( + 'File created by MIDL compiler version 8\.\d\d\.\d{4} \*/\r\n' + '/\* at ... Jan 1. ..:..:.. 2038', + 'File created by MIDL compiler version 8.xx.xxxx */\r\n' + '/* at a redacted point in time', + contents) + contents = re.sub( + ' Oicf, W1, Zp8, env=(.....) \(32b run\), ' + 'target_arch=(AMD64|X86) 8\.\d\d\.\d{4}', + ' Oicf, W1, Zp8, env=\\1 (32b run), target_arch=\\2 8.xx.xxxx', + contents) + # TODO(thakis): If we need more hacks than these, try to verify checked-in + # outputs when we're using the hermetic toolchain. + # midl.exe older than 8.1.622 omit '//' after #endif, fix that: + contents = contents.replace('#endif !_MIDL_USE_GUIDDEF_', + '#endif // !_MIDL_USE_GUIDDEF_') + # midl.exe puts the midl version into code in one place. To have + # predictable output, lie about the midl version if it's not 8.1.622. + # This is unfortunate, but remember that there's beauty too in imperfection. 
+ contents = contents.replace('0x801026c, /* MIDL Version 8.1.620 */', + '0x801026e, /* MIDL Version 8.1.622 */') + open(filename, 'wb').write(contents) + + +def overwrite_cls_guid_h(h_file, dynamic_guid): + contents = open(h_file, 'rb').read() + contents = re.sub('class DECLSPEC_UUID\("[^"]*"\)', + 'class DECLSPEC_UUID("%s")' % str(dynamic_guid), contents) + open(h_file, 'wb').write(contents) + + +def overwrite_cls_guid_iid(iid_file, dynamic_guid): + contents = open(iid_file, 'rb').read() + hexuuid = '0x%08x,0x%04x,0x%04x,' % dynamic_guid.fields[0:3] + hexuuid += ','.join('0x%02x' % ord(b) for b in dynamic_guid.bytes[8:]) + contents = re.sub(r'MIDL_DEFINE_GUID\(CLSID, ([^,]*),[^)]*\)', + r'MIDL_DEFINE_GUID(CLSID, \1,%s)' % hexuuid, contents) + open(iid_file, 'wb').write(contents) + + +def overwrite_cls_guid_tlb(tlb_file, dynamic_guid): + # See ZapTimestamp() for a short overview of the .tlb format. The 1st + # section contains type descriptions, and the first type should be our + # coclass. It points to the type's GUID in section 6, the GUID section. + contents = open(tlb_file, 'rb').read() + assert contents[0:8] == 'MSFT\x02\x00\x01\x00' + ntypes, = struct.unpack_from('<I', contents, 0x20) + type_off, type_len = struct.unpack_from('<II', contents, 0x54 + 4*ntypes) + assert ord(contents[type_off]) == 0x25, "expected coclass" + guidind = struct.unpack_from('<I', contents, type_off + 0x2c)[0] + guid_off, guid_len = struct.unpack_from( + '<II', contents, 0x54 + 4*ntypes + 5*16) + assert guidind + 14 <= guid_len + contents = array.array('c', contents) + struct.pack_into('<IHH8s', contents, guid_off + guidind, + *(dynamic_guid.fields[0:3] + (dynamic_guid.bytes[8:],))) + # The GUID is correct now, but there's also a GUID hashtable in section 5. + # Need to recreate that too. Since the hash table uses chaining, it's + # easiest to recompute it from scratch rather than trying to patch it up. 
+ hashtab = [0xffffffff] * (0x80 / 4) + for guidind in range(guid_off, guid_off + guid_len, 24): + guidbytes, typeoff, nextguid = struct.unpack_from( + '<16sII', contents, guidind) + words = struct.unpack('<8H', guidbytes) + # midl seems to use the following simple hash function for GUIDs: + guidhash = reduce(operator.xor, [w for w in words]) % (0x80 / 4) + nextguid = hashtab[guidhash] + struct.pack_into('<I', contents, guidind + 0x14, nextguid) + hashtab[guidhash] = guidind - guid_off + hash_off, hash_len = struct.unpack_from( + '<II', contents, 0x54 + 4*ntypes + 4*16) + for i, hashval in enumerate(hashtab): + struct.pack_into('<I', contents, hash_off + 4*i, hashval) + open(tlb_file, 'wb').write(contents) + + +def overwrite_cls_guid(h_file, iid_file, tlb_file, dynamic_guid): + # Fix up GUID in .h, _i.c, and .tlb. This currently assumes that there's + # only one coclass in the idl file, and that that's the type with the + # dynamic type. + overwrite_cls_guid_h(h_file, dynamic_guid) + overwrite_cls_guid_iid(iid_file, dynamic_guid) + overwrite_cls_guid_tlb(tlb_file, dynamic_guid) + + +def main(arch, outdir, dynamic_guid, tlb, h, dlldata, iid, proxy, idl, *flags): + # Copy checked-in outputs to final location. + THIS_DIR = os.path.abspath(os.path.dirname(__file__)) + source = os.path.join(THIS_DIR, '..', '..', '..', + 'third_party', 'win_build_output', outdir.replace('gen/', 'midl/')) + if os.path.isdir(os.path.join(source, os.path.basename(idl))): + source = os.path.join(source, os.path.basename(idl)) + source = os.path.join(source, arch.split('.')[1]) # Append 'x86' or 'x64'. + source = os.path.normpath(source) + distutils.dir_util.copy_tree(source, outdir, preserve_times=False) + if dynamic_guid != 'none': + overwrite_cls_guid(os.path.join(outdir, h), + os.path.join(outdir, iid), + os.path.join(outdir, tlb), + uuid.UUID(dynamic_guid)) + + # On non-Windows, that's all we can do. 
+ if sys.platform != 'win32': + return 0 + + # On Windows, run midl.exe on the input and check that its outputs are + # identical to the checked-in outputs (after possibly replacing their main + # class guid). + tmp_dir = tempfile.mkdtemp() + delete_tmp_dir = True + + # Read the environment block from the file. This is stored in the format used + # by CreateProcess. Drop last 2 NULs, one for list terminator, one for + # trailing vs. separator. + env_pairs = open(arch).read()[:-2].split('\0') + env_dict = dict([item.split('=', 1) for item in env_pairs]) + + args = ['midl', '/nologo'] + list(flags) + [ + '/out', tmp_dir, + '/tlb', tlb, + '/h', h, + '/dlldata', dlldata, + '/iid', iid, + '/proxy', proxy, + idl] + try: + popen = subprocess.Popen(args, shell=True, env=env_dict, + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + out, _ = popen.communicate() + # Filter junk out of stdout, and write filtered versions. Output we want + # to filter is pairs of lines that look like this: + # Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl + # objidl.idl + lines = out.splitlines() + prefixes = ('Processing ', '64 bit Processing ') + processing = set(os.path.basename(x) + for x in lines if x.startswith(prefixes)) + for line in lines: + if not line.startswith(prefixes) and line not in processing: + print line + if popen.returncode != 0: + return popen.returncode + + for f in os.listdir(tmp_dir): + ZapTimestamp(os.path.join(tmp_dir, f)) + + # Now compare the output in tmp_dir to the copied-over outputs. 
+ diff = filecmp.dircmp(tmp_dir, outdir) + if diff.diff_files: + print 'midl.exe output different from files in %s, see %s' \ + % (outdir, tmp_dir) + for f in diff.diff_files: + if f.endswith('.tlb'): continue + fromfile = os.path.join(outdir, f) + tofile = os.path.join(tmp_dir, f) + print ''.join(difflib.unified_diff(open(fromfile, 'U').readlines(), + open(tofile, 'U').readlines(), + fromfile, tofile)) + delete_tmp_dir = False + print 'To rebaseline:' + print ' copy /y %s\* %s' % (tmp_dir, source) + sys.exit(1) + return 0 + finally: + if os.path.exists(tmp_dir) and delete_tmp_dir: + shutil.rmtree(tmp_dir) + + +if __name__ == '__main__': + sys.exit(main(*sys.argv[1:])) diff --git a/deps/v8/build/toolchain/win/ml.py b/deps/v8/build/toolchain/win/ml.py new file mode 100755 index 0000000000..877c584c57 --- /dev/null +++ b/deps/v8/build/toolchain/win/ml.py @@ -0,0 +1,287 @@ +#!/usr/bin/env python +# Copyright 2018 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Wraps ml.exe or ml64.exe and postprocesses the output to be deterministic. +Sets timestamp in .obj file to 0, hence incompatible with link.exe /incremental. 
+
+Use by prefixing the ml(64).exe invocation with this script:
+ python ml.py ml.exe [args...]"""
+
+import array
+import collections
+import struct
+import subprocess
+import sys
+
+
+class Struct(object):
+  """A thin wrapper around the struct module that returns a namedtuple"""
+  def __init__(self, name, *args):
+    """Pass the name of the return type, and then an interleaved list of
+    format strings as used by the struct module and of field names."""
+    self.fmt = '<' + ''.join(args[0::2])
+    self.type = collections.namedtuple(name, args[1::2])
+
+  def pack_into(self, buffer, offset, data):
+    return struct.pack_into(self.fmt, buffer, offset, *data)
+
+  def unpack_from(self, buffer, offset=0):
+    return self.type(*struct.unpack_from(self.fmt, buffer, offset))
+
+  def size(self):
+    return struct.calcsize(self.fmt)
+
+
+def Subtract(nt, **kwargs):
+  """Subtract(nt, f=2) returns a new namedtuple with 2 subtracted from nt.f"""
+  return nt._replace(**{k: getattr(nt, k) - v for k, v in kwargs.iteritems()})
+
+
+def MakeDeterministic(objdata):
+  # Takes data produced by ml(64).exe (without any special flags) and
+  # 1. Sets the timestamp to 0
+  # 2. Strips the .debug$S section (which contains an unwanted absolute path)
+
+  # This makes several assumptions about ml's output:
+  # - Section data is in the same order as the corresponding section headers:
+  #   section headers preceding the .debug$S section header have their data
+  #   preceding the .debug$S section data; likewise for section headers
+  #   following the .debug$S section.
+  # - The .debug$S section contains only the absolute path to the obj file and
+  #   nothing else, in particular there's only a single entry in the symbol
+  #   table referring to the .debug$S section.
+  # - There are no COFF line number entries.
+  # - There's no IMAGE_SYM_CLASS_CLR_TOKEN symbol.
+  # These seem to hold in practice; if they stop holding this script needs to
+  # become smarter.
+
+  objdata = array.array('c', objdata) # Writable, e.g. 
via struct.pack_into. + + # Read coff header. + COFFHEADER = Struct('COFFHEADER', + 'H', 'Machine', + 'H', 'NumberOfSections', + 'I', 'TimeDateStamp', + 'I', 'PointerToSymbolTable', + 'I', 'NumberOfSymbols', + + 'H', 'SizeOfOptionalHeader', + 'H', 'Characteristics') + coff_header = COFFHEADER.unpack_from(objdata) + assert coff_header.SizeOfOptionalHeader == 0 # Only set for binaries. + + # Read section headers following coff header. + SECTIONHEADER = Struct('SECTIONHEADER', + '8s', 'Name', + 'I', 'VirtualSize', + 'I', 'VirtualAddress', + + 'I', 'SizeOfRawData', + 'I', 'PointerToRawData', + 'I', 'PointerToRelocations', + 'I', 'PointerToLineNumbers', + + 'H', 'NumberOfRelocations', + 'H', 'NumberOfLineNumbers', + 'I', 'Characteristics') + section_headers = [] + debug_section_index = -1 + for i in range(0, coff_header.NumberOfSections): + section_header = SECTIONHEADER.unpack_from( + objdata, offset=COFFHEADER.size() + i * SECTIONHEADER.size()) + assert not section_header[0].startswith('/') # Support short names only. + section_headers.append(section_header) + + if section_header.Name == '.debug$S': + assert debug_section_index == -1 + debug_section_index = i + assert debug_section_index != -1 + + data_start = COFFHEADER.size() + len(section_headers) * SECTIONHEADER.size() + + # Verify the .debug$S section looks like we expect. 

  assert section_headers[debug_section_index].Name == '.debug$S'
  assert section_headers[debug_section_index].VirtualSize == 0
  assert section_headers[debug_section_index].VirtualAddress == 0
  debug_size = section_headers[debug_section_index].SizeOfRawData
  debug_offset = section_headers[debug_section_index].PointerToRawData
  assert section_headers[debug_section_index].PointerToRelocations == 0
  assert section_headers[debug_section_index].PointerToLineNumbers == 0
  assert section_headers[debug_section_index].NumberOfRelocations == 0
  assert section_headers[debug_section_index].NumberOfLineNumbers == 0

  # Make sure sections in front of .debug$S have their data preceding it.
  for header in section_headers[:debug_section_index]:
    assert header.PointerToRawData < debug_offset
    assert header.PointerToRelocations < debug_offset
    assert header.PointerToLineNumbers < debug_offset

  # Make sure sections after .debug$S have their data following it.
  for header in section_headers[debug_section_index + 1:]:
    # Make sure the .debug$S data is at the very end of section data:
    assert header.PointerToRawData > debug_offset
    assert header.PointerToRelocations == 0
    assert header.PointerToLineNumbers == 0

  # Make sure the first non-empty section's data starts right after the section
  # headers.  (A PointerToRawData of 0 means the section has no raw data.)
  for section_header in section_headers:
    if section_header.PointerToRawData == 0:
      assert section_header.PointerToRelocations == 0
      assert section_header.PointerToLineNumbers == 0
      continue
    assert section_header.PointerToRawData == data_start
    break

  # Make sure the symbol table (and hence, string table) appear after the last
  # section:
  assert (coff_header.PointerToSymbolTable >=
          section_headers[-1].PointerToRawData + section_headers[-1].SizeOfRawData)

  # The symbol table contains a symbol for the no-longer-present .debug$S
  # section.
If we leave it there, lld-link will complain:
  #
  #    lld-link: error: .debug$S should not refer to non-existent section 5
  #
  # so we need to remove that symbol table entry as well. This shifts symbol
  # entries around and we need to update symbol table indices in:
  # - relocations
  # - line number records (never present)
  # - aux symbol entries (never present in ml output)
  # Field layout follows the COFF standard symbol record (18 bytes).
  SYM = Struct('SYM',
               '8s', 'Name',
               'I', 'Value',
               'h', 'SectionNumber',  # Note: Signed!
               'H', 'Type',

               'B', 'StorageClass',
               'B', 'NumberOfAuxSymbols')
  i = 0
  debug_sym = -1
  while i < coff_header.NumberOfSymbols:
    sym_offset = coff_header.PointerToSymbolTable + i * SYM.size()
    sym = SYM.unpack_from(objdata, sym_offset)

    # 107 is IMAGE_SYM_CLASS_CLR_TOKEN, which has aux entry "CLR Token
    # Definition", which contains a symbol index. Check it's never present.
    assert sym.StorageClass != 107

    # Note: sym.SectionNumber is 1-based, debug_section_index is 0-based.
    # The special section numbers (UNDEFINED=0, ABSOLUTE=-1, DEBUG=-2) are
    # all <= 0 and therefore never match or get decremented below.
    if sym.SectionNumber - 1 == debug_section_index:
      assert debug_sym == -1, 'more than one .debug$S symbol found'
      debug_sym = i
      # Make sure the .debug$S symbol looks like we expect.
      # In particular, it should have exactly one aux symbol.
      assert sym.Name == '.debug$S'
      assert sym.Value == 0
      assert sym.Type == 0
      assert sym.StorageClass == 3
      assert sym.NumberOfAuxSymbols == 1
    elif sym.SectionNumber > debug_section_index:
      # Sections after .debug$S shift down by one once its header is removed.
      sym = Subtract(sym, SectionNumber=1)
      SYM.pack_into(objdata, sym_offset, sym)
    i += 1 + sym.NumberOfAuxSymbols
  assert debug_sym != -1, '.debug$S symbol not found'

  # Note: Usually the .debug$S section is the last, but for files saying
  # `includelib foo.lib`, like safe_terminate_process.asm in 32-bit builds,
  # this isn't true: .drectve is after .debug$S.

  # Update symbol table indices in relocations.
  # There are a few processor types that have one or two relocation types
  # where SymbolTableIndex has a different meaning, but not for x86.

  # Field layout follows the COFF relocation record (10 bytes).
  REL = Struct('REL',
               'I', 'VirtualAddress',
               'I', 'SymbolTableIndex',
               'H', 'Type')
  # Only sections before .debug$S can have relocations; the ones after it
  # were asserted above to have PointerToRelocations == 0.
  for header in section_headers[0:debug_section_index]:
    for j in range(0, header.NumberOfRelocations):
      rel_offset = header.PointerToRelocations + j * REL.size()
      rel = REL.unpack_from(objdata, rel_offset)
      assert rel.SymbolTableIndex != debug_sym
      if rel.SymbolTableIndex > debug_sym:
        # Subtract 2: the .debug$S symbol and its single aux entry both go.
        rel = Subtract(rel, SymbolTableIndex=2)
        REL.pack_into(objdata, rel_offset, rel)

  # Update symbol table indices in line numbers -- just check they don't exist.
  for header in section_headers:
    assert header.NumberOfLineNumbers == 0

  # Now that all indices are updated, remove the symbol table entry referring
  # to .debug$S and its aux entry.
  del objdata[coff_header.PointerToSymbolTable + debug_sym * SYM.size():
              coff_header.PointerToSymbolTable + (debug_sym + 2) * SYM.size()]

  # Now we know that it's safe to write out the input data, with just the
  # timestamp overwritten to 0, the last section header cut out (and the
  # offsets of all other section headers decremented by the size of that
  # one section header), and the last section's data cut out. The symbol
  # table offset needs to be reduced by one section header and the size of
  # the missing section.
  # (The COFF spec only requires on-disk sections to be aligned in image files,
  # for obj files it's not required. If that wasn't the case, deleting slices
  # of data would not generally be safe.)

  # Update section offsets and remove .debug$S section data.

  # Sections before .debug$S only lose the one deleted section header in
  # front of their data; sections after it additionally lose the .debug$S
  # payload itself.
  for i in range(0, debug_section_index):
    header = section_headers[i]
    if header.SizeOfRawData:
      header = Subtract(header, PointerToRawData=SECTIONHEADER.size())
    if header.NumberOfRelocations:
      header = Subtract(header, PointerToRelocations=SECTIONHEADER.size())
    if header.NumberOfLineNumbers:
      header = Subtract(header, PointerToLineNumbers=SECTIONHEADER.size())
    SECTIONHEADER.pack_into(
        objdata, COFFHEADER.size() + i * SECTIONHEADER.size(), header)
  for i in range(debug_section_index + 1, len(section_headers)):
    header = section_headers[i]
    shift = SECTIONHEADER.size() + debug_size
    if header.SizeOfRawData:
      header = Subtract(header, PointerToRawData=shift)
    if header.NumberOfRelocations:
      header = Subtract(header, PointerToRelocations=shift)
    if header.NumberOfLineNumbers:
      header = Subtract(header, PointerToLineNumbers=shift)
    SECTIONHEADER.pack_into(
        objdata, COFFHEADER.size() + i * SECTIONHEADER.size(), header)

  del objdata[debug_offset:debug_offset + debug_size]

  # Finally, remove .debug$S section header and update coff header.
  coff_header = coff_header._replace(TimeDateStamp=0)
  # NumberOfSymbols drops by 2 (symbol + aux entry); the symbol table moves
  # up by one section header plus the deleted section payload.
  coff_header = Subtract(coff_header,
                         NumberOfSections=1,
                         PointerToSymbolTable=SECTIONHEADER.size() + debug_size,
                         NumberOfSymbols=2)
  COFFHEADER.pack_into(objdata, 0, coff_header)

  del objdata[
      COFFHEADER.size() + debug_section_index * SECTIONHEADER.size():
      COFFHEADER.size() + (debug_section_index + 1) * SECTIONHEADER.size()]

  # All done!
+ return objdata.tostring() + + +def main(): + ml_result = subprocess.call(sys.argv[1:]) + if ml_result != 0: + return ml_result + + objfile = None + for i in range(1, len(sys.argv)): + if sys.argv[i].startswith('/Fo'): + objfile = sys.argv[i][len('/Fo'):] + assert objfile, 'failed to find ml output' + + with open(objfile, 'rb') as f: + objdata = f.read() + objdata = MakeDeterministic(objdata) + with open(objfile, 'wb') as f: + f.write(objdata) + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/deps/v8/build/toolchain/win/rc/.gitignore b/deps/v8/build/toolchain/win/rc/.gitignore new file mode 100644 index 0000000000..e8fc4d3e1f --- /dev/null +++ b/deps/v8/build/toolchain/win/rc/.gitignore @@ -0,0 +1,3 @@ +linux64/rc +mac/rc +win/rc.exe diff --git a/deps/v8/build/toolchain/win/rc/README.md b/deps/v8/build/toolchain/win/rc/README.md new file mode 100644 index 0000000000..e6d38f9709 --- /dev/null +++ b/deps/v8/build/toolchain/win/rc/README.md @@ -0,0 +1,30 @@ +# rc + +This contains a cross-platform reimplementation of rc.exe. + +This exists mainly to compile .rc files on non-Windows hosts for cross builds. +However, it also runs on Windows for two reasons: + +1. To compare the output of Microsoft's rc.exe and the reimplementation and to + check that they produce bitwise identical output. +2. The reimplementation supports printing resource files in /showIncludes + output, which helps getting build dependencies right. + +The resource compiler consists of two parts: + +1. A python script rc.py that serves as the driver. It does unicode + conversions, runs the input through the preprocessor, and then calls the + actual resource compiler. +2. The resource compiler, a C++ binary obtained via sha1 files from Google + Storage. The binary's code currenty lives at + https://github.com/nico/hack/tree/master/res, even though work is (slowly) + underway to upstream it into LLVM. + +To update the rc binary, run `upload_rc_binaries.sh` in this directory, on a +Mac. 
+ +rc isn't built from source as part of the regular chrome build because +it's needed in a gn toolchain tool, and these currently cannot have deps. +Alternatively, gn could be taught about deps on tools, or rc invocations could +be not a tool but a template like e.g. yasm invocations (which can have deps), +then the prebuilt binaries wouldn't be needed. diff --git a/deps/v8/build/toolchain/win/rc/linux64/rc.sha1 b/deps/v8/build/toolchain/win/rc/linux64/rc.sha1 new file mode 100644 index 0000000000..ad14ca46a9 --- /dev/null +++ b/deps/v8/build/toolchain/win/rc/linux64/rc.sha1 @@ -0,0 +1 @@ +2d0c766039264dc2514d005a42f074af4838a446
\ No newline at end of file diff --git a/deps/v8/build/toolchain/win/rc/mac/rc.sha1 b/deps/v8/build/toolchain/win/rc/mac/rc.sha1 new file mode 100644 index 0000000000..dbd6302a35 --- /dev/null +++ b/deps/v8/build/toolchain/win/rc/mac/rc.sha1 @@ -0,0 +1 @@ +4c25c3bcb6608109bb52028d008835895cf72629
\ No newline at end of file diff --git a/deps/v8/build/toolchain/win/rc/rc.py b/deps/v8/build/toolchain/win/rc/rc.py new file mode 100755 index 0000000000..2eff7d2fa3 --- /dev/null +++ b/deps/v8/build/toolchain/win/rc/rc.py @@ -0,0 +1,192 @@ +#!/usr/bin/env python +# Copyright 2017 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""usage: rc.py [options] input.res +A resource compiler for .rc files. + +options: +-h, --help Print this message. +-I<dir> Add include path. +-D<sym> Define a macro for the preprocessor. +/fo<out> Set path of output .res file. +/nologo Ignored (rc.py doesn't print a logo by default). +/showIncludes Print referenced header and resource files.""" + +from __future__ import print_function +from collections import namedtuple +import codecs +import os +import re +import subprocess +import sys +import tempfile + + +THIS_DIR = os.path.abspath(os.path.dirname(__file__)) +SRC_DIR = \ + os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(THIS_DIR)))) + + +def ParseFlags(): + """Parses flags off sys.argv and returns the parsed flags.""" + # Can't use optparse / argparse because of /fo flag :-/ + includes = [] + defines = [] + output = None + input = None + show_includes = False + # Parse. 
+ for flag in sys.argv[1:]: + if flag == '-h' or flag == '--help': + print(__doc__) + sys.exit(0) + if flag.startswith('-I'): + includes.append(flag) + elif flag.startswith('-D'): + defines.append(flag) + elif flag.startswith('/fo'): + if output: + print('rc.py: error: multiple /fo flags', '/fo' + output, flag, + file=sys.stderr) + sys.exit(1) + output = flag[3:] + elif flag == '/nologo': + pass + elif flag == '/showIncludes': + show_includes = True + elif (flag.startswith('-') or + (flag.startswith('/') and not os.path.exists(flag))): + print('rc.py: error: unknown flag', flag, file=sys.stderr) + print(__doc__, file=sys.stderr) + sys.exit(1) + else: + if input: + print('rc.py: error: multiple inputs:', input, flag, file=sys.stderr) + sys.exit(1) + input = flag + # Validate and set default values. + if not input: + print('rc.py: error: no input file', file=sys.stderr) + sys.exit(1) + if not output: + output = os.path.splitext(input)[0] + '.res' + Flags = namedtuple('Flags', ['includes', 'defines', 'output', 'input', + 'show_includes']) + return Flags(includes=includes, defines=defines, output=output, input=input, + show_includes=show_includes) + + +def ReadInput(input): + """"Reads input and returns it. For UTF-16LEBOM input, converts to UTF-8.""" + # Microsoft's rc.exe only supports unicode in the form of UTF-16LE with a BOM. + # Our rc binary sniffs for UTF-16LE. If that's not found, if /utf-8 is + # passed, the input is treated as UTF-8. If /utf-8 is not passed and the + # input is not UTF-16LE, then our rc errors out on characters outside of + # 7-bit ASCII. Since the driver always converts UTF-16LE to UTF-8 here (for + # the preprocessor, which doesn't support UTF-16LE), our rc will either see + # UTF-8 with the /utf-8 flag (for UTF-16LE input), or ASCII input. + # This is compatible with Microsoft rc.exe. If we wanted, we could expose + # a /utf-8 flag for the driver for UTF-8 .rc inputs too. + # TODO(thakis): Microsoft's rc.exe supports BOM-less UTF-16LE. 
We currently + # don't, but for chrome it currently doesn't matter. + is_utf8 = False + try: + with open(input, 'rb') as rc_file: + rc_file_data = rc_file.read() + if rc_file_data.startswith(codecs.BOM_UTF16_LE): + rc_file_data = rc_file_data[2:].decode('utf-16le').encode('utf-8') + is_utf8 = True + except IOError: + print('rc.py: failed to open', input, file=sys.stderr) + sys.exit(1) + except UnicodeDecodeError: + print('rc.py: failed to decode UTF-16 despite BOM', input, file=sys.stderr) + sys.exit(1) + return rc_file_data, is_utf8 + + +def Preprocess(rc_file_data, flags): + """Runs the input file through the preprocessor.""" + clang = os.path.join(SRC_DIR, 'third_party', 'llvm-build', + 'Release+Asserts', 'bin', 'clang-cl') + # Let preprocessor write to a temp file so that it doesn't interfere + # with /showIncludes output on stdout. + if sys.platform == 'win32': + clang += '.exe' + temp_handle, temp_file = tempfile.mkstemp(suffix='.i') + # Closing temp_handle immediately defeats the purpose of mkstemp(), but I + # can't figure out how to let write to the temp file on Windows otherwise. + os.close(temp_handle) + clang_cmd = [clang, '/P', '/DRC_INVOKED', '/TC', '-', '/Fi' + temp_file] + if os.path.dirname(flags.input): + # This must precede flags.includes. + clang_cmd.append('-I' + os.path.dirname(flags.input)) + if flags.show_includes: + clang_cmd.append('/showIncludes') + clang_cmd += flags.includes + flags.defines + p = subprocess.Popen(clang_cmd, stdin=subprocess.PIPE) + p.communicate(input=rc_file_data) + if p.returncode != 0: + sys.exit(p.returncode) + preprocessed_output = open(temp_file, 'rb').read() + os.remove(temp_file) + + # rc.exe has a wacko preprocessor: + # https://msdn.microsoft.com/en-us/library/windows/desktop/aa381033(v=vs.85).aspx + # """RC treats files with the .c and .h extensions in a special manner. It + # assumes that a file with one of these extensions does not contain + # resources. 
If a file has the .c or .h file name extension, RC ignores all + # lines in the file except the preprocessor directives.""" + # Thankfully, the Microsoft headers are mostly good about putting everything + # in the system headers behind `if !defined(RC_INVOKED)`, so regular + # preprocessing with RC_INVOKED defined works. + return preprocessed_output + + +def RunRc(preprocessed_output, is_utf8, flags): + if sys.platform.startswith('linux'): + rc = os.path.join(THIS_DIR, 'linux64', 'rc') + elif sys.platform == 'darwin': + rc = os.path.join(THIS_DIR, 'mac', 'rc') + elif sys.platform == 'win32': + rc = os.path.join(THIS_DIR, 'win', 'rc.exe') + else: + print('rc.py: error: unsupported platform', sys.platform, file=sys.stderr) + sys.exit(1) + rc_cmd = [rc] + # Make sure rc-relative resources can be found: + if os.path.dirname(flags.input): + rc_cmd.append('/cd' + os.path.dirname(flags.input)) + rc_cmd.append('/fo' + flags.output) + if is_utf8: + rc_cmd.append('/utf-8') + # TODO(thakis): rc currently always prints full paths for /showIncludes, + # but clang-cl /P doesn't. Which one is right? + if flags.show_includes: + rc_cmd.append('/showIncludes') + # Microsoft rc.exe searches for referenced files relative to -I flags in + # addition to the pwd, so -I flags need to be passed both to both + # the preprocessor and rc. + rc_cmd += flags.includes + p = subprocess.Popen(rc_cmd, stdin=subprocess.PIPE) + p.communicate(input=preprocessed_output) + return p.returncode + + +def main(): + # This driver has to do these things: + # 1. Parse flags. + # 2. Convert the input from UTF-16LE to UTF-8 if needed. + # 3. Pass the input through a preprocessor (and clean up the preprocessor's + # output in minor ways). + # 4. Call rc for the heavy lifting. 
+ flags = ParseFlags() + rc_file_data, is_utf8 = ReadInput(flags.input) + preprocessed_output = Preprocess(rc_file_data, flags) + return RunRc(preprocessed_output, is_utf8, flags) + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/deps/v8/build/toolchain/win/rc/upload_rc_binaries.sh b/deps/v8/build/toolchain/win/rc/upload_rc_binaries.sh new file mode 100755 index 0000000000..ec4df4cbce --- /dev/null +++ b/deps/v8/build/toolchain/win/rc/upload_rc_binaries.sh @@ -0,0 +1,46 @@ +#!/bin/bash +# Copyright 2017 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +set -eu + +# Builds new rc binaries at head and uploads them to google storage. +# The new .sha1 files will be in the tree after this has run. + +if [[ "$OSTYPE" != "darwin"* ]]; then + echo "this script must run on a mac" + exit 1 +fi + +DIR="$(cd "$(dirname "${0}" )" && pwd)" +SRC_DIR="$DIR/../../../.." + +# Make sure Linux and Windows sysroots are installed, for distrib.py. +$SRC_DIR/build/linux/sysroot_scripts/install-sysroot.py --arch amd64 +$SRC_DIR/build/vs_toolchain.py update --force + +# Make a temporary directory. +WORK_DIR=$(mktemp -d) +if [[ ! "$WORK_DIR" || ! -d "$WORK_DIR" ]]; then + echo "could not create temp dir" + exit 1 +fi +function cleanup { + rm -rf "$WORK_DIR" +} +trap cleanup EXIT + +# Check out rc and build it in the temporary directory. Copy binaries over. +pushd "$WORK_DIR" > /dev/null +git clone -q https://github.com/nico/hack +cd hack/res +./distrib.py "$SRC_DIR" +popd > /dev/null +cp "$WORK_DIR/hack/res/rc-linux64" "$DIR/linux64/rc" +cp "$WORK_DIR/hack/res/rc-mac" "$DIR/mac/rc" +cp "$WORK_DIR/hack/res/rc-win.exe" "$DIR/win/rc.exe" + +# Upload binaries to cloud storage. 
+upload_to_google_storage.py -b chromium-browser-clang/rc "$DIR/linux64/rc" +upload_to_google_storage.py -b chromium-browser-clang/rc "$DIR/mac/rc" +upload_to_google_storage.py -b chromium-browser-clang/rc "$DIR/win/rc.exe" diff --git a/deps/v8/build/toolchain/win/rc/win/rc.exe.sha1 b/deps/v8/build/toolchain/win/rc/win/rc.exe.sha1 new file mode 100644 index 0000000000..3fdbfc0c20 --- /dev/null +++ b/deps/v8/build/toolchain/win/rc/win/rc.exe.sha1 @@ -0,0 +1 @@ +ba51d69039ffb88310b72b6568efa9f0de148f8f
\ No newline at end of file diff --git a/deps/v8/build/toolchain/win/setup_toolchain.py b/deps/v8/build/toolchain/win/setup_toolchain.py new file mode 100644 index 0000000000..ef8aeda564 --- /dev/null +++ b/deps/v8/build/toolchain/win/setup_toolchain.py @@ -0,0 +1,291 @@ +# Copyright (c) 2013 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +# +# Copies the given "win tool" (which the toolchain uses to wrap compiler +# invocations) and the environment blocks for the 32-bit and 64-bit builds on +# Windows to the build directory. +# +# The arguments are the visual studio install location and the location of the +# win tool. The script assumes that the root build directory is the current dir +# and the files will be written to the current directory. + +from __future__ import print_function + +import errno +import json +import os +import re +import subprocess +import sys + +sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)) +import gn_helpers + +SCRIPT_DIR = os.path.dirname(__file__) + +def _ExtractImportantEnvironment(output_of_set): + """Extracts environment variables required for the toolchain to run from + a textual dump output by the cmd.exe 'set' command.""" + envvars_to_save = ( + 'goma_.*', # TODO(scottmg): This is ugly, but needed for goma. + 'include', + 'lib', + 'libpath', + 'path', + 'pathext', + 'systemroot', + 'temp', + 'tmp', + ) + env = {} + # This occasionally happens and leads to misleading SYSTEMROOT error messages + # if not caught here. + if output_of_set.count('=') == 0: + raise Exception('Invalid output_of_set. Value is:\n%s' % output_of_set) + for line in output_of_set.splitlines(): + for envvar in envvars_to_save: + if re.match(envvar + '=', line.lower()): + var, setting = line.split('=', 1) + if envvar == 'path': + # Our own rules and actions in Chromium rely on python being in the + # path. 
Add the path to this python here so that if it's not in the + # path when ninja is run later, python will still be found. + setting = os.path.dirname(sys.executable) + os.pathsep + setting + env[var.upper()] = setting + break + if sys.platform in ('win32', 'cygwin'): + for required in ('SYSTEMROOT', 'TEMP', 'TMP'): + if required not in env: + raise Exception('Environment variable "%s" ' + 'required to be set to valid path' % required) + return env + + +def _DetectVisualStudioPath(): + """Return path to the GYP_MSVS_VERSION of Visual Studio. + """ + + # Use the code in build/vs_toolchain.py to avoid duplicating code. + chromium_dir = os.path.abspath(os.path.join(SCRIPT_DIR, '..', '..', '..')) + sys.path.append(os.path.join(chromium_dir, 'build')) + import vs_toolchain + return vs_toolchain.DetectVisualStudioPath() + + +def _LoadEnvFromBat(args): + """Given a bat command, runs it and returns env vars set by it.""" + args = args[:] + args.extend(('&&', 'set')) + popen = subprocess.Popen( + args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + variables, _ = popen.communicate() + if popen.returncode != 0: + raise Exception('"%s" failed with error %d' % (args, popen.returncode)) + return variables.decode(errors='ignore') + + +def _LoadToolchainEnv(cpu, sdk_dir, target_store): + """Returns a dictionary with environment variables that must be set while + running binaries from the toolchain (e.g. INCLUDE and PATH for cl.exe).""" + # Check if we are running in the SDK command line environment and use + # the setup script from the SDK if so. |cpu| should be either + # 'x86' or 'x64' or 'arm' or 'arm64'. + assert cpu in ('x86', 'x64', 'arm', 'arm64') + if bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', 1))) and sdk_dir: + # Load environment from json file. 
+ env = os.path.normpath(os.path.join(sdk_dir, 'bin/SetEnv.%s.json' % cpu)) + env = json.load(open(env))['env'] + for k in env: + entries = [os.path.join(*([os.path.join(sdk_dir, 'bin')] + e)) + for e in env[k]] + # clang-cl wants INCLUDE to be ;-separated even on non-Windows, + # lld-link wants LIB to be ;-separated even on non-Windows. Path gets :. + # The separator for INCLUDE here must match the one used in main() below. + sep = os.pathsep if k == 'PATH' else ';' + env[k] = sep.join(entries) + # PATH is a bit of a special case, it's in addition to the current PATH. + env['PATH'] = env['PATH'] + os.pathsep + os.environ['PATH'] + # Augment with the current env to pick up TEMP and friends. + for k in os.environ: + if k not in env: + env[k] = os.environ[k] + + varlines = [] + for k in sorted(env.keys()): + varlines.append('%s=%s' % (str(k), str(env[k]))) + variables = '\n'.join(varlines) + + # Check that the json file contained the same environment as the .cmd file. + if sys.platform in ('win32', 'cygwin'): + script = os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.cmd')) + arg = '/' + cpu + json_env = _ExtractImportantEnvironment(variables) + cmd_env = _ExtractImportantEnvironment(_LoadEnvFromBat([script, arg])) + assert _LowercaseDict(json_env) == _LowercaseDict(cmd_env) + else: + if 'GYP_MSVS_OVERRIDE_PATH' not in os.environ: + os.environ['GYP_MSVS_OVERRIDE_PATH'] = _DetectVisualStudioPath() + # We only support x64-hosted tools. + script_path = os.path.normpath(os.path.join( + os.environ['GYP_MSVS_OVERRIDE_PATH'], + 'VC/vcvarsall.bat')) + if not os.path.exists(script_path): + # vcvarsall.bat for VS 2017 fails if run after running vcvarsall.bat from + # VS 2013 or VS 2015. Fix this by clearing the vsinstalldir environment + # variable. 
+ if 'VSINSTALLDIR' in os.environ: + del os.environ['VSINSTALLDIR'] + other_path = os.path.normpath(os.path.join( + os.environ['GYP_MSVS_OVERRIDE_PATH'], + 'VC/Auxiliary/Build/vcvarsall.bat')) + if not os.path.exists(other_path): + raise Exception('%s is missing - make sure VC++ tools are installed.' % + script_path) + script_path = other_path + cpu_arg = "amd64" + if (cpu != 'x64'): + # x64 is default target CPU thus any other CPU requires a target set + cpu_arg += '_' + cpu + args = [script_path, cpu_arg] + # Store target must come before any SDK version declaration + if (target_store): + args.append(['store']) + variables = _LoadEnvFromBat(args) + return _ExtractImportantEnvironment(variables) + + +def _FormatAsEnvironmentBlock(envvar_dict): + """Format as an 'environment block' directly suitable for CreateProcess. + Briefly this is a list of key=value\0, terminated by an additional \0. See + CreateProcess documentation for more details.""" + block = '' + nul = '\0' + for key, value in envvar_dict.items(): + block += key + '=' + value + nul + block += nul + return block + + +def _LowercaseDict(d): + """Returns a copy of `d` with both key and values lowercased. + + Args: + d: dict to lowercase (e.g. {'A': 'BcD'}). + + Returns: + A dict with both keys and values lowercased (e.g.: {'a': 'bcd'}). 
+ """ + return {k.lower(): d[k].lower() for k in d} + + +def main(): + if len(sys.argv) != 7: + print('Usage setup_toolchain.py ' + '<visual studio path> <win sdk path> ' + '<runtime dirs> <target_os> <target_cpu> ' + '<environment block name|none>') + sys.exit(2) + win_sdk_path = sys.argv[2] + runtime_dirs = sys.argv[3] + target_os = sys.argv[4] + target_cpu = sys.argv[5] + environment_block_name = sys.argv[6] + if (environment_block_name == 'none'): + environment_block_name = '' + + if (target_os == 'winuwp'): + target_store = True + else: + target_store = False + + cpus = ('x86', 'x64', 'arm', 'arm64') + assert target_cpu in cpus + vc_bin_dir = '' + vc_lib_path = '' + vc_lib_atlmfc_path = '' + vc_lib_um_path = '' + include = '' + lib = '' + + # TODO(scottmg|goma): Do we need an equivalent of + # ninja_use_custom_environment_files? + + for cpu in cpus: + if cpu == target_cpu: + # Extract environment variables for subprocesses. + env = _LoadToolchainEnv(cpu, win_sdk_path, target_store) + env['PATH'] = runtime_dirs + os.pathsep + env['PATH'] + + for path in env['PATH'].split(os.pathsep): + if os.path.exists(os.path.join(path, 'cl.exe')): + vc_bin_dir = os.path.realpath(path) + break + + for path in env['LIB'].split(';'): + if os.path.exists(os.path.join(path, 'msvcrt.lib')): + vc_lib_path = os.path.realpath(path) + break + + for path in env['LIB'].split(';'): + if os.path.exists(os.path.join(path, 'atls.lib')): + vc_lib_atlmfc_path = os.path.realpath(path) + break + + for path in env['LIB'].split(';'): + if os.path.exists(os.path.join(path, 'User32.Lib')): + vc_lib_um_path = os.path.realpath(path) + break + + # The separator for INCLUDE here must match the one used in + # _LoadToolchainEnv() above. + include = [p.replace('"', r'\"') for p in env['INCLUDE'].split(';') if p] + + # Make include path relative to builddir when cwd and sdk in same drive. 
+ try: + include = list(map(os.path.relpath, include)) + except ValueError: + pass + + lib = [p.replace('"', r'\"') for p in env['LIB'].split(';') if p] + # Make lib path relative to builddir when cwd and sdk in same drive. + try: + lib = map(os.path.relpath, lib) + except ValueError: + pass + + def q(s): # Quote s if it contains spaces or other weird characters. + return s if re.match(r'^[a-zA-Z0-9._/\\:-]*$', s) else '"' + s + '"' + include_I = ' '.join([q('/I' + i) for i in include]) + include_imsvc = ' '.join([q('-imsvc' + i) for i in include]) + libpath_flags = ' '.join([q('-libpath:' + i) for i in lib]) + + if (environment_block_name != ''): + env_block = _FormatAsEnvironmentBlock(env) + with open(environment_block_name, 'w') as f: + f.write(env_block) + + assert vc_bin_dir + print('vc_bin_dir = ' + gn_helpers.ToGNString(vc_bin_dir)) + assert include_I + print('include_flags_I = ' + gn_helpers.ToGNString(include_I)) + assert include_imsvc + print('include_flags_imsvc = ' + gn_helpers.ToGNString(include_imsvc)) + assert vc_lib_path + print('vc_lib_path = ' + gn_helpers.ToGNString(vc_lib_path)) + if (target_store != True): + # Path is assumed not to exist for desktop applications + assert vc_lib_atlmfc_path + # Possible atlmfc library path gets introduced in the future for store thus + # output result if a result exists. 
+ if (vc_lib_atlmfc_path != ''): + print('vc_lib_atlmfc_path = ' + gn_helpers.ToGNString(vc_lib_atlmfc_path)) + assert vc_lib_um_path + print('vc_lib_um_path = ' + gn_helpers.ToGNString(vc_lib_um_path)) + print('paths = ' + gn_helpers.ToGNString(env['PATH'])) + assert libpath_flags + print('libpath_flags = ' + gn_helpers.ToGNString(libpath_flags)) + + +if __name__ == '__main__': + main() diff --git a/deps/v8/build/toolchain/win/tool_wrapper.py b/deps/v8/build/toolchain/win/tool_wrapper.py new file mode 100644 index 0000000000..926086670d --- /dev/null +++ b/deps/v8/build/toolchain/win/tool_wrapper.py @@ -0,0 +1,245 @@ +# Copyright (c) 2012 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Utility functions for Windows builds. + +This file is copied to the build directory as part of toolchain setup and +is used to set up calls to tools used by the build that need wrappers. +""" + +from __future__ import print_function + +import os +import re +import shutil +import subprocess +import stat +import string +import sys + +# tool_wrapper.py doesn't get invoked through python.bat so the Python bin +# directory doesn't get added to the path. The Python module search logic +# handles this fine and finds win32file.pyd. However the Windows module +# search logic then looks for pywintypes27.dll and other DLLs in the path and +# if it finds versions with a different bitness first then win32file.pyd will +# fail to load with a cryptic error: +# ImportError: DLL load failed: %1 is not a valid Win32 application. +if sys.platform == 'win32': + os.environ['PATH'] = os.path.dirname(sys.executable) + \ + os.pathsep + os.environ['PATH'] + import win32file # pylint: disable=import-error + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) + +# A regex matching an argument corresponding to the output filename passed to +# link.exe. 
# A regex matching the /OUT:<path> argument passed to link.exe. Used both to
# derive a unique mspdbsrv endpoint name and to locate the produced binary.
_LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE)

def main(args):
  """Dispatches |args| to a WinTool method and exits with its return code."""
  exit_code = WinTool().Dispatch(args)
  if exit_code is not None:
    sys.exit(exit_code)


class WinTool(object):
  """This class performs all the Windows tooling steps. The methods can either
  be executed directly, or dispatched from an argument list."""

  def _UseSeparateMspdbsrv(self, env, args):
    """Allows to use a unique instance of mspdbsrv.exe per linker instead of a
    shared one.

    Args:
      env: Environment dict, mutated in place: _MSPDBSRV_ENDPOINT_ is added
          when an endpoint name can be derived from |args|.
      args: The linker command line; must start with 'link.exe' for anything
          to happen.
    """
    if len(args) < 1:
      raise Exception("Not enough arguments")

    if args[0] != 'link.exe':
      return

    # Use the output filename passed to the linker to generate an endpoint name
    # for mspdbsrv.exe.
    endpoint_name = None
    for arg in args:
      m = _LINK_EXE_OUT_ARG.match(arg)
      if m:
        endpoint_name = re.sub(r'\W+', '',
                               '%s_%d' % (m.group('out'), os.getpid()))
        break

    if endpoint_name is None:
      return

    # Adds the appropriate environment variable. This will be read by link.exe
    # to know which instance of mspdbsrv.exe it should connect to (if it's
    # not set then the default endpoint is used).
    env['_MSPDBSRV_ENDPOINT_'] = endpoint_name

  def Dispatch(self, args):
    """Dispatches a string command to a method.

    E.g. ['recursive-mirror', src, dst] calls ExecRecursiveMirror(src, dst).
    Returns whatever the Exec* method returns (an exit code or None).
    """
    if len(args) < 1:
      raise Exception("Not enough arguments")

    method = "Exec%s" % self._CommandifyName(args[0])
    return getattr(self, method)(*args[1:])

  def _CommandifyName(self, name_string):
    """Transforms a tool name like recursive-mirror to RecursiveMirror."""
    return name_string.title().replace('-', '')

  def _GetEnv(self, arch):
    """Gets the saved environment from a file for a given architecture.

    The environment is saved as an "environment block" (see CreateProcess
    and msvs_emulation for details): NUL-separated KEY=VALUE pairs with a
    trailing NUL list terminator. Returns it converted to a dict.
    """
    # Drop last 2 NULs, one for list terminator, one for trailing vs.
    # separator. Use a context manager so the handle is closed promptly
    # (the original left the file object to be reclaimed by GC).
    with open(arch) as f:
      pairs = f.read()[:-2].split('\0')
    kvs = [item.split('=', 1) for item in pairs]
    return dict(kvs)

  def ExecDeleteFile(self, path):
    """Simple file delete command."""
    if os.path.exists(path):
      os.unlink(path)

  def ExecRecursiveMirror(self, source, dest):
    """Emulation of rm -rf out && cp -af in out."""
    if os.path.exists(dest):
      if os.path.isdir(dest):
        def _on_error(fn, path, dummy_excinfo):
          # The operation failed, possibly because the file is set to
          # read-only. If that's why, make it writable and try the op again.
          if not os.access(path, os.W_OK):
            os.chmod(path, stat.S_IWRITE)
          fn(path)
        shutil.rmtree(dest, onerror=_on_error)
      else:
        if not os.access(dest, os.W_OK):
          # Attempt to make the file writable before deleting it.
          os.chmod(dest, stat.S_IWRITE)
        os.unlink(dest)

    if os.path.isdir(source):
      shutil.copytree(source, dest)
    else:
      shutil.copy2(source, dest)
      # Try to diagnose crbug.com/741603
      if not os.path.exists(dest):
        raise Exception("Copying of %s to %s failed" % (source, dest))

  def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args):
    """Filter diagnostic output from link that looks like:
    '   Creating library ui.dll.lib and object ui.dll.exp'
    This happens when there are exports from the dll or exe.

    Returns the linker's exit code.
    """
    env = self._GetEnv(arch)
    if use_separate_mspdbsrv == 'True':
      self._UseSeparateMspdbsrv(env, args)
    if sys.platform == 'win32':
      args = list(args)  # *args is a tuple by default, which is read-only.
      args[0] = args[0].replace('/', '\\')
    # https://docs.python.org/2/library/subprocess.html:
    # "On Unix with shell=True [...] if args is a sequence, the first item
    # specifies the command string, and any additional items will be treated as
    # additional arguments to the shell itself.  That is to say, Popen does the
    # equivalent of:
    #   Popen(['/bin/sh', '-c', args[0], args[1], ...])"
    # For that reason, since going through the shell doesn't seem necessary on
    # non-Windows don't do that there.
    pe_name = None
    for arg in args:
      m = _LINK_EXE_OUT_ARG.match(arg)
      if m:
        pe_name = m.group('out')
        break  # Mirrors _UseSeparateMspdbsrv: the first /OUT: wins.
    link = subprocess.Popen(args, shell=sys.platform == 'win32', env=env,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # Read output one line at a time as it shows up to avoid OOM failures when
    # GBs of output is produced.
    for line in link.stdout:
      # Pipe output is bytes under Python 3; decode before text comparisons
      # (the str-prefix startswith() below would otherwise raise TypeError).
      if isinstance(line, bytes):
        line = line.decode('utf-8', 'replace')
      if (not line.startswith('   Creating library ') and
          not line.startswith('Generating code') and
          not line.startswith('Finished generating code')):
        # The iterated line keeps its trailing newline; strip it so print()
        # does not double-space the output.
        print(line.rstrip('\r\n'))
    result = link.wait()
    # Only touch the output binary if we actually found a /OUT: argument;
    # CreateFile(None, ...) would raise otherwise.
    if result == 0 and sys.platform == 'win32' and pe_name:
      # Flush the file buffers to try to work around a Windows 10 kernel bug,
      # https://crbug.com/644525
      output_handle = win32file.CreateFile(pe_name, win32file.GENERIC_WRITE,
                                           0, None, win32file.OPEN_EXISTING,
                                           0, 0)
      win32file.FlushFileBuffers(output_handle)
      output_handle.Close()
    return result

  def ExecAsmWrapper(self, arch, *args):
    """Filter logo banner from invocations of asm.exe.

    Returns the assembler's exit code.
    """
    env = self._GetEnv(arch)
    if sys.platform == 'win32':
      # Windows ARM64 uses clang-cl as assembler which has '/' as path
      # separator, convert it to '\\' when running on Windows.
      args = list(args)  # *args is a tuple by default, which is read-only
      args[0] = args[0].replace('/', '\\')
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    # communicate() returns bytes under Python 3; decode before comparing
    # against the str banner prefix.
    if isinstance(out, bytes):
      out = out.decode('utf-8', 'replace')
    for line in out.splitlines():
      if not line.startswith(' Assembling: '):
        print(line)
    return popen.returncode

  def ExecRcWrapper(self, arch, *args):
    """Converts .rc files to .res files."""
    env = self._GetEnv(arch)

    # We run two resource compilers:
    # 1. A custom one at build/toolchain/win/rc/rc.py which can run on
    #    non-Windows, and which has /showIncludes support so we can track
    #    dependencies (e.g. on .ico files) of .rc files.
    # 2. On Windows, regular Microsoft rc.exe, to make sure rc.py produces
    #    bitwise identical output.

    # 1. Run our rc.py.
    # Also pass /showIncludes to track dependencies of .rc files.
    args = list(args)
    rcpy_args = args[:]
    rcpy_args[0:1] = [sys.executable, os.path.join(BASE_DIR, 'rc', 'rc.py')]
    rcpy_res_output = rcpy_args[-2]
    assert rcpy_res_output.startswith('/fo')
    assert rcpy_res_output.endswith('.res')
    rc_res_output = rcpy_res_output + '_ms_rc'
    args[-2] = rc_res_output
    rcpy_args.append('/showIncludes')
    rc_exe_exit_code = subprocess.call(rcpy_args, env=env)
    if rc_exe_exit_code == 0:
      # Since tool("rc") can't have deps, add deps on this script and on rc.py
      # and its deps here, so that rc edges become dirty if rc.py changes.
      # These strings are parsed by ninja's /showIncludes handling: keep them
      # byte-for-byte stable.
      print('Note: including file: ../../build/toolchain/win/tool_wrapper.py')
      print('Note: including file: ../../build/toolchain/win/rc/rc.py')
      print(
          'Note: including file: ../../build/toolchain/win/rc/linux64/rc.sha1')
      print('Note: including file: ../../build/toolchain/win/rc/mac/rc.sha1')
      print(
          'Note: including file: ../../build/toolchain/win/rc/win/rc.exe.sha1')

    # 2. Run Microsoft rc.exe.
    if sys.platform == 'win32' and rc_exe_exit_code == 0:
      rc_exe_exit_code = subprocess.call(args, shell=True, env=env)
      # Assert Microsoft rc.exe and rc.py produced identical .res files.
      if rc_exe_exit_code == 0:
        import filecmp
        # Strip "/fo" prefix.
        assert filecmp.cmp(rc_res_output[3:], rcpy_res_output[3:])
    return rc_exe_exit_code

  def ExecActionWrapper(self, arch, rspfile, *dirname):
    """Runs an action command line from a response file using the environment
    for |arch|. If |dirname| is supplied, use that as the working directory.

    Returns the action's exit code.
    """
    env = self._GetEnv(arch)
    # TODO(scottmg): This is a temporary hack to get some specific variables
    # through to actions that are set after GN-time. http://crbug.com/333738.
    for k, v in os.environ.items():
      if k not in env:
        env[k] = v
    # Close the response file promptly (the original leaked the handle).
    with open(rspfile) as f:
      args = f.read()
    dirname = dirname[0] if dirname else None
    return subprocess.call(args, shell=True, env=env, cwd=dirname)


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))


# ---------------------------------------------------------------------------
# wrapper_utils.py: helper functions for gcc_toolchain.gni wrappers.
# ---------------------------------------------------------------------------

import gzip
import shlex
import threading

# Wrapper scripts on Windows are .bat files invoked through cmd.
_BAT_PREFIX = 'cmd /c call '


def _GzipThenDelete(src_path, dest_path):
  """Gzips |src_path| into |dest_path| and deletes the source.

  Uses compresslevel=1: measurements on an Android map file (GCC, z620):
    Uncompressed: 207MB
    gzip -9: 16.4MB, takes 8.7 seconds.
    gzip -1: 21.8MB, takes 2.0 seconds.
  Piping directly from the linker via -print-map (or via -Map with a fifo)
  adds a whopping 30-45 seconds!
  """
  with open(src_path, 'rb') as f_in, gzip.GzipFile(dest_path, 'wb', 1) as f_out:
    shutil.copyfileobj(f_in, f_out)
  os.unlink(src_path)


def CommandToRun(command):
  """Generates commands compatible with Windows.

  When running on a Windows host and using a toolchain whose tools are
  actually wrapper scripts (i.e. .bat files on Windows) rather than binary
  executables, the |command| to run has to be prefixed with this magic.
  The GN toolchain definitions take care of that for when GN/Ninja is
  running the tool directly.  When that command is passed in to this
  script, it appears as a unitary string but needs to be split up so that
  just 'cmd' is the actual command given to Python's subprocess module.

  Args:
    command: List containing the UNIX style |command|.

  Returns:
    A list containing the Windows version of the |command|.
  """
  if command[0].startswith(_BAT_PREFIX):
    # Split 'cmd /c call foo.bat' into its four words, keep remaining args.
    command = command[0].split(None, 3) + command[1:]
  return command


def RunLinkWithOptionalMapFile(command, env=None, map_file=None):
  """Runs the given command, adding in -Wl,-Map when |map_file| is given.

  Also takes care of gzipping when |map_file| ends with .gz: the linker
  writes an uncompressed temporary which is compressed on a background
  thread after a successful link.

  Args:
    command: List of arguments comprising the command.
    env: Environment variables.
    map_file: Path to output map_file.

  Returns:
    The exit code of running |command|.
  """
  tmp_map_path = None
  if map_file and map_file.endswith('.gz'):
    tmp_map_path = map_file + '.tmp'
    command.append('-Wl,-Map,' + tmp_map_path)
  elif map_file:
    command.append('-Wl,-Map,' + map_file)

  result = subprocess.call(command, env=env)

  if tmp_map_path and result == 0:
    # Compress in the background so the link step can return immediately.
    threading.Thread(
        target=lambda: _GzipThenDelete(tmp_map_path, map_file)).start()
  elif tmp_map_path and os.path.exists(tmp_map_path):
    # Failed link: don't leave the temporary map file behind.
    os.unlink(tmp_map_path)

  return result


def CaptureCommandStderr(command, env=None):
  """Returns the exit code and stderr of a command.

  Args:
    command: A list containing the command and arguments.
    env: Environment variables for the new process.

  Returns:
    A (returncode, stderr) tuple; stderr is bytes under Python 3.
  """
  child = subprocess.Popen(command, stderr=subprocess.PIPE, env=env)
  _, stderr = child.communicate()
  return child.returncode, stderr