summaryrefslogtreecommitdiff
path: root/deps/v8/build/fuchsia
diff options
context:
space:
mode:
authorFlorian Dold <florian.dold@gmail.com>2019-08-07 22:45:47 +0200
committerFlorian Dold <florian.dold@gmail.com>2019-08-07 22:45:47 +0200
commit65e39b7046a29aa299f06285441b62bcf1e4df01 (patch)
tree2eb012aabb59533b954aa169199733292de336cf /deps/v8/build/fuchsia
parent936cd90b7def6ef7c1e0b80265a9dc77a9ad23c6 (diff)
downloadandroid-node-v8-65e39b7046a29aa299f06285441b62bcf1e4df01.tar.gz
android-node-v8-65e39b7046a29aa299f06285441b62bcf1e4df01.tar.bz2
android-node-v8-65e39b7046a29aa299f06285441b62bcf1e4df01.zip
Move v8/build into this repository.
Since we need to patch some files, we don't let depot_tools manage these files anymore. build.git commit a0b2e3b2708bcf81ec00ac1738b586bcc5e04eea
Diffstat (limited to 'deps/v8/build/fuchsia')
-rw-r--r--deps/v8/build/fuchsia/OWNERS9
-rw-r--r--deps/v8/build/fuchsia/__init__.py0
-rw-r--r--deps/v8/build/fuchsia/boot_data.py118
-rw-r--r--deps/v8/build/fuchsia/common.py86
-rw-r--r--deps/v8/build/fuchsia/common_args.py123
-rw-r--r--deps/v8/build/fuchsia/device_target.py282
-rwxr-xr-xdeps/v8/build/fuchsia/exe_runner.py36
-rw-r--r--deps/v8/build/fuchsia/fidlgen_js/BUILD.gn63
-rw-r--r--deps/v8/build/fuchsia/fidlgen_js/DEPS4
-rw-r--r--deps/v8/build/fuchsia/fidlgen_js/fidl.py549
-rwxr-xr-xdeps/v8/build/fuchsia/fidlgen_js/gen.py673
-rw-r--r--deps/v8/build/fuchsia/fidlgen_js/runtime/fidl.mjs270
-rw-r--r--deps/v8/build/fuchsia/fidlgen_js/runtime/zircon.cc438
-rw-r--r--deps/v8/build/fuchsia/fidlgen_js/runtime/zircon.h58
-rw-r--r--deps/v8/build/fuchsia/fidlgen_js/test/fidlgen_js_unittest.cc1334
-rw-r--r--deps/v8/build/fuchsia/fidlgen_js/test/simple.fidl142
-rw-r--r--deps/v8/build/fuchsia/fidlgen_js/third_party/__init__.py0
-rw-r--r--deps/v8/build/fuchsia/fidlgen_js/third_party/enum34/LICENSE32
-rw-r--r--deps/v8/build/fuchsia/fidlgen_js/third_party/enum34/README.chromium15
-rw-r--r--deps/v8/build/fuchsia/fidlgen_js/third_party/enum34/__init__.py837
-rw-r--r--deps/v8/build/fuchsia/layout_test_proxy/BUILD.gn34
-rw-r--r--deps/v8/build/fuchsia/layout_test_proxy/DEPS3
-rw-r--r--deps/v8/build/fuchsia/layout_test_proxy/layout_test_proxy.cc78
-rw-r--r--deps/v8/build/fuchsia/linux.sdk.sha11
-rw-r--r--deps/v8/build/fuchsia/mac.sdk.sha11
-rw-r--r--deps/v8/build/fuchsia/net_test_server.py89
-rw-r--r--deps/v8/build/fuchsia/qemu_target.py178
-rwxr-xr-xdeps/v8/build/fuchsia/qemu_target_test.py58
-rw-r--r--deps/v8/build/fuchsia/remote_cmd.py134
-rw-r--r--deps/v8/build/fuchsia/run_package.py224
-rw-r--r--deps/v8/build/fuchsia/symbolizer.py43
-rw-r--r--deps/v8/build/fuchsia/target.py346
-rwxr-xr-xdeps/v8/build/fuchsia/test_runner.py131
-rwxr-xr-xdeps/v8/build/fuchsia/update_sdk.py168
34 files changed, 6557 insertions, 0 deletions
diff --git a/deps/v8/build/fuchsia/OWNERS b/deps/v8/build/fuchsia/OWNERS
new file mode 100644
index 0000000000..22e1b69b8f
--- /dev/null
+++ b/deps/v8/build/fuchsia/OWNERS
@@ -0,0 +1,9 @@
+jamesr@chromium.org
+kmarshall@chromium.org
+scottmg@chromium.org
+sergeyu@chromium.org
+thakis@chromium.org
+wez@chromium.org
+
+# TEAM: cr-fuchsia@chromium.org
+# COMPONENT: Internals>PlatformIntegration
diff --git a/deps/v8/build/fuchsia/__init__.py b/deps/v8/build/fuchsia/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/build/fuchsia/__init__.py
diff --git a/deps/v8/build/fuchsia/boot_data.py b/deps/v8/build/fuchsia/boot_data.py
new file mode 100644
index 0000000000..7ff1efcbe6
--- /dev/null
+++ b/deps/v8/build/fuchsia/boot_data.py
@@ -0,0 +1,118 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Functions used to provision Fuchsia boot images."""
+
+import common
+import logging
+import os
+import subprocess
+import tempfile
+import time
+import uuid
+
+_SSH_CONFIG_TEMPLATE = """
+Host *
+ CheckHostIP no
+ StrictHostKeyChecking no
+ ForwardAgent no
+ ForwardX11 no
+ UserKnownHostsFile {known_hosts}
+ User fuchsia
+ IdentitiesOnly yes
+ IdentityFile {identity}
+ ServerAliveInterval 2
+ ServerAliveCountMax 5
+ ControlMaster auto
+ ControlPersist 1m
+ ControlPath /tmp/ssh-%r@%h:%p
+ ConnectTimeout 5
+ """
+
+FVM_TYPE_QCOW = 'qcow'
+FVM_TYPE_SPARSE = 'sparse'
+
+
def _TargetCpuToSdkBinPath(target_arch):
  """Maps a target architecture name to its SDK 'target' directory.

  Returns the absolute path <SDK_ROOT>/target/<target_arch>."""

  sdk_target_root = os.path.join(common.SDK_ROOT, 'target')
  return os.path.join(sdk_target_root, target_arch)
+
+
+def _GetPubKeyPath(output_dir):
+ """Returns a path to the generated SSH public key."""
+
+ return os.path.join(output_dir, 'id_ed25519.pub')
+
+
def ProvisionSSH(output_dir):
  """Generates a keypair and config file for SSH.

  Writes an ed25519 host key, an ed25519 identity keypair and an ssh_config
  file into |output_dir|.  Existing keys are reused.  Any stale known_hosts
  file is removed so reprovisioned devices don't trigger host-key mismatch
  errors.

  output_dir: Directory into which the SSH credentials are written."""

  host_key_path = os.path.join(output_dir, 'ssh_key')
  id_key_path = os.path.join(output_dir, 'id_ed25519')
  known_hosts_path = os.path.join(output_dir, 'known_hosts')
  ssh_config_path = os.path.join(output_dir, 'ssh_config')

  logging.debug('Generating SSH credentials.')
  if not os.path.isfile(host_key_path):
    # Discard ssh-keygen's output.  The devnull handle must be opened for
    # writing: the original read-mode open() would make the child's writes
    # to the descriptor fail, and the handle was never closed.
    with open(os.devnull, 'w') as devnull:
      subprocess.check_call(['ssh-keygen', '-t', 'ed25519', '-h', '-f',
                             host_key_path, '-P', '', '-N', ''],
                            stdout=devnull)
  if not os.path.isfile(id_key_path):
    with open(os.devnull, 'w') as devnull:
      subprocess.check_call(['ssh-keygen', '-t', 'ed25519', '-f', id_key_path,
                             '-P', '', '-N', ''], stdout=devnull)

  with open(ssh_config_path, "w") as ssh_config:
    ssh_config.write(
        _SSH_CONFIG_TEMPLATE.format(identity=id_key_path,
                                    known_hosts=known_hosts_path))

  # Drop any previously recorded host keys; a repaved device will present a
  # new host key and would otherwise fail StrictHostKeyChecking.
  if os.path.exists(known_hosts_path):
    os.remove(known_hosts_path)
+
+
def _MakeQcowDisk(output_dir, disk_path):
  """Wraps |disk_path| in a QEMU copy-on-write (qcow2) overlay image.

  The overlay is written to |output_dir| and backed by the original disk, so
  the original file is never modified.  Returns the overlay's path."""

  qemu_img = os.path.join(common.GetQemuRootForPlatform(), 'bin', 'qemu-img')
  overlay_path = os.path.join(
      output_dir, '%s.qcow2' % os.path.basename(disk_path))
  subprocess.check_call([qemu_img, 'create', '-q', '-f', 'qcow2',
                         '-b', disk_path, overlay_path])
  return overlay_path
+
+
def GetTargetFile(target_arch, filename):
  """Resolves |filename| inside the Fuchsia SDK 'target' directory for
  |target_arch|."""

  target_dir = _TargetCpuToSdkBinPath(target_arch)
  return os.path.join(target_dir, filename)
+
+
def GetSSHConfigPath(output_dir):
  """Returns the path of the ssh_config file written by ProvisionSSH().

  Uses os.path.join for consistency with the rest of this module instead of
  manual '/' string concatenation."""

  return os.path.join(output_dir, 'ssh_config')
+
+
def GetBootImage(output_dir, target_arch):
  """Returns the path to a Zircon boot image (ZBI) with the SSH client's
  public key embedded at data/ssh/authorized_keys.

  SSH credentials are provisioned into |output_dir| first if needed."""

  ProvisionSSH(output_dir)
  authorized_key = _GetPubKeyPath(output_dir)
  zbi_tool = os.path.join(common.SDK_ROOT, 'tools', 'zbi')
  source_image = GetTargetFile(target_arch, 'fuchsia.zbi')
  keyed_image = os.path.join(output_dir, 'gen', 'fuchsia-with-keys.zbi')

  subprocess.check_call([zbi_tool, '-o', keyed_image, source_image,
                         '-e', 'data/ssh/authorized_keys=' + authorized_key])

  return keyed_image
+
+
def GetKernelArgs(output_dir):
  """Returns extra kernel command-line arguments for booting Fuchsia.

  |output_dir| is accepted for interface parity with the other helpers in
  this module but is currently unused."""

  epoch_arg = 'devmgr.epoch=%d' % time.time()
  return [epoch_arg]
diff --git a/deps/v8/build/fuchsia/common.py b/deps/v8/build/fuchsia/common.py
new file mode 100644
index 0000000000..1993374b30
--- /dev/null
+++ b/deps/v8/build/fuchsia/common.py
@@ -0,0 +1,86 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import platform
+import socket
+import subprocess
+import sys
+
+DIR_SOURCE_ROOT = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
+SDK_ROOT = os.path.join(DIR_SOURCE_ROOT, 'third_party', 'fuchsia-sdk', 'sdk')
+
def EnsurePathExists(path):
  """Returns |path| after verifying that it exists on the filesystem.

  Raises IOError when the path is missing."""

  if os.path.exists(path):
    return path
  raise IOError('Missing file: ' + path)
+
def GetHostOsFromPlatform():
  """Returns 'linux' or 'mac' for the build host's operating system.

  Raises an Exception on any other platform."""

  plat = sys.platform
  for prefix, os_name in (('linux', 'linux'), ('darwin', 'mac')):
    if plat.startswith(prefix):
      return os_name
  raise Exception('Unsupported host platform: %s' % plat)
+
def GetHostArchFromPlatform():
  """Returns the Fuchsia SDK name ('x64' or 'arm64') of the host CPU.

  Raises an Exception for unsupported architectures."""

  machine = platform.machine()
  arch_map = {'x86_64': 'x64', 'aarch64': 'arm64'}
  if machine in arch_map:
    return arch_map[machine]
  raise Exception('Unsupported host architecture: %s' % machine)
+
def GetQemuRootForPlatform():
  """Returns the path of the prebuilt QEMU checkout matching the host OS
  and architecture."""

  qemu_dir = 'qemu-%s-%s' % (GetHostOsFromPlatform(),
                             GetHostArchFromPlatform())
  return os.path.join(DIR_SOURCE_ROOT, 'third_party', qemu_dir)
+
def ConnectPortForwardingTask(target, local_port, remote_port = 0):
  """Establishes a port forwarding SSH task to a localhost TCP endpoint hosted
  at port |local_port|. Blocks until port forwarding is established.

  target: Deployment target providing RunCommand/RunCommandPiped.
  local_port: The local TCP port to forward to.
  remote_port: The requested remote port; if 0, the remote end picks one.

  Returns the remote port number, or None when |remote_port| was explicitly
  specified (the caller already knows it)."""

  forwarding_flags = ['-O', 'forward',  # Send SSH mux control signal.
                      '-R', '%d:localhost:%d' % (remote_port, local_port),
                      '-v',   # Get forwarded port info from stderr.
                      '-NT']  # Don't execute command; don't allocate terminal.

  if remote_port != 0:
    # Forward to a known remote port.
    task = target.RunCommand([], ssh_args=forwarding_flags)
    if task.returncode != 0:
      raise Exception('Could not establish a port forwarding connection.')
    return

  # Discard stderr via os.devnull opened for *writing*: the original code
  # opened the hard-coded path '/dev/null' in read mode (child writes to a
  # read-only descriptor fail) and leaked the handle.
  with open(os.devnull, 'w') as devnull:
    task = target.RunCommandPiped([],
                                  ssh_args=forwarding_flags,
                                  stdout=subprocess.PIPE,
                                  stderr=devnull)
    output = task.stdout.readlines()
    task.wait()
  if task.returncode != 0:
    raise Exception('Got an error code when requesting port forwarding: %d' %
                    task.returncode)

  parsed_port = int(output[0].strip())
  logging.debug('Port forwarding established (local=%d, device=%d)' %
                (local_port, parsed_port))
  return parsed_port
+
+
def GetAvailableTcpPort():
  """Finds a (probably) open port by opening and closing a listen socket."""

  probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  try:
    probe.bind(("", 0))
    return probe.getsockname()[1]
  finally:
    probe.close()
diff --git a/deps/v8/build/fuchsia/common_args.py b/deps/v8/build/fuchsia/common_args.py
new file mode 100644
index 0000000000..8fda07e3df
--- /dev/null
+++ b/deps/v8/build/fuchsia/common_args.py
@@ -0,0 +1,123 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import sys
+
+from device_target import DeviceTarget
+from qemu_target import QemuTarget
+
+
def AddCommonArgs(arg_parser):
  """Adds command line arguments to |arg_parser| for options which are shared
  across test and executable target types.

  arg_parser: an argparse.ArgumentParser populated in-place."""

  common_args = arg_parser.add_argument_group('common', 'Common arguments')
  common_args.add_argument('--package',
                           type=os.path.realpath, required=True,
                           help='Path to the package to execute.')
  common_args.add_argument('--package-name', required=True,
                           help='Name of the package to execute, defined in ' +
                                'package metadata.')
  common_args.add_argument('--package-dep', action='append', default=[],
                           help='Path to an additional package to install.')
  common_args.add_argument('--install-only', action='store_true', default=False,
                           help='Install the packages but do not run them.')
  common_args.add_argument('--output-directory',
                           type=os.path.realpath, required=True,
                           help=('Path to the directory in which build files '
                                 'are located (must include build type).'))
  common_args.add_argument('--target-cpu', required=True,
                           help='GN target_cpu setting for the build.')
  common_args.add_argument('--target-staging-path',
                           help='target path under which to stage packages '
                                'during deployment.', default='/data')
  common_args.add_argument('--device', '-d', action='store_true', default=False,
                           help='Run on hardware device instead of QEMU.')
  common_args.add_argument('--host', help='The IP of the target device. ' +
                                          'Optional.')
  common_args.add_argument('--node-name',
                           help='The node-name of the device to boot or deploy '
                                'to. Optional, will use the first discovered '
                                'device if omitted.')
  common_args.add_argument('--port', '-p', type=int, default=22,
                           help='The port of the SSH service running on the ' +
                                'device. Optional.')
  common_args.add_argument('--ssh-config', '-F',
                           help='The path to the SSH configuration used for '
                                'connecting to the target device.')
  common_args.add_argument('--fuchsia-out-dir',
                           help='Path to a Fuchsia build output directory. '
                                'Equivalent to setting --ssh_config and '
                                # Fixed help text: was '---os-check' (three
                                # dashes), which misdocumented the flag.
                                '--os-check=ignore')
  common_args.add_argument('--system-log-file',
                           help='File to write system logs to. Specify - to '
                                'log to stdout.')
  common_args.add_argument('--exclude-system-logs',
                           action='store_false',
                           dest='include_system_logs',
                           help='Do not show system log data.')
  common_args.add_argument('--verbose', '-v', default=False,
                           action='store_true',
                           help='Enable debug-level logging.')
  # Note: the original statement ended with a stray trailing comma, turning
  # the expression into a 1-tuple; harmless at runtime but a latent bug.
  common_args.add_argument('--qemu-cpu-cores', type=int, default=4,
                           help='Sets the number of CPU cores to provide if '
                                'launching in a VM with QEMU.')
  common_args.add_argument(
      '--os_check', choices=['check', 'update', 'ignore'],
      default='update',
      help='Sets the OS version enforcement policy. If \'check\', then the '
           'deployment process will halt if the target\'s version doesn\'t '
           'match. If \'update\', then the target device will automatically '
           'be repaved. If \'ignore\', then the OS version won\'t be checked.')
+
+
def ConfigureLogging(args):
  """Configures the logging level based on command line |args|."""

  verbose = bool(args.verbose)
  root_level = logging.DEBUG if verbose else logging.INFO
  noisy_level = logging.DEBUG if verbose else logging.WARN

  logging.basicConfig(level=root_level,
                      format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')

  # The test server spawner is too noisy with INFO level logging, so tweak
  # its verbosity a bit by adjusting its logging level.
  logging.getLogger('chrome_test_server_spawner').setLevel(noisy_level)

  # Verbose SCP output can be useful at times but oftentimes is just too noisy.
  # Only enable it if -vv is passed.
  logging.getLogger('ssh').setLevel(noisy_level)
+
+
def GetDeploymentTargetForArgs(args):
  """Constructs a deployment target object using parameters taken from
  command line arguments.

  Returns a QemuTarget when running in a VM, otherwise a DeviceTarget."""

  if args.system_log_file == '-':
    system_log_file = sys.stdout
  elif args.system_log_file:
    system_log_file = open(args.system_log_file, 'w')
  else:
    system_log_file = None

  if not args.device:
    # KVM is required on x64 test bots.  --test-launcher-bot-mode is only
    # registered by some callers (e.g. test_runner), so default it to False
    # instead of raising AttributeError for the others (e.g. exe_runner).
    require_kvm = (getattr(args, 'test_launcher_bot_mode', False)
                   and args.target_cpu == "x64")

    return QemuTarget(output_dir=args.output_directory,
                      target_cpu=args.target_cpu,
                      cpu_cores=args.qemu_cpu_cores,
                      system_log_file=system_log_file,
                      require_kvm=require_kvm)
  else:
    return DeviceTarget(output_dir=args.output_directory,
                        target_cpu=args.target_cpu,
                        host=args.host,
                        node_name=args.node_name,
                        port=args.port,
                        ssh_config=args.ssh_config,
                        fuchsia_out_dir=args.fuchsia_out_dir,
                        system_log_file=system_log_file,
                        os_check=args.os_check)
diff --git a/deps/v8/build/fuchsia/device_target.py b/deps/v8/build/fuchsia/device_target.py
new file mode 100644
index 0000000000..c35fc79b3e
--- /dev/null
+++ b/deps/v8/build/fuchsia/device_target.py
@@ -0,0 +1,282 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Implements commands for running and interacting with Fuchsia on devices."""
+
+import boot_data
+import filecmp
+import logging
+import os
+import re
+import subprocess
+import sys
+import target
+import tempfile
+import time
+import uuid
+
+from common import SDK_ROOT, EnsurePathExists
+
+# The maximum times to attempt mDNS resolution when connecting to a freshly
+# booted Fuchsia instance before aborting.
+_BOOT_DISCOVERY_ATTEMPTS = 30
+
+# Number of seconds to wait when querying a list of all devices over mDNS.
+_LIST_DEVICES_TIMEOUT_SECS = 3
+
+#Number of failed connection attempts before redirecting system logs to stdout.
+CONNECT_RETRY_COUNT_BEFORE_LOGGING = 10
+
+TARGET_HASH_FILE_PATH = '/data/.hash'
+
+class DeviceTarget(target.Target):
+ """Prepares a device to be used as a deployment target. Depending on the
+ command line parameters, it automatically handling a number of preparatory
+ steps relating to address resolution, device provisioning, and SDK
+ versioning.
+
+ If |_node_name| is unset:
+ If there is one running device, use it for deployment and execution. The
+ device's SDK version is checked unless --os-check=ignore is set.
+ If --os-check=update is set, then the target device is repaved if the SDK
+ version doesn't match.
+
+ If there are more than one running devices, then abort and instruct the
+ user to re-run the command with |_node_name|
+
+ Otherwise, if there are no running devices, then search for a device
+ running Zedboot, and pave it.
+
+
+ If |_node_name| is set:
+ If there is a running device with a matching nodename, then it is used
+ for deployment and execution.
+
+ Otherwise, attempt to pave a device with a matching nodename, and use it
+ for deployment and execution.
+
+ If |_host| is set:
+ Deploy to a device at the host IP address as-is."""
+
+ def __init__(self, output_dir, target_cpu, host=None, node_name=None,
+ port=None, ssh_config=None, fuchsia_out_dir=None,
+ os_check='update', system_log_file=None):
+ """output_dir: The directory which will contain the files that are
+ generated to support the deployment.
+ target_cpu: The CPU architecture of the deployment target. Can be
+ "x64" or "arm64".
+ host: The address of the deployment target device.
+ node_name: The node name of the deployment target device.
+ port: The port of the SSH service on the deployment target device.
+ ssh_config: The path to SSH configuration data.
+ fuchsia_out_dir: The path to a Fuchsia build output directory, for
+ deployments to devices paved with local Fuchsia builds.
+ os_check: If 'check', the target's SDK version must match.
+ If 'update', the target will be repaved if the SDK versions
+ mismatch.
+ If 'ignore', the target's SDK version is ignored."""
+
+ super(DeviceTarget, self).__init__(output_dir, target_cpu)
+
+ self._port = port if port else 22
+ self._system_log_file = system_log_file
+ self._loglistener = None
+ self._host = host
+ self._fuchsia_out_dir = fuchsia_out_dir
+ self._node_name = node_name
+ self._os_check = os_check,
+
+ if self._host and self._node_name:
+ raise Exception('Only one of "--host" or "--name" can be specified.')
+
+ if fuchsia_out_dir:
+ if ssh_config:
+ raise Exception('Only one of "--fuchsia-out-dir" or "--ssh_config" can '
+ 'be specified.')
+
+ # Use SSH keys from the Fuchsia output directory.
+ self._ssh_config_path = os.path.join(os.path.expanduser(fuchsia_out_dir),
+ 'ssh-keys', 'ssh_config')
+ self._os_check = 'ignore'
+
+ elif ssh_config:
+ # Use the SSH config provided via the commandline.
+ self._ssh_config_path = os.path.expanduser(ssh_config)
+
+ else:
+ # Default to using an automatically generated SSH config and keys.
+ boot_data.ProvisionSSH(output_dir)
+ self._ssh_config_path = boot_data.GetSSHConfigPath(output_dir)
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ if self._loglistener:
+ self._loglistener.kill()
+
+ def _SDKHashMatches(self):
+ """Checks if /data/.hash on the device matches SDK_ROOT/.hash.
+
+ Returns True if the files are identical, or False otherwise.
+ """
+ with tempfile.NamedTemporaryFile() as tmp:
+ try:
+ self.GetFile(TARGET_HASH_FILE_PATH, tmp.name)
+ except subprocess.CalledProcessError:
+ # If the file is unretrievable for whatever reason, assume mismatch.
+ return False
+
+ return filecmp.cmp(tmp.name, os.path.join(SDK_ROOT, '.hash'), False)
+
+ def __Discover(self):
+ """Queries mDNS for the IP address of a booted Fuchsia instance whose name
+ matches |_node_name| on the local area network. If |_node_name| isn't
+ specified, and there is only one device on the network, then returns the
+ IP address of that advice.
+
+ Sets |_host_name| and returns True if the device was found,
+ or waits up to |timeout| seconds and returns False if the device couldn't
+ be found."""
+
+ dev_finder_path = os.path.join(SDK_ROOT, 'tools', 'dev_finder')
+
+ if self._node_name:
+ command = [dev_finder_path, 'resolve',
+ '-device-limit', '1', # Exit early as soon as a host is found.
+ self._node_name]
+ else:
+ command = [dev_finder_path, 'list', '-full',
+ '-timeout', str(_LIST_DEVICES_TIMEOUT_SECS * 1000)]
+
+ proc = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=open(os.devnull, 'w'))
+
+ output = proc.communicate()[0].strip().split('\n')
+
+ if proc.returncode != 0:
+ return False
+
+ if self._node_name:
+ # Handle the result of "dev_finder resolve".
+ self._host = output[0].strip()
+
+ else:
+ name_host_pairs = [x.strip().split(' ') for x in output]
+
+ # Handle the output of "dev_finder list".
+ if len(name_host_pairs) > 1:
+ print 'More than one device was discovered on the network.'
+ print 'Use --node-name <name> to specify the device to use.'
+ print '\nList of devices:'
+ for pair in name_host_pairs:
+ print ' ' + pair[1]
+ print
+ raise Exception('Ambiguous target device specification.')
+
+ assert len(name_host_pairs) == 1
+ self._host, self._node_name = name_host_pairs[0]
+
+ logging.info('Found device "%s" at address %s.' % (self._node_name,
+ self._host))
+
+ return True
+
+ def Start(self):
+ if self._host:
+ self._WaitUntilReady()
+
+ else:
+ should_provision = False
+
+ if self.__Discover():
+ self._WaitUntilReady()
+
+ if self._os_check != 'ignore':
+ if self._SDKHashMatches():
+ if self._os_check == 'update':
+ logging.info( 'SDK hash does not match; rebooting and repaving.')
+ self.RunCommand(['dm', 'reboot'])
+ should_provision = True
+ elif self._os_check == 'check':
+ raise Exception('Target device SDK version does not match.')
+
+ else:
+ should_provision = True
+
+ if should_provision:
+ self.__ProvisionDevice()
+
+ assert self._node_name
+ assert self._host
+
+
+ def __ProvisionDevice(self):
+ """Netboots a device with Fuchsia. If |_node_name| is set, then only a
+ device with a matching node name is used.
+
+ The device is up and reachable via SSH when the function is successfully
+ completes."""
+
+ bootserver_path = os.path.join(SDK_ROOT, 'tools', 'bootserver')
+ bootserver_command = [
+ bootserver_path,
+ '-1',
+ '--fvm',
+ EnsurePathExists(boot_data.GetTargetFile(self._GetTargetSdkArch(),
+ 'fvm.sparse.blk')),
+ EnsurePathExists(boot_data.GetBootImage(self._output_dir,
+ self._GetTargetSdkArch()))]
+
+ if self._GetTargetSdkArch() == 'x64':
+ bootserver_command += [
+ '--efi',
+ EnsurePathExists(boot_data.GetTargetFile(self._GetTargetSdkArch(),
+ 'local.esp.blk'))]
+
+ if self._node_name:
+ bootserver_command += ['-n', self._node_name]
+
+ bootserver_command += ['--']
+ bootserver_command += boot_data.GetKernelArgs(self._output_dir)
+
+ logging.debug(' '.join(bootserver_command))
+ stdout = subprocess.check_output(bootserver_command,
+ stderr=subprocess.STDOUT)
+
+ # Parse the nodename from bootserver stdout.
+ m = re.search(r'.*Proceeding with nodename (?P<nodename>.*)$', stdout,
+ re.MULTILINE)
+ if not m:
+ raise Exception('Couldn\'t parse nodename from bootserver output.')
+ self._node_name = m.groupdict()['nodename']
+ logging.info('Booted device "%s".' % self._node_name)
+
+ # Start loglistener to save system logs.
+ if self._system_log_file:
+ loglistener_path = os.path.join(SDK_ROOT, 'tools', 'loglistener')
+ self._loglistener = subprocess.Popen(
+ [loglistener_path, self._node_name],
+ stdout=self._system_log_file,
+ stderr=subprocess.STDOUT, stdin=open(os.devnull))
+
+ # Repeatdly query mDNS until we find the device, or we hit the timeout of
+ # DISCOVERY_TIMEOUT_SECS.
+ logging.info('Waiting for device to join network.')
+ for _ in xrange(_BOOT_DISCOVERY_ATTEMPTS):
+ if self.__Discover():
+ break
+
+ if not self._host:
+ raise Exception('Device %s couldn\'t be discovered via mDNS.' %
+ self._node_name)
+
+ self._WaitUntilReady();
+
+ # Update the target's hash to match the current tree's.
+ self.PutFile(os.path.join(SDK_ROOT, '.hash'), TARGET_HASH_FILE_PATH)
+
+ def _GetEndpoint(self):
+ return (self._host, self._port)
+
+ def _GetSshConfigPath(self):
+ return self._ssh_config_path
diff --git a/deps/v8/build/fuchsia/exe_runner.py b/deps/v8/build/fuchsia/exe_runner.py
new file mode 100755
index 0000000000..feb96d0779
--- /dev/null
+++ b/deps/v8/build/fuchsia/exe_runner.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+#
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Deploys and executes a packaged Fuchsia executable on a target."""
+
+import argparse
+import logging
+import sys
+
+from common_args import AddCommonArgs, ConfigureLogging, \
+ GetDeploymentTargetForArgs
+from run_package import RunPackage, RunPackageArgs
+
+
def main():
  """Parses command line arguments, deploys the package to the target and
  runs it, returning the package's result code."""

  arg_parser = argparse.ArgumentParser()
  AddCommonArgs(arg_parser)
  arg_parser.add_argument('child_args', nargs='*',
                          help='Arguments for the test process.')
  args = arg_parser.parse_args()
  ConfigureLogging(args)

  with GetDeploymentTargetForArgs(args) as target:
    target.Start()
    run_package_args = RunPackageArgs.FromCommonArgs(args)
    return RunPackage(args.output_directory, target, args.package,
                      args.package_name, args.package_dep, args.child_args,
                      run_package_args)


if __name__ == '__main__':
  sys.exit(main())
diff --git a/deps/v8/build/fuchsia/fidlgen_js/BUILD.gn b/deps/v8/build/fuchsia/fidlgen_js/BUILD.gn
new file mode 100644
index 0000000000..4b2bb6400c
--- /dev/null
+++ b/deps/v8/build/fuchsia/fidlgen_js/BUILD.gn
@@ -0,0 +1,63 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/fuchsia/fidl_library.gni")
+import("//testing/test.gni")
+
# Unit tests for the fidlgen_js runtime and its generated bindings.
test("fidlgen_js_unittests") {
  testonly = true

  sources = [
    "test/fidlgen_js_unittest.cc",
  ]

  deps = [
    ":fidljstest",
    ":runtime",
    "//base/test:test_support",
    "//gin:gin_test",
    "//testing/gtest",
    "//v8",
  ]

  # V8 is embedded via gin; match its snapshot/startup-data configuration.
  configs += [
    "//tools/v8_context_snapshot:use_v8_context_snapshot",
    "//v8:external_startup_data",
  ]

  data_deps = [
    "//tools/v8_context_snapshot:v8_context_snapshot",
  ]

  # The JS runtime support file is loaded at test runtime.
  data = [
    "runtime/fidl.mjs",
  ]
}
+
# Native (Zircon-backed) runtime support for the generated JS bindings.
static_library("runtime") {
  sources = [
    "runtime/zircon.cc",
    "runtime/zircon.h",
  ]

  deps = [
    "//base",
    "//gin",
    "//third_party/fuchsia-sdk/sdk:async",
    "//third_party/fuchsia-sdk/sdk:async_default",
    "//v8",
  ]
}
+
# Test-only FIDL library; generates both C++ and JS bindings so the
# unittests can compare the two.
fidl_library("fidljstest") {
  testonly = true
  sources = [
    "test/simple.fidl",
  ]

  languages = [
    "cpp",
    "js",
  ]
}
diff --git a/deps/v8/build/fuchsia/fidlgen_js/DEPS b/deps/v8/build/fuchsia/fidlgen_js/DEPS
new file mode 100644
index 0000000000..681254d0f3
--- /dev/null
+++ b/deps/v8/build/fuchsia/fidlgen_js/DEPS
@@ -0,0 +1,4 @@
+include_rules = [
+ "+gin",
+ "+v8/include",
+]
diff --git a/deps/v8/build/fuchsia/fidlgen_js/fidl.py b/deps/v8/build/fuchsia/fidlgen_js/fidl.py
new file mode 100644
index 0000000000..6f8b99f441
--- /dev/null
+++ b/deps/v8/build/fuchsia/fidlgen_js/fidl.py
@@ -0,0 +1,549 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This was generated (and can be regenerated) by pasting
+# zircon/system/host/fidl/schema.json from Fuchsia into
+# https://app.quicktype.io and choosing Python 2.7 output. The only manual
+# change is to modify the import path for Enum.
+
+from third_party.enum34 import Enum
+
+
+def from_str(x):
+ assert isinstance(x, (str, unicode))
+ return x
+
+
+def from_int(x):
+ assert isinstance(x, int) and not isinstance(x, bool)
+ return x
+
+
+def from_none(x):
+ assert x is None
+ return x
+
+
+def from_union(fs, x):
+ for f in fs:
+ try:
+ return f(x)
+ except:
+ pass
+ assert False
+
+
+def from_bool(x):
+ assert isinstance(x, bool)
+ return x
+
+
+def to_class(c, x):
+ assert isinstance(x, c)
+ return x.to_dict()
+
+
+def to_enum(c, x):
+ assert isinstance(x, c)
+ return x.value
+
+
+def from_list(f, x):
+ assert isinstance(x, list)
+ return [f(y) for y in x]
+
+
+def from_dict(f, x):
+ assert isinstance(x, dict)
+ return { k: f(v) for (k, v) in x.items() }
+
+
+class Attribute:
+ def __init__(self, name, value):
+ self.name = name
+ self.value = value
+
+ @staticmethod
+ def from_dict(obj):
+ assert isinstance(obj, dict)
+ name = from_str(obj.get(u"name"))
+ value = from_str(obj.get(u"value"))
+ return Attribute(name, value)
+
+ def to_dict(self):
+ result = {}
+ result[u"name"] = from_str(self.name)
+ result[u"value"] = from_str(self.value)
+ return result
+
+
+class TypeKind(Enum):
+ ARRAY = u"array"
+ HANDLE = u"handle"
+ IDENTIFIER = u"identifier"
+ PRIMITIVE = u"primitive"
+ REQUEST = u"request"
+ STRING = u"string"
+ VECTOR = u"vector"
+
+
+class TypeClass:
+ def __init__(self, element_count, element_type, kind, maybe_element_count, nullable, subtype, identifier):
+ self.element_count = element_count
+ self.element_type = element_type
+ self.kind = kind
+ self.maybe_element_count = maybe_element_count
+ self.nullable = nullable
+ self.subtype = subtype
+ self.identifier = identifier
+
+ @staticmethod
+ def from_dict(obj):
+ assert isinstance(obj, dict)
+ element_count = from_union([from_int, from_none], obj.get(u"element_count"))
+ element_type = from_union([TypeClass.from_dict, from_none], obj.get(u"element_type"))
+ kind = TypeKind(obj.get(u"kind"))
+ maybe_element_count = from_union([from_int, from_none], obj.get(u"maybe_element_count"))
+ nullable = from_union([from_bool, from_none], obj.get(u"nullable"))
+ subtype = from_union([from_str, from_none], obj.get(u"subtype"))
+ identifier = from_union([from_str, from_none], obj.get(u"identifier"))
+ return TypeClass(element_count, element_type, kind, maybe_element_count, nullable, subtype, identifier)
+
+ def to_dict(self):
+ result = {}
+ result[u"element_count"] = from_union([from_int, from_none], self.element_count)
+ result[u"element_type"] = from_union([lambda x: to_class(TypeClass, x), from_none], self.element_type)
+ result[u"kind"] = to_enum(TypeKind, self.kind)
+ result[u"maybe_element_count"] = from_union([from_int, from_none], self.maybe_element_count)
+ result[u"nullable"] = from_union([from_bool, from_none], self.nullable)
+ result[u"subtype"] = from_union([from_str, from_none], self.subtype)
+ result[u"identifier"] = from_union([from_str, from_none], self.identifier)
+ return result
+
+
+class ConstantKind(Enum):
+ IDENTIFIER = u"identifier"
+ LITERAL = u"literal"
+
+
+class LiteralKind(Enum):
+ DEFAULT = u"default"
+ FALSE = u"false"
+ NUMERIC = u"numeric"
+ STRING = u"string"
+ TRUE = u"true"
+
+
+class Literal:
+ def __init__(self, kind, value):
+ self.kind = kind
+ self.value = value
+
+ @staticmethod
+ def from_dict(obj):
+ assert isinstance(obj, dict)
+ kind = LiteralKind(obj.get(u"kind"))
+ value = from_union([from_str, from_none], obj.get(u"value"))
+ return Literal(kind, value)
+
+ def to_dict(self):
+ result = {}
+ result[u"kind"] = to_enum(LiteralKind, self.kind)
+ result[u"value"] = from_union([from_str, from_none], self.value)
+ return result
+
+
+class Constant:
+ def __init__(self, identifier, kind, literal):
+ self.identifier = identifier
+ self.kind = kind
+ self.literal = literal
+
+ @staticmethod
+ def from_dict(obj):
+ assert isinstance(obj, dict)
+ identifier = from_union([from_str, from_none], obj.get(u"identifier"))
+ kind = ConstantKind(obj.get(u"kind"))
+ literal = from_union([Literal.from_dict, from_none], obj.get(u"literal"))
+ return Constant(identifier, kind, literal)
+
+ def to_dict(self):
+ result = {}
+ result[u"identifier"] = from_union([from_str, from_none], self.identifier)
+ result[u"kind"] = to_enum(ConstantKind, self.kind)
+ result[u"literal"] = from_union([lambda x: to_class(Literal, x), from_none], self.literal)
+ return result
+
+
+class Const:
+ def __init__(self, maybe_attributes, name, type, value):
+ self.maybe_attributes = maybe_attributes
+ self.name = name
+ self.type = type
+ self.value = value
+
+ @staticmethod
+ def from_dict(obj):
+ assert isinstance(obj, dict)
+ maybe_attributes = from_union([lambda x: from_list(Attribute.from_dict, x), from_none], obj.get(u"maybe_attributes"))
+ name = from_str(obj.get(u"name"))
+ type = TypeClass.from_dict(obj.get(u"type"))
+ value = Constant.from_dict(obj.get(u"value"))
+ return Const(maybe_attributes, name, type, value)
+
+ def to_dict(self):
+ result = {}
+ result[u"maybe_attributes"] = from_union([lambda x: from_list(lambda x: to_class(Attribute, x), x), from_none], self.maybe_attributes)
+ result[u"name"] = from_str(self.name)
+ result[u"type"] = to_class(TypeClass, self.type)
+ result[u"value"] = to_class(Constant, self.value)
+ return result
+
+
+class DeclarationsMap(Enum):
+ CONST = u"const"
+ ENUM = u"enum"
+ INTERFACE = u"interface"
+ STRUCT = u"struct"
+ UNION = u"union"
+
+
+class EnumMember:
+ def __init__(self, name, value):
+ self.name = name
+ self.value = value
+
+ @staticmethod
+ def from_dict(obj):
+ assert isinstance(obj, dict)
+ name = from_str(obj.get(u"name"))
+ value = Constant.from_dict(obj.get(u"value"))
+ return EnumMember(name, value)
+
+ def to_dict(self):
+ result = {}
+ result[u"name"] = from_str(self.name)
+ result[u"value"] = to_class(Constant, self.value)
+ return result
+
+
+class IntegerType(Enum):
+ INT16 = u"int16"
+ INT32 = u"int32"
+ INT64 = u"int64"
+ INT8 = u"int8"
+ UINT16 = u"uint16"
+ UINT32 = u"uint32"
+ UINT64 = u"uint64"
+ UINT8 = u"uint8"
+
+
+class EnumDeclarationElement:
+ def __init__(self, maybe_attributes, members, name, type):
+ self.maybe_attributes = maybe_attributes
+ self.members = members
+ self.name = name
+ self.type = type
+
+ @staticmethod
+ def from_dict(obj):
+ assert isinstance(obj, dict)
+ maybe_attributes = from_union([lambda x: from_list(Attribute.from_dict, x), from_none], obj.get(u"maybe_attributes"))
+ members = from_list(EnumMember.from_dict, obj.get(u"members"))
+ name = from_str(obj.get(u"name"))
+ type = IntegerType(obj.get(u"type"))
+ return EnumDeclarationElement(maybe_attributes, members, name, type)
+
+ def to_dict(self):
+ result = {}
+ result[u"maybe_attributes"] = from_union([lambda x: from_list(lambda x: to_class(Attribute, x), x), from_none], self.maybe_attributes)
+ result[u"members"] = from_list(lambda x: to_class(EnumMember, x), self.members)
+ result[u"name"] = from_str(self.name)
+ result[u"type"] = to_enum(IntegerType, self.type)
+ return result
+
+
+class InterfaceMethodParameter:
+ def __init__(self, alignment, name, offset, size, type):
+ self.alignment = alignment
+ self.name = name
+ self.offset = offset
+ self.size = size
+ self.type = type
+
+ @staticmethod
+ def from_dict(obj):
+ assert isinstance(obj, dict)
+ alignment = from_int(obj.get(u"alignment"))
+ name = from_str(obj.get(u"name"))
+ offset = from_int(obj.get(u"offset"))
+ size = from_int(obj.get(u"size"))
+ type = TypeClass.from_dict(obj.get(u"type"))
+ return InterfaceMethodParameter(alignment, name, offset, size, type)
+
+ def to_dict(self):
+ result = {}
+ result[u"alignment"] = from_int(self.alignment)
+ result[u"name"] = from_str(self.name)
+ result[u"offset"] = from_int(self.offset)
+ result[u"size"] = from_int(self.size)
+ result[u"type"] = to_class(TypeClass, self.type)
+ return result
+
+
+class InterfaceMethod:
+ def __init__(self, has_request, has_response, maybe_attributes, maybe_request, maybe_request_alignment, maybe_request_size, maybe_response, maybe_response_alignment, maybe_response_size, name, ordinal):
+ self.has_request = has_request
+ self.has_response = has_response
+ self.maybe_attributes = maybe_attributes
+ self.maybe_request = maybe_request
+ self.maybe_request_alignment = maybe_request_alignment
+ self.maybe_request_size = maybe_request_size
+ self.maybe_response = maybe_response
+ self.maybe_response_alignment = maybe_response_alignment
+ self.maybe_response_size = maybe_response_size
+ self.name = name
+ self.ordinal = ordinal
+
+ @staticmethod
+ def from_dict(obj):
+ assert isinstance(obj, dict)
+ has_request = from_bool(obj.get(u"has_request"))
+ has_response = from_bool(obj.get(u"has_response"))
+ maybe_attributes = from_union([lambda x: from_list(Attribute.from_dict, x), from_none], obj.get(u"maybe_attributes"))
+ maybe_request = from_union([lambda x: from_list(InterfaceMethodParameter.from_dict, x), from_none], obj.get(u"maybe_request"))
+ maybe_request_alignment = from_union([from_int, from_none], obj.get(u"maybe_request_alignment"))
+ maybe_request_size = from_union([from_int, from_none], obj.get(u"maybe_request_size"))
+ maybe_response = from_union([lambda x: from_list(InterfaceMethodParameter.from_dict, x), from_none], obj.get(u"maybe_response"))
+ maybe_response_alignment = from_union([from_int, from_none], obj.get(u"maybe_response_alignment"))
+ maybe_response_size = from_union([from_int, from_none], obj.get(u"maybe_response_size"))
+ name = from_str(obj.get(u"name"))
+ ordinal = from_int(obj.get(u"ordinal"))
+ return InterfaceMethod(has_request, has_response, maybe_attributes, maybe_request, maybe_request_alignment, maybe_request_size, maybe_response, maybe_response_alignment, maybe_response_size, name, ordinal)
+
+ def to_dict(self):
+ result = {}
+ result[u"has_request"] = from_bool(self.has_request)
+ result[u"has_response"] = from_bool(self.has_response)
+ result[u"maybe_attributes"] = from_union([lambda x: from_list(lambda x: to_class(Attribute, x), x), from_none], self.maybe_attributes)
+ result[u"maybe_request"] = from_union([lambda x: from_list(lambda x: to_class(InterfaceMethodParameter, x), x), from_none], self.maybe_request)
+ result[u"maybe_request_alignment"] = from_union([from_int, from_none], self.maybe_request_alignment)
+ result[u"maybe_request_size"] = from_union([from_int, from_none], self.maybe_request_size)
+ result[u"maybe_response"] = from_union([lambda x: from_list(lambda x: to_class(InterfaceMethodParameter, x), x), from_none], self.maybe_response)
+ result[u"maybe_response_alignment"] = from_union([from_int, from_none], self.maybe_response_alignment)
+ result[u"maybe_response_size"] = from_union([from_int, from_none], self.maybe_response_size)
+ result[u"name"] = from_str(self.name)
+ result[u"ordinal"] = from_int(self.ordinal)
+ return result
+
+
+class Interface:
+ def __init__(self, maybe_attributes, methods, name):
+ self.maybe_attributes = maybe_attributes
+ self.methods = methods
+ self.name = name
+
+ @staticmethod
+ def from_dict(obj):
+ assert isinstance(obj, dict)
+ maybe_attributes = from_union([lambda x: from_list(Attribute.from_dict, x), from_none], obj.get(u"maybe_attributes"))
+ methods = from_list(InterfaceMethod.from_dict, obj.get(u"methods"))
+ name = from_str(obj.get(u"name"))
+ return Interface(maybe_attributes, methods, name)
+
+ def to_dict(self):
+ result = {}
+ result[u"maybe_attributes"] = from_union([lambda x: from_list(lambda x: to_class(Attribute, x), x), from_none], self.maybe_attributes)
+ result[u"methods"] = from_list(lambda x: to_class(InterfaceMethod, x), self.methods)
+ result[u"name"] = from_str(self.name)
+ return result
+
+
+class Library:
+ def __init__(self, declarations, name):
+ self.declarations = declarations
+ self.name = name
+
+ @staticmethod
+ def from_dict(obj):
+ assert isinstance(obj, dict)
+ declarations = from_dict(DeclarationsMap, obj.get(u"declarations"))
+ name = from_str(obj.get(u"name"))
+ return Library(declarations, name)
+
+ def to_dict(self):
+ result = {}
+ result[u"declarations"] = from_dict(lambda x: to_enum(DeclarationsMap, x), self.declarations)
+ result[u"name"] = from_str(self.name)
+ return result
+
+
+class StructMember:
+ def __init__(self, alignment, maybe_default_value, name, offset, size, type):
+ self.alignment = alignment
+ self.maybe_default_value = maybe_default_value
+ self.name = name
+ self.offset = offset
+ self.size = size
+ self.type = type
+
+ @staticmethod
+ def from_dict(obj):
+ assert isinstance(obj, dict)
+ alignment = from_int(obj.get(u"alignment"))
+ maybe_default_value = from_union([Constant.from_dict, from_none], obj.get(u"maybe_default_value"))
+ name = from_str(obj.get(u"name"))
+ offset = from_int(obj.get(u"offset"))
+ size = from_int(obj.get(u"size"))
+ type = TypeClass.from_dict(obj.get(u"type"))
+ return StructMember(alignment, maybe_default_value, name, offset, size, type)
+
+ def to_dict(self):
+ result = {}
+ result[u"alignment"] = from_int(self.alignment)
+ result[u"maybe_default_value"] = from_union([lambda x: to_class(Constant, x), from_none], self.maybe_default_value)
+ result[u"name"] = from_str(self.name)
+ result[u"offset"] = from_int(self.offset)
+ result[u"size"] = from_int(self.size)
+ result[u"type"] = to_class(TypeClass, self.type)
+ return result
+
+
+class Struct:
+ def __init__(self, max_handles, maybe_attributes, members, name, size):
+ self.max_handles = max_handles
+ self.maybe_attributes = maybe_attributes
+ self.members = members
+ self.name = name
+ self.size = size
+
+ @staticmethod
+ def from_dict(obj):
+ assert isinstance(obj, dict)
+ max_handles = from_union([from_int, from_none], obj.get(u"max_handles"))
+ maybe_attributes = from_union([lambda x: from_list(Attribute.from_dict, x), from_none], obj.get(u"maybe_attributes"))
+ members = from_list(StructMember.from_dict, obj.get(u"members"))
+ name = from_str(obj.get(u"name"))
+ size = from_int(obj.get(u"size"))
+ return Struct(max_handles, maybe_attributes, members, name, size)
+
+ def to_dict(self):
+ result = {}
+ result[u"max_handles"] = from_union([from_int, from_none], self.max_handles)
+ result[u"maybe_attributes"] = from_union([lambda x: from_list(lambda x: to_class(Attribute, x), x), from_none], self.maybe_attributes)
+ result[u"members"] = from_list(lambda x: to_class(StructMember, x), self.members)
+ result[u"name"] = from_str(self.name)
+ result[u"size"] = from_int(self.size)
+ return result
+
+
+class UnionMember:
+ def __init__(self, alignment, name, offset, size, type):
+ self.alignment = alignment
+ self.name = name
+ self.offset = offset
+ self.size = size
+ self.type = type
+
+ @staticmethod
+ def from_dict(obj):
+ assert isinstance(obj, dict)
+ alignment = from_int(obj.get(u"alignment"))
+ name = from_str(obj.get(u"name"))
+ offset = from_int(obj.get(u"offset"))
+ size = from_int(obj.get(u"size"))
+ type = TypeClass.from_dict(obj.get(u"type"))
+ return UnionMember(alignment, name, offset, size, type)
+
+ def to_dict(self):
+ result = {}
+ result[u"alignment"] = from_int(self.alignment)
+ result[u"name"] = from_str(self.name)
+ result[u"offset"] = from_int(self.offset)
+ result[u"size"] = from_int(self.size)
+ result[u"type"] = to_class(TypeClass, self.type)
+ return result
+
+
+class UnionDeclarationElement:
+ def __init__(self, alignment, max_handles, maybe_attributes, members, name, size):
+ self.alignment = alignment
+ self.max_handles = max_handles
+ self.maybe_attributes = maybe_attributes
+ self.members = members
+ self.name = name
+ self.size = size
+
+ @staticmethod
+ def from_dict(obj):
+ assert isinstance(obj, dict)
+ alignment = from_int(obj.get(u"alignment"))
+ max_handles = from_union([from_int, from_none], obj.get(u"max_handles"))
+ maybe_attributes = from_union([lambda x: from_list(Attribute.from_dict, x), from_none], obj.get(u"maybe_attributes"))
+ members = from_list(UnionMember.from_dict, obj.get(u"members"))
+ name = from_str(obj.get(u"name"))
+ size = from_int(obj.get(u"size"))
+ return UnionDeclarationElement(alignment, max_handles, maybe_attributes, members, name, size)
+
+ def to_dict(self):
+ result = {}
+ result[u"alignment"] = from_int(self.alignment)
+ result[u"max_handles"] = from_union([from_int, from_none], self.max_handles)
+ result[u"maybe_attributes"] = from_union([lambda x: from_list(lambda x: to_class(Attribute, x), x), from_none], self.maybe_attributes)
+ result[u"members"] = from_list(lambda x: to_class(UnionMember, x), self.members)
+ result[u"name"] = from_str(self.name)
+ result[u"size"] = from_int(self.size)
+ return result
+
+
+class Fidl:
+ def __init__(self, const_declarations, declaration_order, declarations, enum_declarations, interface_declarations, library_dependencies, name, struct_declarations, union_declarations, version):
+ self.const_declarations = const_declarations
+ self.declaration_order = declaration_order
+ self.declarations = declarations
+ self.enum_declarations = enum_declarations
+ self.interface_declarations = interface_declarations
+ self.library_dependencies = library_dependencies
+ self.name = name
+ self.struct_declarations = struct_declarations
+ self.union_declarations = union_declarations
+ self.version = version
+
+ @staticmethod
+ def from_dict(obj):
+ assert isinstance(obj, dict)
+ const_declarations = from_list(Const.from_dict, obj.get(u"const_declarations"))
+ declaration_order = from_list(from_str, obj.get(u"declaration_order"))
+ declarations = from_dict(DeclarationsMap, obj.get(u"declarations"))
+ enum_declarations = from_list(EnumDeclarationElement.from_dict, obj.get(u"enum_declarations"))
+ interface_declarations = from_list(Interface.from_dict, obj.get(u"interface_declarations"))
+ library_dependencies = from_list(Library.from_dict, obj.get(u"library_dependencies"))
+ name = from_str(obj.get(u"name"))
+ struct_declarations = from_list(Struct.from_dict, obj.get(u"struct_declarations"))
+ union_declarations = from_list(UnionDeclarationElement.from_dict, obj.get(u"union_declarations"))
+ version = from_str(obj.get(u"version"))
+ return Fidl(const_declarations, declaration_order, declarations, enum_declarations, interface_declarations, library_dependencies, name, struct_declarations, union_declarations, version)
+
+ def to_dict(self):
+ result = {}
+ result[u"const_declarations"] = from_list(lambda x: to_class(Const, x), self.const_declarations)
+ result[u"declaration_order"] = from_list(from_str, self.declaration_order)
+ result[u"declarations"] = from_dict(lambda x: to_enum(DeclarationsMap, x), self.declarations)
+ result[u"enum_declarations"] = from_list(lambda x: to_class(EnumDeclarationElement, x), self.enum_declarations)
+ result[u"interface_declarations"] = from_list(lambda x: to_class(Interface, x), self.interface_declarations)
+ result[u"library_dependencies"] = from_list(lambda x: to_class(Library, x), self.library_dependencies)
+ result[u"name"] = from_str(self.name)
+ result[u"struct_declarations"] = from_list(lambda x: to_class(Struct, x), self.struct_declarations)
+ result[u"union_declarations"] = from_list(lambda x: to_class(UnionDeclarationElement, x), self.union_declarations)
+ result[u"version"] = from_str(self.version)
+ return result
+
+
+def fidl_from_dict(s):
+ return Fidl.from_dict(s)
+
+
+def fidl_to_dict(x):
+ return to_class(Fidl, x)
+
diff --git a/deps/v8/build/fuchsia/fidlgen_js/gen.py b/deps/v8/build/fuchsia/fidlgen_js/gen.py
new file mode 100755
index 0000000000..484440e2d1
--- /dev/null
+++ b/deps/v8/build/fuchsia/fidlgen_js/gen.py
@@ -0,0 +1,673 @@
+#!/usr/bin/env python
+
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import fidl
+import json
+
+
+class _CompoundIdentifier(object):
+
+ def __init__(self, library, name):
+ self.library = library
+ self.name = name
+
+
+def _ParseLibraryName(lib):
+ return lib.split('.')
+
+
+def _ParseCompoundIdentifier(ident):
+ parts = ident.split('/', 2)
+ raw_library = ''
+ raw_name = parts[0]
+ if len(parts) == 2:
+ raw_library, raw_name = parts
+ library = _ParseLibraryName(raw_library)
+ return _CompoundIdentifier(library, raw_name)
+
+
+def _ChangeIfReserved(name):
+ # TODO(crbug.com/883496): Remap any JS keywords.
+ return name
+
+
+def _CompileCompoundIdentifier(compound, ext=''):
+ result = _ChangeIfReserved(compound.name) + ext
+ return result
+
+
+def _CompileIdentifier(ident):
+ return _ChangeIfReserved(ident)
+
+
+def _GetUnderlyingPrimitiveType(t):
+ """Returns the underlying FIDL primitive type for a higher level type."""
+ if t.kind == fidl.TypeKind.PRIMITIVE:
+ return t.subtype
+ elif t.kind == fidl.TypeKind.STRING:
+ return 'string'
+ elif t.kind == fidl.TypeKind.IDENTIFIER:
+ # No underlying type is required because it will be implied by the type of
+    # the value that the identifier represents.
+ return None
+ else:
+ raise Exception(
+ 'expected primitive or identifier representing primitive underlying '
+ 'type, but got ' + str(t.kind))
+
+
+def _InlineSizeOfPrimitiveType(primitive_type):
+ return {
+ 'bool': 1,
+ 'float32': 4,
+ 'float64': 8,
+ 'int16': 2,
+ 'int32': 4,
+ 'int64': 8,
+ 'int8': 1,
+ 'uint16': 2,
+ 'uint32': 4,
+ 'uint64': 8,
+ 'uint8': 1,
+ }[primitive_type]
+
+
+def _JsTypeForPrimitiveType(t):
+ mapping = {
+ fidl.IntegerType.INT16: 'number',
+ fidl.IntegerType.INT32: 'number',
+ fidl.IntegerType.INT64: 'BigInt',
+ fidl.IntegerType.INT8: 'number',
+ fidl.IntegerType.UINT16: 'number',
+ fidl.IntegerType.UINT32: 'number',
+ fidl.IntegerType.UINT64: 'BigInt',
+ fidl.IntegerType.UINT8: 'number',
+ }
+ return mapping[t]
+
+
+def _BuildInlineSizeTable(fidl):
+ """Builds a mapping from type name to inline type size. These need to be
+ extracted beforehand because a vector<X> can be required during compilation
+ before seeing the compilation of X."""
+ result = {}
+ for enum in fidl.enum_declarations:
+ result[enum.name] = _InlineSizeOfPrimitiveType(enum.type.value)
+ for union in fidl.union_declarations:
+ result[union.name] = union.size
+ for struct in fidl.struct_declarations:
+ result[struct.name] = struct.size
+ return result
+
+
+class Compiler(object):
+
+ def __init__(self, fidl, output_file):
+ self.fidl = fidl
+ self.f = output_file
+ self.output_deferred_to_eof = ''
+ self.type_table_defined = set()
+ self.type_inline_size_by_name = _BuildInlineSizeTable(self.fidl)
+ # Used to hold the JS name for constants and enumerants. In particular,
+ # enums aren't scoped by name to their enum in the fidl json, but the JS
+ # bindings emit them as Enum.Something. So this maps from Something ->
+ # Enum.Something.
+ self.resolved_constant_name = {}
+
+ def Compile(self):
+ self._EmitHeader()
+ for c in self.fidl.const_declarations:
+ self._CompileConst(c)
+ for e in self.fidl.enum_declarations:
+ self._CompileEnum(e)
+ for u in self.fidl.union_declarations:
+ self._CompileUnion(u)
+ for s in self.fidl.struct_declarations:
+ self._CompileStruct(s)
+ for i in self.fidl.interface_declarations:
+ self._CompileInterface(i)
+
+ self.f.write(self.output_deferred_to_eof)
+
+ def _InlineSizeOfType(self, t):
+ if t.kind == fidl.TypeKind.PRIMITIVE:
+ return _InlineSizeOfPrimitiveType(t.subtype)
+ elif t.kind == fidl.TypeKind.STRING:
+ return 16
+ elif t.kind == fidl.TypeKind.IDENTIFIER:
+ size = self.type_inline_size_by_name.get(t.identifier)
+ if size is None:
+ raise Exception('expected ' + t.identifier +
+ ' to be in self.type_inline_size_by_name')
+ return size
+ elif t.kind == fidl.TypeKind.HANDLE:
+ return 4
+ else:
+ raise NotImplementedError(t.kind)
+
+ def _CompileConstant(self, val, primitive_type):
+ """primitive_type is the string representation of the underlying FIDL type
+ of the constant's value. Note that this is not a type object, but rather
+ the string name of a basic primitive type, e.g. 'int8' or 'uint64'."""
+ if val.kind == fidl.ConstantKind.IDENTIFIER:
+ js_name = self.resolved_constant_name.get(val.identifier)
+ if not js_name:
+        raise Exception('expected ' + val.identifier +
+ ' to be in self.resolved_constant_name')
+ return js_name
+ elif val.kind == fidl.ConstantKind.LITERAL:
+ lit_kind = val.literal.kind
+ if lit_kind == fidl.LiteralKind.STRING:
+ return json.dumps(val.literal.value)
+ elif lit_kind == fidl.LiteralKind.NUMERIC:
+ suffix = 'n' if primitive_type in ('int64', 'uint64') else ''
+ return val.literal.value + suffix
+ elif lit_kind == fidl.LiteralKind.TRUE:
+ return 'true'
+ elif lit_kind == fidl.LiteralKind.FALSE:
+ return 'false'
+ elif lit_kind == fidl.LiteralKind.DEFAULT:
+ return 'default'
+ else:
+ raise Exception('unexpected kind')
+
+ def _EmitHeader(self):
+ self.f.write('''// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// WARNING: This file is machine generated by fidlgen_js.
+
+''')
+
+ def _CompileConst(self, const):
+ compound = _ParseCompoundIdentifier(const.name)
+ name = _CompileCompoundIdentifier(compound)
+ value = self._CompileConstant(const.value,
+ _GetUnderlyingPrimitiveType(const.type))
+ self.f.write('''/**
+ * @const
+ */
+const %(name)s = %(value)s;
+
+''' % {
+ 'name': name,
+ 'value': value
+ })
+ self.resolved_constant_name[const.name] = name
+
+ def _CompileEnum(self, enum):
+ compound = _ParseCompoundIdentifier(enum.name)
+ name = _CompileCompoundIdentifier(compound)
+ js_type = _JsTypeForPrimitiveType(enum.type)
+ data = {'js_type': js_type, 'type': enum.type.value, 'name': name}
+ self.f.write('''/**
+ * @enum {%(js_type)s}
+ */
+const %(name)s = {
+''' % data)
+ for member in enum.members:
+ # The 'type' of an enum isn't a real Type like most other places, but
+ # instead just a simple 'int8' or similar.
+ underlying_type = enum.type.value
+ self.f.write(
+ ''' %s: %s,\n''' %
+ (member.name, self._CompileConstant(member.value, underlying_type)))
+ fidl_constant_name = '.'.join(compound.library) + '/' + member.name
+ javascript_name = name + '.' + member.name
+ self.resolved_constant_name[fidl_constant_name] = javascript_name
+ self.f.write('};\n')
+ self.f.write('const _kTT_%(name)s = _kTT_%(type)s;\n\n' % data)
+
+ def _CompileUnion(self, union):
+ compound = _ParseCompoundIdentifier(union.name)
+ name = _CompileCompoundIdentifier(compound)
+ member_names = []
+ enc_cases = []
+ dec_cases = []
+ for i, m in enumerate(union.members):
+ member_name = _ChangeIfReserved(m.name)
+ member_names.append(member_name)
+ member_type = self._CompileType(m.type)
+ enc_cases.append('''\
+ case %(index)s:
+ _kTT_%(member_type)s.enc(e, o + 4, v.%(member_name)s);
+ break;''' % {
+ 'index': i,
+ 'member_type': member_type,
+ 'member_name': member_name,
+ })
+ dec_cases.append('''\
+ case %(index)s:
+ result.set_%(member_name)s(_kTT_%(member_type)s.dec(d, o + 4));
+ break;''' % {
+ 'index': i,
+ 'member_type': member_type,
+ 'member_name': member_name,
+ })
+
+ self.f.write(
+ '''\
+const _kTT_%(name)s = {
+ enc: function(e, o, v) {
+ if (v.$tag === $fidl__kInvalidUnionTag) throw "invalid tag";
+ e.data.setUint32(o, v.$tag, $fidl__kLE);
+ switch (v.$tag) {
+%(enc_cases)s
+ }
+ },
+ dec: function(d, o) {
+ var tag = d.data.getUint32(o, $fidl__kLE);
+ var result = new %(name)s();
+ switch (tag) {
+%(dec_cases)s
+ default:
+ throw "invalid tag";
+ }
+ return result;
+ },
+};
+
+const _kTT_%(name)s_Nullable = {
+ enc: function(e, o, v) {
+ e.data.setUint32(o, v ? 0xffffffff : 0, $fidl__kLE);
+ e.data.setUint32(o + 4, v ? 0xffffffff : 0, $fidl__kLE);
+ var start = e.alloc(%(size)s);
+ _kTT_%(name)s.enc(e, start, v);
+ },
+ dec: function(d, o) {
+ if (d.data.getUint32(o, $fidl__kLE) === 0) {
+ return new %(name)s();
+ }
+ var pointer = d.data.getUint32(o + 4, $fidl__kLE);
+ var dataOffset = d.claimMemory(%(size)s);
+ return _kTT_%(name)s.dec(d, dataOffset);
+ },
+};
+
+/**
+ * @constructor
+ */
+function %(name)s() { this.reset(); }
+
+%(name)s.prototype.reset = function(i) {
+ this.$tag = (i === undefined) ? $fidl__kInvalidUnionTag : i;
+''' % {
+ 'name': name,
+ 'size': union.size,
+ 'enc_cases': '\n'.join(enc_cases),
+ 'dec_cases': '\n'.join(dec_cases),
+ })
+ for m in member_names:
+ self.f.write(' this.%s = null;\n' % m)
+ self.f.write('}\n\n')
+
+ for i, m in enumerate(member_names):
+ self.f.write('''\
+%(name)s.prototype.set_%(member_name)s = function(v) {
+ this.reset(%(index)s);
+ this.%(member_name)s = v;
+};
+
+%(name)s.prototype.is_%(member_name)s = function() {
+ return this.$tag === %(index)s;
+};
+
+''' % {
+ 'name': name,
+ 'member_name': m,
+ 'index': i,
+ })
+
+ def _CompileStruct(self, struct):
+ compound = _ParseCompoundIdentifier(struct.name)
+ name = _CompileCompoundIdentifier(compound)
+ param_names = [_ChangeIfReserved(x.name) for x in struct.members]
+ # TODO(crbug.com/883496): @param and types.
+ self.f.write('''/**
+ * @constructor
+ * @struct
+ */
+function %(name)s(%(param_names)s) {
+''' % {
+ 'name': name,
+ 'param_names': ', '.join(param_names)
+ })
+ for member in struct.members:
+ member_name = _ChangeIfReserved(member.name)
+ value = '%(member_name)s'
+ if member.maybe_default_value:
+ underlying_type = _GetUnderlyingPrimitiveType(member.type)
+ value = (
+ '(%(member_name)s !== undefined) ? %(member_name)s : ' +
+ self._CompileConstant(member.maybe_default_value, underlying_type))
+ elif self.fidl.declarations.get(member.type.identifier) == \
+ fidl.DeclarationsMap.UNION:
+ union_compound = _ParseCompoundIdentifier(member.type.identifier)
+ union_name = _CompileCompoundIdentifier(union_compound)
+ value = ('(%(member_name)s !== undefined) ? %(member_name)s : ' + 'new '
+ + union_name + '()')
+ self.f.write((' this.%(member_name)s = ' + value + ';\n') %
+ {'member_name': member_name})
+ self.f.write('}\n\n')
+
+ self.f.write('''const _kTT_%(name)s = {
+ enc: function(e, o, v) {
+''' % {'name': name})
+
+ for member in struct.members:
+ element_ttname = self._CompileType(member.type)
+ self.f.write(
+ ' _kTT_%(element_ttname)s.enc('
+ 'e, o + %(offset)s, v.%(member_name)s);\n' % {
+ 'element_ttname': element_ttname,
+ 'offset': member.offset,
+ 'member_name': _ChangeIfReserved(member.name)
+ })
+
+ self.f.write(''' },
+ dec: function(d, o) {
+''')
+
+ for member in struct.members:
+ element_ttname = self._CompileType(member.type)
+ self.f.write(
+ ' var $temp_%(member_name)s = _kTT_%(element_ttname)s.dec('
+ 'd, o + %(offset)s);\n' % {
+ 'element_ttname': element_ttname,
+ 'offset': member.offset,
+ 'member_name': _ChangeIfReserved(member.name)
+ })
+ self.f.write(''' return new %(name)s(%(temp_names)s);
+ }
+};
+
+''' % {
+ 'name': name,
+ 'temp_names': ', '.join(['$temp_' + x for x in param_names])
+ })
+
+ def _CompileType(self, t):
+ """Ensures there's a type table for the given type, and returns the stem of
+ its name."""
+ if t.kind == fidl.TypeKind.PRIMITIVE:
+ return t.subtype
+ elif t.kind == fidl.TypeKind.STRING:
+ return 'String' + ('_Nullable' if t.nullable else '')
+ elif t.kind == fidl.TypeKind.IDENTIFIER:
+ compound = _ParseCompoundIdentifier(t.identifier)
+ name = _CompileCompoundIdentifier(compound)
+ return name + ('_Nullable' if t.nullable else '')
+ elif t.kind == fidl.TypeKind.HANDLE or t.kind == fidl.TypeKind.REQUEST:
+ return 'Handle'
+ elif t.kind == fidl.TypeKind.ARRAY:
+ element_ttname = self._CompileType(t.element_type)
+ ttname = 'ARR_%d_%s' % (t.element_count, element_ttname)
+ if ttname not in self.type_table_defined:
+ self.type_table_defined.add(ttname)
+ self.output_deferred_to_eof += ('''\
+const _kTT_%(ttname)s = {
+ enc: function(e, o, v) {
+ for (var i = 0; i < %(element_count)s; i++) {
+ _kTT_%(element_ttname)s.enc(e, o + (i * %(element_size)s), v[i]);
+ }
+ },
+ dec: function(d, o) {
+ var result = [];
+ for (var i = 0; i < %(element_count)s; i++) {
+ result.push(_kTT_%(element_ttname)s.dec(d, o + (i * %(element_size)s)));
+ }
+ return result;
+ },
+};
+
+''' % {
+ 'ttname': ttname,
+ 'element_ttname': element_ttname,
+ 'element_count': t.element_count,
+ 'element_size': self._InlineSizeOfType(t.element_type),
+ })
+ return ttname
+ elif t.kind == fidl.TypeKind.VECTOR:
+ element_ttname = self._CompileType(t.element_type)
+ ttname = ('VEC_' + ('Nullable_' if t.nullable else '') + element_ttname)
+ if t.nullable:
+ handle_null_enc = '''e.data.setUint32(o, 0, $fidl__kLE);
+ e.data.setUint32(o + 4, 0, $fidl__kLE);
+ e.data.setUint32(o + 8, 0, $fidl__kLE);
+ e.data.setUint32(o + 12, 0, $fidl__kLE);
+ return;
+'''
+ handle_null_dec = 'return null;'
+ else:
+ handle_null_enc = 'throw "non-null vector required";'
+ handle_null_dec = 'throw "non-null vector required";'
+
+ if ttname not in self.type_table_defined:
+ self.type_table_defined.add(ttname)
+ self.output_deferred_to_eof += ('''\
+const _kTT_%(ttname)s = {
+ enc: function(e, o, v) {
+ if (v === null || v === undefined) {
+ %(handle_null_enc)s
+ }
+ e.data.setUint32(o, v.length, $fidl__kLE);
+ e.data.setUint32(o + 4, 0, $fidl__kLE);
+ e.data.setUint32(o + 8, 0xffffffff, $fidl__kLE);
+ e.data.setUint32(o + 12, 0xffffffff, $fidl__kLE);
+ var start = e.alloc(v.length * %(element_size)s);
+ for (var i = 0; i < v.length; i++) {
+ _kTT_%(element_ttname)s.enc(e, start + (i * %(element_size)s), v[i]);
+ }
+ },
+ dec: function(d, o) {
+ var len = d.data.getUint32(o, $fidl__kLE);
+ var pointer = d.data.getUint32(o + 8, $fidl__kLE);
+ if (pointer === 0) {
+ %(handle_null_dec)s
+ }
+ var dataOffset = d.claimMemory(len * %(element_size)s);
+ var result = [];
+ for (var i = 0; i < len; i++) {
+ result.push(_kTT_%(element_ttname)s.dec(
+ d, dataOffset + (i * %(element_size)s)));
+ }
+ return result;
+ }
+};
+
+''' % {
+ 'ttname': ttname,
+ 'element_ttname': element_ttname,
+ 'element_size': self._InlineSizeOfType(t.element_type),
+ 'handle_null_enc': handle_null_enc,
+ 'handle_null_dec': handle_null_dec,
+ })
+ return ttname
+ else:
+ raise NotImplementedError(t.kind)
+
+ def _GenerateJsInterfaceForInterface(self, name, interface):
+ """Generates a JS @interface for the given FIDL interface."""
+ self.f.write('''/**
+ * @interface
+ */
+function %(name)s() {}
+
+''' % {'name': name})
+
+ # Define a JS interface part for the interface for typechecking.
+ for method in interface.methods:
+ method_name = _CompileIdentifier(method.name)
+ if method.has_request:
+ param_names = [_CompileIdentifier(x.name) for x in method.maybe_request]
+ if len(param_names):
+ self.f.write('/**\n')
+ # TODO(crbug.com/883496): Emit @param and @return type comments.
+ self.f.write(' */\n')
+ self.f.write(
+ '%(name)s.prototype.%(method_name)s = '
+ 'function(%(param_names)s) {};\n\n' % {
+ 'name': name,
+ 'method_name': method_name,
+ 'param_names': ', '.join(param_names)
+ })
+
+ # Emit message ordinals for later use.
+ for method in interface.methods:
+ method_name = _CompileIdentifier(method.name)
+ self.f.write(
+ 'const _k%(name)s_%(method_name)s_Ordinal = %(ordinal)s;\n' % {
+ 'name': name,
+ 'method_name': method_name,
+ 'ordinal': method.ordinal
+ })
+
+ self.f.write('\n')
+
+ def _GenerateJsProxyForInterface(self, name, interface):
+ """Generates the JS side implementation of a proxy class implementing the
+ given interface."""
+ proxy_name = name + 'Proxy'
+ self.f.write('''/**
+ * @constructor
+ * @implements %(name)s
+ */
+function %(proxy_name)s() {
+ this.channel = $ZX_HANDLE_INVALID;
+}
+
+%(proxy_name)s.prototype.$bind = function(channel) {
+ this.channel = channel;
+};
+
+%(proxy_name)s.prototype.$is_bound = function() {
+ return this.channel != $ZX_HANDLE_INVALID;
+};
+
+%(proxy_name)s.prototype.$request = function() {
+ if (this.$is_bound())
+ throw "Proxy already bound";
+ var pair = $ZxChannelCreate();
+ if (pair.status != $ZX_OK)
+ throw "ChannelPair creation failed";
+ this.channel = pair.first;
+ return pair.second;
+};
+
+%(proxy_name)s.prototype.$close = function() {
+ if (!this.$is_bound())
+ return;
+ var status = $zx_handle_close(this.channel);
+ if (status !== $ZX_OK) {
+ throw "close handle failed";
+ }
+ this.channel = $ZX_HANDLE_INVALID;
+};
+
+''' % {
+ 'name': name,
+ 'proxy_name': proxy_name
+ })
+ for method in interface.methods:
+ method_name = _CompileIdentifier(method.name)
+ if method.has_request:
+ type_tables = []
+ for param in method.maybe_request:
+ type_tables.append(self._CompileType(param.type))
+ param_names = [_CompileIdentifier(x.name) for x in method.maybe_request]
+ self.f.write(
+ '''\
+%(proxy_name)s.prototype.%(method_name)s = function(%(param_names)s) {
+ if (this.channel === $ZX_HANDLE_INVALID) {
+ throw "channel closed";
+ }
+ var $encoder = new $fidl_Encoder(_k%(name)s_%(method_name)s_Ordinal);
+ $encoder.alloc(%(size)s - $fidl_kMessageHeaderSize);
+''' % {
+ 'name': name,
+ 'proxy_name': proxy_name,
+ 'method_name': method_name,
+ 'param_names': ', '.join(param_names),
+ 'size': method.maybe_request_size
+ })
+
+ for param, ttname in zip(method.maybe_request, type_tables):
+ self.f.write(
+ '''\
+ _kTT_%(type_table)s.enc($encoder, %(offset)s, %(param_name)s);
+''' % {
+ 'type_table': ttname,
+ 'param_name': _CompileIdentifier(param.name),
+ 'offset': param.offset
+ })
+
+ self.f.write(''' var $writeResult = $ZxChannelWrite(this.channel,
+ $encoder.messageData(),
+ $encoder.messageHandles());
+ if ($writeResult !== $ZX_OK) {
+ throw "$ZxChannelWrite failed: " + $writeResult;
+ }
+''')
+
+ if method.has_response:
+ type_tables = []
+ for param in method.maybe_response:
+ type_tables.append(self._CompileType(param.type))
+ self.f.write('''
+ return $ZxObjectWaitOne(this.channel, $ZX_CHANNEL_READABLE, $ZX_TIME_INFINITE)
+ .then(() => new Promise(res => {
+ var $readResult = $ZxChannelRead(this.channel);
+ if ($readResult.status !== $ZX_OK) {
+ throw "channel read failed";
+ }
+
+ var $view = new DataView($readResult.data);
+
+ var $decoder = new $fidl_Decoder($view, $readResult.handles);
+ $decoder.claimMemory(%(size)s - $fidl_kMessageHeaderSize);
+''' % {'size': method.maybe_response_size})
+ for param, ttname in zip(method.maybe_response, type_tables):
+ self.f.write(
+ '''\
+ var %(param_name)s = _kTT_%(type_table)s.dec($decoder, %(offset)s);
+''' % {
+ 'type_table': ttname,
+ 'param_name': _CompileIdentifier(param.name),
+ 'offset': param.offset
+ })
+
+ self.f.write('''
+ res(%(args)s);
+ }));
+''' % {'args': ', '.join(x.name for x in method.maybe_response)})
+
+ self.f.write('''};
+
+''')
+
+ def _CompileInterface(self, interface):
+ compound = _ParseCompoundIdentifier(interface.name)
+ name = _CompileCompoundIdentifier(compound)
+ self._GenerateJsInterfaceForInterface(name, interface)
+ self._GenerateJsProxyForInterface(name, interface)
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('json')
+ parser.add_argument('--output', required=True)
+ args = parser.parse_args()
+
+ fidl_obj = fidl.fidl_from_dict(json.load(open(args.json, 'r')))
+ with open(args.output, 'w') as f:
+ c = Compiler(fidl_obj, f)
+ c.Compile()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/deps/v8/build/fuchsia/fidlgen_js/runtime/fidl.mjs b/deps/v8/build/fuchsia/fidlgen_js/runtime/fidl.mjs
new file mode 100644
index 0000000000..722098b143
--- /dev/null
+++ b/deps/v8/build/fuchsia/fidlgen_js/runtime/fidl.mjs
@@ -0,0 +1,270 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is the JS runtime support library for code generated by fidlgen_js. It
+// mostly consists of helpers to facilitate encoding and decoding of FIDL
+// messages.
+
+const $fidl_kInitialBufferSize = 1024;
+
+const $fidl_kMessageHeaderSize = 16;
+const $fidl_kMessageTxidOffset = 0;
+const $fidl_kMessageOrdinalOffset = 12;
+
+const $fidl__kAlignment = 8;
+const $fidl__kAlignmentMask = 0x7;
+
+const $fidl__kLE = true;
+
+const $fidl__kUserspaceTxidMask = 0x7fffffff;
+const $fidl__kHandlePresent = 0xffffffff;
+const $fidl__kInvalidUnionTag = 0xffffffff;
+var $fidl__nextTxid = 1;
+
+function $fidl__align(size) {
+ return size + (($fidl__kAlignment - (size & $fidl__kAlignmentMask)) &
+ $fidl__kAlignmentMask);
+}
+
+/**
+ * @constructor
+ * @param {number} ordinal
+ */
+function $fidl_Encoder(ordinal) {
+ var buf = new ArrayBuffer($fidl_kInitialBufferSize);
+ this.data = new DataView(buf);
+ this.extent = 0;
+ this.handles = [];
+ this._encodeMessageHeader(ordinal);
+}
+
+/**
+ * @param {number} ordinal
+ */
+$fidl_Encoder.prototype._encodeMessageHeader = function(ordinal) {
+ this.alloc($fidl_kMessageHeaderSize);
+ var txid = $fidl__nextTxid++ & $fidl__kUserspaceTxidMask;
+ this.data.setUint32($fidl_kMessageTxidOffset, txid, $fidl__kLE);
+ this.data.setUint32($fidl_kMessageOrdinalOffset, ordinal, $fidl__kLE);
+};
+
+/**
+ * @param {number} size
+ */
+$fidl_Encoder.prototype.alloc = function(size) {
+ var offset = this.extent;
+ this._claimMemory($fidl__align(size));
+ return offset;
+};
+
+/**
+ * @param {number} claimSize
+ */
+$fidl_Encoder.prototype._claimMemory = function(claimSize) {
+ this.extent += claimSize;
+ if (this.extent > this.data.byteLength) {
+ var newSize = this.data.byteLength + claimSize;
+ newSize += newSize * 2;
+ this._grow(newSize);
+ }
+};
+
+/**
+ * @param {number} newSize
+ */
+$fidl_Encoder.prototype._grow = function(newSize) {
+ var newBuffer = new ArrayBuffer(newSize);
+ new Uint8Array(newBuffer).set(new Uint8Array(this.data.buffer));
+ this.data = new DataView(newBuffer);
+};
+
+/**
+ * @param {number} handle
+ */
+$fidl_Encoder.prototype.addHandle = function(handle) {
+ this.handles.push(handle);
+};
+
+$fidl_Encoder.prototype.messageData = function() {
+ return new DataView(this.data.buffer, 0, this.extent);
+};
+
+$fidl_Encoder.prototype.messageHandles = function() {
+ return this.handles;
+};
+
+
+/**
+ * @constructor
+ * @param {Array} data
+ * @param {Array} handles
+ */
+function $fidl_Decoder(data, handles) {
+ this.data = data;
+ this.handles = handles;
+ this.nextOffset = 0;
+ this.nextHandle = 0;
+ this.claimMemory($fidl_kMessageHeaderSize);
+}
+
+/**
+ * @param {number} size
+ */
+$fidl_Decoder.prototype.claimMemory = function(size) {
+ var result = this.nextOffset;
+ this.nextOffset = $fidl__align(this.nextOffset + size);
+ return result;
+}
+
+$fidl_Decoder.prototype.claimHandle = function() {
+ if (this.nextHandle >= this.handles.length)
+ throw "Attempt to claim more handles than are available";
+ return this.handles[this.nextHandle++];
+}
+
+
+// Type tables and encoding helpers for generated Proxy code.
+const _kTT_bool = {
+ enc: function(e, o, v) { e.data.setInt8(o, v ? 1 : 0); },
+ dec: function(d, o) { return d.data.getInt8(o) != 0; },
+};
+
+const _kTT_float32 = {
+ enc: function(e, o, v) { e.data.setFloat32(o, v, $fidl__kLE); },
+ dec: function(d, o) { return d.data.getFloat32(o, $fidl__kLE); },
+};
+
+const _kTT_float64 = {
+ enc: function(e, o, v) { e.data.setFloat64(o, v, $fidl__kLE); },
+ dec: function(d, o) { return d.data.getFloat64(o, $fidl__kLE); },
+};
+
+const _kTT_int8 = {
+ enc: function(e, o, v) { e.data.setInt8(o, v); },
+ dec: function(d, o) { return d.data.getInt8(o); },
+};
+
+const _kTT_int16 = {
+ enc: function(e, o, v) { e.data.setInt16(o, v, $fidl__kLE); },
+ dec: function(d, o) { return d.data.getInt16(o, $fidl__kLE); },
+};
+
+const _kTT_int32 = {
+ enc: function(e, o, v) { e.data.setUint32(o, v, $fidl__kLE); },
+ dec: function(d, o) { return d.data.getInt32(o, $fidl__kLE); },
+};
+
+const _kTT_int64 = {
+ enc: function(e, o, v) {
+ var bi = BigInt.asIntN(64, BigInt(v));
+ var x = Number(bi & 0xffffffffn);
+ var y = Number((bi >> 32n) & 0xffffffffn);
+ e.data.setInt32(o, x, $fidl__kLE);
+ e.data.setInt32(o + 4, y, $fidl__kLE);
+ },
+ dec: function(d, o) {
+ var x = BigInt.asIntN(64, BigInt(d.data.getInt32(o, $fidl__kLE)));
+ var y = BigInt.asIntN(64, BigInt(d.data.getInt32(o + 4, $fidl__kLE)));
+ return x | (y << 32n);
+ },
+};
+
+const _kTT_uint8 = {
+ enc: function(e, o, v) { e.data.setUint8(o, v); },
+ dec: function(d, o) { return d.data.getUint8(o); },
+};
+
+const _kTT_uint16 = {
+ enc: function(e, o, v) { e.data.setUint16(o, v, $fidl__kLE); },
+ dec: function(d, o) { return d.data.getUint16(o, $fidl__kLE); },
+};
+
+const _kTT_uint32 = {
+ enc: function(e, o, v) { e.data.setUint32(o, v, $fidl__kLE); },
+ dec: function(d, o) { return d.data.getUint32(o, $fidl__kLE); },
+};
+
+const _kTT_uint64 = {
+ enc: function(e, o, v) {
+ var bi = BigInt.asUintN(64, BigInt(v));
+ var x = Number(bi & 0xffffffffn);
+ var y = Number((bi >> 32n) & 0xffffffffn);
+ e.data.setUint32(o, x, $fidl__kLE);
+ e.data.setUint32(o + 4, y, $fidl__kLE);
+ },
+ dec: function(d, o) {
+ var x = BigInt.asUintN(64, BigInt(d.data.getUint32(o, $fidl__kLE)));
+ var y = BigInt.asUintN(64, BigInt(d.data.getUint32(o + 4, $fidl__kLE)));
+ return x | (y << 32n);
+ },
+};
+
+const _kTT_Handle = {
+ enc: function(e, o, v) {
+ if (v === null || v === undefined) {
+ e.data.setUint32(o, 0, $fidl__kLE);
+ } else {
+ e.data.setUint32(o, $fidl__kHandlePresent, $fidl__kLE);
+ e.addHandle(v);
+ }
+ },
+ dec: function(d, o) {
+ var $present = d.data.getUint32(o, $fidl__kLE);
+ if ($present === 0) {
+ return 0;
+ } else {
+ if ($present !== $fidl__kHandlePresent)
+ throw "Expected UINT32_MAX to indicate handle presence";
+ return d.claimHandle();
+ }
+ },
+};
+
+const _kTT_String = {
+ enc: function(e, o, v) {
+ if (v === null || v === undefined) throw "non-null string required";
+ // Both size and data are uint64, but that's awkward in JS, so for now only
+ // support a maximum of 32b lengths. The maximum length of a FIDL message is
+ // shorter than 32b in any case.
+ var asUtf8 = $FidlJsStrToUtf8Array(v);
+ e.data.setUint32(o, asUtf8.length, $fidl__kLE);
+ e.data.setUint32(o + 4, 0, $fidl__kLE);
+ e.data.setUint32(o + 8, 0xffffffff, $fidl__kLE);
+ e.data.setUint32(o + 12, 0xffffffff, $fidl__kLE);
+ var body = e.alloc(asUtf8.length);
+ for (var i = 0; i < asUtf8.length; i++) {
+ e.data.setUint8(body + i, asUtf8[i], $fidl__kLE);
+ }
+ },
+ dec: function(d, o) {
+ var len = d.data.getUint32(o, $fidl__kLE);
+ var pointer = d.data.getUint32(o + 8, $fidl__kLE);
+ if (pointer === 0) throw "non-null string required";
+ var dataOffset = d.claimMemory(len);
+ return $FidlJsUtf8ArrayToStr(new DataView(d.data.buffer, dataOffset, len));
+ }
+};
+
+const _kTT_String_Nullable = {
+ enc: function(e, o, v) {
+ if (v === null || v === undefined) {
+ e.data.setUint32(o, 0, $fidl__kLE);
+ e.data.setUint32(o + 4, 0, $fidl__kLE);
+ e.data.setUint32(o + 8, 0, $fidl__kLE);
+ e.data.setUint32(o + 12, 0, $fidl__kLE);
+ } else {
+ _kTT_String.enc(e, o, v);
+ }
+ },
+  dec: function(d, o) {
+    var pointer = d.data.getUint32(o + 8, $fidl__kLE);
+    if (pointer === 0) {
+      return null;
+    } else {
+      return _kTT_String.dec(d, o);
+    }
+  }
+};
diff --git a/deps/v8/build/fuchsia/fidlgen_js/runtime/zircon.cc b/deps/v8/build/fuchsia/fidlgen_js/runtime/zircon.cc
new file mode 100644
index 0000000000..6dd1b1964b
--- /dev/null
+++ b/deps/v8/build/fuchsia/fidlgen_js/runtime/zircon.cc
@@ -0,0 +1,438 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "build/fuchsia/fidlgen_js/runtime/zircon.h"
+
+#include <lib/async/default.h>
+#include <lib/async/wait.h>
+#include <lib/zx/channel.h>
+#include <zircon/errors.h>
+#include <zircon/syscalls.h>
+#include <zircon/types.h>
+
+#include "base/bind.h"
+#include "base/threading/thread_checker.h"
+#include "gin/arguments.h"
+#include "gin/array_buffer.h"
+#include "gin/converter.h"
+#include "gin/data_object_builder.h"
+#include "gin/function_template.h"
+#include "gin/public/gin_embedders.h"
+
+namespace {
+
+fidljs::WaitSet& GetWaitsForIsolate(v8::Isolate* isolate) {
+ return *static_cast<fidljs::WaitSet*>(
+ isolate->GetData(gin::kEmbedderFuchsia));
+}
+
+} // namespace
+
+namespace fidljs {
+
+class WaitPromiseImpl : public async_wait_t {
+ public:
+ WaitPromiseImpl(v8::Isolate* isolate,
+ v8::Local<v8::Context> context,
+ v8::Local<v8::Promise::Resolver> resolver,
+ zx_handle_t handle,
+ zx_signals_t signals)
+ : async_wait_t({ASYNC_STATE_INIT, &WaitPromiseImpl::StaticOnSignaled,
+ handle, signals}),
+ isolate_(isolate),
+ wait_state_(WaitState::kCreated),
+ failed_start_status_(ZX_OK) {
+ context_.Reset(isolate_, context);
+ resolver_.Reset(isolate_, resolver);
+ }
+
+ ~WaitPromiseImpl() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+ switch (wait_state_) {
+ case WaitState::kCreated:
+ // The wait never started, so reject the promise (but don't attempt to
+ // cancel the wait).
+ DCHECK_NE(failed_start_status_, ZX_OK);
+ RejectPromise(failed_start_status_, 0);
+ break;
+
+ case WaitState::kStarted:
+ // The wait was started, but has not yet completed. Cancel the wait and
+ // reject the promise. The object is being destructed here because it's
+ // been removed from the set of waits attached to the isolate, so
+ // we need not remove it.
+ CHECK_EQ(async_cancel_wait(async_get_default_dispatcher(), this),
+ ZX_OK);
+ RejectPromise(ZX_ERR_CANCELED, 0);
+ break;
+
+ case WaitState::kCompleted:
+ // The callback has already been called and so the promise has been
+ // resolved or rejected, and the wait has been removed from the
+ // dispatcher, so there's nothing to do.
+ break;
+ }
+ }
+
+ bool BeginWait() {
+ DCHECK_EQ(wait_state_, WaitState::kCreated);
+ zx_status_t status = async_begin_wait(async_get_default_dispatcher(), this);
+ if (status == ZX_OK) {
+ wait_state_ = WaitState::kStarted;
+ } else {
+ failed_start_status_ = status;
+ }
+ return status == ZX_OK;
+ }
+
+ private:
+ static void StaticOnSignaled(async_dispatcher_t* dispatcher,
+ async_wait_t* wait,
+ zx_status_t status,
+ const zx_packet_signal_t* signal) {
+ auto* self = static_cast<WaitPromiseImpl*>(wait);
+ self->OnSignaled(status, signal);
+ }
+
+ void OnSignaled(zx_status_t status, const zx_packet_signal_t* signal) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK_EQ(wait_state_, WaitState::kStarted);
+ DCHECK_NE(status, ZX_ERR_CANCELED)
+ << "wait should have been canceled before shutdown";
+
+ wait_state_ = WaitState::kCompleted;
+
+ if (status == ZX_OK &&
+ (signal->observed & signal->trigger) == signal->trigger) {
+ ResolvePromise(signal->observed);
+ } else {
+ RejectPromise(status, signal->observed);
+ }
+
+ GetWaitsForIsolate(isolate_).erase(this);
+ // |this| has been deleted.
+ }
+
+ void ResolvePromise(zx_signals_t observed) {
+ v8::Local<v8::Promise::Resolver> resolver(resolver_.Get(isolate_));
+ v8::Local<v8::Context> context(context_.Get(isolate_));
+ v8::Local<v8::Object> value = gin::DataObjectBuilder(isolate_)
+ .Set("status", ZX_OK)
+ .Set("observed", observed)
+ .Build();
+ resolver->Resolve(context, value).ToChecked();
+ }
+
+ void RejectPromise(zx_status_t status, zx_signals_t observed) {
+ v8::Local<v8::Promise::Resolver> resolver(resolver_.Get(isolate_));
+ v8::Local<v8::Context> context(context_.Get(isolate_));
+ v8::Local<v8::Object> value = gin::DataObjectBuilder(isolate_)
+ .Set("status", status)
+ .Set("observed", observed)
+ .Build();
+ resolver->Reject(context, value).ToChecked();
+ }
+
+ v8::Isolate* isolate_;
+ v8::Global<v8::Context> context_;
+ v8::Global<v8::Promise::Resolver> resolver_;
+ enum class WaitState {
+ kCreated,
+ kStarted,
+ kCompleted,
+ } wait_state_;
+ zx_status_t failed_start_status_;
+
+ THREAD_CHECKER(thread_checker_);
+
+ DISALLOW_COPY_AND_ASSIGN(WaitPromiseImpl);
+};
+
+} // namespace fidljs
+
+namespace {
+
+v8::Local<v8::Promise> ZxObjectWaitOne(gin::Arguments* args) {
+ zx_handle_t handle;
+ if (!args->GetNext(&handle)) {
+ args->ThrowError();
+ return v8::Local<v8::Promise>();
+ }
+
+ zx_signals_t signals;
+ if (!args->GetNext(&signals)) {
+ args->ThrowError();
+ return v8::Local<v8::Promise>();
+ }
+
+ v8::MaybeLocal<v8::Promise::Resolver> maybe_resolver =
+ v8::Promise::Resolver::New(args->GetHolderCreationContext());
+ v8::Local<v8::Promise::Resolver> resolver;
+ if (maybe_resolver.ToLocal(&resolver)) {
+ auto wait = std::make_unique<fidljs::WaitPromiseImpl>(
+ args->isolate(), args->GetHolderCreationContext(), resolver, handle,
+ signals);
+ if (wait->BeginWait()) {
+ // The wait will always be notified asynchronously, so it's OK to delay
+ // the add until after it has completed successfully. Move |wait| into the
+ // set of active waits.
+ GetWaitsForIsolate(args->isolate()).insert(std::move(wait));
+ }
+
+ // If BeginWait() fails, then |wait| will be deleted here, causing the
+ // returned promise to be rejected.
+ return resolver->GetPromise();
+ }
+
+ return v8::Local<v8::Promise>();
+}
+
+v8::Local<v8::Value> ZxChannelCreate(gin::Arguments* args) {
+ zx_handle_t channel0, channel1;
+ zx_status_t status = zx_channel_create(0, &channel0, &channel1);
+ if (status != ZX_OK) {
+ return gin::DataObjectBuilder(args->isolate())
+ .Set("status", status)
+ .Build();
+ }
+
+ return gin::DataObjectBuilder(args->isolate())
+ .Set("status", status)
+ .Set("first", channel0)
+ .Set("second", channel1)
+ .Build();
+}
+
+zx_status_t ZxChannelWrite(gin::Arguments* args) {
+ zx_handle_t handle;
+ if (!args->GetNext(&handle)) {
+ args->ThrowError();
+ return ZX_ERR_INVALID_ARGS;
+ }
+
+ gin::ArrayBufferView data;
+ if (!args->GetNext(&data)) {
+ args->ThrowError();
+ return ZX_ERR_INVALID_ARGS;
+ }
+
+ std::vector<zx_handle_t> handles;
+ if (!args->GetNext(&handles)) {
+ args->ThrowError();
+ return ZX_ERR_INVALID_ARGS;
+ }
+
+ zx_status_t status =
+ zx_channel_write(handle, 0, data.bytes(), data.num_bytes(),
+ handles.data(), handles.size());
+ return status;
+}
+
+v8::Local<v8::Object> ZxChannelRead(gin::Arguments* args) {
+ zx_handle_t handle;
+ if (!args->GetNext(&handle)) {
+ args->ThrowError();
+ return gin::DataObjectBuilder(args->isolate())
+ .Set("status", ZX_ERR_INVALID_ARGS)
+ .Build();
+ }
+ zx::unowned_channel ch(handle);
+
+ uint32_t data_size;
+ uint32_t num_handles;
+  zx_status_t status =
+      ch->read(0, nullptr, nullptr, 0, 0, &data_size, &num_handles);
+  DCHECK_EQ(status, ZX_ERR_BUFFER_TOO_SMALL);
+
+ std::vector<zx_handle_t> handles;
+ handles.resize(num_handles);
+
+ v8::Local<v8::ArrayBuffer> buf =
+ v8::ArrayBuffer::New(args->isolate(), data_size);
+ uint32_t actual_bytes, actual_handles;
+  status = ch->read(0, buf->GetContents().Data(), handles.data(), data_size,
+                    handles.size(), &actual_bytes, &actual_handles);
+ DCHECK_EQ(actual_bytes, data_size);
+ DCHECK_EQ(actual_handles, num_handles);
+
+ if (status != ZX_OK) {
+ return gin::DataObjectBuilder(args->isolate())
+ .Set("status", status)
+ .Build();
+ }
+
+ return gin::DataObjectBuilder(args->isolate())
+ .Set("status", status)
+ .Set("data", buf)
+ .Set("handles", handles)
+ .Build();
+}
+
+v8::Local<v8::Value> StrToUtf8Array(gin::Arguments* args) {
+ std::string str;
+ // This converts the string to utf8 from ucs2, so then just repackage the
+ // string as an array and return it.
+ if (!args->GetNext(&str)) {
+ args->ThrowError();
+ return v8::Local<v8::Object>();
+ }
+
+ // TODO(crbug.com/883496): Not sure how to make a Uint8Array to return here
+ // which would be a bit more efficient.
+ std::vector<int> data;
+ std::copy(str.begin(), str.end(), std::back_inserter(data));
+ return gin::ConvertToV8(args->isolate(), data);
+}
+
+v8::Local<v8::Value> Utf8ArrayToStr(gin::Arguments* args) {
+ gin::ArrayBufferView data;
+ if (!args->GetNext(&data)) {
+ args->ThrowError();
+ return v8::Local<v8::Value>();
+ }
+
+ // Get the UTF-8 out into a string, and then rely on ConvertToV8 to convert
+ // that to a UCS-2 string.
+ return gin::StringToV8(
+ args->isolate(), base::StringPiece(static_cast<const char*>(data.bytes()),
+ data.num_bytes()));
+}
+
+} // namespace
+
+namespace fidljs {
+
+ZxBindings::ZxBindings(v8::Isolate* isolate, v8::Local<v8::Object> global)
+ : isolate_(isolate), wait_set_(std::make_unique<WaitSet>()) {
+ DCHECK_EQ(isolate->GetData(gin::kEmbedderFuchsia), nullptr);
+ isolate->SetData(gin::kEmbedderFuchsia, wait_set_.get());
+
+#define SET_CONSTANT(k) \
+ global->Set(gin::StringToSymbol(isolate, "$" #k), \
+ gin::ConvertToV8(isolate, k))
+
+ // zx_status_t.
+ SET_CONSTANT(ZX_OK);
+ SET_CONSTANT(ZX_ERR_INTERNAL);
+ SET_CONSTANT(ZX_ERR_NOT_SUPPORTED);
+ SET_CONSTANT(ZX_ERR_NO_RESOURCES);
+ SET_CONSTANT(ZX_ERR_NO_MEMORY);
+ SET_CONSTANT(ZX_ERR_INTERNAL_INTR_RETRY);
+ SET_CONSTANT(ZX_ERR_INVALID_ARGS);
+ SET_CONSTANT(ZX_ERR_BAD_HANDLE);
+ SET_CONSTANT(ZX_ERR_WRONG_TYPE);
+ SET_CONSTANT(ZX_ERR_BAD_SYSCALL);
+ SET_CONSTANT(ZX_ERR_OUT_OF_RANGE);
+ SET_CONSTANT(ZX_ERR_BUFFER_TOO_SMALL);
+ SET_CONSTANT(ZX_ERR_BAD_STATE);
+ SET_CONSTANT(ZX_ERR_TIMED_OUT);
+ SET_CONSTANT(ZX_ERR_SHOULD_WAIT);
+ SET_CONSTANT(ZX_ERR_CANCELED);
+ SET_CONSTANT(ZX_ERR_PEER_CLOSED);
+ SET_CONSTANT(ZX_ERR_NOT_FOUND);
+ SET_CONSTANT(ZX_ERR_ALREADY_EXISTS);
+ SET_CONSTANT(ZX_ERR_ALREADY_BOUND);
+ SET_CONSTANT(ZX_ERR_UNAVAILABLE);
+ SET_CONSTANT(ZX_ERR_ACCESS_DENIED);
+ SET_CONSTANT(ZX_ERR_IO);
+ SET_CONSTANT(ZX_ERR_IO_REFUSED);
+ SET_CONSTANT(ZX_ERR_IO_DATA_INTEGRITY);
+ SET_CONSTANT(ZX_ERR_IO_DATA_LOSS);
+ SET_CONSTANT(ZX_ERR_IO_NOT_PRESENT);
+ SET_CONSTANT(ZX_ERR_IO_OVERRUN);
+ SET_CONSTANT(ZX_ERR_IO_MISSED_DEADLINE);
+ SET_CONSTANT(ZX_ERR_IO_INVALID);
+ SET_CONSTANT(ZX_ERR_BAD_PATH);
+ SET_CONSTANT(ZX_ERR_NOT_DIR);
+ SET_CONSTANT(ZX_ERR_NOT_FILE);
+ SET_CONSTANT(ZX_ERR_FILE_BIG);
+ SET_CONSTANT(ZX_ERR_NO_SPACE);
+ SET_CONSTANT(ZX_ERR_NOT_EMPTY);
+ SET_CONSTANT(ZX_ERR_STOP);
+ SET_CONSTANT(ZX_ERR_NEXT);
+ SET_CONSTANT(ZX_ERR_ASYNC);
+ SET_CONSTANT(ZX_ERR_PROTOCOL_NOT_SUPPORTED);
+ SET_CONSTANT(ZX_ERR_ADDRESS_UNREACHABLE);
+ SET_CONSTANT(ZX_ERR_ADDRESS_IN_USE);
+ SET_CONSTANT(ZX_ERR_NOT_CONNECTED);
+ SET_CONSTANT(ZX_ERR_CONNECTION_REFUSED);
+ SET_CONSTANT(ZX_ERR_CONNECTION_RESET);
+ SET_CONSTANT(ZX_ERR_CONNECTION_ABORTED);
+
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+
+ // Handle APIs.
+ global
+ ->Set(context, gin::StringToSymbol(isolate, "$ZxObjectWaitOne"),
+ gin::CreateFunctionTemplate(isolate,
+ base::BindRepeating(ZxObjectWaitOne))
+ ->GetFunction(context)
+ .ToLocalChecked())
+ .ToChecked();
+ global
+ ->Set(context, gin::StringToSymbol(isolate, "$zx_handle_close"),
+ gin::CreateFunctionTemplate(isolate,
+ base::BindRepeating(zx_handle_close))
+ ->GetFunction(context)
+ .ToLocalChecked())
+ .ToChecked();
+ SET_CONSTANT(ZX_HANDLE_INVALID);
+ SET_CONSTANT(ZX_TIME_INFINITE);
+
+ // Channel APIs.
+ global
+ ->Set(context, gin::StringToSymbol(isolate, "$ZxChannelCreate"),
+ gin::CreateFunctionTemplate(isolate,
+ base::BindRepeating(&ZxChannelCreate))
+ ->GetFunction(context)
+ .ToLocalChecked())
+ .ToChecked();
+ global
+ ->Set(context, gin::StringToSymbol(isolate, "$ZxChannelWrite"),
+ gin::CreateFunctionTemplate(isolate,
+ base::BindRepeating(&ZxChannelWrite))
+ ->GetFunction(context)
+ .ToLocalChecked())
+ .ToChecked();
+ global
+ ->Set(context, gin::StringToSymbol(isolate, "$ZxChannelRead"),
+ gin::CreateFunctionTemplate(isolate,
+ base::BindRepeating(&ZxChannelRead))
+ ->GetFunction(context)
+ .ToLocalChecked())
+ .ToChecked();
+ SET_CONSTANT(ZX_CHANNEL_READABLE);
+ SET_CONSTANT(ZX_CHANNEL_WRITABLE);
+ SET_CONSTANT(ZX_CHANNEL_PEER_CLOSED);
+ SET_CONSTANT(ZX_CHANNEL_READ_MAY_DISCARD);
+ SET_CONSTANT(ZX_CHANNEL_MAX_MSG_BYTES);
+ SET_CONSTANT(ZX_CHANNEL_MAX_MSG_HANDLES);
+
+ // Utilities to make string handling easier to convert to/from UCS-2 (JS) <->
+ // UTF-8 (FIDL).
+ global
+ ->Set(context, gin::StringToSymbol(isolate, "$FidlJsStrToUtf8Array"),
+ gin::CreateFunctionTemplate(isolate,
+ base::BindRepeating(&StrToUtf8Array))
+ ->GetFunction(context)
+ .ToLocalChecked())
+ .ToChecked();
+ global
+ ->Set(context, gin::StringToSymbol(isolate, "$FidlJsUtf8ArrayToStr"),
+ gin::CreateFunctionTemplate(isolate,
+ base::BindRepeating(&Utf8ArrayToStr))
+ ->GetFunction(context)
+ .ToLocalChecked())
+ .ToChecked();
+
+#undef SET_CONSTANT
+}
+
+ZxBindings::~ZxBindings() {
+ wait_set_->clear();
+ isolate_->SetData(gin::kEmbedderFuchsia, nullptr);
+}
+
+} // namespace fidljs
diff --git a/deps/v8/build/fuchsia/fidlgen_js/runtime/zircon.h b/deps/v8/build/fuchsia/fidlgen_js/runtime/zircon.h
new file mode 100644
index 0000000000..b54d35495c
--- /dev/null
+++ b/deps/v8/build/fuchsia/fidlgen_js/runtime/zircon.h
@@ -0,0 +1,58 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BUILD_FUCHSIA_FIDLGEN_JS_RUNTIME_ZIRCON_H_
+#define BUILD_FUCHSIA_FIDLGEN_JS_RUNTIME_ZIRCON_H_
+
+#include <memory>
+
+#include "base/containers/flat_set.h"
+#include "base/containers/unique_ptr_adapters.h"
+#include "base/macros.h"
+#include "v8/include/v8.h"
+
+namespace fidljs {
+
+class WaitPromiseImpl;
+
+// A WaitSet is associated with each Isolate and represents all outstanding
+// waits that are queued on the dispatcher.
+//
+// If the wait completes normally, the contained promise is resolved, the
+// WaitPromiseImpl is marked as completed, and then deleted (by removing it from
+// the pending set).
+//
+// If the caller shuts down with outstanding waits pending, the asynchronous
+// waits are canceled by clearing the set (which deletes all the
+// WaitPromiseImpls). If a WaitPromiseImpl has not completed when it is
+// destroyed, it cancels the outstanding wait in its destructor.
+//
+// WaitPromiseImpl is responsible for resolving or rejecting promises. If the
+// object was created, but a wait never started it will not have been added to
+// the wait set, and so will reject the promise immediately. Otherwise, the
+// promise will be resolved or rejected when the asynchronous wait is signaled
+// or canceled.
+using WaitSet =
+ base::flat_set<std::unique_ptr<WaitPromiseImpl>, base::UniquePtrComparator>;
+
+class ZxBindings {
+ public:
+ // Adds Zircon APIs bindings to |global|, for use by JavaScript callers.
+ ZxBindings(v8::Isolate* isolate, v8::Local<v8::Object> global);
+
+ // Cleans up attached storage in the isolate added by the bindings, and
+ // cancels any pending asynchronous requests. It is important this this be
+ // done before the v8 context is torn down.
+ ~ZxBindings();
+
+ private:
+ v8::Isolate* const isolate_;
+ std::unique_ptr<WaitSet> wait_set_;
+
+ DISALLOW_COPY_AND_ASSIGN(ZxBindings);
+};
+
+} // namespace fidljs
+
+#endif // BUILD_FUCHSIA_FIDLGEN_JS_RUNTIME_ZIRCON_H_
diff --git a/deps/v8/build/fuchsia/fidlgen_js/test/fidlgen_js_unittest.cc b/deps/v8/build/fuchsia/fidlgen_js/test/fidlgen_js_unittest.cc
new file mode 100644
index 0000000000..ed025c878b
--- /dev/null
+++ b/deps/v8/build/fuchsia/fidlgen_js/test/fidlgen_js_unittest.cc
@@ -0,0 +1,1334 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <lib/fidl/cpp/binding.h>
+#include <lib/fidl/cpp/internal/pending_response.h>
+#include <lib/fidl/cpp/internal/weak_stub_controller.h>
+#include <lib/zx/debuglog.h>
+#include <zircon/syscalls/log.h>
+
+#include "base/bind.h"
+#include "base/files/file_util.h"
+#include "base/fuchsia/fuchsia_logging.h"
+#include "base/run_loop.h"
+#include "base/stl_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/test/launcher/unit_test_launcher.h"
+#include "base/test/test_suite.h"
+#include "base/test/test_timeouts.h"
+#include "build/fuchsia/fidlgen_js/fidl/fidljstest/cpp/fidl.h"
+#include "build/fuchsia/fidlgen_js/runtime/zircon.h"
+#include "gin/converter.h"
+#include "gin/modules/console.h"
+#include "gin/object_template_builder.h"
+#include "gin/public/isolate_holder.h"
+#include "gin/shell_runner.h"
+#include "gin/test/v8_test.h"
+#include "gin/try_catch.h"
+#include "v8/include/v8.h"
+
+// Paths, inside the test package, of the JS runtime support library and the
+// generated JS bindings exercised by these tests.
+static const char kRuntimeFile[] =
+    "/pkg/build/fuchsia/fidlgen_js/runtime/fidl.mjs";
+static const char kTestBindingFile[] =
+    "/pkg/build/fuchsia/fidlgen_js/fidl/fidljstest/js/fidl.js";
+
+namespace {
+
+// Returns the kernel object id (koid) of |handle|, or ZX_KOID_INVALID if
+// zx_object_get_info() fails (the failure is logged).
+zx_koid_t GetKoidForHandle(zx_handle_t handle) {
+  zx_info_handle_basic_t info;
+  zx_status_t status = zx_object_get_info(handle, ZX_INFO_HANDLE_BASIC, &info,
+                                          sizeof(info), nullptr, nullptr);
+  if (status != ZX_OK) {
+    ZX_LOG(ERROR, status) << "zx_object_get_info";
+    return ZX_KOID_INVALID;
+  }
+  return info.koid;
+}
+
+// Convenience overload for C++ zx:: wrapper objects.
+zx_koid_t GetKoidForHandle(const zx::object_base& object) {
+  return GetKoidForHandle(object.get());
+}
+
+}  // namespace
+
+// ShellRunner delegate for the tests: exposes gin's console (log()) to
+// scripts, and turns any unhandled JS exception into a gtest failure.
+class FidlGenJsTestShellRunnerDelegate : public gin::ShellRunnerDelegate {
+ public:
+  FidlGenJsTestShellRunnerDelegate() {}
+
+  v8::Local<v8::ObjectTemplate> GetGlobalTemplate(
+      gin::ShellRunner* runner,
+      v8::Isolate* isolate) override {
+    v8::Local<v8::ObjectTemplate> templ =
+        gin::ObjectTemplateBuilder(isolate).Build();
+    gin::Console::Register(isolate, templ);
+    return templ;
+  }
+
+  // Log the stack trace and fail the current test on uncaught JS exceptions.
+  void UnhandledException(gin::ShellRunner* runner,
+                          gin::TryCatch& try_catch) override {
+    LOG(ERROR) << try_catch.GetStackTrace();
+    ADD_FAILURE();
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FidlGenJsTestShellRunnerDelegate);
+};
+
+// All tests use gin's V8 fixture, which provides an isolate via |instance_|.
+using FidlGenJsTest = gin::V8Test;
+
+// Smoke test: a script runs in the shell runner, and a global it sets is
+// readable back from C++ via gin's converters.
+TEST_F(FidlGenJsTest, BasicJSSetup) {
+  v8::Isolate* isolate = instance_->isolate();
+
+  std::string source = "log('this is a log'); this.stuff = 'HAI';";
+  FidlGenJsTestShellRunnerDelegate delegate;
+  gin::ShellRunner runner(&delegate, isolate);
+  gin::Runner::Scope scope(&runner);
+  runner.Run(source, "test.js");
+
+  std::string result;
+  EXPECT_TRUE(gin::Converter<std::string>::FromV8(
+      isolate, runner.global()->Get(gin::StringToV8(isolate, "stuff")),
+      &result));
+  EXPECT_EQ("HAI", result);
+}
+
+// Reads |filename| and executes its contents in |runner|. Uses ASSERT, so the
+// caller must tolerate a gtest fatal failure if the file can't be read.
+void LoadAndSource(gin::ShellRunner* runner, const base::FilePath& filename) {
+  std::string contents;
+  ASSERT_TRUE(base::ReadFileToString(filename, &contents));
+
+  runner->Run(contents, filename.MaybeAsASCII());
+}
+
+// Per-test harness: sets up a shell runner with the Zircon bindings, loads
+// the JS runtime support and generated bindings, creates a channel pair, and
+// exposes the client end to scripts as the global |testHandle|. Also provides
+// typed accessors for reading JS globals back into C++.
+class BindingsSetupHelper {
+ public:
+  explicit BindingsSetupHelper(v8::Isolate* isolate)
+      : isolate_(isolate),
+        handle_scope_(isolate),
+        delegate_(),
+        runner_(&delegate_, isolate),
+        scope_(&runner_),
+        zx_bindings_(
+            std::make_unique<fidljs::ZxBindings>(isolate, runner_.global())) {
+    // TODO(scottmg): Figure out how to set up v8 import hooking and make
+    // fidl_Xyz into $fidl.Xyz. Manually inject the runtime support js files
+    // for now. https://crbug.com/883496.
+    LoadAndSource(&runner_, base::FilePath(kRuntimeFile));
+    LoadAndSource(&runner_, base::FilePath(kTestBindingFile));
+
+    zx_status_t status = zx::channel::create(0, &server_, &client_);
+    EXPECT_EQ(status, ZX_OK);
+
+    // Hand the client end of the channel to JS as a raw handle value.
+    runner_.global()->Set(gin::StringToSymbol(isolate, "testHandle"),
+                          gin::ConvertToV8(isolate, client_.get()));
+  }
+
+  // Reads the JS global |name| and converts it to T via gin.
+  template <class T>
+  T Get(const std::string& name) {
+    T t;
+    EXPECT_TRUE(gin::Converter<T>::FromV8(
+        isolate_, runner_.global()->Get(gin::StringToV8(isolate_, name)), &t));
+    return t;
+  }
+
+  // Extracts a 64-bit value from a JS BigInt (gin's converters only handle
+  // Number, which can't represent the full 64-bit range).
+  template <class T>
+  T FromV8BigInt(v8::Local<v8::Value> val);
+
+  template <>
+  uint64_t FromV8BigInt(v8::Local<v8::Value> val) {
+    EXPECT_TRUE(val->IsBigInt());
+    return val.As<v8::BigInt>()->Uint64Value(nullptr);
+  }
+
+  template <>
+  int64_t FromV8BigInt(v8::Local<v8::Value> val) {
+    EXPECT_TRUE(val->IsBigInt());
+    return val.As<v8::BigInt>()->Int64Value(nullptr);
+  }
+
+  // Custom version of gin::Converter that handles int64/uint64 from BigInt as
+  // gin::Converter is quite tied to Number.
+  template <class T>
+  std::vector<T> GetBigIntVector(const std::string& name) {
+    v8::Local<v8::Value> val =
+        runner_.global()->Get(gin::StringToV8(isolate_, name));
+    EXPECT_TRUE(val->IsArray());
+
+    std::vector<T> result;
+    v8::Local<v8::Array> array(v8::Local<v8::Array>::Cast(val));
+    uint32_t length = array->Length();
+    for (uint32_t i = 0; i < length; ++i) {
+      v8::Local<v8::Value> v8_item;
+      EXPECT_TRUE(
+          array->Get(isolate_->GetCurrentContext(), i).ToLocal(&v8_item));
+      T item;
+      // Elements may arrive as either Number or BigInt depending on value.
+      if (v8_item->IsNumber()) {
+        EXPECT_TRUE(gin::Converter<T>::FromV8(isolate_, v8_item, &item));
+      } else if (v8_item->IsBigInt()) {
+        item = FromV8BigInt<T>(v8_item);
+      } else {
+        ADD_FAILURE();
+      }
+      result.push_back(item);
+    }
+
+    return result;
+  }
+
+  // True if the JS global |name| is null.
+  bool IsNull(const std::string& name) {
+    return runner_.global()->Get(gin::StringToV8(isolate_, name))->IsNull();
+  }
+
+  // Tears down the bindings early, canceling any outstanding async waits.
+  void DestroyBindingsForTesting() { zx_bindings_.reset(); }
+
+  zx::channel& server() { return server_; }
+  zx::channel& client() { return client_; }
+  gin::ShellRunner& runner() { return runner_; }
+
+ private:
+  v8::Isolate* isolate_;
+  v8::HandleScope handle_scope_;
+  FidlGenJsTestShellRunnerDelegate delegate_;
+  gin::ShellRunner runner_;
+  gin::Runner::Scope scope_;
+  std::unique_ptr<fidljs::ZxBindings> zx_bindings_;
+  zx::channel server_;
+  zx::channel client_;
+
+  DISALLOW_COPY_AND_ASSIGN(BindingsSetupHelper);
+};
+
+// Minimal server implementation of fidljstest.AnotherInterface used to test
+// passing an interface request over another interface (see GetAnother()).
+class AnotherInterfaceImpl : public fidljstest::AnotherInterface {
+ public:
+  AnotherInterfaceImpl(
+      fidl::InterfaceRequest<fidljstest::AnotherInterface> request)
+      : binding_(this, std::move(request)) {}
+  ~AnotherInterfaceImpl() override = default;
+
+  void TimesTwo(int32_t a, TimesTwoCallback callback) override {
+    callback(a * 2);
+  }
+
+ private:
+  fidl::Binding<fidljstest::AnotherInterface> binding_;
+
+  DISALLOW_COPY_AND_ASSIGN(AnotherInterfaceImpl);
+};
+
+// Server-side implementation of fidljstest.Testola. Each method either
+// records what it received (exposed via the accessors below) or validates its
+// arguments inline and replies through its callback, so tests can confirm
+// what the JS-generated bindings actually sent over the channel.
+class TestolaImpl : public fidljstest::Testola {
+ public:
+  TestolaImpl() {
+    // Don't want the default values from the C++ side.
+    memset(&basic_struct_, -1, sizeof(basic_struct_));
+  }
+  ~TestolaImpl() override {}
+
+  void DoSomething() override { was_do_something_called_ = true; }
+
+  void PrintInt(int32_t number) override { received_int_ = number; }
+
+  void PrintMsg(std::string message) override { received_msg_ = message; }
+
+  void VariousArgs(fidljstest::Blorp blorp,
+                   std::string msg,
+                   std::vector<uint32_t> stuff) override {
+    various_blorp_ = blorp;
+    various_msg_ = msg;
+    various_stuff_ = stuff;
+  }
+
+  // Queues the response rather than answering immediately; tests trigger it
+  // later via CallResponseCallbacks() (or never, for cancellation tests).
+  void WithResponse(int32_t a,
+                    int32_t b,
+                    WithResponseCallback callback) override {
+    response_callbacks_.push_back(base::BindOnce(
+        [](WithResponseCallback callback, int32_t result) { callback(result); },
+        std::move(callback), a + b));
+  }
+
+  void SendAStruct(fidljstest::BasicStruct basic_struct) override {
+    basic_struct_ = basic_struct;
+  }
+
+  void NestedStructsWithResponse(
+      fidljstest::BasicStruct basic_struct,
+      NestedStructsWithResponseCallback resp) override {
+    // Construct a response, echoing the passed in structure with some
+    // modifications, as well as additional data.
+    fidljstest::StuffAndThings sat;
+    sat.count = 123;
+    sat.id = "here is my id";
+    sat.a_vector.push_back(1);
+    sat.a_vector.push_back(-2);
+    sat.a_vector.push_back(4);
+    sat.a_vector.push_back(-8);
+    sat.basic.b = !basic_struct.b;
+    sat.basic.i8 = basic_struct.i8 * 2;
+    sat.basic.i16 = basic_struct.i16 * 2;
+    sat.basic.i32 = basic_struct.i32 * 2;
+    sat.basic.u8 = basic_struct.u8 * 2;
+    sat.basic.u16 = basic_struct.u16 * 2;
+    sat.basic.u32 = basic_struct.u32 * 2;
+    sat.later_string = "ⓣⓔⓡⓜⓘⓝⓐⓣⓞⓡ";
+    for (uint64_t i = 0; i < fidljstest::ARRRR_SIZE; ++i) {
+      sat.arrrr[i] = static_cast<int32_t>(i * 5) - 10;
+    }
+    sat.nullable_vector_of_string0 = nullptr;
+    std::vector<std::string> vector_of_str;
+    vector_of_str.push_back("passed_str0");
+    vector_of_str.push_back("passed_str1");
+    sat.nullable_vector_of_string1.reset(std::move(vector_of_str));
+    std::vector<fidljstest::Blorp> vector_of_blorp;
+    vector_of_blorp.push_back(fidljstest::Blorp::GAMMA);
+    vector_of_blorp.push_back(fidljstest::Blorp::BETA);
+    vector_of_blorp.push_back(fidljstest::Blorp::BETA);
+    vector_of_blorp.push_back(fidljstest::Blorp::ALPHA);
+    sat.vector_of_blorp = std::move(vector_of_blorp);
+
+    resp(std::move(sat));
+  }
+
+  // Checks the received job handle refers to the default job, then replies
+  // with a duplicate of the current process handle.
+  void PassHandles(zx::job job, PassHandlesCallback callback) override {
+    EXPECT_EQ(GetKoidForHandle(job), GetKoidForHandle(*zx::job::default_job()));
+    zx::process process;
+    ASSERT_EQ(zx::process::self()->duplicate(ZX_RIGHT_SAME_RIGHTS, &process),
+              ZX_OK);
+    callback(std::move(process));
+  }
+
+  void ReceiveUnions(fidljstest::StructOfMultipleUnions somu) override {
+    EXPECT_TRUE(somu.initial.is_swb());
+    EXPECT_TRUE(somu.initial.swb().some_bool);
+
+    EXPECT_TRUE(somu.optional.get());
+    EXPECT_TRUE(somu.optional->is_lswa());
+    for (int i = 0; i < 32; ++i) {
+      EXPECT_EQ(somu.optional->lswa().components[i], i * 99);
+    }
+
+    EXPECT_TRUE(somu.trailing.is_swu());
+    EXPECT_EQ(somu.trailing.swu().num, 123456u);
+
+    did_receive_union_ = true;
+  }
+
+  void SendUnions(SendUnionsCallback callback) override {
+    fidljstest::StructOfMultipleUnions resp;
+
+    resp.initial.set_swb(fidljstest::StructWithBool());
+    resp.initial.swb().some_bool = true;
+
+    resp.optional = std::make_unique<fidljstest::UnionOfStructs>();
+    resp.optional->set_swu(fidljstest::StructWithUint());
+    resp.optional->swu().num = 987654;
+
+    resp.trailing.set_lswa(fidljstest::LargerStructWithArray());
+
+    callback(std::move(resp));
+  }
+
+  void SendVectorsOfString(std::vector<std::string> unsized,
+                           std::vector<fidl::StringPtr> nullable,
+                           std::vector<std::string> max_strlen) override {
+    ASSERT_EQ(unsized.size(), 3u);
+    EXPECT_EQ(unsized[0], "str0");
+    EXPECT_EQ(unsized[1], "str1");
+    EXPECT_EQ(unsized[2], "str2");
+
+    ASSERT_EQ(nullable.size(), 5u);
+    EXPECT_EQ(nullable[0], "str3");
+    EXPECT_TRUE(nullable[1].is_null());
+    EXPECT_TRUE(nullable[2].is_null());
+    EXPECT_TRUE(nullable[3].is_null());
+    EXPECT_EQ(nullable[4], "str4");
+
+    ASSERT_EQ(max_strlen.size(), 1u);
+    EXPECT_EQ(max_strlen[0], "0123456789");
+
+    did_get_vectors_of_string_ = true;
+  }
+
+  void VectorOfStruct(std::vector<fidljstest::StructWithUint> stuff,
+                      VectorOfStructCallback callback) override {
+    ASSERT_EQ(stuff.size(), 4u);
+    EXPECT_EQ(stuff[0].num, 456u);
+    EXPECT_EQ(stuff[1].num, 789u);
+    EXPECT_EQ(stuff[2].num, 123u);
+    EXPECT_EQ(stuff[3].num, 0xfffffu);
+
+    std::vector<fidljstest::StructWithUint> response;
+    fidljstest::StructWithUint a;
+    a.num = 369;
+    response.push_back(a);
+    fidljstest::StructWithUint b;
+    b.num = 258;
+    response.push_back(b);
+    callback(std::move(response));
+  }
+
+  // Validates every element of every primitive vector as sent from JS, then
+  // echoes the input back with 10 added to each numeric element.
+  void PassVectorOfPrimitives(
+      fidljstest::VectorsOfPrimitives input,
+      PassVectorOfPrimitivesCallback callback) override {
+    ASSERT_EQ(input.v_bool.size(), 1u);
+    ASSERT_EQ(input.v_uint8.size(), 2u);
+    ASSERT_EQ(input.v_uint16.size(), 3u);
+    ASSERT_EQ(input.v_uint32.size(), 4u);
+    ASSERT_EQ(input.v_uint64.size(), 5u);
+    ASSERT_EQ(input.v_int8.size(), 6u);
+    ASSERT_EQ(input.v_int16.size(), 7u);
+    ASSERT_EQ(input.v_int32.size(), 8u);
+    ASSERT_EQ(input.v_int64.size(), 9u);
+    ASSERT_EQ(input.v_float32.size(), 10u);
+    ASSERT_EQ(input.v_float64.size(), 11u);
+
+    EXPECT_EQ(input.v_bool[0], true);
+
+    EXPECT_EQ(input.v_uint8[0], 2u);
+    EXPECT_EQ(input.v_uint8[1], 3u);
+
+    EXPECT_EQ(input.v_uint16[0], 4u);
+    EXPECT_EQ(input.v_uint16[1], 5u);
+    EXPECT_EQ(input.v_uint16[2], 6u);
+
+    EXPECT_EQ(input.v_uint32[0], 7u);
+    EXPECT_EQ(input.v_uint32[1], 8u);
+    EXPECT_EQ(input.v_uint32[2], 9u);
+    EXPECT_EQ(input.v_uint32[3], 10u);
+
+    EXPECT_EQ(input.v_uint64[0], 11u);
+    EXPECT_EQ(input.v_uint64[1], 12u);
+    EXPECT_EQ(input.v_uint64[2], 13u);
+    EXPECT_EQ(input.v_uint64[3], 14u);
+    EXPECT_EQ(input.v_uint64[4], 0xffffffffffffff00ULL);
+
+    EXPECT_EQ(input.v_int8[0], -16);
+    EXPECT_EQ(input.v_int8[1], -17);
+    EXPECT_EQ(input.v_int8[2], -18);
+    EXPECT_EQ(input.v_int8[3], -19);
+    EXPECT_EQ(input.v_int8[4], -20);
+    EXPECT_EQ(input.v_int8[5], -21);
+
+    EXPECT_EQ(input.v_int16[0], -22);
+    EXPECT_EQ(input.v_int16[1], -23);
+    EXPECT_EQ(input.v_int16[2], -24);
+    EXPECT_EQ(input.v_int16[3], -25);
+    EXPECT_EQ(input.v_int16[4], -26);
+    EXPECT_EQ(input.v_int16[5], -27);
+    EXPECT_EQ(input.v_int16[6], -28);
+
+    EXPECT_EQ(input.v_int32[0], -29);
+    EXPECT_EQ(input.v_int32[1], -30);
+    EXPECT_EQ(input.v_int32[2], -31);
+    EXPECT_EQ(input.v_int32[3], -32);
+    EXPECT_EQ(input.v_int32[4], -33);
+    EXPECT_EQ(input.v_int32[5], -34);
+    EXPECT_EQ(input.v_int32[6], -35);
+    EXPECT_EQ(input.v_int32[7], -36);
+
+    EXPECT_EQ(input.v_int64[0], -37);
+    EXPECT_EQ(input.v_int64[1], -38);
+    EXPECT_EQ(input.v_int64[2], -39);
+    EXPECT_EQ(input.v_int64[3], -40);
+    EXPECT_EQ(input.v_int64[4], -41);
+    EXPECT_EQ(input.v_int64[5], -42);
+    EXPECT_EQ(input.v_int64[6], -43);
+    EXPECT_EQ(input.v_int64[7], -44);
+    EXPECT_EQ(input.v_int64[8], -0x7fffffffffffffffLL);
+
+    EXPECT_EQ(input.v_float32[0], 46.f);
+    EXPECT_EQ(input.v_float32[1], 47.f);
+    EXPECT_EQ(input.v_float32[2], 48.f);
+    EXPECT_EQ(input.v_float32[3], 49.f);
+    EXPECT_EQ(input.v_float32[4], 50.f);
+    EXPECT_EQ(input.v_float32[5], 51.f);
+    EXPECT_EQ(input.v_float32[6], 52.f);
+    EXPECT_EQ(input.v_float32[7], 53.f);
+    EXPECT_EQ(input.v_float32[8], 54.f);
+    EXPECT_EQ(input.v_float32[9], 55.f);
+
+    EXPECT_EQ(input.v_float64[0], 56.0);
+    EXPECT_EQ(input.v_float64[1], 57.0);
+    EXPECT_EQ(input.v_float64[2], 58.0);
+    EXPECT_EQ(input.v_float64[3], 59.0);
+    EXPECT_EQ(input.v_float64[4], 60.0);
+    EXPECT_EQ(input.v_float64[5], 61.0);
+    EXPECT_EQ(input.v_float64[6], 62.0);
+    EXPECT_EQ(input.v_float64[7], 63.0);
+    EXPECT_EQ(input.v_float64[8], 64.0);
+    EXPECT_EQ(input.v_float64[9], 65.0);
+    EXPECT_EQ(input.v_float64[10], 66.0);
+
+    fidljstest::VectorsOfPrimitives output = std::move(input);
+#define INC_OUTPUT_ARRAY(v)                      \
+  for (size_t i = 0; i < output.v.size(); ++i) { \
+    output.v[i] += 10;                           \
+  }
+    INC_OUTPUT_ARRAY(v_uint8);
+    INC_OUTPUT_ARRAY(v_uint16);
+    INC_OUTPUT_ARRAY(v_uint32);
+    INC_OUTPUT_ARRAY(v_uint64);
+    INC_OUTPUT_ARRAY(v_int8);
+    INC_OUTPUT_ARRAY(v_int16);
+    INC_OUTPUT_ARRAY(v_int32);
+    INC_OUTPUT_ARRAY(v_int64);
+    INC_OUTPUT_ARRAY(v_float32);
+    INC_OUTPUT_ARRAY(v_float64);
+#undef INC_OUTPUT_ARRAY
+
+    callback(std::move(output));
+  }
+
+  void PassVectorOfVMO(fidljstest::VectorOfHandleToVMO input,
+                       PassVectorOfVMOCallback callback) override {
+    callback(std::move(input));
+  }
+
+  // Accessors for state recorded by the one-way methods above.
+  bool was_do_something_called() const { return was_do_something_called_; }
+  int32_t received_int() const { return received_int_; }
+  const std::string& received_msg() const { return received_msg_; }
+
+  fidljstest::Blorp various_blorp() const { return various_blorp_; }
+  const std::string& various_msg() const { return various_msg_; }
+  const std::vector<uint32_t>& various_stuff() const { return various_stuff_; }
+
+  fidljstest::BasicStruct GetReceivedStruct() const { return basic_struct_; }
+
+  bool did_receive_union() const { return did_receive_union_; }
+
+  bool did_get_vectors_of_string() const { return did_get_vectors_of_string_; }
+
+  // Fires (and clears) any responses queued by WithResponse().
+  void CallResponseCallbacks() {
+    for (auto& cb : response_callbacks_) {
+      std::move(cb).Run();
+    }
+    response_callbacks_.clear();
+  }
+
+  void GetAnother(
+      fidl::InterfaceRequest<fidljstest::AnotherInterface> request) override {
+    another_interface_impl_ =
+        std::make_unique<AnotherInterfaceImpl>(std::move(request));
+  }
+
+ private:
+  bool was_do_something_called_ = false;
+  int32_t received_int_ = -1;
+  std::string received_msg_;
+  fidljstest::Blorp various_blorp_;
+  std::string various_msg_;
+  std::vector<uint32_t> various_stuff_;
+  fidljstest::BasicStruct basic_struct_;
+  std::vector<base::OnceClosure> response_callbacks_;
+  bool did_receive_union_ = false;
+  bool did_get_vectors_of_string_ = false;
+  std::unique_ptr<AnotherInterfaceImpl> another_interface_impl_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestolaImpl);
+};
+
+// Sends a no-argument one-way call from JS, then reads the raw bytes off the
+// server end of the channel and dispatches them through the generated stub.
+TEST_F(FidlGenJsTest, RawReceiveFidlMessage) {
+  v8::Isolate* isolate = instance_->isolate();
+  BindingsSetupHelper helper(isolate);
+
+  // Send the data from the JS side into the channel.
+  std::string source = R"(
+    var proxy = new TestolaProxy();
+    proxy.$bind(testHandle);
+    proxy.DoSomething();
+  )";
+  helper.runner().Run(source, "test.js");
+
+  // Read it out, decode, and confirm it was dispatched. (Fixed: this must be
+  // zx::channel::read(); "rea2" is not a method on zx::channel.)
+  TestolaImpl testola_impl;
+  fidljstest::Testola_Stub stub(&testola_impl);
+  uint8_t data[1024];
+  zx_handle_t handles[1];
+  uint32_t actual_bytes, actual_handles;
+  ASSERT_EQ(
+      helper.server().read(0, data, handles, base::size(data),
+                           base::size(handles), &actual_bytes, &actual_handles),
+      ZX_OK);
+  EXPECT_EQ(actual_bytes, 16u);
+  EXPECT_EQ(actual_handles, 0u);
+
+  fidl::Message message(
+      fidl::BytePart(data, actual_bytes, actual_bytes),
+      fidl::HandlePart(handles, actual_handles, actual_handles));
+  stub.Dispatch_(std::move(message), fidl::internal::PendingResponse());
+
+  EXPECT_TRUE(testola_impl.was_do_something_called());
+}
+
+// Like RawReceiveFidlMessage, but the call carries a single int32 argument.
+TEST_F(FidlGenJsTest, RawReceiveFidlMessageWithSimpleArg) {
+  v8::Isolate* isolate = instance_->isolate();
+  BindingsSetupHelper helper(isolate);
+
+  // Send the data from the JS side into the channel.
+  std::string source = R"(
+    var proxy = new TestolaProxy();
+    proxy.$bind(testHandle);
+    proxy.PrintInt(12345);
+  )";
+  helper.runner().Run(source, "test.js");
+
+  // Read it out, decode, and confirm it was dispatched. (Fixed: this must be
+  // zx::channel::read(); "rea2" is not a method on zx::channel.)
+  TestolaImpl testola_impl;
+  fidljstest::Testola_Stub stub(&testola_impl);
+  uint8_t data[1024];
+  zx_handle_t handles[1];
+  uint32_t actual_bytes, actual_handles;
+  ASSERT_EQ(
+      helper.server().read(0, data, handles, base::size(data),
+                           base::size(handles), &actual_bytes, &actual_handles),
+      ZX_OK);
+  // 24 rather than 20 because everything's 8 aligned.
+  EXPECT_EQ(actual_bytes, 24u);
+  EXPECT_EQ(actual_handles, 0u);
+
+  fidl::Message message(
+      fidl::BytePart(data, actual_bytes, actual_bytes),
+      fidl::HandlePart(handles, actual_handles, actual_handles));
+  stub.Dispatch_(std::move(message), fidl::internal::PendingResponse());
+
+  EXPECT_EQ(testola_impl.received_int(), 12345);
+}
+
+// Like RawReceiveFidlMessage, but the call carries a (non-ASCII) string
+// argument, exercising UTF-8 encoding through the JS bindings.
+TEST_F(FidlGenJsTest, RawReceiveFidlMessageWithStringArg) {
+  v8::Isolate* isolate = instance_->isolate();
+  BindingsSetupHelper helper(isolate);
+
+  // Send the data from the JS side into the channel.
+  std::string source = R"(
+    var proxy = new TestolaProxy();
+    proxy.$bind(testHandle);
+    proxy.PrintMsg('Ça c\'est a 你好 from deep in JS');
+  )";
+  helper.runner().Run(source, "test.js");
+
+  // Read it out, decode, and confirm it was dispatched. (Fixed: this must be
+  // zx::channel::read(); "rea2" is not a method on zx::channel.)
+  TestolaImpl testola_impl;
+  fidljstest::Testola_Stub stub(&testola_impl);
+  uint8_t data[1024];
+  zx_handle_t handles[1];
+  uint32_t actual_bytes, actual_handles;
+  ASSERT_EQ(
+      helper.server().read(0, data, handles, base::size(data),
+                           base::size(handles), &actual_bytes, &actual_handles),
+      ZX_OK);
+  EXPECT_EQ(actual_handles, 0u);
+
+  fidl::Message message(
+      fidl::BytePart(data, actual_bytes, actual_bytes),
+      fidl::HandlePart(handles, actual_handles, actual_handles));
+  stub.Dispatch_(std::move(message), fidl::internal::PendingResponse());
+
+  EXPECT_EQ(testola_impl.received_msg(), "Ça c'est a 你好 from deep in JS");
+}
+
+// Like RawReceiveFidlMessage, but with an enum, a string, and a vector in one
+// call, confirming multi-argument encoding.
+TEST_F(FidlGenJsTest, RawReceiveFidlMessageWithMultipleArgs) {
+  v8::Isolate* isolate = instance_->isolate();
+  BindingsSetupHelper helper(isolate);
+
+  // Send the data from the JS side into the channel.
+  std::string source = R"(
+    var proxy = new TestolaProxy();
+    proxy.$bind(testHandle);
+    proxy.VariousArgs(Blorp.GAMMA, 'zippy zap', [ 999, 987, 123456 ]);
+  )";
+  helper.runner().Run(source, "test.js");
+
+  // Read it out, decode, and confirm it was dispatched. (Fixed: this must be
+  // zx::channel::read(); "rea2" is not a method on zx::channel.)
+  TestolaImpl testola_impl;
+  fidljstest::Testola_Stub stub(&testola_impl);
+  uint8_t data[1024];
+  zx_handle_t handles[1];
+  uint32_t actual_bytes, actual_handles;
+  ASSERT_EQ(
+      helper.server().read(0, data, handles, base::size(data),
+                           base::size(handles), &actual_bytes, &actual_handles),
+      ZX_OK);
+  EXPECT_EQ(actual_handles, 0u);
+
+  fidl::Message message(
+      fidl::BytePart(data, actual_bytes, actual_bytes),
+      fidl::HandlePart(handles, actual_handles, actual_handles));
+  stub.Dispatch_(std::move(message), fidl::internal::PendingResponse());
+
+  EXPECT_EQ(testola_impl.various_blorp(), fidljstest::Blorp::GAMMA);
+  EXPECT_EQ(testola_impl.various_msg(), "zippy zap");
+  ASSERT_EQ(testola_impl.various_stuff().size(), 3u);
+  EXPECT_EQ(testola_impl.various_stuff()[0], 999u);
+  EXPECT_EQ(testola_impl.various_stuff()[1], 987u);
+  EXPECT_EQ(testola_impl.various_stuff()[2], 123456u);
+}
+
+// Two-way call: the JS proxy returns a Promise that must resolve with the
+// server's response once TestolaImpl fires its queued callback.
+TEST_F(FidlGenJsTest, RawWithResponse) {
+  v8::Isolate* isolate = instance_->isolate();
+  BindingsSetupHelper helper(isolate);
+
+  TestolaImpl testola_impl;
+  fidl::Binding<fidljstest::Testola> binding(&testola_impl);
+  binding.Bind(std::move(helper.server()));
+
+  // Send the data from the JS side into the channel.
+  std::string source = R"(
+    var proxy = new TestolaProxy();
+    proxy.$bind(testHandle);
+    this.sum_result = -1;
+    proxy.WithResponse(72, 99)
+        .then(sum => {
+          this.sum_result = sum;
+        })
+        .catch((e) => log('FAILED: ' + e));
+  )";
+  helper.runner().Run(source, "test.js");
+
+  // Deliver the request to the server side.
+  base::RunLoop().RunUntilIdle();
+
+  testola_impl.CallResponseCallbacks();
+
+  // Deliver the response back to JS so the Promise resolves.
+  base::RunLoop().RunUntilIdle();
+
+  // Confirm that the response was received with the correct value.
+  auto sum_result = helper.Get<int>("sum_result");
+  EXPECT_EQ(sum_result, 72 + 99);
+}
+
+// If the bindings are destroyed while a two-way call is outstanding, the
+// pending Promise must be rejected (not resolved, and without throwing).
+TEST_F(FidlGenJsTest, NoResponseBeforeTearDown) {
+  v8::Isolate* isolate = instance_->isolate();
+
+  BindingsSetupHelper helper(isolate);
+
+  TestolaImpl testola_impl;
+  fidl::Binding<fidljstest::Testola> binding(&testola_impl);
+  binding.Bind(std::move(helper.server()));
+
+  // Send the data from the JS side into the channel.
+  std::string source = R"(
+    var proxy = new TestolaProxy();
+    proxy.$bind(testHandle);
+    this.resolved = false;
+    this.rejected = false;
+    this.excepted = false;
+    proxy.WithResponse(1, 2)
+        .then(sum => {
+          this.resolved = true;
+        }, () => {
+          this.rejected = true;
+        })
+        .catch((e) => {
+          log('FAILED: ' + e);
+          this.excepted = true;
+        })
+  )";
+  helper.runner().Run(source, "test.js");
+
+  // Run the message loop to read and queue the request, but don't send the
+  // response.
+  base::RunLoop().RunUntilIdle();
+
+  // This causes outstanding waits to be canceled.
+  helper.DestroyBindingsForTesting();
+
+  EXPECT_FALSE(helper.Get<bool>("resolved"));
+  EXPECT_TRUE(helper.Get<bool>("rejected"));
+  EXPECT_FALSE(helper.Get<bool>("excepted"));
+}
+
+// Sends a struct from JS, confirming that |undefined| fields pick up their
+// FIDL default values while explicitly-passed falsy values (0) do not.
+TEST_F(FidlGenJsTest, RawReceiveFidlStructMessage) {
+  v8::Isolate* isolate = instance_->isolate();
+  BindingsSetupHelper helper(isolate);
+
+  TestolaImpl testola_impl;
+  fidl::Binding<fidljstest::Testola> binding(&testola_impl);
+  binding.Bind(std::move(helper.server()));
+
+  // Send the data from the JS side into the channel.
+  std::string source = R"(
+    var proxy = new TestolaProxy();
+    proxy.$bind(testHandle);
+    var basicStruct = new BasicStruct(
+        true, -30, undefined, -789, 200, 65000, 0);
+    proxy.SendAStruct(basicStruct);
+  )";
+  helper.runner().Run(source, "test.js");
+
+  // Run the dispatcher to read and dispatch the response.
+  base::RunLoop().RunUntilIdle();
+
+  fidljstest::BasicStruct received_struct = testola_impl.GetReceivedStruct();
+  EXPECT_EQ(received_struct.b, true);
+  EXPECT_EQ(received_struct.i8, -30);
+  EXPECT_EQ(received_struct.i16, 18);  // From defaults.
+  EXPECT_EQ(received_struct.i32, -789);
+  EXPECT_EQ(received_struct.u8, 200);
+  EXPECT_EQ(received_struct.u16, 65000);
+  // Make sure this didn't get defaulted, even though it has a false-ish value.
+  EXPECT_EQ(received_struct.u32, 0u);
+}
+
+// Round-trips a struct: JS sends a BasicStruct, TestolaImpl replies with a
+// StuffAndThings echoing modified fields plus vectors, arrays, nullable
+// vectors, and an enum vector; each is then verified from the JS side.
+TEST_F(FidlGenJsTest, RawReceiveFidlNestedStructsAndRespond) {
+  v8::Isolate* isolate = instance_->isolate();
+  BindingsSetupHelper helper(isolate);
+
+  TestolaImpl testola_impl;
+  fidl::Binding<fidljstest::Testola> binding(&testola_impl);
+  binding.Bind(std::move(helper.server()));
+
+  // Send the data from the JS side into the channel.
+  std::string source = R"(
+    var proxy = new TestolaProxy();
+    proxy.$bind(testHandle);
+    var toSend = new BasicStruct(false, -5, -6, -7, 8, 32000, 2000000000);
+    proxy.NestedStructsWithResponse(toSend)
+        .then(sat => {
+          this.result_count = sat.count;
+          this.result_id = sat.id;
+          this.result_vector = sat.a_vector;
+          this.result_basic_b = sat.basic.b;
+          this.result_basic_i8 = sat.basic.i8;
+          this.result_basic_i16 = sat.basic.i16;
+          this.result_basic_i32 = sat.basic.i32;
+          this.result_basic_u8 = sat.basic.u8;
+          this.result_basic_u16 = sat.basic.u16;
+          this.result_basic_u32 = sat.basic.u32;
+          this.result_later_string = sat.later_string;
+          this.result_arrrr = sat.arrrr;
+          this.result_vs0 = sat.nullable_vector_of_string0;
+          this.result_vs1 = sat.nullable_vector_of_string1;
+          this.result_vblorp = sat.vector_of_blorp;
+        })
+        .catch((e) => log('FAILED: ' + e));
+  )";
+  helper.runner().Run(source, "test.js");
+
+  // Run the message loop to read the request and write the response.
+  base::RunLoop().RunUntilIdle();
+
+  EXPECT_EQ(helper.Get<int>("result_count"), 123);
+  EXPECT_EQ(helper.Get<std::string>("result_id"), "here is my id");
+  auto result_vector = helper.Get<std::vector<int>>("result_vector");
+  ASSERT_EQ(result_vector.size(), 4u);
+  EXPECT_EQ(result_vector[0], 1);
+  EXPECT_EQ(result_vector[1], -2);
+  EXPECT_EQ(result_vector[2], 4);
+  EXPECT_EQ(result_vector[3], -8);
+  EXPECT_EQ(helper.Get<bool>("result_basic_b"), true);
+  EXPECT_EQ(helper.Get<int>("result_basic_i8"), -10);
+  EXPECT_EQ(helper.Get<int>("result_basic_i16"), -12);
+  EXPECT_EQ(helper.Get<int>("result_basic_i32"), -14);
+  EXPECT_EQ(helper.Get<unsigned int>("result_basic_u8"), 16u);
+  EXPECT_EQ(helper.Get<unsigned int>("result_basic_u16"), 64000u);
+  EXPECT_EQ(helper.Get<unsigned int>("result_basic_u32"), 4000000000u);
+  EXPECT_EQ(helper.Get<std::string>("result_later_string"), "ⓣⓔⓡⓜⓘⓝⓐⓣⓞⓡ");
+  // Retrieve as a vector as there's no difference in representation in JS (and
+  // gin already supports vector), and verify the length matches the expected
+  // length of the fidl array.
+  auto result_arrrr = helper.Get<std::vector<int32_t>>("result_arrrr");
+  ASSERT_EQ(result_arrrr.size(), fidljstest::ARRRR_SIZE);
+  for (uint64_t i = 0; i < fidljstest::ARRRR_SIZE; ++i) {
+    EXPECT_EQ(result_arrrr[i], static_cast<int32_t>(i * 5) - 10);
+  }
+  EXPECT_TRUE(helper.IsNull("result_vs0"));
+  EXPECT_FALSE(helper.IsNull("result_vs1"));
+  auto result_vs1 = helper.Get<std::vector<std::string>>("result_vs1");
+  ASSERT_EQ(result_vs1.size(), 2u);
+  EXPECT_EQ(result_vs1[0], "passed_str0");
+  EXPECT_EQ(result_vs1[1], "passed_str1");
+
+  // This is a vector of enum class fidljstest::Blorp, but gin can't retrieve
+  // those, so just get it as int, and cast to check values.
+  auto result_vblorp = helper.Get<std::vector<int>>("result_vblorp");
+  ASSERT_EQ(result_vblorp.size(), 4u);
+  EXPECT_EQ(result_vblorp[0], static_cast<int>(fidljstest::Blorp::GAMMA));
+  EXPECT_EQ(result_vblorp[1], static_cast<int>(fidljstest::Blorp::BETA));
+  EXPECT_EQ(result_vblorp[2], static_cast<int>(fidljstest::Blorp::BETA));
+  EXPECT_EQ(result_vblorp[3], static_cast<int>(fidljstest::Blorp::ALPHA));
+}
+
+// Passes a kernel handle (a duplicate of the default job) from JS to C++ and
+// receives a process handle back, verifying koids match and that the
+// originals weren't consumed.
+TEST_F(FidlGenJsTest, HandlePassing) {
+  v8::Isolate* isolate = instance_->isolate();
+  BindingsSetupHelper helper(isolate);
+
+  TestolaImpl testola_impl;
+  fidl::Binding<fidljstest::Testola> binding(&testola_impl);
+  binding.Bind(std::move(helper.server()));
+
+  zx::job default_job_copy;
+  ASSERT_EQ(zx::job::default_job()->duplicate(ZX_RIGHT_SAME_RIGHTS,
+                                              &default_job_copy),
+            ZX_OK);
+  helper.runner().global()->Set(
+      gin::StringToSymbol(isolate, "testJobHandle"),
+      gin::ConvertToV8(isolate, default_job_copy.get()));
+
+  // TODO(crbug.com/883496): Handles wrapped in Transferrable once MessagePort
+  // is sorted out, and then stop treating handles as unmanaged |uint32_t|s.
+  std::string source = R"(
+    var proxy = new TestolaProxy();
+    proxy.$bind(testHandle);
+    proxy.PassHandles(testJobHandle).then(h => {
+      this.processHandle = h;
+    }).catch((e) => log('FAILED: ' + e));
+  )";
+  helper.runner().Run(source, "test.js");
+
+  // Run the message loop to send the request and receive a response.
+  base::RunLoop().RunUntilIdle();
+
+  zx_handle_t process_handle_back_from_js =
+      helper.Get<uint32_t>("processHandle");
+  EXPECT_EQ(GetKoidForHandle(process_handle_back_from_js),
+            GetKoidForHandle(*zx::process::self()));
+
+  // Make sure we received the valid handle back correctly, and close it. Not
+  // stored into a zx::process in case it isn't valid, and to check the return
+  // value from closing it.
+  EXPECT_EQ(zx_handle_close(process_handle_back_from_js), ZX_OK);
+
+  // Ensure we didn't pass away our default job, or process self.
+  EXPECT_NE(GetKoidForHandle(*zx::job::default_job()), ZX_KOID_INVALID);
+  EXPECT_NE(GetKoidForHandle(*zx::process::self()), ZX_KOID_INVALID);
+}
+
+// Builds a struct containing three unions in JS and sends it to the C++
+// server, which validates the contents.
+TEST_F(FidlGenJsTest, UnionSend) {
+  v8::Isolate* isolate = instance_->isolate();
+  BindingsSetupHelper helper(isolate);
+
+  TestolaImpl testola_impl;
+  fidl::Binding<fidljstest::Testola> binding(&testola_impl);
+  binding.Bind(std::move(helper.server()));
+
+  std::string source = R"(
+    var proxy = new TestolaProxy();
+    proxy.$bind(testHandle);
+    var somu = new StructOfMultipleUnions();
+
+    var swb = new StructWithBool(/*some_bool*/ true);
+    somu.initial.set_swb(swb);
+
+    var lswa = new LargerStructWithArray([]);
+    for (var i = 0; i < 32; ++i) {
+      lswa.components[i] = i * 99;
+    }
+    somu.optional.set_lswa(lswa);
+
+    somu.trailing.set_swu(new StructWithUint(123456));
+
+    proxy.ReceiveUnions(somu);
+  )";
+  helper.runner().Run(source, "test.js");
+
+  base::RunLoop().RunUntilIdle();
+
+  // Expectations on the contents of the union are checked in the body of
+  // TestolaImpl::ReceiveUnions().
+  EXPECT_TRUE(testola_impl.did_receive_union());
+}
+
+// Receives a struct of unions from the C++ server and verifies, in JS, which
+// variant each union holds and the variant payloads.
+TEST_F(FidlGenJsTest, UnionReceive) {
+  v8::Isolate* isolate = instance_->isolate();
+  BindingsSetupHelper helper(isolate);
+
+  TestolaImpl testola_impl;
+  fidl::Binding<fidljstest::Testola> binding(&testola_impl);
+  binding.Bind(std::move(helper.server()));
+
+  std::string source = R"(
+    var proxy = new TestolaProxy();
+    proxy.$bind(testHandle);
+    proxy.SendUnions().then(resp => {
+      this.result_initial_is_swb = resp.initial.is_swb();
+      this.result_initial_is_swu = resp.initial.is_swu();
+      this.result_initial_is_lswa = resp.initial.is_lswa();
+      this.result_optional_is_swb = resp.optional.is_swb();
+      this.result_optional_is_swu = resp.optional.is_swu();
+      this.result_optional_is_lswa = resp.optional.is_lswa();
+      this.result_trailing_is_swb = resp.trailing.is_swb();
+      this.result_trailing_is_swu = resp.trailing.is_swu();
+      this.result_trailing_is_lswa = resp.trailing.is_lswa();
+
+      this.result_initial_some_bool = resp.initial.swb.some_bool;
+      this.result_optional_num = resp.optional.swu.num;
+    }).catch((e) => log('FAILED: ' + e));
+  )";
+  helper.runner().Run(source, "test.js");
+
+  base::RunLoop().RunUntilIdle();
+
+  EXPECT_TRUE(helper.Get<bool>("result_initial_is_swb"));
+  EXPECT_FALSE(helper.Get<bool>("result_initial_is_swu"));
+  EXPECT_FALSE(helper.Get<bool>("result_initial_is_lswa"));
+
+  EXPECT_FALSE(helper.Get<bool>("result_optional_is_swb"));
+  EXPECT_TRUE(helper.Get<bool>("result_optional_is_swu"));
+  EXPECT_FALSE(helper.Get<bool>("result_optional_is_lswa"));
+
+  EXPECT_FALSE(helper.Get<bool>("result_trailing_is_swb"));
+  EXPECT_FALSE(helper.Get<bool>("result_trailing_is_swu"));
+  EXPECT_TRUE(helper.Get<bool>("result_trailing_is_lswa"));
+
+  EXPECT_TRUE(helper.Get<bool>("result_initial_some_bool"));
+  EXPECT_EQ(helper.Get<uint32_t>("result_optional_num"), 987654u);
+}
+
+TEST_F(FidlGenJsTest, VariousDefaults) {
+ v8::Isolate* isolate = instance_->isolate();
+ BindingsSetupHelper helper(isolate);
+
+ std::string source = R"(
+ var temp = new VariousDefaults();
+ this.result_blorp = temp.blorp_defaulting_to_beta;
+ this.result_timestamp = temp.int64_defaulting_to_no_timestamp;
+ this.result_another_copy = ANOTHER_COPY;
+ this.result_int64_const = temp.int64_defaulting_to_const;
+ this.result_string_in_struct = temp.string_with_default;
+ this.result_string_const = SOME_STRING;
+ )";
+ helper.runner().Run(source, "test.js");
+
+ EXPECT_EQ(helper.Get<int>("result_blorp"),
+ static_cast<int>(fidljstest::Blorp::BETA));
+ EXPECT_EQ(helper.FromV8BigInt<int64_t>(helper.runner().global()->Get(
+ gin::StringToV8(isolate, "result_timestamp"))),
+ fidljstest::NO_TIMESTAMP);
+ EXPECT_EQ(helper.FromV8BigInt<int64_t>(helper.runner().global()->Get(
+ gin::StringToV8(isolate, "result_another_copy"))),
+ fidljstest::ANOTHER_COPY);
+ EXPECT_EQ(helper.FromV8BigInt<int64_t>(helper.runner().global()->Get(
+ gin::StringToV8(isolate, "result_int64_const"))),
+ 0x7fffffffffffff11LL);
+ EXPECT_EQ(helper.Get<std::string>("result_string_const"),
+ "a 你好 thing\" containing ' quotes");
+ EXPECT_EQ(helper.Get<std::string>("result_string_in_struct"), "stuff");
+}
+
+TEST_F(FidlGenJsTest, VectorOfStrings) {
+ v8::Isolate* isolate = instance_->isolate();
+ BindingsSetupHelper helper(isolate);
+
+ TestolaImpl testola_impl;
+ fidl::Binding<fidljstest::Testola> binding(&testola_impl);
+ binding.Bind(std::move(helper.server()));
+
+ std::string source = R"(
+ var proxy = new TestolaProxy();
+ proxy.$bind(testHandle);
+
+ var v1 = ['str0', 'str1', 'str2'];
+ var v2 = ['str3', null, null, null, 'str4'];
+ var v3 = ['0123456789']; // This is the maximum allowed length.
+ proxy.SendVectorsOfString(v1, v2, v3);
+ )";
+ helper.runner().Run(source, "test.js");
+ base::RunLoop().RunUntilIdle();
+
+ EXPECT_TRUE(testola_impl.did_get_vectors_of_string());
+}
+
+TEST_F(FidlGenJsTest, VectorOfStringsTooLongString) {
+ v8::Isolate* isolate = instance_->isolate();
+ BindingsSetupHelper helper(isolate);
+
+ TestolaImpl testola_impl;
+ fidl::Binding<fidljstest::Testola> binding(&testola_impl);
+ binding.Bind(std::move(helper.server()));
+
+ std::string source = R"(
+ var proxy = new TestolaProxy();
+ proxy.$bind(testHandle);
+
+ var too_long = ['this string is longer than allowed'];
+ proxy.SendVectorsOfString([], [], too_long);
+ this.tried_to_send = true;
+ )";
+ helper.runner().Run(source, "test.js");
+ base::RunLoop().RunUntilIdle();
+
+ EXPECT_TRUE(helper.Get<bool>("tried_to_send"));
+ EXPECT_FALSE(testola_impl.did_get_vectors_of_string());
+}
+
+TEST_F(FidlGenJsTest, VectorOfStruct) {
+ v8::Isolate* isolate = instance_->isolate();
+ BindingsSetupHelper helper(isolate);
+
+ TestolaImpl testola_impl;
+ fidl::Binding<fidljstest::Testola> binding(&testola_impl);
+ binding.Bind(std::move(helper.server()));
+
+ std::string source = R"(
+ var proxy = new TestolaProxy();
+ proxy.$bind(testHandle);
+
+ var data = [
+ new StructWithUint(456),
+ new StructWithUint(789),
+ new StructWithUint(123),
+ new StructWithUint(0xfffff),
+ ];
+ proxy.VectorOfStruct(data).then(resp => {
+ this.result_length = resp.length;
+ this.result_0 = resp[0].num;
+ this.result_1 = resp[1].num;
+ }).catch((e) => log('FAILED: ' + e));
+ )";
+ helper.runner().Run(source, "test.js");
+ base::RunLoop().RunUntilIdle();
+
+ EXPECT_EQ(helper.Get<uint32_t>("result_length"), 2u);
+ EXPECT_EQ(helper.Get<int>("result_0"), 369);
+ EXPECT_EQ(helper.Get<int>("result_1"), 258);
+}
+
+TEST_F(FidlGenJsTest, VectorsOfPrimitives) {
+ v8::Isolate* isolate = instance_->isolate();
+ BindingsSetupHelper helper(isolate);
+
+ TestolaImpl testola_impl;
+ fidl::Binding<fidljstest::Testola> binding(&testola_impl);
+ binding.Bind(std::move(helper.server()));
+
+ std::string source = R"(
+ var proxy = new TestolaProxy();
+ proxy.$bind(testHandle);
+
+ var v_bool = [true];
+ var v_uint8 = [2, 3];
+ var v_uint16 = [4, 5, 6];
+ var v_uint32 = [7, 8, 9, 10];
+ var v_uint64 = [11, 12, 13, 14, 0xffffffffffffff00n];
+ var v_int8 = [-16, -17, -18, -19, -20, -21];
+ var v_int16 = [-22, -23, -24, -25, -26, -27, -28];
+ var v_int32 = [-29, -30, -31, -32, -33, -34, -35, -36];
+ var v_int64 = [-37, -38, -39, -40, -41, -42, -43, -44,
+ -0x7fffffffffffffffn];
+ var v_float32 = [46, 47, 48, 49, 50, 51, 52, 53, 54, 55];
+ var v_float64 = [56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66];
+
+ var data = new VectorsOfPrimitives(
+ v_bool,
+ v_uint8,
+ v_uint16,
+ v_uint32,
+ v_uint64,
+ v_int8,
+ v_int16,
+ v_int32,
+ v_int64,
+ v_float32,
+ v_float64);
+
+ proxy.PassVectorOfPrimitives(data).then(resp => {
+ this.result_v_bool = resp.v_bool;
+ this.result_v_uint8 = resp.v_uint8;
+ this.result_v_uint16 = resp.v_uint16;
+ this.result_v_uint32 = resp.v_uint32;
+ this.result_v_uint64 = resp.v_uint64;
+ this.result_v_int8 = resp.v_int8;
+ this.result_v_int16 = resp.v_int16;
+ this.result_v_int32 = resp.v_int32;
+ this.result_v_int64 = resp.v_int64;
+ this.result_v_float32 = resp.v_float32;
+ this.result_v_float64 = resp.v_float64;
+ }).catch((e) => log('FAILED: ' + e));
+ )";
+
+ helper.runner().Run(source, "test.js");
+ base::RunLoop().RunUntilIdle();
+
+ auto result_v_bool = helper.Get<std::vector<bool>>("result_v_bool");
+ auto result_v_uint8 = helper.Get<std::vector<unsigned int>>("result_v_uint8");
+ auto result_v_uint16 =
+ helper.Get<std::vector<unsigned int>>("result_v_uint16");
+ auto result_v_uint32 = helper.Get<std::vector<uint32_t>>("result_v_uint32");
+ auto result_v_uint64 = helper.GetBigIntVector<uint64_t>("result_v_uint64");
+ auto result_v_int8 = helper.Get<std::vector<int>>("result_v_int8");
+ auto result_v_int16 = helper.Get<std::vector<int>>("result_v_int16");
+ auto result_v_int32 = helper.Get<std::vector<int32_t>>("result_v_int32");
+ auto result_v_int64 = helper.GetBigIntVector<int64_t>("result_v_int64");
+ auto result_v_float32 = helper.Get<std::vector<float>>("result_v_float32");
+ auto result_v_float64 = helper.Get<std::vector<double>>("result_v_float64");
+
+ ASSERT_EQ(result_v_bool.size(), 1u);
+ ASSERT_EQ(result_v_uint8.size(), 2u);
+ ASSERT_EQ(result_v_uint16.size(), 3u);
+ ASSERT_EQ(result_v_uint32.size(), 4u);
+ ASSERT_EQ(result_v_uint64.size(), 5u);
+ ASSERT_EQ(result_v_int8.size(), 6u);
+ ASSERT_EQ(result_v_int16.size(), 7u);
+ ASSERT_EQ(result_v_int32.size(), 8u);
+ ASSERT_EQ(result_v_int64.size(), 9u);
+ ASSERT_EQ(result_v_float32.size(), 10u);
+ ASSERT_EQ(result_v_float64.size(), 11u);
+
+ // Check that all the responses have had 10 added to them (except bool).
+
+ EXPECT_EQ(result_v_bool[0], true);
+
+ EXPECT_EQ(result_v_uint8[0], 12u);
+ EXPECT_EQ(result_v_uint8[1], 13u);
+
+ EXPECT_EQ(result_v_uint16[0], 14u);
+ EXPECT_EQ(result_v_uint16[1], 15u);
+ EXPECT_EQ(result_v_uint16[2], 16u);
+
+ EXPECT_EQ(result_v_uint32[0], 17u);
+ EXPECT_EQ(result_v_uint32[1], 18u);
+ EXPECT_EQ(result_v_uint32[2], 19u);
+ EXPECT_EQ(result_v_uint32[3], 20u);
+
+ EXPECT_EQ(result_v_uint64[0], 21u);
+ EXPECT_EQ(result_v_uint64[1], 22u);
+ EXPECT_EQ(result_v_uint64[2], 23u);
+ EXPECT_EQ(result_v_uint64[3], 24u);
+ EXPECT_EQ(result_v_uint64[4], 0xffffffffffffff0aULL);
+
+ EXPECT_EQ(result_v_int8[0], -6);
+ EXPECT_EQ(result_v_int8[1], -7);
+ EXPECT_EQ(result_v_int8[2], -8);
+ EXPECT_EQ(result_v_int8[3], -9);
+ EXPECT_EQ(result_v_int8[4], -10);
+ EXPECT_EQ(result_v_int8[5], -11);
+
+ EXPECT_EQ(result_v_int16[0], -12);
+ EXPECT_EQ(result_v_int16[1], -13);
+ EXPECT_EQ(result_v_int16[2], -14);
+ EXPECT_EQ(result_v_int16[3], -15);
+ EXPECT_EQ(result_v_int16[4], -16);
+ EXPECT_EQ(result_v_int16[5], -17);
+ EXPECT_EQ(result_v_int16[6], -18);
+
+ EXPECT_EQ(result_v_int32[0], -19);
+ EXPECT_EQ(result_v_int32[1], -20);
+ EXPECT_EQ(result_v_int32[2], -21);
+ EXPECT_EQ(result_v_int32[3], -22);
+ EXPECT_EQ(result_v_int32[4], -23);
+ EXPECT_EQ(result_v_int32[5], -24);
+ EXPECT_EQ(result_v_int32[6], -25);
+ EXPECT_EQ(result_v_int32[7], -26);
+
+ EXPECT_EQ(result_v_int64[0], -27);
+ EXPECT_EQ(result_v_int64[1], -28);
+ EXPECT_EQ(result_v_int64[2], -29);
+ EXPECT_EQ(result_v_int64[3], -30);
+ EXPECT_EQ(result_v_int64[4], -31);
+ EXPECT_EQ(result_v_int64[5], -32);
+ EXPECT_EQ(result_v_int64[6], -33);
+ EXPECT_EQ(result_v_int64[7], -34);
+ EXPECT_EQ(result_v_int64[8], -0x7ffffffffffffff5LL);
+
+ EXPECT_EQ(result_v_float32[0], 56.f);
+ EXPECT_EQ(result_v_float32[1], 57.f);
+ EXPECT_EQ(result_v_float32[2], 58.f);
+ EXPECT_EQ(result_v_float32[3], 59.f);
+ EXPECT_EQ(result_v_float32[4], 60.f);
+ EXPECT_EQ(result_v_float32[5], 61.f);
+ EXPECT_EQ(result_v_float32[6], 62.f);
+ EXPECT_EQ(result_v_float32[7], 63.f);
+ EXPECT_EQ(result_v_float32[8], 64.f);
+ EXPECT_EQ(result_v_float32[9], 65.f);
+
+ EXPECT_EQ(result_v_float64[0], 66.f);
+ EXPECT_EQ(result_v_float64[1], 67.f);
+ EXPECT_EQ(result_v_float64[2], 68.f);
+ EXPECT_EQ(result_v_float64[3], 69.f);
+ EXPECT_EQ(result_v_float64[4], 70.f);
+ EXPECT_EQ(result_v_float64[5], 71.f);
+ EXPECT_EQ(result_v_float64[6], 72.f);
+ EXPECT_EQ(result_v_float64[7], 73.f);
+ EXPECT_EQ(result_v_float64[8], 74.f);
+ EXPECT_EQ(result_v_float64[9], 75.f);
+ EXPECT_EQ(result_v_float64[10], 76.f);
+}
+
+TEST_F(FidlGenJsTest, VectorOfHandle) {
+ v8::Isolate* isolate = instance_->isolate();
+ BindingsSetupHelper helper(isolate);
+
+ TestolaImpl testola_impl;
+ fidl::Binding<fidljstest::Testola> binding(&testola_impl);
+ binding.Bind(std::move(helper.server()));
+
+ zx::vmo test_vmo0, test_vmo1;
+ ASSERT_EQ(zx::vmo::create(4096, 0, &test_vmo0), ZX_OK);
+ ASSERT_EQ(zx::vmo::create(16384, 0, &test_vmo1), ZX_OK);
+
+ // Save to compare on return.
+ zx_koid_t koid_of_vmo0 = GetKoidForHandle(test_vmo0);
+ zx_koid_t koid_of_vmo1 = GetKoidForHandle(test_vmo1);
+
+ helper.runner().global()->Set(gin::StringToSymbol(isolate, "vmo0"),
+ gin::ConvertToV8(isolate, test_vmo0.release()));
+ helper.runner().global()->Set(gin::StringToSymbol(isolate, "vmo1"),
+ gin::ConvertToV8(isolate, test_vmo1.release()));
+
+ std::string source = R"(
+ var proxy = new TestolaProxy();
+ proxy.$bind(testHandle);
+
+ proxy.PassVectorOfVMO(new VectorOfHandleToVMO([vmo0, vmo1])).then(
+ resp => {
+ this.result_vmo0 = resp.vmos[0];
+ this.result_vmo1 = resp.vmos[1];
+ }).catch((e) => log('FAILED: ' + e));
+ )";
+ helper.runner().Run(source, "test.js");
+ base::RunLoop().RunUntilIdle();
+
+ zx_handle_t result_vmo0 = helper.Get<zx_handle_t>("result_vmo0");
+ zx_handle_t result_vmo1 = helper.Get<zx_handle_t>("result_vmo1");
+
+ EXPECT_EQ(GetKoidForHandle(result_vmo0), koid_of_vmo0);
+ EXPECT_EQ(GetKoidForHandle(result_vmo1), koid_of_vmo1);
+
+ uint64_t size;
+ ASSERT_EQ(zx_vmo_get_size(result_vmo0, &size), ZX_OK);
+ EXPECT_EQ(size, 4096u);
+ ASSERT_EQ(zx_vmo_get_size(result_vmo1, &size), ZX_OK);
+ EXPECT_EQ(size, 16384u);
+
+ EXPECT_EQ(zx_handle_close(result_vmo0), ZX_OK);
+ EXPECT_EQ(zx_handle_close(result_vmo1), ZX_OK);
+}
+
+TEST_F(FidlGenJsTest, RequestInterface) {
+ v8::Isolate* isolate = instance_->isolate();
+ BindingsSetupHelper helper(isolate);
+
+ TestolaImpl testola_impl;
+ fidl::Binding<fidljstest::Testola> binding(&testola_impl);
+ binding.Bind(std::move(helper.server()));
+
+ std::string source = R"(
+ var proxy = new TestolaProxy();
+ proxy.$bind(testHandle);
+
+ var another_proxy = new AnotherInterfaceProxy();
+
+ proxy.GetAnother(another_proxy.$request());
+ this.is_bound = another_proxy.$is_bound();
+ another_proxy.TimesTwo(456).then(resp => {
+ this.result = resp;
+
+ // TODO(crbug.com/883496): Handle created by $request() must be manually
+ // closed for now to avoid leaking it.
+ another_proxy.$close();
+ }).catch((e) => log('FAILED: ' + e));
+
+ // Use the original interface to make sure we didn't break its connection.
+ proxy.PrintInt(789);
+ )";
+ helper.runner().Run(source, "test.js");
+ base::RunLoop().RunUntilIdle();
+
+ EXPECT_EQ(helper.Get<int>("result"), 456 * 2);
+ EXPECT_EQ(testola_impl.received_int(), 789);
+}
+
+int main(int argc, char** argv) {
+ base::TestSuite test_suite(argc, argv);
+
+ return base::LaunchUnitTests(
+ argc, argv,
+ base::BindOnce(&base::TestSuite::Run, base::Unretained(&test_suite)));
+}
diff --git a/deps/v8/build/fuchsia/fidlgen_js/test/simple.fidl b/deps/v8/build/fuchsia/fidlgen_js/test/simple.fidl
new file mode 100644
index 0000000000..18770650fb
--- /dev/null
+++ b/deps/v8/build/fuchsia/fidlgen_js/test/simple.fidl
@@ -0,0 +1,142 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+library fidljstest;
+
+enum Blorp : int8 {
+ ALPHA = 1;
+ BETA = 2;
+ GAMMA = 0x48;
+};
+
+// A struct of basic types, some with defaults and some without to test various
+// paths of the generator.
+struct BasicStruct {
+ bool b;
+ int8 i8;
+ int16 i16 = 18;
+ int32 i32;
+ uint8 u8;
+ uint16 u16;
+ uint32 u32 = 4000000000;
+};
+
+const uint64 ARRRR_SIZE = 32;
+
+struct StuffAndThings {
+ int32 count;
+ string id;
+ vector<int32> a_vector;
+ BasicStruct basic;
+ string later_string;
+ array<int32>:ARRRR_SIZE arrrr;
+ vector<string>? nullable_vector_of_string0;
+ vector<string>? nullable_vector_of_string1;
+ vector<Blorp> vector_of_blorp;
+};
+
+struct StructWithBool {
+ bool some_bool = false;
+};
+
+struct StructWithUint {
+ uint32 num;
+};
+
+struct LargerStructWithArray {
+ array<int32>:32 components;
+};
+
+union UnionOfStructs {
+ StructWithBool swb;
+ StructWithUint swu;
+ LargerStructWithArray lswa;
+};
+
+struct StructOfMultipleUnions {
+ UnionOfStructs initial;
+ UnionOfStructs? optional;
+ UnionOfStructs trailing;
+};
+
+const int64 NO_TIMESTAMP = 0x7fffffffffffffff;
+const int64 ANOTHER_COPY = NO_TIMESTAMP;
+const string SOME_STRING = "a 你好 thing\" containing ' quotes";
+
+struct VariousDefaults {
+ Blorp blorp_defaulting_to_beta = BETA;
+ int64 int64_defaulting_to_no_timestamp = NO_TIMESTAMP;
+ int64 int64_defaulting_to_const = 0x7fffffffffffff11;
+ string string_with_default = "stuff";
+};
+
+struct VectorsOfPrimitives {
+ vector<bool> v_bool;
+ vector<uint8> v_uint8;
+ vector<uint16> v_uint16;
+ vector<uint32> v_uint32;
+ vector<uint64> v_uint64;
+ vector<int8> v_int8;
+ vector<int16> v_int16;
+ vector<int32> v_int32;
+ vector<int64> v_int64;
+ vector<float32> v_float32;
+ vector<float64> v_float64;
+};
+
+struct VectorOfHandleToVMO {
+ vector<handle<vmo>> vmos;
+};
+
+// This is a compile-only test for gen.py to ensure that the size of
+// AfterPreviousReference is available before the vector<AfterPreviousReference>
+// is compiled in this struct.
+struct LaterReference {
+ vector<AfterPreviousReference>? later;
+};
+
+struct AfterPreviousReference {
+ int32 an_int;
+};
+
+protocol AnotherInterface {
+ TimesTwo(int32 a) -> (int32 b);
+};
+
+protocol Testola {
+ DoSomething();
+
+ PrintInt(int32 num);
+
+ PrintMsg(string msg);
+
+ VariousArgs(Blorp blorp, string:32 msg, vector<uint32> stuff);
+
+ WithResponse(int32 a, int32 b) -> (int32 sum);
+
+ SendAStruct(BasicStruct basic);
+
+ NestedStructsWithResponse(BasicStruct basic) -> (StuffAndThings resp);
+
+ PassHandles(handle<job> job) -> (handle<process> process);
+
+ ReceiveUnions(StructOfMultipleUnions somu);
+
+ SendUnions() -> (StructOfMultipleUnions somu);
+
+ SendVectorsOfString(vector<string> unsized,
+ vector<string?> nullable,
+ vector<string:10> max_strlen);
+
+ VectorOfStruct(vector<StructWithUint> stuff)
+ -> (vector<StructWithUint> result);
+
+ PassVectorOfPrimitives(VectorsOfPrimitives input)
+ -> (VectorsOfPrimitives output);
+
+ PassVectorOfVMO(VectorOfHandleToVMO input)
+ -> (VectorOfHandleToVMO output);
+
+ GetAnother(request<AnotherInterface> another);
+};
diff --git a/deps/v8/build/fuchsia/fidlgen_js/third_party/__init__.py b/deps/v8/build/fuchsia/fidlgen_js/third_party/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/build/fuchsia/fidlgen_js/third_party/__init__.py
diff --git a/deps/v8/build/fuchsia/fidlgen_js/third_party/enum34/LICENSE b/deps/v8/build/fuchsia/fidlgen_js/third_party/enum34/LICENSE
new file mode 100644
index 0000000000..9003b8850e
--- /dev/null
+++ b/deps/v8/build/fuchsia/fidlgen_js/third_party/enum34/LICENSE
@@ -0,0 +1,32 @@
+Copyright (c) 2013, Ethan Furman.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ Redistributions of source code must retain the above
+ copyright notice, this list of conditions and the
+ following disclaimer.
+
+ Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials
+ provided with the distribution.
+
+ Neither the name Ethan Furman nor the names of any
+ contributors may be used to endorse or promote products
+ derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/deps/v8/build/fuchsia/fidlgen_js/third_party/enum34/README.chromium b/deps/v8/build/fuchsia/fidlgen_js/third_party/enum34/README.chromium
new file mode 100644
index 0000000000..4d0ef07c43
--- /dev/null
+++ b/deps/v8/build/fuchsia/fidlgen_js/third_party/enum34/README.chromium
@@ -0,0 +1,15 @@
+Name: enum34
+Short Name: enum34
+URL: https://bitbucket.org/stoneleaf/enum34
+License: BSD
+License File: LICENSE
+Revision: f24487b
+Security Critical: no
+
+
+Description:
+
+'Enum' backported from Python 3.4 to earlier Python versions. Only LICENSE and
+__init__.py are taken, other packaging files, documentation, etc. removed.
+
+Only used at build time.
diff --git a/deps/v8/build/fuchsia/fidlgen_js/third_party/enum34/__init__.py b/deps/v8/build/fuchsia/fidlgen_js/third_party/enum34/__init__.py
new file mode 100644
index 0000000000..d6ffb3a40f
--- /dev/null
+++ b/deps/v8/build/fuchsia/fidlgen_js/third_party/enum34/__init__.py
@@ -0,0 +1,837 @@
+"""Python Enumerations"""
+
+import sys as _sys
+
+__all__ = ['Enum', 'IntEnum', 'unique']
+
+version = 1, 1, 6
+
+pyver = float('%s.%s' % _sys.version_info[:2])
+
+try:
+ any
+except NameError:
+ def any(iterable):
+ for element in iterable:
+ if element:
+ return True
+ return False
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ OrderedDict = None
+
+try:
+ basestring
+except NameError:
+ # In Python 2 basestring is the ancestor of both str and unicode
+ # in Python 3 it's just str, but was missing in 3.1
+ basestring = str
+
+try:
+ unicode
+except NameError:
+ # In Python 3 unicode no longer exists (it's just str)
+ unicode = str
+
+class _RouteClassAttributeToGetattr(object):
+ """Route attribute access on a class to __getattr__.
+
+ This is a descriptor, used to define attributes that act differently when
+ accessed through an instance and through a class. Instance access remains
+ normal, but access to an attribute through a class will be routed to the
+ class's __getattr__ method; this is done by raising AttributeError.
+
+ """
+ def __init__(self, fget=None):
+ self.fget = fget
+
+ def __get__(self, instance, ownerclass=None):
+ if instance is None:
+ raise AttributeError()
+ return self.fget(instance)
+
+ def __set__(self, instance, value):
+ raise AttributeError("can't set attribute")
+
+ def __delete__(self, instance):
+ raise AttributeError("can't delete attribute")
+
+
+def _is_descriptor(obj):
+ """Returns True if obj is a descriptor, False otherwise."""
+ return (
+ hasattr(obj, '__get__') or
+ hasattr(obj, '__set__') or
+ hasattr(obj, '__delete__'))
+
+
+def _is_dunder(name):
+ """Returns True if a __dunder__ name, False otherwise."""
+ return (name[:2] == name[-2:] == '__' and
+ name[2:3] != '_' and
+ name[-3:-2] != '_' and
+ len(name) > 4)
+
+
+def _is_sunder(name):
+ """Returns True if a _sunder_ name, False otherwise."""
+ return (name[0] == name[-1] == '_' and
+ name[1:2] != '_' and
+ name[-2:-1] != '_' and
+ len(name) > 2)
+
+
+def _make_class_unpicklable(cls):
+ """Make the given class un-picklable."""
+ def _break_on_call_reduce(self, protocol=None):
+ raise TypeError('%r cannot be pickled' % self)
+ cls.__reduce_ex__ = _break_on_call_reduce
+ cls.__module__ = '<unknown>'
+
+
+class _EnumDict(dict):
+ """Track enum member order and ensure member names are not reused.
+
+ EnumMeta will use the names found in self._member_names as the
+ enumeration member names.
+
+ """
+ def __init__(self):
+ super(_EnumDict, self).__init__()
+ self._member_names = []
+
+ def __setitem__(self, key, value):
+ """Changes anything not dundered or not a descriptor.
+
+ If a descriptor is added with the same name as an enum member, the name
+ is removed from _member_names (this may leave a hole in the numerical
+ sequence of values).
+
+ If an enum member name is used twice, an error is raised; duplicate
+ values are not checked for.
+
+ Single underscore (sunder) names are reserved.
+
+ Note: in 3.x __order__ is simply discarded as a not necessary piece
+ leftover from 2.x
+
+ """
+ if pyver >= 3.0 and key in ('_order_', '__order__'):
+ return
+ elif key == '__order__':
+ key = '_order_'
+ if _is_sunder(key):
+ if key != '_order_':
+ raise ValueError('_names_ are reserved for future Enum use')
+ elif _is_dunder(key):
+ pass
+ elif key in self._member_names:
+ # descriptor overwriting an enum?
+ raise TypeError('Attempted to reuse key: %r' % key)
+ elif not _is_descriptor(value):
+ if key in self:
+ # enum overwriting a descriptor?
+ raise TypeError('Key already defined as: %r' % self[key])
+ self._member_names.append(key)
+ super(_EnumDict, self).__setitem__(key, value)
+
+
+# Dummy value for Enum as EnumMeta explicitly checks for it, but of course until
+# EnumMeta finishes running the first time the Enum class doesn't exist. This
+# is also why there are checks in EnumMeta like `if Enum is not None`
+Enum = None
+
+
+class EnumMeta(type):
+ """Metaclass for Enum"""
+ @classmethod
+ def __prepare__(metacls, cls, bases):
+ return _EnumDict()
+
+ def __new__(metacls, cls, bases, classdict):
+ # an Enum class is final once enumeration items have been defined; it
+ # cannot be mixed with other types (int, float, etc.) if it has an
+ # inherited __new__ unless a new __new__ is defined (or the resulting
+ # class will fail).
+ if type(classdict) is dict:
+ original_dict = classdict
+ classdict = _EnumDict()
+ for k, v in original_dict.items():
+ classdict[k] = v
+
+ member_type, first_enum = metacls._get_mixins_(bases)
+ __new__, save_new, use_args = metacls._find_new_(classdict, member_type,
+ first_enum)
+ # save enum items into separate mapping so they don't get baked into
+ # the new class
+ members = dict((k, classdict[k]) for k in classdict._member_names)
+ for name in classdict._member_names:
+ del classdict[name]
+
+ # py2 support for definition order
+ _order_ = classdict.get('_order_')
+ if _order_ is None:
+ if pyver < 3.0:
+ try:
+ _order_ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])]
+ except TypeError:
+ _order_ = [name for name in sorted(members.keys())]
+ else:
+ _order_ = classdict._member_names
+ else:
+ del classdict['_order_']
+ if pyver < 3.0:
+ _order_ = _order_.replace(',', ' ').split()
+ aliases = [name for name in members if name not in _order_]
+ _order_ += aliases
+
+ # check for illegal enum names (any others?)
+ invalid_names = set(members) & set(['mro'])
+ if invalid_names:
+ raise ValueError('Invalid enum member name(s): %s' % (
+ ', '.join(invalid_names), ))
+
+ # save attributes from super classes so we know if we can take
+ # the shortcut of storing members in the class dict
+ base_attributes = set([a for b in bases for a in b.__dict__])
+ # create our new Enum type
+ enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict)
+ enum_class._member_names_ = [] # names in random order
+ if OrderedDict is not None:
+ enum_class._member_map_ = OrderedDict()
+ else:
+ enum_class._member_map_ = {} # name->value map
+ enum_class._member_type_ = member_type
+
+ # Reverse value->name map for hashable values.
+ enum_class._value2member_map_ = {}
+
+ # instantiate them, checking for duplicates as we go
+ # we instantiate first instead of checking for duplicates first in case
+ # a custom __new__ is doing something funky with the values -- such as
+ # auto-numbering ;)
+ if __new__ is None:
+ __new__ = enum_class.__new__
+ for member_name in _order_:
+ value = members[member_name]
+ if not isinstance(value, tuple):
+ args = (value, )
+ else:
+ args = value
+ if member_type is tuple: # special case for tuple enums
+ args = (args, ) # wrap it one more time
+ if not use_args or not args:
+ enum_member = __new__(enum_class)
+ if not hasattr(enum_member, '_value_'):
+ enum_member._value_ = value
+ else:
+ enum_member = __new__(enum_class, *args)
+ if not hasattr(enum_member, '_value_'):
+ enum_member._value_ = member_type(*args)
+ value = enum_member._value_
+ enum_member._name_ = member_name
+ enum_member.__objclass__ = enum_class
+ enum_member.__init__(*args)
+ # If another member with the same value was already defined, the
+ # new member becomes an alias to the existing one.
+ for name, canonical_member in enum_class._member_map_.items():
+ if canonical_member.value == enum_member._value_:
+ enum_member = canonical_member
+ break
+ else:
+ # Aliases don't appear in member names (only in __members__).
+ enum_class._member_names_.append(member_name)
+ # performance boost for any member that would not shadow
+ # a DynamicClassAttribute (aka _RouteClassAttributeToGetattr)
+ if member_name not in base_attributes:
+ setattr(enum_class, member_name, enum_member)
+ # now add to _member_map_
+ enum_class._member_map_[member_name] = enum_member
+ try:
+ # This may fail if value is not hashable. We can't add the value
+ # to the map, and by-value lookups for this value will be
+ # linear.
+ enum_class._value2member_map_[value] = enum_member
+ except TypeError:
+ pass
+
+
+ # If a custom type is mixed into the Enum, and it does not know how
+ # to pickle itself, pickle.dumps will succeed but pickle.loads will
+ # fail. Rather than have the error show up later and possibly far
+ # from the source, sabotage the pickle protocol for this class so
+ # that pickle.dumps also fails.
+ #
+ # However, if the new class implements its own __reduce_ex__, do not
+ # sabotage -- it's on them to make sure it works correctly. We use
+ # __reduce_ex__ instead of any of the others as it is preferred by
+ # pickle over __reduce__, and it handles all pickle protocols.
+ unpicklable = False
+ if '__reduce_ex__' not in classdict:
+ if member_type is not object:
+ methods = ('__getnewargs_ex__', '__getnewargs__',
+ '__reduce_ex__', '__reduce__')
+ if not any(m in member_type.__dict__ for m in methods):
+ _make_class_unpicklable(enum_class)
+ unpicklable = True
+
+
+ # double check that repr and friends are not the mixin's or various
+ # things break (such as pickle)
+ for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
+ class_method = getattr(enum_class, name)
+ obj_method = getattr(member_type, name, None)
+ enum_method = getattr(first_enum, name, None)
+ if name not in classdict and class_method is not enum_method:
+ if name == '__reduce_ex__' and unpicklable:
+ continue
+ setattr(enum_class, name, enum_method)
+
+        # method resolution and ints are not playing nice;
+        # Python versions before 2.6 use __cmp__
+
+ if pyver < 2.6:
+
+ if issubclass(enum_class, int):
+ setattr(enum_class, '__cmp__', getattr(int, '__cmp__'))
+
+ elif pyver < 3.0:
+
+ if issubclass(enum_class, int):
+ for method in (
+ '__le__',
+ '__lt__',
+ '__gt__',
+ '__ge__',
+ '__eq__',
+ '__ne__',
+ '__hash__',
+ ):
+ setattr(enum_class, method, getattr(int, method))
+
+ # replace any other __new__ with our own (as long as Enum is not None,
+ # anyway) -- again, this is to support pickle
+ if Enum is not None:
+ # if the user defined their own __new__, save it before it gets
+ # clobbered in case they subclass later
+ if save_new:
+ setattr(enum_class, '__member_new__', enum_class.__dict__['__new__'])
+ setattr(enum_class, '__new__', Enum.__dict__['__new__'])
+ return enum_class
+
+ def __bool__(cls):
+ """
+ classes/types should always be True.
+ """
+ return True
+
+ def __call__(cls, value, names=None, module=None, type=None, start=1):
+ """Either returns an existing member, or creates a new enum class.
+
+ This method is used both when an enum class is given a value to match
+ to an enumeration member (i.e. Color(3)) and for the functional API
+ (i.e. Color = Enum('Color', names='red green blue')).
+
+ When used for the functional API: `module`, if set, will be stored in
+ the new class' __module__ attribute; `type`, if set, will be mixed in
+ as the first base class.
+
+ Note: if `module` is not set this routine will attempt to discover the
+ calling module by walking the frame stack; if this is unsuccessful
+ the resulting class will not be pickleable.
+
+ """
+ if names is None: # simple value lookup
+ return cls.__new__(cls, value)
+ # otherwise, functional API: we're creating a new Enum type
+ return cls._create_(value, names, module=module, type=type, start=start)
+
+ def __contains__(cls, member):
+ return isinstance(member, cls) and member.name in cls._member_map_
+
+ def __delattr__(cls, attr):
+ # nicer error message when someone tries to delete an attribute
+ # (see issue19025).
+ if attr in cls._member_map_:
+ raise AttributeError(
+ "%s: cannot delete Enum member." % cls.__name__)
+ super(EnumMeta, cls).__delattr__(attr)
+
+ def __dir__(self):
+ return (['__class__', '__doc__', '__members__', '__module__'] +
+ self._member_names_)
+
+ @property
+ def __members__(cls):
+ """Returns a mapping of member name->value.
+
+ This mapping lists all enum members, including aliases. Note that this
+ is a copy of the internal mapping.
+
+ """
+ return cls._member_map_.copy()
+
+ def __getattr__(cls, name):
+ """Return the enum member matching `name`
+
+ We use __getattr__ instead of descriptors or inserting into the enum
+ class' __dict__ in order to support `name` and `value` being both
+ properties for enum members (which live in the class' __dict__) and
+ enum members themselves.
+
+ """
+ if _is_dunder(name):
+ raise AttributeError(name)
+ try:
+ return cls._member_map_[name]
+ except KeyError:
+ raise AttributeError(name)
+
+ def __getitem__(cls, name):
+ return cls._member_map_[name]
+
+ def __iter__(cls):
+ return (cls._member_map_[name] for name in cls._member_names_)
+
+ def __reversed__(cls):
+ return (cls._member_map_[name] for name in reversed(cls._member_names_))
+
+ def __len__(cls):
+ return len(cls._member_names_)
+
+ __nonzero__ = __bool__
+
+ def __repr__(cls):
+ return "<enum %r>" % cls.__name__
+
+ def __setattr__(cls, name, value):
+ """Block attempts to reassign Enum members.
+
+ A simple assignment to the class namespace only changes one of the
+ several possible ways to get an Enum member from the Enum class,
+ resulting in an inconsistent Enumeration.
+
+ """
+ member_map = cls.__dict__.get('_member_map_', {})
+ if name in member_map:
+ raise AttributeError('Cannot reassign members.')
+ super(EnumMeta, cls).__setattr__(name, value)
+
+ def _create_(cls, class_name, names=None, module=None, type=None, start=1):
+ """Convenience method to create a new Enum class.
+
+ `names` can be:
+
+ * A string containing member names, separated either with spaces or
+ commas. Values are auto-numbered from 1.
+ * An iterable of member names. Values are auto-numbered from 1.
+ * An iterable of (member name, value) pairs.
+ * A mapping of member name -> value.
+
+ """
+ if pyver < 3.0:
+ # if class_name is unicode, attempt a conversion to ASCII
+ if isinstance(class_name, unicode):
+ try:
+ class_name = class_name.encode('ascii')
+ except UnicodeEncodeError:
+ raise TypeError('%r is not representable in ASCII' % class_name)
+ metacls = cls.__class__
+ if type is None:
+ bases = (cls, )
+ else:
+ bases = (type, cls)
+ classdict = metacls.__prepare__(class_name, bases)
+ _order_ = []
+
+ # special processing needed for names?
+ if isinstance(names, basestring):
+ names = names.replace(',', ' ').split()
+ if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
+ names = [(e, i+start) for (i, e) in enumerate(names)]
+
+ # Here, names is either an iterable of (name, value) or a mapping.
+ item = None # in case names is empty
+ for item in names:
+ if isinstance(item, basestring):
+ member_name, member_value = item, names[item]
+ else:
+ member_name, member_value = item
+ classdict[member_name] = member_value
+ _order_.append(member_name)
+ # only set _order_ in classdict if name/value was not from a mapping
+ if not isinstance(item, basestring):
+ classdict['_order_'] = ' '.join(_order_)
+ enum_class = metacls.__new__(metacls, class_name, bases, classdict)
+
+ # TODO: replace the frame hack if a blessed way to know the calling
+ # module is ever developed
+ if module is None:
+ try:
+ module = _sys._getframe(2).f_globals['__name__']
+ except (AttributeError, ValueError):
+ pass
+ if module is None:
+ _make_class_unpicklable(enum_class)
+ else:
+ enum_class.__module__ = module
+
+ return enum_class
+
+ @staticmethod
+ def _get_mixins_(bases):
+ """Returns the type for creating enum members, and the first inherited
+ enum class.
+
+ bases: the tuple of bases that was given to __new__
+
+ """
+ if not bases or Enum is None:
+ return object, Enum
+
+
+ # double check that we are not subclassing a class with existing
+ # enumeration members; while we're at it, see if any other data
+ # type has been mixed in so we can use the correct __new__
+ member_type = first_enum = None
+ for base in bases:
+ if (base is not Enum and
+ issubclass(base, Enum) and
+ base._member_names_):
+ raise TypeError("Cannot extend enumerations")
+ # base is now the last base in bases
+ if not issubclass(base, Enum):
+ raise TypeError("new enumerations must be created as "
+ "`ClassName([mixin_type,] enum_type)`")
+
+ # get correct mix-in type (either mix-in type of Enum subclass, or
+ # first base if last base is Enum)
+ if not issubclass(bases[0], Enum):
+ member_type = bases[0] # first data type
+ first_enum = bases[-1] # enum type
+ else:
+ for base in bases[0].__mro__:
+ # most common: (IntEnum, int, Enum, object)
+ # possible: (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
+ # <class 'int'>, <Enum 'Enum'>,
+ # <class 'object'>)
+ if issubclass(base, Enum):
+ if first_enum is None:
+ first_enum = base
+ else:
+ if member_type is None:
+ member_type = base
+
+ return member_type, first_enum
+
+ if pyver < 3.0:
+ @staticmethod
+ def _find_new_(classdict, member_type, first_enum):
+ """Returns the __new__ to be used for creating the enum members.
+
+ classdict: the class dictionary given to __new__
+ member_type: the data type whose __new__ will be used by default
+ first_enum: enumeration to check for an overriding __new__
+
+ """
+        # now find the correct __new__, checking to see if one was defined
+ # by the user; also check earlier enum classes in case a __new__ was
+ # saved as __member_new__
+ __new__ = classdict.get('__new__', None)
+ if __new__:
+ return None, True, True # __new__, save_new, use_args
+
+ N__new__ = getattr(None, '__new__')
+ O__new__ = getattr(object, '__new__')
+ if Enum is None:
+ E__new__ = N__new__
+ else:
+ E__new__ = Enum.__dict__['__new__']
+ # check all possibles for __member_new__ before falling back to
+ # __new__
+ for method in ('__member_new__', '__new__'):
+ for possible in (member_type, first_enum):
+ try:
+ target = possible.__dict__[method]
+ except (AttributeError, KeyError):
+ target = getattr(possible, method, None)
+ if target not in [
+ None,
+ N__new__,
+ O__new__,
+ E__new__,
+ ]:
+ if method == '__member_new__':
+ classdict['__new__'] = target
+ return None, False, True
+ if isinstance(target, staticmethod):
+ target = target.__get__(member_type)
+ __new__ = target
+ break
+ if __new__ is not None:
+ break
+ else:
+ __new__ = object.__new__
+
+ # if a non-object.__new__ is used then whatever value/tuple was
+ # assigned to the enum member name will be passed to __new__ and to the
+ # new enum member's __init__
+ if __new__ is object.__new__:
+ use_args = False
+ else:
+ use_args = True
+
+ return __new__, False, use_args
+ else:
+ @staticmethod
+ def _find_new_(classdict, member_type, first_enum):
+ """Returns the __new__ to be used for creating the enum members.
+
+ classdict: the class dictionary given to __new__
+ member_type: the data type whose __new__ will be used by default
+ first_enum: enumeration to check for an overriding __new__
+
+ """
+        # now find the correct __new__, checking to see if one was defined
+ # by the user; also check earlier enum classes in case a __new__ was
+ # saved as __member_new__
+ __new__ = classdict.get('__new__', None)
+
+ # should __new__ be saved as __member_new__ later?
+ save_new = __new__ is not None
+
+ if __new__ is None:
+ # check all possibles for __member_new__ before falling back to
+ # __new__
+ for method in ('__member_new__', '__new__'):
+ for possible in (member_type, first_enum):
+ target = getattr(possible, method, None)
+ if target not in (
+ None,
+ None.__new__,
+ object.__new__,
+ Enum.__new__,
+ ):
+ __new__ = target
+ break
+ if __new__ is not None:
+ break
+ else:
+ __new__ = object.__new__
+
+ # if a non-object.__new__ is used then whatever value/tuple was
+ # assigned to the enum member name will be passed to __new__ and to the
+ # new enum member's __init__
+ if __new__ is object.__new__:
+ use_args = False
+ else:
+ use_args = True
+
+ return __new__, save_new, use_args
+
+
+########################################################
+# In order to support Python 2 and 3 with a single
+# codebase we have to create the Enum methods separately
+# and then use the `type(name, bases, dict)` method to
+# create the class.
+########################################################
+temp_enum_dict = {}
+temp_enum_dict['__doc__'] = "Generic enumeration.\n\n Derive from this class to define new enumerations.\n\n"
+
+def __new__(cls, value):
+ # all enum instances are actually created during class construction
+ # without calling this method; this method is called by the metaclass'
+ # __call__ (i.e. Color(3) ), and by pickle
+ if type(value) is cls:
+ # For lookups like Color(Color.red)
+ value = value.value
+ #return value
+ # by-value search for a matching enum member
+ # see if it's in the reverse mapping (for hashable values)
+ try:
+ if value in cls._value2member_map_:
+ return cls._value2member_map_[value]
+ except TypeError:
+ # not there, now do long search -- O(n) behavior
+ for member in cls._member_map_.values():
+ if member.value == value:
+ return member
+ raise ValueError("%s is not a valid %s" % (value, cls.__name__))
+temp_enum_dict['__new__'] = __new__
+del __new__
+
+def __repr__(self):
+ return "<%s.%s: %r>" % (
+ self.__class__.__name__, self._name_, self._value_)
+temp_enum_dict['__repr__'] = __repr__
+del __repr__
+
+def __str__(self):
+ return "%s.%s" % (self.__class__.__name__, self._name_)
+temp_enum_dict['__str__'] = __str__
+del __str__
+
+if pyver >= 3.0:
+ def __dir__(self):
+ added_behavior = [
+ m
+ for cls in self.__class__.mro()
+ for m in cls.__dict__
+ if m[0] != '_' and m not in self._member_map_
+ ]
+ return (['__class__', '__doc__', '__module__', ] + added_behavior)
+ temp_enum_dict['__dir__'] = __dir__
+ del __dir__
+
+def __format__(self, format_spec):
+ # mixed-in Enums should use the mixed-in type's __format__, otherwise
+ # we can get strange results with the Enum name showing up instead of
+ # the value
+
+ # pure Enum branch
+ if self._member_type_ is object:
+ cls = str
+ val = str(self)
+ # mix-in branch
+ else:
+ cls = self._member_type_
+ val = self.value
+ return cls.__format__(val, format_spec)
+temp_enum_dict['__format__'] = __format__
+del __format__
+
+
+####################################
+# Python versions earlier than 2.6 use __cmp__
+
+if pyver < 2.6:
+
+ def __cmp__(self, other):
+ if type(other) is self.__class__:
+ if self is other:
+ return 0
+ return -1
+ return NotImplemented
+ raise TypeError("unorderable types: %s() and %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__cmp__'] = __cmp__
+ del __cmp__
+
+else:
+
+ def __le__(self, other):
+ raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__le__'] = __le__
+ del __le__
+
+ def __lt__(self, other):
+ raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__lt__'] = __lt__
+ del __lt__
+
+ def __ge__(self, other):
+ raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__ge__'] = __ge__
+ del __ge__
+
+ def __gt__(self, other):
+ raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__gt__'] = __gt__
+ del __gt__
+
+
+def __eq__(self, other):
+ if type(other) is self.__class__:
+ return self is other
+ return NotImplemented
+temp_enum_dict['__eq__'] = __eq__
+del __eq__
+
+def __ne__(self, other):
+ if type(other) is self.__class__:
+ return self is not other
+ return NotImplemented
+temp_enum_dict['__ne__'] = __ne__
+del __ne__
+
+def __hash__(self):
+ return hash(self._name_)
+temp_enum_dict['__hash__'] = __hash__
+del __hash__
+
+def __reduce_ex__(self, proto):
+ return self.__class__, (self._value_, )
+temp_enum_dict['__reduce_ex__'] = __reduce_ex__
+del __reduce_ex__
+
+# _RouteClassAttributeToGetattr is used to provide access to the `name`
+# and `value` properties of enum members while keeping some measure of
+# protection from modification, while still allowing for an enumeration
+# to have members named `name` and `value`. This works because enumeration
+# members are not set directly on the enum class -- __getattr__ is
+# used to look them up.
+
+@_RouteClassAttributeToGetattr
+def name(self):
+ return self._name_
+temp_enum_dict['name'] = name
+del name
+
+@_RouteClassAttributeToGetattr
+def value(self):
+ return self._value_
+temp_enum_dict['value'] = value
+del value
+
+@classmethod
+def _convert(cls, name, module, filter, source=None):
+ """
+ Create a new Enum subclass that replaces a collection of global constants
+ """
+ # convert all constants from source (or module) that pass filter() to
+ # a new Enum called name, and export the enum and its members back to
+ # module;
+ # also, replace the __reduce_ex__ method so unpickling works in
+ # previous Python versions
+ module_globals = vars(_sys.modules[module])
+ if source:
+ source = vars(source)
+ else:
+ source = module_globals
+ members = dict((name, value) for name, value in source.items() if filter(name))
+ cls = cls(name, members, module=module)
+ cls.__reduce_ex__ = _reduce_ex_by_name
+ module_globals.update(cls.__members__)
+ module_globals[name] = cls
+ return cls
+temp_enum_dict['_convert'] = _convert
+del _convert
+
+Enum = EnumMeta('Enum', (object, ), temp_enum_dict)
+del temp_enum_dict
+
+# Enum has now been created
+###########################
+
+class IntEnum(int, Enum):
+ """Enum where members are also (and must be) ints"""
+
+def _reduce_ex_by_name(self, proto):
+ return self.name
+
+def unique(enumeration):
+ """Class decorator that ensures only unique members exist in an enumeration."""
+ duplicates = []
+ for name, member in enumeration.__members__.items():
+ if name != member.name:
+ duplicates.append((name, member.name))
+ if duplicates:
+ duplicate_names = ', '.join(
+ ["%s -> %s" % (alias, name) for (alias, name) in duplicates]
+ )
+ raise ValueError('duplicate names found in %r: %s' %
+ (enumeration, duplicate_names)
+ )
+ return enumeration
diff --git a/deps/v8/build/fuchsia/layout_test_proxy/BUILD.gn b/deps/v8/build/fuchsia/layout_test_proxy/BUILD.gn
new file mode 100644
index 0000000000..ad065071c5
--- /dev/null
+++ b/deps/v8/build/fuchsia/layout_test_proxy/BUILD.gn
@@ -0,0 +1,34 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+assert(is_fuchsia)
+
+import("//testing/test.gni")
+
+# Binary used to proxy TCP connections from a Fuchsia process. Potentially SSH
+# can be used to forward TCP, but this feature is currently broken on Fuchsia,
+# see ZX-1555. layout_test_proxy can be removed once that issue with sshd is
+# fixed and layout tests are updated to use SSH.
+executable("layout_test_proxy") {
+ testonly = true
+ sources = [
+ "layout_test_proxy.cc",
+ ]
+ deps = [
+ "//net",
+ "//net:test_support",
+ ]
+}
+
+fuchsia_package("layout_test_proxy_pkg") {
+ testonly = true
+ binary = ":layout_test_proxy"
+ package_name_override = "layout_test_proxy"
+}
+
+fuchsia_package_runner("layout_test_proxy_runner") {
+ testonly = true
+ package = ":layout_test_proxy_pkg"
+ package_name_override = "layout_test_proxy"
+}
diff --git a/deps/v8/build/fuchsia/layout_test_proxy/DEPS b/deps/v8/build/fuchsia/layout_test_proxy/DEPS
new file mode 100644
index 0000000000..8fa9d48d88
--- /dev/null
+++ b/deps/v8/build/fuchsia/layout_test_proxy/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+net",
+]
diff --git a/deps/v8/build/fuchsia/layout_test_proxy/layout_test_proxy.cc b/deps/v8/build/fuchsia/layout_test_proxy/layout_test_proxy.cc
new file mode 100644
index 0000000000..1d14df99ea
--- /dev/null
+++ b/deps/v8/build/fuchsia/layout_test_proxy/layout_test_proxy.cc
@@ -0,0 +1,78 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/command_line.h"
+#include "base/message_loop/message_loop.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "net/base/ip_endpoint.h"
+#include "net/test/tcp_socket_proxy.h"
+
+const char kPortsSwitch[] = "ports";
+const char kRemoteAddressSwitch[] = "remote-address";
+
+int main(int argc, char** argv) {
+ base::CommandLine::Init(argc, argv);
+
+ base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
+
+ if (!command_line->HasSwitch(kPortsSwitch)) {
+ LOG(ERROR) << "--" << kPortsSwitch << " was not specified.";
+ return 1;
+ }
+
+ std::vector<std::string> ports_strings =
+ base::SplitString(command_line->GetSwitchValueASCII(kPortsSwitch), ",",
+ base::TRIM_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
+ if (ports_strings.empty()) {
+ LOG(ERROR) << "At least one port must be specified with --" << kPortsSwitch;
+ return 1;
+ }
+
+ std::vector<int> ports;
+ for (auto& port_string : ports_strings) {
+ int port;
+ if (!base::StringToInt(port_string, &port) || port <= 0 || port > 65535) {
+ LOG(ERROR) << "Invalid value specified for --" << kPortsSwitch << ": "
+ << port_string;
+ return 1;
+ }
+ ports.push_back(port);
+ }
+
+ if (!command_line->HasSwitch(kRemoteAddressSwitch)) {
+ LOG(ERROR) << "--" << kRemoteAddressSwitch << " was not specified.";
+ return 1;
+ }
+
+ std::string remote_address_str =
+ command_line->GetSwitchValueASCII(kRemoteAddressSwitch);
+ net::IPAddress remote_address;
+ if (!remote_address.AssignFromIPLiteral(remote_address_str)) {
+ LOG(ERROR) << "Invalid value specified for --" << kRemoteAddressSwitch
+ << ": " << remote_address_str;
+ return 1;
+ }
+
+ base::MessageLoopForIO message_loop;
+
+ std::vector<std::unique_ptr<net::TcpSocketProxy>> proxies;
+
+ for (int port : ports) {
+ auto test_server_proxy =
+ std::make_unique<net::TcpSocketProxy>(message_loop.task_runner());
+ if (!test_server_proxy->Initialize(port)) {
+ LOG(ERROR) << "Can't bind proxy to port " << port;
+ return 1;
+ }
+ LOG(INFO) << "Listening on port " << test_server_proxy->local_port();
+ test_server_proxy->Start(net::IPEndPoint(remote_address, port));
+ proxies.push_back(std::move(test_server_proxy));
+ }
+
+ // Run the message loop indefinitely.
+ base::RunLoop().Run();
+
+ return 0;
+} \ No newline at end of file
diff --git a/deps/v8/build/fuchsia/linux.sdk.sha1 b/deps/v8/build/fuchsia/linux.sdk.sha1
new file mode 100644
index 0000000000..b891b02143
--- /dev/null
+++ b/deps/v8/build/fuchsia/linux.sdk.sha1
@@ -0,0 +1 @@
+8915992854282451632 \ No newline at end of file
diff --git a/deps/v8/build/fuchsia/mac.sdk.sha1 b/deps/v8/build/fuchsia/mac.sdk.sha1
new file mode 100644
index 0000000000..b622d13683
--- /dev/null
+++ b/deps/v8/build/fuchsia/mac.sdk.sha1
@@ -0,0 +1 @@
+8916000087704284384 \ No newline at end of file
diff --git a/deps/v8/build/fuchsia/net_test_server.py b/deps/v8/build/fuchsia/net_test_server.py
new file mode 100644
index 0000000000..5b7023c82d
--- /dev/null
+++ b/deps/v8/build/fuchsia/net_test_server.py
@@ -0,0 +1,89 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import common
+import json
+import logging
+import os
+import re
+import socket
+import sys
+import subprocess
+import tempfile
+
+DIR_SOURCE_ROOT = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
+sys.path.append(os.path.join(DIR_SOURCE_ROOT, 'build', 'util', 'lib', 'common'))
+import chrome_test_server_spawner
+
+
+# Implementation of chrome_test_server_spawner.PortForwarder that uses SSH's
+# remote port forwarding feature to forward ports.
+class SSHPortForwarder(chrome_test_server_spawner.PortForwarder):
+ def __init__(self, target):
+ self._target = target
+
+ # Maps the host (server) port to the device port number.
+ self._port_mapping = {}
+
+ def Map(self, port_pairs):
+ for p in port_pairs:
+ _, host_port = p
+ self._port_mapping[host_port] = \
+ common.ConnectPortForwardingTask(self._target, host_port)
+
+ def GetDevicePortForHostPort(self, host_port):
+ return self._port_mapping[host_port]
+
+ def Unmap(self, device_port):
+ for host_port, entry in self._port_mapping.iteritems():
+ if entry == device_port:
+ forwarding_args = [
+ '-NT', '-O', 'cancel', '-R', '0:localhost:%d' % host_port]
+ task = self._target.RunCommandPiped([],
+ ssh_args=forwarding_args,
+ stderr=subprocess.PIPE)
+ task.wait()
+ if task.returncode != 0:
+ raise Exception(
+ 'Error %d when unmapping port %d' % (task.returncode,
+ device_port))
+ del self._port_mapping[host_port]
+ return
+
+ raise Exception('Unmap called for unknown port: %d' % device_port)
+
+
+def SetupTestServer(target, test_concurrency):
+ """Provisions a forwarding test server and configures |target| to use it.
+
+  Returns the SpawningServer instance for the test server."""
+
+ logging.debug('Starting test server.')
+ # The TestLauncher can launch more jobs than the limit specified with
+ # --test-launcher-jobs so the max number of spawned test servers is set to
+ # twice that limit here. See https://crbug.com/913156#c19.
+ spawning_server = chrome_test_server_spawner.SpawningServer(
+ 0, SSHPortForwarder(target), test_concurrency * 2)
+ forwarded_port = common.ConnectPortForwardingTask(
+ target, spawning_server.server_port)
+ spawning_server.Start()
+
+ logging.debug('Test server listening for connections (port=%d)' %
+ spawning_server.server_port)
+ logging.debug('Forwarded port is %d' % forwarded_port)
+
+ config_file = tempfile.NamedTemporaryFile(delete=True)
+
+ # Clean up the config JSON to only pass ports. See https://crbug.com/810209 .
+ config_file.write(json.dumps({
+ 'name': 'testserver',
+ 'address': '127.0.0.1',
+ 'spawner_url_base': 'http://localhost:%d' % forwarded_port
+ }))
+
+ config_file.flush()
+ target.PutFile(config_file.name, '/tmp/net-test-server-config')
+
+ return spawning_server
diff --git a/deps/v8/build/fuchsia/qemu_target.py b/deps/v8/build/fuchsia/qemu_target.py
new file mode 100644
index 0000000000..168364acfb
--- /dev/null
+++ b/deps/v8/build/fuchsia/qemu_target.py
@@ -0,0 +1,178 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Implements commands for running and interacting with Fuchsia on QEMU."""
+
+import boot_data
+import common
+import logging
+import target
+import os
+import platform
+import socket
+import subprocess
+import sys
+import tempfile
+import time
+
+from common import GetQemuRootForPlatform, EnsurePathExists
+
+
+# Virtual networking configuration data for QEMU.
+GUEST_NET = '192.168.3.0/24'
+GUEST_IP_ADDRESS = '192.168.3.9'
+HOST_IP_ADDRESS = '192.168.3.2'
+GUEST_MAC_ADDRESS = '52:54:00:63:5e:7b'
+
+
+class QemuTarget(target.Target):
+ def __init__(self, output_dir, target_cpu, cpu_cores, system_log_file,
+ require_kvm, ram_size_mb=2048):
+ """output_dir: The directory which will contain the files that are
+ generated to support the QEMU deployment.
+ target_cpu: The emulated target CPU architecture.
+ Can be 'x64' or 'arm64'."""
+ super(QemuTarget, self).__init__(output_dir, target_cpu)
+ self._qemu_process = None
+ self._ram_size_mb = ram_size_mb
+ self._system_log_file = system_log_file
+ self._cpu_cores = cpu_cores
+ self._require_kvm = require_kvm
+
+ def __enter__(self):
+ return self
+
+ # Used by the context manager to ensure that QEMU is killed when the Python
+ # process exits.
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.Shutdown();
+
+ def Start(self):
+ qemu_path = os.path.join(GetQemuRootForPlatform(), 'bin',
+ 'qemu-system-' + self._GetTargetSdkLegacyArch())
+ kernel_args = boot_data.GetKernelArgs(self._output_dir)
+
+ # TERM=dumb tells the guest OS to not emit ANSI commands that trigger
+ # noisy ANSI spew from the user's terminal emulator.
+ kernel_args.append('TERM=dumb')
+
+ # Enable logging to the serial port. This is a temporary fix to investigate
+ # the root cause for https://crbug.com/869753 .
+ kernel_args.append('kernel.serial=legacy')
+
+ qemu_command = [qemu_path,
+ '-m', str(self._ram_size_mb),
+ '-nographic',
+ '-kernel', EnsurePathExists(
+ boot_data.GetTargetFile(self._GetTargetSdkArch(),
+ 'qemu-kernel.bin')),
+ '-initrd', EnsurePathExists(
+ boot_data.GetBootImage(self._output_dir, self._GetTargetSdkArch())),
+ '-smp', str(self._cpu_cores),
+
+ # Attach the blobstore and data volumes. Use snapshot mode to discard
+ # any changes.
+ '-snapshot',
+ '-drive', 'file=%s,format=qcow2,if=none,id=blobstore,snapshot=on' %
+ EnsurePathExists(
+ os.path.join(self._output_dir, 'fvm.blk.qcow2')),
+ '-device', 'virtio-blk-pci,drive=blobstore',
+
+ # Use stdio for the guest OS only; don't attach the QEMU interactive
+ # monitor.
+ '-serial', 'stdio',
+ '-monitor', 'none',
+
+ '-append', ' '.join(kernel_args)
+ ]
+
+ # Configure the machine to emulate, based on the target architecture.
+ if self._target_cpu == 'arm64':
+ qemu_command.extend([
+ '-machine','virt',
+ ])
+ netdev_type = 'virtio-net-pci'
+ else:
+ qemu_command.extend([
+ '-machine', 'q35',
+ ])
+ netdev_type = 'e1000'
+
+ # Configure the CPU to emulate.
+ # On Linux, we can enable lightweight virtualization (KVM) if the host and
+ # guest architectures are the same.
+ enable_kvm = self._require_kvm or (sys.platform.startswith('linux') and (
+ (self._target_cpu == 'arm64' and platform.machine() == 'aarch64') or
+ (self._target_cpu == 'x64' and platform.machine() == 'x86_64')) and
+ os.access('/dev/kvm', os.R_OK | os.W_OK))
+ if enable_kvm:
+ qemu_command.extend(['-enable-kvm', '-cpu', 'host,migratable=no'])
+ else:
+ logging.warning('Unable to launch QEMU with KVM acceleration.')
+ if self._target_cpu == 'arm64':
+ qemu_command.extend(['-cpu', 'cortex-a53'])
+ else:
+ qemu_command.extend(['-cpu', 'Haswell,+smap,-check,-fsgsbase'])
+
+ # Configure virtual network. It is used in the tests to connect to
+ # testserver running on the host.
+ netdev_config = 'user,id=net0,net=%s,dhcpstart=%s,host=%s' % \
+ (GUEST_NET, GUEST_IP_ADDRESS, HOST_IP_ADDRESS)
+
+ self._host_ssh_port = common.GetAvailableTcpPort()
+ netdev_config += ",hostfwd=tcp::%s-:22" % self._host_ssh_port
+ qemu_command.extend([
+ '-netdev', netdev_config,
+ '-device', '%s,netdev=net0,mac=%s' % (netdev_type, GUEST_MAC_ADDRESS),
+ ])
+
+ # We pass a separate stdin stream to qemu. Sharing stdin across processes
+ # leads to flakiness due to the OS prematurely killing the stream and the
+ # Python script panicking and aborting.
+ # The precise root cause is still nebulous, but this fix works.
+ # See crbug.com/741194.
+ logging.debug('Launching QEMU.')
+ logging.debug(' '.join(qemu_command))
+
+ # Zircon sends debug logs to serial port (see kernel.serial=legacy flag
+ # above). Serial port is redirected to a file through QEMU stdout.
+ # Unless a |_system_log_file| is explicitly set, we output the kernel serial
+ # log to a temporary file, and print that out if we are unable to connect to
+ # the QEMU guest, to make it easier to diagnose connectivity issues.
+ temporary_system_log_file = None
+ if self._system_log_file:
+ stdout = self._system_log_file
+ stderr = subprocess.STDOUT
+ else:
+ temporary_system_log_file = tempfile.NamedTemporaryFile('w')
+ stdout = temporary_system_log_file
+ stderr = sys.stderr
+
+ self._qemu_process = subprocess.Popen(qemu_command, stdin=open(os.devnull),
+ stdout=stdout, stderr=stderr)
+ try:
+ self._WaitUntilReady();
+ except target.FuchsiaTargetException:
+ if temporary_system_log_file:
+ logging.info("Kernel logs:\n" +
+ open(temporary_system_log_file.name, 'r').read())
+ raise
+
+ def Shutdown(self):
+ if self._IsQemuStillRunning():
+ logging.info('Shutting down QEMU.')
+ self._qemu_process.kill()
+
+ def _IsQemuStillRunning(self):
+ if not self._qemu_process:
+ return False
+ return os.waitpid(self._qemu_process.pid, os.WNOHANG)[0] == 0
+
+ def _GetEndpoint(self):
+ if not self._IsQemuStillRunning():
+ raise Exception('QEMU quit unexpectedly.')
+ return ('localhost', self._host_ssh_port)
+
+ def _GetSshConfigPath(self):
+ return boot_data.GetSSHConfigPath(self._output_dir)
diff --git a/deps/v8/build/fuchsia/qemu_target_test.py b/deps/v8/build/fuchsia/qemu_target_test.py
new file mode 100755
index 0000000000..da596ee5b0
--- /dev/null
+++ b/deps/v8/build/fuchsia/qemu_target_test.py
@@ -0,0 +1,58 @@
+#!/usr/bin/python
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import qemu_target
+import shutil
+import subprocess
+import tempfile
+import time
+import unittest
+
+TEST_PAYLOAD = "Let's get this payload across the finish line!"
+
+tmpdir = tempfile.mkdtemp()
+
+# Register the target with the context manager so that it always gets
+# torn down on process exit. Otherwise there might be lingering QEMU instances
+# if Python crashes or is interrupted.
+with qemu_target.QemuTarget(tmpdir, 'x64') as target:
+ class TestQemuTarget(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ target.Start()
+
+ @classmethod
+ def tearDownClass(cls):
+ target.Shutdown()
+ shutil.rmtree(tmpdir)
+
+ def testCopyBidirectional(self):
+ tmp_path = tmpdir + "/payload"
+ with open(tmp_path, "w") as tmpfile:
+ tmpfile.write(TEST_PAYLOAD)
+ target.PutFile(tmp_path, '/tmp/payload')
+
+ tmp_path_roundtrip = tmp_path + ".roundtrip"
+ target.GetFile('/tmp/payload', tmp_path_roundtrip)
+ with open(tmp_path_roundtrip) as roundtrip:
+ self.assertEqual(TEST_PAYLOAD, roundtrip.read())
+
+ def testRunCommand(self):
+ self.assertEqual(0, target.RunCommand(['true']))
+ self.assertEqual(1, target.RunCommand(['false']))
+
+ def testRunCommandPiped(self):
+ proc = target.RunCommandPiped(['cat'],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ proc.stdin.write(TEST_PAYLOAD)
+ proc.stdin.flush()
+ proc.stdin.close()
+ self.assertEqual(TEST_PAYLOAD, proc.stdout.readline())
+ proc.kill()
+
+
+ if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/build/fuchsia/remote_cmd.py b/deps/v8/build/fuchsia/remote_cmd.py
new file mode 100644
index 0000000000..cabdf1631d
--- /dev/null
+++ b/deps/v8/build/fuchsia/remote_cmd.py
@@ -0,0 +1,134 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import subprocess
+import sys
+import threading
+
+_SSH = ['ssh']
+_SCP = ['scp', '-C'] # Use gzip compression.
+_SSH_LOGGER = logging.getLogger('ssh')
+
+COPY_TO_TARGET = 0
+COPY_FROM_TARGET = 1
+
+
+def _IsLinkLocalIPv6(hostname):
+ return hostname.startswith('fe80::')
+
+# Adds square brackets around |address| if it is an IPv6 address, as
+# required by the ssh/scp command line syntax.
+def _EscapeIfIPv6Address(address):
+ if ':' in address:
+ return '[' + address + ']'
+ else:
+ return address
+
+class CommandRunner(object):
+ """Helper class used to execute commands on a remote host over SSH."""
+
+ def __init__(self, config_path, host, port):
+ """Creates a CommandRunner that connects to the specified |host| and |port|
+ using the ssh config at the specified |config_path|.
+
+ config_path: Full path to SSH configuration.
+ host: The hostname or IP address of the remote host.
+ port: The port to connect to."""
+
+ self._config_path = config_path
+ self._host = host
+ self._port = port
+
+ def _GetSshCommandLinePrefix(self):
+ return _SSH + ['-F', self._config_path, self._host, '-p', str(self._port)]
+
+ def RunCommand(self, command, silent, timeout_secs=None):
+ """Executes an SSH command on the remote host and blocks until completion.
+
+ command: A list of strings containing the command and its arguments.
+ silent: If true, suppresses all output from 'ssh'.
+ timeout_secs: If set, limits the amount of time that |command| may run.
+ Commands which exceed the timeout are killed.
+
+ Returns the exit code from the remote command."""
+
+ ssh_command = self._GetSshCommandLinePrefix() + command
+ _SSH_LOGGER.debug('ssh exec: ' + ' '.join(ssh_command))
+ if silent:
+ devnull = open(os.devnull, 'w')
+ process = subprocess.Popen(ssh_command, stderr=devnull, stdout=devnull)
+ else:
+ process = subprocess.Popen(ssh_command)
+
+ timeout_timer = None
+ if timeout_secs:
+ timeout_timer = threading.Timer(timeout_secs, process.kill)
+ timeout_timer.start()
+
+ process.wait()
+
+ if timeout_timer:
+ timeout_timer.cancel()
+
+ if process.returncode == -9:
+ raise Exception('Timeout when executing \"%s\".' % ' '.join(command))
+
+ return process.returncode
+
+
+ def RunCommandPiped(self, command = None, ssh_args = None, **kwargs):
+ """Executes an SSH command on the remote host and returns a process object
+ with access to the command's stdio streams. Does not block.
+
+ command: A list of strings containing the command and its arguments.
+ ssh_args: Arguments that will be passed to SSH.
+ kwargs: A dictionary of parameters to be passed to subprocess.Popen().
+ The parameters can be used to override stdin and stdout, for
+ example.
+
+ Returns a Popen object for the command."""
+
+ if not command:
+ command = []
+ if not ssh_args:
+ ssh_args = []
+
+ ssh_command = self._GetSshCommandLinePrefix() + ssh_args + ['--'] + command
+ _SSH_LOGGER.debug(' '.join(ssh_command))
+ return subprocess.Popen(ssh_command, **kwargs)
+
+
+ def RunScp(self, sources, dest, direction, recursive=False):
+ """Copies a file to or from a remote host using SCP and blocks until
+ completion.
+
+ sources: Paths of the files to be copied.
+ dest: The path that |source| will be copied to.
+ direction: Indicates whether the file should be copied to
+ or from the remote side.
+ Valid values are COPY_TO_TARGET or COPY_FROM_TARGET.
+ recursive: If true, performs a recursive copy.
+
+ Function will raise an assertion if a failure occurred."""
+
+ scp_command = _SCP[:]
+ if _SSH_LOGGER.getEffectiveLevel() == logging.DEBUG:
+ scp_command.append('-v')
+ if recursive:
+ scp_command.append('-r')
+
+ host = _EscapeIfIPv6Address(self._host)
+
+ if direction == COPY_TO_TARGET:
+ dest = "%s:%s" % (host, dest)
+ else:
+ sources = ["%s:%s" % (host, source) for source in sources]
+
+ scp_command += ['-F', self._config_path, '-P', str(self._port)]
+ scp_command += sources
+ scp_command += [dest]
+
+ _SSH_LOGGER.debug(' '.join(scp_command))
+ subprocess.check_call(scp_command, stdout=open(os.devnull, 'w'))
diff --git a/deps/v8/build/fuchsia/run_package.py b/deps/v8/build/fuchsia/run_package.py
new file mode 100644
index 0000000000..e8ea07d2f7
--- /dev/null
+++ b/deps/v8/build/fuchsia/run_package.py
@@ -0,0 +1,224 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Contains a helper function for deploying and executing a packaged
+executable on a Target."""
+
+import common
+import hashlib
+import logging
+import multiprocessing
+import os
+import re
+import select
+import subprocess
+import sys
+import time
+import threading
+import uuid
+
+from symbolizer import SymbolizerFilter
+
+FAR = os.path.join(common.SDK_ROOT, 'tools', 'far')
+
+# Amount of time to wait for the termination of the system log output thread.
+_JOIN_TIMEOUT_SECS = 5
+
+
def _AttachKernelLogReader(target):
  """Attaches a kernel log reader as a long-running SSH task.

  target: The Target whose kernel log ('dlog -f') should be followed.

  Returns the Popen object for the remote 'dlog -f' command; the kernel log
  stream is available on its stdout."""

  logging.info('Attaching kernel logger.')
  # NOTE(review): the devnull handle passed as stdin is never closed.
  return target.RunCommandPiped(['dlog', '-f'], stdin=open(os.devnull, 'r'),
                                stdout=subprocess.PIPE)
+
+
class MergedInputStream(object):
  """Merges a number of input streams into a UNIX pipe on a dedicated thread.
  Terminates when the file descriptor of the primary stream (the first in
  the sequence) is closed."""

  def __init__(self, streams):
    # streams: non-empty sequence of file-like objects with fileno();
    # streams[0] is the "primary" stream whose EOF ends the merge.
    assert len(streams) > 0
    self._streams = streams
    self._read_pipe, write_pipe = os.pipe()
    # Disable buffering for the stream to make sure there is no delay in logs.
    self._output_stream = os.fdopen(write_pipe, 'w', 0)
    self._thread = threading.Thread(target=self._Run)

  def Start(self):
    """Returns a file descriptor to the merged output stream."""

    self._thread.start();
    return self._read_pipe

  def _Run(self):
    # Thread body: multiplexes every input stream into |self._output_stream|
    # until the primary stream closes, then drains remaining buffered data.
    streams_by_fd = {}
    primary_fd = self._streams[0].fileno()
    for s in self._streams:
      streams_by_fd[s.fileno()] = s

    # Set when the primary FD is closed. Input from other FDs will continue to
    # be processed until select() runs dry.
    # NOTE(review): |flush| is never assigned True anywhere below, so the
    # 'len(rlist) == 0 and flush' early exit appears unreachable — confirm
    # whether it was meant to be set when |primary_fd| closes.
    flush = False

    # The lifetime of the MergedInputStream is bound to the lifetime of
    # |primary_fd|.
    while primary_fd:
      # When not flushing: block until data is read or an exception occurs.
      rlist, _, xlist = select.select(streams_by_fd, [], streams_by_fd)

      if len(rlist) == 0 and flush:
        break

      # Streams in an exceptional condition are dropped from the merge.
      for fileno in xlist:
        del streams_by_fd[fileno]
        if fileno == primary_fd:
          primary_fd = None

      for fileno in rlist:
        line = streams_by_fd[fileno].readline()
        if line:
          # NOTE(review): readline() normally retains the trailing newline, so
          # appending another '\n' here seems to double-space the merged
          # output — confirm this is intended.
          self._output_stream.write(line + '\n')
        else:
          # Empty read means EOF on this stream.
          del streams_by_fd[fileno]
          if fileno == primary_fd:
            primary_fd = None

    # Flush the streams by executing nonblocking reads from the input file
    # descriptors until no more data is available, or all the streams are
    # closed.
    while streams_by_fd:
      rlist, _, _ = select.select(streams_by_fd, [], [], 0)

      if not rlist:
        break

      for fileno in rlist:
        line = streams_by_fd[fileno].readline()
        if line:
          self._output_stream.write(line + '\n')
        else:
          del streams_by_fd[fileno]
+
+
+def _GetComponentUri(package_name):
+ return 'fuchsia-pkg://fuchsia.com/%s#meta/%s.cmx' % (package_name,
+ package_name)
+
+
class RunPackageArgs:
  """RunPackage() configuration arguments structure.

  install_only: If set, skips the package execution step.
  symbolizer_config: A newline delimited list of source files contained
      in the package. Omitting this parameter will disable symbolization.
  system_logging: If set, connects a system log reader to the target.
  target_staging_path: Path to which package FARs will be staged during
      installation. Defaults to staging into '/data'.
  """

  def __init__(self):
    # Defaults describe a plain run: execute the package, no system log,
    # stage into /data, no symbolization config.
    self.install_only = False
    self.symbolizer_config = None
    self.system_logging = False
    self.target_staging_path = '/data'

  @staticmethod
  def FromCommonArgs(args):
    """Builds a RunPackageArgs from parsed common command-line arguments."""
    config = RunPackageArgs()
    config.install_only = args.install_only
    config.system_logging = args.include_system_logs
    config.target_staging_path = args.target_staging_path
    return config
+
+
def _DrainStreamToStdout(stream, quit_event):
  """Outputs the contents of |stream| until |quit_event| is set.

  stream: A file-like object (with fileno) polled every 0.1s and echoed.
  quit_event: Event checked between polls; setting it stops the drain."""

  while not quit_event.is_set():
    # 0.1s timeout keeps the loop responsive to |quit_event|.
    rlist, _, _ = select.select([ stream ], [], [], 0.1)
    if rlist:
      line = rlist[0].readline()
      if not line:
        # EOF: stop draining even though |quit_event| was never set.
        return
      print line.rstrip()
+
+
def RunPackage(output_dir, target, package_path, package_name,
               package_deps, package_args, args):
  """Installs the Fuchsia package at |package_path| on the target,
  executes it with |package_args|, and symbolizes its output.

  output_dir: The path containing the build output files.
      NOTE(review): currently unused inside this function.
  target: The deployment Target object that will run the package.
  package_path: The path to the .far package file.
  package_name: The name of app specified by package metadata.
  package_deps: Paths of the .far files the package depends on.
  package_args: The arguments which will be passed to the Fuchsia process.
  args: Structure of arguments to configure how the package will be run.

  Returns the exit code of the remote package process, or None when
  args.install_only is set."""

  system_logger = (
      _AttachKernelLogReader(target) if args.system_logging else None)
  try:
    if system_logger:
      # Spin up a thread to asynchronously dump the system log to stdout
      # for easier diagnoses of early, pre-execution failures.
      log_output_quit_event = multiprocessing.Event()
      log_output_thread = threading.Thread(
          target=lambda: _DrainStreamToStdout(system_logger.stdout,
                                              log_output_quit_event))
      log_output_thread.daemon = True
      log_output_thread.start()

    target.InstallPackage(package_path, package_name, package_deps)

    if system_logger:
      # Stop the temporary drain thread; MergedInputStream takes over the
      # kernel log stream once the package is actually running.
      log_output_quit_event.set()
      log_output_thread.join(timeout=_JOIN_TIMEOUT_SECS)

    if args.install_only:
      logging.info('Installation complete.')
      return

    logging.info('Running application.')
    command = ['run', _GetComponentUri(package_name)] + package_args
    process = target.RunCommandPiped(command,
                                     stdin=open(os.devnull, 'r'),
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT)

    if system_logger:
      # Interleave the process output with the kernel log.
      output_fd = MergedInputStream([process.stdout,
                                     system_logger.stdout]).Start()
    else:
      output_fd = process.stdout.fileno()

    # Run the log data through the symbolizer process.
    # Each package's ids.txt is expected next to its .far file.
    build_ids_paths = map(
        lambda package_path: os.path.join(
            os.path.dirname(package_path), 'ids.txt'),
        [package_path] + package_deps)
    output_stream = SymbolizerFilter(output_fd, build_ids_paths)

    for next_line in output_stream:
      print next_line.rstrip()

    process.wait()
    if process.returncode == 0:
      logging.info('Process exited normally with status code 0.')
    else:
      # The test runner returns an error status code if *any* tests fail,
      # so we should proceed anyway.
      logging.warning('Process exited with status code %d.' %
                      process.returncode)

  finally:
    # Always tear down the kernel log reader, even on exceptions.
    if system_logger:
      logging.info('Terminating kernel log reader.')
      log_output_quit_event.set()
      log_output_thread.join()
      system_logger.kill()

  return process.returncode
diff --git a/deps/v8/build/fuchsia/symbolizer.py b/deps/v8/build/fuchsia/symbolizer.py
new file mode 100644
index 0000000000..0b7c39e918
--- /dev/null
+++ b/deps/v8/build/fuchsia/symbolizer.py
@@ -0,0 +1,43 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import subprocess
+
+from common import SDK_ROOT
+
+
def SymbolizerFilter(input_fd, build_ids_files):
  """Symbolizes an output stream from a process.

  input_fd: A file descriptor of the stream to be symbolized.
  build_ids_files: Paths of ids.txt files which map build IDs to
      unstripped binaries on the filesystem.

  Returns a generator that yields symbolized process output."""

  llvm_symbolizer_path = os.path.join(SDK_ROOT, os.pardir, os.pardir,
                                      'llvm-build', 'Release+Asserts', 'bin',
                                      'llvm-symbolizer')
  symbolizer_bin = os.path.join(SDK_ROOT, 'tools', 'symbolize')

  # Assemble the symbolizer command line, then register one '-ids' mapping
  # per ids.txt file.
  symbolizer_cmd = [symbolizer_bin, '-ids-rel',
                    '-llvm-symbolizer', llvm_symbolizer_path,
                    '-build-id-dir', os.path.join(SDK_ROOT, '.build-id')]
  for build_ids_file in build_ids_files:
    symbolizer_cmd.extend(['-ids', build_ids_file])

  logging.info('Running "%s".' % ' '.join(symbolizer_cmd))
  symbolizer_proc = subprocess.Popen(
      symbolizer_cmd,
      stdout=subprocess.PIPE,
      stdin=input_fd,
      close_fds=True)

  # Stream symbolized lines to the caller until the symbolizer hits EOF.
  for line in iter(symbolizer_proc.stdout.readline, ''):
    yield line

  symbolizer_proc.wait()
diff --git a/deps/v8/build/fuchsia/target.py b/deps/v8/build/fuchsia/target.py
new file mode 100644
index 0000000000..a5a5d11c4b
--- /dev/null
+++ b/deps/v8/build/fuchsia/target.py
@@ -0,0 +1,346 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import boot_data
+import common
+import json
+import logging
+import os
+import remote_cmd
+import shutil
+import subprocess
+import sys
+import tempfile
+import time
+
+
+_SHUTDOWN_CMD = ['dm', 'poweroff']
+_ATTACH_MAX_RETRIES = 10
+_ATTACH_RETRY_INTERVAL = 1
+
+_PM = os.path.join(common.SDK_ROOT, 'tools', 'pm')
+_REPO_NAME = 'chrome_runner'
+
+# Amount of time to wait for Amber to complete package installation, as a
+# mitigation against hangs due to amber/network-related failures.
+_INSTALL_TIMEOUT_SECS = 5 * 60
+
+
+def _GetPackageInfo(package_path):
+ """Returns a tuple with the name and version of a package."""
+
+ # Query the metadata file which resides next to the package file.
+ package_info = json.load(
+ open(os.path.join(os.path.dirname(package_path), 'package')))
+ return (package_info['name'], package_info['version'])
+
+
def _PublishPackage(tuf_root, package_path):
  """Publishes a combined FAR package to a TUF repository root.

  tuf_root: Path of the TUF repository to publish into.
  package_path: Path of the .far package file to publish.

  Raises subprocess.CalledProcessError if 'pm publish' fails."""

  # '-a' archive mode; '-vt'/'-v' enable verbose/timestamped output, which is
  # folded into stderr->stdout for easier capture.
  subprocess.check_call(
      [_PM, 'publish', '-a', '-f', package_path, '-r', tuf_root, '-vt', '-v'],
      stderr=subprocess.STDOUT)
+
+
+class _MapRemoteDataPathForPackage:
+ """Callable object which remaps /data paths to their package-specific
+ locations."""
+
+ def __init__(self, package_name, package_version):
+ self.data_path = '/data/r/sys/fuchsia.com:{0}:{1}#meta:{0}.cmx'.format(
+ package_name, package_version)
+
+ def __call__(self, path):
+ if path[:5] == '/data':
+ return self.data_path + path[5:]
+ return path
+
+
class FuchsiaTargetException(Exception):
  """Raised when a Fuchsia target cannot be provisioned or reached."""
  def __init__(self, message):
    super(FuchsiaTargetException, self).__init__(message)
+
+
class Target(object):
  """Base class representing a Fuchsia deployment target."""

  def __init__(self, output_dir, target_cpu):
    self._output_dir = output_dir      # Host-side build output directory.
    self._started = False              # Set by _WaitUntilReady() on connect.
    self._dry_run = False
    self._target_cpu = target_cpu      # 'arm64' or 'x64'.
    self._command_runner = None        # Created lazily by GetCommandRunner().

  # Functions used by the Python context manager for teardown.
  # NOTE(review): __exit__ returns |self|, which is truthy, so any exception
  # raised inside a 'with' body using this target is silently suppressed —
  # confirm this is intended; returning None/False is conventional.
  def __enter__(self):
    return self
  def __exit__(self, exc_type, exc_val, exc_tb):
    return self

  def Start(self):
    """Handles the instantiation and connection process for the Fuchsia
    target instance."""

    pass

  def IsStarted(self):
    """Returns True if the Fuchsia target instance is ready to accept
    commands."""

    return self._started

  def IsNewInstance(self):
    """Returns True if the connected target instance is newly provisioned."""

    return True

  def GetCommandRunner(self):
    """Returns CommandRunner that can be used to execute commands on the
    target. Most clients should prefer RunCommandPiped() and RunCommand()."""

    self._AssertIsStarted()

    # NOTE(review): '== None' works but 'is None' is the idiomatic check.
    if self._command_runner == None:
      host, port = self._GetEndpoint()
      self._command_runner = \
          remote_cmd.CommandRunner(self._GetSshConfigPath(), host, port)

    return self._command_runner

  def RunCommandPiped(self, command, **kwargs):
    """Starts a remote command and immediately returns a Popen object for the
    command. The caller may interact with the streams, inspect the status code,
    wait on command termination, etc.

    command: A list of strings representing the command and arguments.
    kwargs: A dictionary of parameters to be passed to subprocess.Popen().
            The parameters can be used to override stdin and stdout, for
            example.

    Returns: a Popen object.

    Note: method does not block."""

    logging.debug('running (non-blocking) \'%s\'.' % ' '.join(command))
    return self.GetCommandRunner().RunCommandPiped(command, **kwargs)

  def RunCommand(self, command, silent=False, timeout_secs=None):
    """Executes a remote command and waits for it to finish executing.

    command: A list of strings representing the command and arguments.
    silent: If True, the command's output is suppressed by the runner.
    timeout_secs: Optional command timeout in seconds.

    Returns the exit code of the command."""

    logging.debug('running \'%s\'.' % ' '.join(command))
    return self.GetCommandRunner().RunCommand(command, silent,
                                              timeout_secs=timeout_secs)

  def EnsurePackageDataDirectoryExists(self, package_name):
    """Ensures that the specified package's isolated /data directory exists."""
    # Version 0 is a placeholder; only the package name shapes the data root.
    return self.RunCommand(
        ['mkdir','-p',_MapRemoteDataPathForPackage(package_name, 0)('/data')])

  def PutFile(self, source, dest, recursive=False, for_package=None):
    """Copies a file from the local filesystem to the target filesystem.

    source: The path of the file being copied.
    dest: The path on the remote filesystem which will be copied to.
    recursive: If true, performs a recursive copy.
    for_package: If specified, /data in the |dest| is mapped to the package's
                 isolated /data location.
    """

    assert type(source) is str
    self.PutFiles([source], dest, recursive, for_package)

  def PutFiles(self, sources, dest, recursive=False, for_package=None):
    """Copies files from the local filesystem to the target filesystem.

    sources: List of local file paths to copy from, or a single path.
    dest: The path on the remote filesystem which will be copied to.
    recursive: If true, performs a recursive copy.
    for_package: If specified, /data in the |dest| is mapped to the package's
                 isolated /data location.
    """

    assert type(sources) is tuple or type(sources) is list
    if for_package:
      self.EnsurePackageDataDirectoryExists(for_package)
      dest = _MapRemoteDataPathForPackage(for_package, 0)(dest)
    logging.debug('copy local:%s => remote:%s' % (sources, dest))
    self.GetCommandRunner().RunScp(sources, dest, remote_cmd.COPY_TO_TARGET,
                                   recursive)

  def GetFile(self, source, dest, for_package=None):
    """Copies a file from the target filesystem to the local filesystem.

    source: The path of the file being copied.
    dest: The path on the local filesystem which will be copied to.
    for_package: If specified, /data in paths in |sources| is mapped to the
                 package's isolated /data location.
    """
    assert type(source) is str
    self.GetFiles([source], dest, for_package)

  def GetFiles(self, sources, dest, for_package=None):
    """Copies files from the target filesystem to the local filesystem.

    sources: List of remote file paths to copy.
    dest: The path on the local filesystem which will be copied to.
    for_package: If specified, /data in paths in |sources| is mapped to the
                 package's isolated /data location.
    """
    assert type(sources) is tuple or type(sources) is list
    self._AssertIsStarted()
    if for_package:
      sources = map(_MapRemoteDataPathForPackage(for_package, 0), sources)
    logging.debug('copy remote:%s => local:%s' % (sources, dest))
    return self.GetCommandRunner().RunScp(sources, dest,
                                          remote_cmd.COPY_FROM_TARGET)

  def _GetEndpoint(self):
    """Returns a (host, port) tuple for the SSH connection to the target."""
    raise NotImplementedError

  def _GetTargetSdkArch(self):
    """Returns the Fuchsia SDK architecture name for the target CPU."""
    if self._target_cpu == 'arm64' or self._target_cpu == 'x64':
      return self._target_cpu
    raise FuchsiaTargetException('Unknown target_cpu:' + self._target_cpu)

  def _AssertIsStarted(self):
    # Guard for methods that require a live SSH connection.
    assert self.IsStarted()

  def _WaitUntilReady(self, retries=_ATTACH_MAX_RETRIES):
    # Polls the SSH endpoint by running 'true' remotely, sleeping
    # _ATTACH_RETRY_INTERVAL seconds between attempts, until it responds
    # or |retries|+1 attempts are exhausted.
    logging.info('Connecting to Fuchsia using SSH.')

    for retry in xrange(retries + 1):
      host, port = self._GetEndpoint()
      runner = remote_cmd.CommandRunner(self._GetSshConfigPath(), host, port)
      if runner.RunCommand(['true'], True) == 0:
        logging.info('Connected!')
        self._started = True
        return True
      time.sleep(_ATTACH_RETRY_INTERVAL)

    logging.error('Timeout limit reached.')

    raise FuchsiaTargetException('Couldn\'t connect using SSH.')

  # NOTE(review): declared with a required |path| parameter, yet call sites in
  # this class invoke self._GetSshConfigPath() with no arguments; subclasses
  # presumably override with a zero-argument version — confirm.
  def _GetSshConfigPath(self, path):
    raise NotImplementedError

  # TODO: remove this once all instances of architecture names have been
  # converted to the new naming pattern.
  def _GetTargetSdkLegacyArch(self):
    """Returns the Fuchsia SDK architecture name for the target CPU."""
    if self._target_cpu == 'arm64':
      return 'aarch64'
    elif self._target_cpu == 'x64':
      return 'x86_64'
    raise Exception('Unknown target_cpu %s:' % self._target_cpu)


  def InstallPackage(self, package_path, package_name, package_deps):
    """Installs a package and it's dependencies on the device. If the package is
    already installed then it will be updated to the new version.

    package_path: Path to the .far file to be installed.
    package_name: Package name.
    package_deps: List of .far files with the packages that the main package
                  depends on. These packages are installed or updated as well.
    """
    # NOTE(review): if tempfile.mkdtemp() or 'pm newrepo' raises, the finally
    # block references |tuf_root|/|pm_serve_task| before assignment (NameError)
    # and unregisters a repository that was never registered — consider
    # initializing these names before entering the try block.
    try:
      tuf_root = tempfile.mkdtemp()
      pm_serve_task = None

      # Publish all packages to the serving TUF repository under |tuf_root|.
      subprocess.check_call([_PM, 'newrepo', '-repo', tuf_root])
      all_packages = [package_path] + package_deps
      for next_package_path in all_packages:
        _PublishPackage(tuf_root, next_package_path)

      # Serve the |tuf_root| using 'pm serve' and configure the target to pull
      # from it.
      serve_port = common.GetAvailableTcpPort()
      pm_serve_task = subprocess.Popen(
          [_PM, 'serve', '-d', os.path.join(tuf_root, 'repository'), '-l',
           ':%d' % serve_port, '-q'])
      remote_port = common.ConnectPortForwardingTask(self, serve_port, 0)
      self._RegisterAmberRepository(tuf_root, remote_port)

      # Install all packages.
      for next_package_path in all_packages:
        install_package_name, package_version = \
            _GetPackageInfo(next_package_path)
        logging.info('Installing %s version %s.' %
                     (install_package_name, package_version))
        return_code = self.RunCommand(['amberctl', 'get_up', '-n',
                                       install_package_name, '-v',
                                       package_version],
                                      timeout_secs=_INSTALL_TIMEOUT_SECS)
        if return_code != 0:
          raise Exception('Error while installing %s.' % install_package_name)

    finally:
      # Best-effort teardown of the temporary repository and serving task.
      self._UnregisterAmberRepository()
      if pm_serve_task:
        pm_serve_task.kill()
      shutil.rmtree(tuf_root)


  def _RegisterAmberRepository(self, tuf_repo, remote_port):
    """Configures a device to use a local TUF repository as an installation
    source for packages.
    |tuf_repo|: The host filesystem path to the TUF repository.
    |remote_port|: The reverse-forwarded port used to connect to instance of
                   `pm serve` that is serving the contents of |tuf_repo|."""

    # Extract the public signing key for inclusion in the config file.
    root_keys = []
    root_json_path = os.path.join(tuf_repo, 'repository', 'root.json')
    # NOTE(review): the handle returned by open() is never closed.
    root_json = json.load(open(root_json_path, 'r'))
    for root_key_id in root_json['signed']['roles']['root']['keyids']:
      root_keys.append({
          'Type': root_json['signed']['keys'][root_key_id]['keytype'],
          'Value': root_json['signed']['keys'][root_key_id]['keyval']['public']
      })

    # "pm serve" can automatically generate a "config.json" file at query time,
    # but the file is unusable because it specifies URLs with port
    # numbers that are unreachable from across the port forwarding boundary.
    # So instead, we generate our own config file with the forwarded port
    # numbers instead.
    config_file = open(os.path.join(tuf_repo, 'repository', 'repo_config.json'),
                       'w')
    json.dump({
        'ID': _REPO_NAME,
        'RepoURL': "http://127.0.0.1:%d" % remote_port,
        'BlobRepoURL': "http://127.0.0.1:%d/blobs" % remote_port,
        'RatePeriod': 10,
        'RootKeys': root_keys,
        'StatusConfig': {
            'Enabled': True
        },
        'Auto': True
    }, config_file)
    config_file.close()

    # Register the repo.
    return_code = self.RunCommand(
        [('amberctl rm_src -n %s; ' +
          'amberctl add_src -f http://127.0.0.1:%d/repo_config.json')
         % (_REPO_NAME, remote_port)])
    if return_code != 0:
      raise Exception('Error code %d when running amberctl.' % return_code)


  def _UnregisterAmberRepository(self):
    """Unregisters the Amber repository."""

    logging.debug('Unregistering Amber repository.')
    self.RunCommand(['amberctl', 'rm_src', '-n', _REPO_NAME])

    # Re-enable 'devhost' repo if it's present. This is useful for devices that
    # were booted with 'fx serve'.
    self.RunCommand(['amberctl', 'enable_src', '-n', 'devhost'], silent=True)
diff --git a/deps/v8/build/fuchsia/test_runner.py b/deps/v8/build/fuchsia/test_runner.py
new file mode 100755
index 0000000000..ca0c176341
--- /dev/null
+++ b/deps/v8/build/fuchsia/test_runner.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python
+#
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Deploys and runs a test package on a Fuchsia target."""
+
+import argparse
+import json
+import logging
+import os
+import socket
+import subprocess
+import sys
+import tempfile
+import time
+
+from common_args import AddCommonArgs, ConfigureLogging, GetDeploymentTargetForArgs
+from net_test_server import SetupTestServer
+from run_package import RunPackage, RunPackageArgs
+
+DEFAULT_TEST_CONCURRENCY = 4
+
+TEST_RESULT_PATH = '/data/test_summary.json'
+TEST_FILTER_PATH = '/data/test_filter.txt'
+
def main():
  """Parses arguments, deploys the test package to a Fuchsia target, runs it
  and returns the remote test process's exit code."""
  parser = argparse.ArgumentParser()
  AddCommonArgs(parser)
  parser.add_argument('--gtest_filter',
                      help='GTest filter to use in place of any default.')
  parser.add_argument('--gtest_repeat',
                      help='GTest repeat value to use. This also disables the '
                           'test launcher timeout.')
  parser.add_argument('--test-launcher-retry-limit',
                      help='Number of times that test suite will retry failing '
                           'tests. This is multiplicative with --gtest_repeat.')
  parser.add_argument('--gtest_break_on_failure', action='store_true',
                      default=False,
                      help='Should GTest break on failure; useful with '
                           '--gtest_repeat.')
  parser.add_argument('--single-process-tests', action='store_true',
                      default=False,
                      help='Runs the tests and the launcher in the same '
                           'process. Useful for debugging.')
  parser.add_argument('--test-launcher-batch-limit',
                      type=int,
                      help='Sets the limit of test batch to run in a single '
                           'process.')
  # --test-launcher-filter-file is specified relative to --output-directory,
  # so specifying type=os.path.* will break it.
  parser.add_argument('--test-launcher-filter-file',
                      default=None,
                      help='Override default filter file passed to target test '
                           'process. Set an empty path to disable filtering.')
  parser.add_argument('--test-launcher-jobs',
                      type=int,
                      help='Sets the number of parallel test jobs.')
  parser.add_argument('--test-launcher-summary-output',
                      help='Where the test launcher will output its json.')
  parser.add_argument('--enable-test-server', action='store_true',
                      default=False,
                      help='Enable Chrome test server spawner.')
  parser.add_argument('child_args', nargs='*',
                      help='Arguments for the test process.')
  parser.add_argument('--test-launcher-bot-mode', action='store_true',
                      default=False,
                      help='Informs the TestLauncher to that it should enable '
                           'special allowances for running on a test bot.')
  args = parser.parse_args()
  ConfigureLogging(args)

  # Default to no retries; a later '--test-launcher-retry-limit=N' appended
  # below (when the flag is passed) is expected to take precedence.
  child_args = ['--test-launcher-retry-limit=0']
  if args.single_process_tests:
    child_args.append('--single-process-tests')
  if args.test_launcher_bot_mode:
    child_args.append('--test-launcher-bot-mode')
  if args.test_launcher_batch_limit:
    child_args.append('--test-launcher-batch-limit=%d' %
                      args.test_launcher_batch_limit)

  # Fall back to DEFAULT_TEST_CONCURRENCY when --test-launcher-jobs is unset.
  test_concurrency = args.test_launcher_jobs \
      if args.test_launcher_jobs else DEFAULT_TEST_CONCURRENCY
  child_args.append('--test-launcher-jobs=%d' % test_concurrency)

  if args.gtest_filter:
    child_args.append('--gtest_filter=' + args.gtest_filter)
  if args.gtest_repeat:
    child_args.append('--gtest_repeat=' + args.gtest_repeat)
    # Repeating tests can take arbitrarily long; disable the launcher timeout.
    child_args.append('--test-launcher-timeout=-1')
  if args.test_launcher_retry_limit:
    child_args.append(
        '--test-launcher-retry-limit=' + args.test_launcher_retry_limit)
  if args.gtest_break_on_failure:
    child_args.append('--gtest_break_on_failure')
  if args.child_args:
    child_args.extend(args.child_args)

  if args.test_launcher_summary_output:
    # The summary is written on the device and fetched back after the run.
    child_args.append('--test-launcher-summary-output=' + TEST_RESULT_PATH)

  with GetDeploymentTargetForArgs(args) as target:
    target.Start()

    if args.test_launcher_filter_file:
      target.PutFile(args.test_launcher_filter_file, TEST_FILTER_PATH,
                     for_package=args.package_name)
      child_args.append('--test-launcher-filter-file=' + TEST_FILTER_PATH)

    test_server = None
    if args.enable_test_server:
      test_server = SetupTestServer(target, test_concurrency)

    run_package_args = RunPackageArgs.FromCommonArgs(args)
    returncode = RunPackage(
        args.output_directory, target, args.package, args.package_name,
        args.package_dep, child_args, run_package_args)

    if test_server:
      test_server.Stop()

    if args.test_launcher_summary_output:
      target.GetFile(TEST_RESULT_PATH, args.test_launcher_summary_output,
                     for_package=args.package_name)

    return returncode


if __name__ == '__main__':
  sys.exit(main())
diff --git a/deps/v8/build/fuchsia/update_sdk.py b/deps/v8/build/fuchsia/update_sdk.py
new file mode 100755
index 0000000000..f7d6115247
--- /dev/null
+++ b/deps/v8/build/fuchsia/update_sdk.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Updates the Fuchsia SDK to the given revision. Should be used in a 'hooks_os'
+entry so that it only runs when .gclient's target_os includes 'fuchsia'."""
+
+import os
+import re
+import shutil
+import subprocess
+import sys
+import tarfile
+import tempfile
+
+from common import GetHostOsFromPlatform, GetHostArchFromPlatform
+
+REPOSITORY_ROOT = os.path.abspath(os.path.join(
+ os.path.dirname(__file__), '..', '..'))
+sys.path.append(os.path.join(REPOSITORY_ROOT, 'build'))
+
+import find_depot_tools
+
+SDK_SUBDIRS = ["arch", "pkg", "qemu", "sysroot", "target",
+ "toolchain_libs", "tools"]
+
+EXTRA_SDK_HASH_PREFIX = ''
+
def GetSdkGeneration(hash):
  """Returns the GCS generation number of the SDK archive named by |hash|,
  or None if |hash| is empty or no generation number could be parsed.

  hash: SHA1 of the SDK archive in the platform bucket. (The parameter name
      shadows the builtin; kept for signature compatibility.)"""
  if not hash:
    return None

  cmd = [os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gsutil.py'), 'ls',
         '-L', GetBucketForPlatform() + hash]
  sdk_details = subprocess.check_output(cmd)
  # Raw string avoids invalid-escape warnings; \d+ (instead of \d*) prevents
  # an empty-digit match that would make int('') raise ValueError below.
  m = re.search(r'Generation:\s*(\d+)', sdk_details)
  if not m:
    return None
  return int(m.group(1))
+
+
def GetSdkHashForPlatform():
  """Returns the SHA1 of the SDK to use for the current host platform.

  Reads the checked-in '<platform>.sdk.sha1' file and, when
  EXTRA_SDK_HASH_PREFIX is set, the prefixed variant as well, and returns
  whichever hash has the newer GCS generation number.

  Returns None when no hash could be determined (the caller treats any falsy
  value as an error)."""
  filename = '{platform}.sdk.sha1'.format(platform = GetHostOsFromPlatform())

  # Get the hash of the SDK in chromium.
  sdk_hash = None
  hash_file = os.path.join(os.path.dirname(__file__), filename)
  with open(hash_file, 'r') as f:
    sdk_hash = f.read().strip()

  # Get the hash of the SDK with the extra prefix.
  extra_sdk_hash = None
  # Initialize so the error message below never hits a NameError when
  # EXTRA_SDK_HASH_PREFIX is empty (the original referenced an unbound name).
  extra_hash_file = None
  if EXTRA_SDK_HASH_PREFIX:
    extra_hash_file = os.path.join(os.path.dirname(__file__),
                                   EXTRA_SDK_HASH_PREFIX + filename)
    with open(extra_hash_file, 'r') as f:
      extra_sdk_hash = f.read().strip()

  # If both files are empty, report the error and return None. (The original
  # returned 1, which is truthy, so the caller's 'if not sdk_hash' check
  # passed and "1" was used as an SDK hash.)
  if not sdk_hash and not extra_sdk_hash:
    print >>sys.stderr, 'No SHA1 found in {} or {}'.format(
        hash_file, extra_hash_file)
    return None

  # Return the newer SDK based on the generation number.
  sdk_generation = GetSdkGeneration(sdk_hash)
  extra_sdk_generation = GetSdkGeneration(extra_sdk_hash)
  if extra_sdk_generation > sdk_generation:
    return extra_sdk_hash
  return sdk_hash
+
def GetBucketForPlatform():
  """Returns the GCS bucket URL holding core SDK archives for this host OS."""
  return 'gs://fuchsia/sdk/core/%s-amd64/' % GetHostOsFromPlatform()
+
+
def EnsureDirExists(path):
  # Creates |path| (with intermediate directories) if it does not exist yet.
  # NOTE(review): the exists()+makedirs() pair is racy under concurrent
  # invocation — a directory created in between would make makedirs() raise.
  if not os.path.exists(path):
    print 'Creating directory %s' % path
    os.makedirs(path)
+
+
# Removes previous SDK from the specified path if it's detected there.
def Cleanup(path):
  # The '.hash' marker file indicates a previously-unpacked SDK at |path|.
  hash_file = os.path.join(path, '.hash')
  if os.path.exists(hash_file):
    print 'Removing old SDK from %s.' % path
    # Only known SDK subdirectories are removed, leaving other files intact.
    for d in SDK_SUBDIRS:
      to_remove = os.path.join(path, d)
      if os.path.isdir(to_remove):
        shutil.rmtree(to_remove)
    os.remove(hash_file)
+
+
# Updates the modification timestamps of |path| and its contents to the
# current time.
def UpdateTimestampsRecursive(path):
  for root, dirs, files in os.walk(path):
    # Touch every entry under this root: files first, then directories.
    for entry in files + dirs:
      os.utime(os.path.join(root, entry), None)
+
+
def main():
  """Downloads and unpacks the Fuchsia SDK named by the checked-in hash,
  skipping work if the installed SDK already matches. Returns an exit code."""
  if len(sys.argv) != 1:
    print >>sys.stderr, 'usage: %s' % sys.argv[0]
    return 1

  # Quietly exit if there's no SDK support for this platform.
  # NOTE(review): the bare 'except:' is a deliberate best-effort guard, but it
  # also swallows unexpected errors (including KeyboardInterrupt); catching
  # Exception would be safer.
  try:
    GetHostOsFromPlatform()
  except:
    return 0

  # Previously SDK was unpacked in //third_party/fuchsia-sdk instead of
  # //third_party/fuchsia-sdk/sdk . Remove the old files if they are still
  # there.
  sdk_root = os.path.join(REPOSITORY_ROOT, 'third_party', 'fuchsia-sdk')
  Cleanup(sdk_root)

  sdk_hash = GetSdkHashForPlatform()
  if not sdk_hash:
    return 1

  output_dir = os.path.join(sdk_root, 'sdk')

  # If the installed hash matches, only regenerate the build file and exit.
  hash_filename = os.path.join(output_dir, '.hash')
  if os.path.exists(hash_filename):
    with open(hash_filename, 'r') as f:
      if f.read().strip() == sdk_hash:
        # Nothing to do. Generate sdk/BUILD.gn anyways, in case the conversion
        # script changed.
        subprocess.check_call([os.path.join(sdk_root, 'gen_build_defs.py')])
        return 0

  print 'Downloading SDK %s...' % sdk_hash

  if os.path.isdir(output_dir):
    shutil.rmtree(output_dir)

  # Download into a temporary file so a partial download never pollutes
  # |output_dir|; the file is removed in the finally block below.
  fd, tmp = tempfile.mkstemp()
  os.close(fd)

  try:
    cmd = [os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gsutil.py'),
           'cp', GetBucketForPlatform() + sdk_hash, tmp]
    subprocess.check_call(cmd)
    with open(tmp, 'rb') as f:
      EnsureDirExists(output_dir)
      tarfile.open(mode='r:gz', fileobj=f).extractall(path=output_dir)
  finally:
    os.remove(tmp)

  # Generate sdk/BUILD.gn.
  subprocess.check_call([os.path.join(sdk_root, 'gen_build_defs.py')])

  # Record the installed hash so the next run can no-op.
  with open(hash_filename, 'w') as f:
    f.write(sdk_hash)

  UpdateTimestampsRecursive(output_dir)

  return 0


if __name__ == '__main__':
  sys.exit(main())