summaryrefslogtreecommitdiff
path: root/deps/v8/build/android/pylib/local/device
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/build/android/pylib/local/device')
-rw-r--r--deps/v8/build/android/pylib/local/device/__init__.py3
-rw-r--r--deps/v8/build/android/pylib/local/device/local_device_environment.py300
-rw-r--r--deps/v8/build/android/pylib/local/device/local_device_gtest_run.py635
-rw-r--r--deps/v8/build/android/pylib/local/device/local_device_instrumentation_test_run.py965
-rwxr-xr-xdeps/v8/build/android/pylib/local/device/local_device_instrumentation_test_run_test.py69
-rw-r--r--deps/v8/build/android/pylib/local/device/local_device_linker_test_run.py75
-rw-r--r--deps/v8/build/android/pylib/local/device/local_device_monkey_test_run.py126
-rw-r--r--deps/v8/build/android/pylib/local/device/local_device_perf_test_run.py538
-rw-r--r--deps/v8/build/android/pylib/local/device/local_device_test_run.py251
-rwxr-xr-xdeps/v8/build/android/pylib/local/device/local_device_test_run_test.py174
10 files changed, 3136 insertions, 0 deletions
diff --git a/deps/v8/build/android/pylib/local/device/__init__.py b/deps/v8/build/android/pylib/local/device/__init__.py
new file mode 100644
index 0000000000..4d6aabb953
--- /dev/null
+++ b/deps/v8/build/android/pylib/local/device/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/deps/v8/build/android/pylib/local/device/local_device_environment.py b/deps/v8/build/android/pylib/local/device/local_device_environment.py
new file mode 100644
index 0000000000..4d7aa82ad0
--- /dev/null
+++ b/deps/v8/build/android/pylib/local/device/local_device_environment.py
@@ -0,0 +1,300 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import datetime
+import functools
+import logging
+import os
+import shutil
+import tempfile
+import threading
+
+import devil_chromium
+from devil import base_error
+from devil.android import device_blacklist
+from devil.android import device_errors
+from devil.android import device_utils
+from devil.android import logcat_monitor
+from devil.android.sdk import adb_wrapper
+from devil.utils import file_utils
+from devil.utils import parallelizer
+from pylib import constants
+from pylib.base import environment
+from pylib.utils import instrumentation_tracing
+from py_trace_event import trace_event
+
+
+LOGCAT_FILTERS = [
+ 'chromium:v',
+ 'cr_*:v',
+ 'DEBUG:I',
+ 'StrictMode:D',
+]
+
+
+def _DeviceCachePath(device):
+ file_name = 'device_cache_%s.json' % device.adb.GetDeviceSerial()
+ return os.path.join(constants.GetOutDirectory(), file_name)
+
+
+def handle_shard_failures(f):
+ """A decorator that handles device failures for per-device functions.
+
+ Args:
+ f: the function being decorated. The function must take at least one
+ argument, and that argument must be the device.
+ """
+ return handle_shard_failures_with(None)(f)
+
+
+# TODO(jbudorick): Refactor this to work as a decorator or context manager.
+def handle_shard_failures_with(on_failure):
+ """A decorator that handles device failures for per-device functions.
+
+ This calls on_failure in the event of a failure.
+
+ Args:
+ f: the function being decorated. The function must take at least one
+ argument, and that argument must be the device.
+ on_failure: A binary function to call on failure.
+ """
+ def decorator(f):
+ @functools.wraps(f)
+ def wrapper(dev, *args, **kwargs):
+ try:
+ return f(dev, *args, **kwargs)
+ except device_errors.CommandTimeoutError:
+ logging.exception('Shard timed out: %s(%s)', f.__name__, str(dev))
+ except device_errors.DeviceUnreachableError:
+ logging.exception('Shard died: %s(%s)', f.__name__, str(dev))
+ except base_error.BaseError:
+ logging.exception('Shard failed: %s(%s)', f.__name__, str(dev))
+ except SystemExit:
+ logging.exception('Shard killed: %s(%s)', f.__name__, str(dev))
+ raise
+ if on_failure:
+ on_failure(dev, f.__name__)
+ return None
+
+ return wrapper
+
+ return decorator
+
+
+class LocalDeviceEnvironment(environment.Environment):
+
+ def __init__(self, args, output_manager, _error_func):
+ super(LocalDeviceEnvironment, self).__init__(output_manager)
+ self._blacklist = (device_blacklist.Blacklist(args.blacklist_file)
+ if args.blacklist_file
+ else None)
+ self._device_serials = args.test_devices
+ self._devices_lock = threading.Lock()
+ self._devices = None
+ self._concurrent_adb = args.enable_concurrent_adb
+ self._enable_device_cache = args.enable_device_cache
+ self._logcat_monitors = []
+ self._logcat_output_dir = args.logcat_output_dir
+ self._logcat_output_file = args.logcat_output_file
+ self._max_tries = 1 + args.num_retries
+ self._preferred_abis = None
+ self._recover_devices = args.recover_devices
+ self._skip_clear_data = args.skip_clear_data
+ self._tool_name = args.tool
+ self._trace_output = None
+ if hasattr(args, 'trace_output'):
+ self._trace_output = args.trace_output
+ self._trace_all = None
+ if hasattr(args, 'trace_all'):
+ self._trace_all = args.trace_all
+
+ devil_chromium.Initialize(
+ output_directory=constants.GetOutDirectory(),
+ adb_path=args.adb_path)
+
+ # Some things such as Forwarder require ADB to be in the environment path.
+ adb_dir = os.path.dirname(adb_wrapper.AdbWrapper.GetAdbPath())
+ if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
+ os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH']
+
+ #override
+ def SetUp(self):
+ if self.trace_output and self._trace_all:
+ to_include = [r"pylib\..*", r"devil\..*", "__main__"]
+ to_exclude = ["logging"]
+ instrumentation_tracing.start_instrumenting(self.trace_output, to_include,
+ to_exclude)
+ elif self.trace_output:
+ self.EnableTracing()
+
+ # Must be called before accessing |devices|.
+ def SetPreferredAbis(self, abis):
+ assert self._devices is None
+ self._preferred_abis = abis
+
+ def _InitDevices(self):
+ device_arg = []
+ if self._device_serials:
+ device_arg = self._device_serials
+
+ self._devices = device_utils.DeviceUtils.HealthyDevices(
+ self._blacklist,
+ retries=5,
+ enable_usb_resets=True,
+ enable_device_files_cache=self._enable_device_cache,
+ default_retries=self._max_tries - 1,
+ device_arg=device_arg,
+ abis=self._preferred_abis)
+
+ if self._logcat_output_file:
+ self._logcat_output_dir = tempfile.mkdtemp()
+
+ @handle_shard_failures_with(on_failure=self.BlacklistDevice)
+ def prepare_device(d):
+ d.WaitUntilFullyBooted()
+
+ if self._enable_device_cache:
+ cache_path = _DeviceCachePath(d)
+ if os.path.exists(cache_path):
+ logging.info('Using device cache: %s', cache_path)
+ with open(cache_path) as f:
+ d.LoadCacheData(f.read())
+ # Delete cached file so that any exceptions cause it to be cleared.
+ os.unlink(cache_path)
+
+ if self._logcat_output_dir:
+ logcat_file = os.path.join(
+ self._logcat_output_dir,
+ '%s_%s' % (d.adb.GetDeviceSerial(),
+ datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%S')))
+ monitor = logcat_monitor.LogcatMonitor(
+ d.adb, clear=True, output_file=logcat_file)
+ self._logcat_monitors.append(monitor)
+ monitor.Start()
+
+ self.parallel_devices.pMap(prepare_device)
+
+ @property
+ def blacklist(self):
+ return self._blacklist
+
+ @property
+ def concurrent_adb(self):
+ return self._concurrent_adb
+
+ @property
+ def devices(self):
+ # Initialize lazily so that host-only tests do not fail when no devices are
+ # attached.
+ if self._devices is None:
+ self._InitDevices()
+ return self._devices
+
+ @property
+ def max_tries(self):
+ return self._max_tries
+
+ @property
+ def parallel_devices(self):
+ return parallelizer.SyncParallelizer(self.devices)
+
+ @property
+ def recover_devices(self):
+ return self._recover_devices
+
+ @property
+ def skip_clear_data(self):
+ return self._skip_clear_data
+
+ @property
+ def tool(self):
+ return self._tool_name
+
+ @property
+ def trace_output(self):
+ return self._trace_output
+
+ #override
+ def TearDown(self):
+ if self.trace_output and self._trace_all:
+ instrumentation_tracing.stop_instrumenting()
+ elif self.trace_output:
+ self.DisableTracing()
+
+ # By default, teardown will invoke ADB. When receiving SIGTERM due to a
+ # timeout, there's a high probability that ADB is non-responsive. In these
+ # cases, sending an ADB command will potentially take a long time to time
+ # out. Before this happens, the process will be hard-killed for not
+ # responding to SIGTERM fast enough.
+ if self._received_sigterm:
+ return
+
+ if not self._devices:
+ return
+
+ @handle_shard_failures_with(on_failure=self.BlacklistDevice)
+ def tear_down_device(d):
+ # Write the cache even when not using it so that it will be ready the
+ # first time that it is enabled. Writing it every time is also necessary
+ # so that an invalid cache can be flushed just by disabling it for one
+ # run.
+ cache_path = _DeviceCachePath(d)
+ if os.path.exists(os.path.dirname(cache_path)):
+ with open(cache_path, 'w') as f:
+ f.write(d.DumpCacheData())
+ logging.info('Wrote device cache: %s', cache_path)
+ else:
+ logging.warning(
+ 'Unable to write device cache as %s directory does not exist',
+ os.path.dirname(cache_path))
+
+ self.parallel_devices.pMap(tear_down_device)
+
+ for m in self._logcat_monitors:
+ try:
+ m.Stop()
+ m.Close()
+ _, temp_path = tempfile.mkstemp()
+ with open(m.output_file, 'r') as infile:
+ with open(temp_path, 'w') as outfile:
+ for line in infile:
+ outfile.write('Device(%s) %s' % (m.adb.GetDeviceSerial(), line))
+ shutil.move(temp_path, m.output_file)
+ except base_error.BaseError:
+ logging.exception('Failed to stop logcat monitor for %s',
+ m.adb.GetDeviceSerial())
+ except IOError:
+ logging.exception('Failed to locate logcat for device %s',
+ m.adb.GetDeviceSerial())
+
+ if self._logcat_output_file:
+ file_utils.MergeFiles(
+ self._logcat_output_file,
+ [m.output_file for m in self._logcat_monitors
+ if os.path.exists(m.output_file)])
+ shutil.rmtree(self._logcat_output_dir)
+
+ def BlacklistDevice(self, device, reason='local_device_failure'):
+ device_serial = device.adb.GetDeviceSerial()
+ if self._blacklist:
+ self._blacklist.Extend([device_serial], reason=reason)
+ with self._devices_lock:
+ self._devices = [d for d in self._devices if str(d) != device_serial]
+ logging.error('Device %s blacklisted: %s', device_serial, reason)
+ if not self._devices:
+ raise device_errors.NoDevicesError(
+ 'All devices were blacklisted due to errors')
+
+ @staticmethod
+ def DisableTracing():
+ if not trace_event.trace_is_enabled():
+ logging.warning('Tracing is not running.')
+ else:
+ trace_event.trace_disable()
+
+ def EnableTracing(self):
+ if trace_event.trace_is_enabled():
+ logging.warning('Tracing is already running.')
+ else:
+ trace_event.trace_enable(self._trace_output)
diff --git a/deps/v8/build/android/pylib/local/device/local_device_gtest_run.py b/deps/v8/build/android/pylib/local/device/local_device_gtest_run.py
new file mode 100644
index 0000000000..76d3e1bb9b
--- /dev/null
+++ b/deps/v8/build/android/pylib/local/device/local_device_gtest_run.py
@@ -0,0 +1,635 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import itertools
+import logging
+import os
+import posixpath
+import shutil
+import time
+
+from devil.android import crash_handler
+from devil.android import device_errors
+from devil.android import device_temp_file
+from devil.android import logcat_monitor
+from devil.android import ports
+from devil.utils import reraiser_thread
+from incremental_install import installer
+from pylib import constants
+from pylib.base import base_test_result
+from pylib.gtest import gtest_test_instance
+from pylib.local import local_test_server_spawner
+from pylib.local.device import local_device_environment
+from pylib.local.device import local_device_test_run
+from pylib.utils import google_storage_helper
+from pylib.utils import logdog_helper
+from py_trace_event import trace_event
+from py_utils import contextlib_ext
+from py_utils import tempfile_ext
+import tombstones
+
+_MAX_INLINE_FLAGS_LENGTH = 50 # Arbitrarily chosen.
+_EXTRA_COMMAND_LINE_FILE = (
+ 'org.chromium.native_test.NativeTest.CommandLineFile')
+_EXTRA_COMMAND_LINE_FLAGS = (
+ 'org.chromium.native_test.NativeTest.CommandLineFlags')
+_EXTRA_STDOUT_FILE = (
+ 'org.chromium.native_test.NativeTestInstrumentationTestRunner'
+ '.StdoutFile')
+_EXTRA_TEST = (
+ 'org.chromium.native_test.NativeTestInstrumentationTestRunner'
+ '.Test')
+_EXTRA_TEST_LIST = (
+ 'org.chromium.native_test.NativeTestInstrumentationTestRunner'
+ '.TestList')
+
+_MAX_SHARD_SIZE = 256
+_SECONDS_TO_NANOS = int(1e9)
+
+# The amount of time a test executable may run before it gets killed.
+_TEST_TIMEOUT_SECONDS = 30*60
+
+# Tests that use SpawnedTestServer must run the LocalTestServerSpawner on the
+# host machine.
+# TODO(jbudorick): Move this up to the test instance if the net test server is
+# handled outside of the APK for the remote_device environment.
+_SUITE_REQUIRES_TEST_SERVER_SPAWNER = [
+ 'components_browsertests', 'content_unittests', 'content_browsertests',
+ 'net_unittests', 'services_unittests', 'unit_tests'
+]
+
+# No-op context manager. If we used Python 3, we could change this to
+# contextlib.ExitStack()
+class _NullContextManager(object):
+ def __enter__(self):
+ pass
+ def __exit__(self, *args):
+ pass
+
+
+def _GenerateSequentialFileNames(filename):
+ """Infinite generator of names: 'name.ext', 'name_1.ext', 'name_2.ext', ..."""
+ yield filename
+ base, ext = os.path.splitext(filename)
+ for i in itertools.count(1):
+ yield '%s_%d%s' % (base, i, ext)
+
+
+def _ExtractTestsFromFilter(gtest_filter):
+ """Returns the list of tests specified by the given filter.
+
+ Returns:
+ None if the device should be queried for the test list instead.
+ """
+ # Empty means all tests, - means exclude filter.
+ if not gtest_filter or '-' in gtest_filter:
+ return None
+
+ patterns = gtest_filter.split(':')
+ # For a single pattern, allow it even if it has a wildcard so long as the
+ # wildcard comes at the end and there is at least one . to prove the scope is
+ # not too large.
+ # This heuristic is not necessarily faster, but normally is.
+ if len(patterns) == 1 and patterns[0].endswith('*'):
+ no_suffix = patterns[0].rstrip('*')
+ if '*' not in no_suffix and '.' in no_suffix:
+ return patterns
+
+ if '*' in gtest_filter:
+ return None
+ return patterns
+
+
+class _ApkDelegate(object):
+ def __init__(self, test_instance, tool):
+ self._activity = test_instance.activity
+ self._apk_helper = test_instance.apk_helper
+ self._test_apk_incremental_install_json = (
+ test_instance.test_apk_incremental_install_json)
+ self._package = test_instance.package
+ self._runner = test_instance.runner
+ self._permissions = test_instance.permissions
+ self._suite = test_instance.suite
+ self._component = '%s/%s' % (self._package, self._runner)
+ self._extras = test_instance.extras
+ self._wait_for_java_debugger = test_instance.wait_for_java_debugger
+ self._tool = tool
+
+ def GetTestDataRoot(self, device):
+ # pylint: disable=no-self-use
+ return posixpath.join(device.GetExternalStoragePath(),
+ 'chromium_tests_root')
+
+ def Install(self, device):
+ if self._test_apk_incremental_install_json:
+ installer.Install(device, self._test_apk_incremental_install_json,
+ apk=self._apk_helper, permissions=self._permissions)
+ else:
+ device.Install(
+ self._apk_helper,
+ allow_downgrade=True,
+ reinstall=True,
+ permissions=self._permissions)
+
+ def ResultsDirectory(self, device):
+ return device.GetApplicationDataDirectory(self._package)
+
+ def Run(self, test, device, flags=None, **kwargs):
+ extras = dict(self._extras)
+
+ if ('timeout' in kwargs
+ and gtest_test_instance.EXTRA_SHARD_NANO_TIMEOUT not in extras):
+ # Make sure the instrumentation doesn't kill the test before the
+ # scripts do. The provided timeout value is in seconds, but the
+ # instrumentation deals with nanoseconds because that's how Android
+ # handles time.
+ extras[gtest_test_instance.EXTRA_SHARD_NANO_TIMEOUT] = int(
+ kwargs['timeout'] * _SECONDS_TO_NANOS)
+
+ # pylint: disable=redefined-variable-type
+ command_line_file = _NullContextManager()
+ if flags:
+ if len(flags) > _MAX_INLINE_FLAGS_LENGTH:
+ command_line_file = device_temp_file.DeviceTempFile(device.adb)
+ device.WriteFile(command_line_file.name, '_ %s' % flags)
+ extras[_EXTRA_COMMAND_LINE_FILE] = command_line_file.name
+ else:
+ extras[_EXTRA_COMMAND_LINE_FLAGS] = flags
+
+ test_list_file = _NullContextManager()
+ if test:
+ if len(test) > 1:
+ test_list_file = device_temp_file.DeviceTempFile(device.adb)
+ device.WriteFile(test_list_file.name, '\n'.join(test))
+ extras[_EXTRA_TEST_LIST] = test_list_file.name
+ else:
+ extras[_EXTRA_TEST] = test[0]
+ # pylint: enable=redefined-variable-type
+
+ stdout_file = device_temp_file.DeviceTempFile(
+ device.adb, dir=device.GetExternalStoragePath(), suffix='.gtest_out')
+ extras[_EXTRA_STDOUT_FILE] = stdout_file.name
+
+ if self._wait_for_java_debugger:
+ cmd = ['am', 'set-debug-app', '-w', self._package]
+ device.RunShellCommand(cmd, check_return=True)
+ logging.warning('*' * 80)
+ logging.warning('Waiting for debugger to attach to process: %s',
+ self._package)
+ logging.warning('*' * 80)
+
+ with command_line_file, test_list_file, stdout_file:
+ try:
+ device.StartInstrumentation(
+ self._component, extras=extras, raw=False, **kwargs)
+ except device_errors.CommandFailedError:
+ logging.exception('gtest shard failed.')
+ except device_errors.CommandTimeoutError:
+ logging.exception('gtest shard timed out.')
+ except device_errors.DeviceUnreachableError:
+ logging.exception('gtest shard device unreachable.')
+ except Exception:
+ device.ForceStop(self._package)
+ raise
+ # TODO(jbudorick): Remove this after resolving crbug.com/726880
+ logging.info(
+ '%s size on device: %s',
+ stdout_file.name, device.StatPath(stdout_file.name).get('st_size', 0))
+ return device.ReadFile(stdout_file.name).splitlines()
+
+ def PullAppFiles(self, device, files, directory):
+ device_dir = device.GetApplicationDataDirectory(self._package)
+ host_dir = os.path.join(directory, str(device))
+ for f in files:
+ device_file = posixpath.join(device_dir, f)
+ host_file = os.path.join(host_dir, *f.split(posixpath.sep))
+ for host_file in _GenerateSequentialFileNames(host_file):
+ if not os.path.exists(host_file):
+ break
+ device.PullFile(device_file, host_file)
+
+ def Clear(self, device):
+ device.ClearApplicationState(self._package, permissions=self._permissions)
+
+
+class _ExeDelegate(object):
+ def __init__(self, tr, dist_dir, tool):
+ self._host_dist_dir = dist_dir
+ self._exe_file_name = os.path.basename(dist_dir)[:-len('__dist')]
+ self._device_dist_dir = posixpath.join(
+ constants.TEST_EXECUTABLE_DIR, os.path.basename(dist_dir))
+ self._test_run = tr
+ self._tool = tool
+
+ def GetTestDataRoot(self, device):
+ # pylint: disable=no-self-use
+ # pylint: disable=unused-argument
+ return posixpath.join(constants.TEST_EXECUTABLE_DIR, 'chromium_tests_root')
+
+ def Install(self, device):
+ # TODO(jbudorick): Look into merging this with normal data deps pushing if
+ # executables become supported on nonlocal environments.
+ device.PushChangedFiles([(self._host_dist_dir, self._device_dist_dir)],
+ delete_device_stale=True)
+
+ def ResultsDirectory(self, device):
+ # pylint: disable=no-self-use
+ # pylint: disable=unused-argument
+ return constants.TEST_EXECUTABLE_DIR
+
+ def Run(self, test, device, flags=None, **kwargs):
+ tool = self._test_run.GetTool(device).GetTestWrapper()
+ if tool:
+ cmd = [tool]
+ else:
+ cmd = []
+ cmd.append(posixpath.join(self._device_dist_dir, self._exe_file_name))
+
+ if test:
+ cmd.append('--gtest_filter=%s' % ':'.join(test))
+ if flags:
+ # TODO(agrieve): This won't work if multiple flags are passed.
+ cmd.append(flags)
+ cwd = constants.TEST_EXECUTABLE_DIR
+
+ env = {
+ 'LD_LIBRARY_PATH': self._device_dist_dir
+ }
+
+ if self._tool != 'asan':
+ env['UBSAN_OPTIONS'] = constants.UBSAN_OPTIONS
+
+ try:
+ gcov_strip_depth = os.environ['NATIVE_COVERAGE_DEPTH_STRIP']
+ external = device.GetExternalStoragePath()
+ env['GCOV_PREFIX'] = '%s/gcov' % external
+ env['GCOV_PREFIX_STRIP'] = gcov_strip_depth
+ except (device_errors.CommandFailedError, KeyError):
+ pass
+
+ # Executable tests return a nonzero exit code on test failure, which is
+ # fine from the test runner's perspective; thus check_return=False.
+ output = device.RunShellCommand(
+ cmd, cwd=cwd, env=env, check_return=False, large_output=True, **kwargs)
+ return output
+
+ def PullAppFiles(self, device, files, directory):
+ pass
+
+ def Clear(self, device):
+ device.KillAll(self._exe_file_name, blocking=True, timeout=30, quiet=True)
+
+
+class LocalDeviceGtestRun(local_device_test_run.LocalDeviceTestRun):
+
+ def __init__(self, env, test_instance):
+ assert isinstance(env, local_device_environment.LocalDeviceEnvironment)
+ assert isinstance(test_instance, gtest_test_instance.GtestTestInstance)
+ super(LocalDeviceGtestRun, self).__init__(env, test_instance)
+
+ # pylint: disable=redefined-variable-type
+ if self._test_instance.apk:
+ self._delegate = _ApkDelegate(self._test_instance, env.tool)
+ elif self._test_instance.exe_dist_dir:
+ self._delegate = _ExeDelegate(self, self._test_instance.exe_dist_dir,
+ self._env.tool)
+ if self._test_instance.isolated_script_test_perf_output:
+ self._test_perf_output_filenames = _GenerateSequentialFileNames(
+ self._test_instance.isolated_script_test_perf_output)
+ else:
+ self._test_perf_output_filenames = itertools.repeat(None)
+ # pylint: enable=redefined-variable-type
+ self._crashes = set()
+ self._servers = collections.defaultdict(list)
+
+ #override
+ def TestPackage(self):
+ return self._test_instance.suite
+
+ #override
+ def SetUp(self):
+ @local_device_environment.handle_shard_failures_with(
+ on_failure=self._env.BlacklistDevice)
+ @trace_event.traced
+ def individual_device_set_up(device, host_device_tuples):
+ def install_apk(dev):
+ # Install test APK.
+ self._delegate.Install(dev)
+
+ def push_test_data(dev):
+ # Push data dependencies.
+ device_root = self._delegate.GetTestDataRoot(dev)
+ host_device_tuples_substituted = [
+ (h, local_device_test_run.SubstituteDeviceRoot(d, device_root))
+ for h, d in host_device_tuples]
+ dev.PushChangedFiles(
+ host_device_tuples_substituted,
+ delete_device_stale=True,
+ # Some gtest suites, e.g. unit_tests, have data dependencies that
+ # can take longer than the default timeout to push. See
+ # crbug.com/791632 for context.
+ timeout=600)
+ if not host_device_tuples:
+ dev.RemovePath(device_root, force=True, recursive=True, rename=True)
+ dev.RunShellCommand(['mkdir', '-p', device_root], check_return=True)
+
+ def init_tool_and_start_servers(dev):
+ tool = self.GetTool(dev)
+ tool.CopyFiles(dev)
+ tool.SetupEnvironment()
+
+ self._servers[str(dev)] = []
+ if self.TestPackage() in _SUITE_REQUIRES_TEST_SERVER_SPAWNER:
+ self._servers[str(dev)].append(
+ local_test_server_spawner.LocalTestServerSpawner(
+ ports.AllocateTestServerPort(), dev, tool))
+
+ for s in self._servers[str(dev)]:
+ s.SetUp()
+
+ def bind_crash_handler(step, dev):
+ return lambda: crash_handler.RetryOnSystemCrash(step, dev)
+
+ steps = [
+ bind_crash_handler(s, device)
+ for s in (install_apk, push_test_data, init_tool_and_start_servers)]
+ if self._env.concurrent_adb:
+ reraiser_thread.RunAsync(steps)
+ else:
+ for step in steps:
+ step()
+
+ self._env.parallel_devices.pMap(
+ individual_device_set_up,
+ self._test_instance.GetDataDependencies())
+
+ #override
+ def _ShouldShard(self):
+ return True
+
+ #override
+ def _CreateShards(self, tests):
+ # _crashes are tests that might crash and make the tests in the same shard
+ # following the crashed testcase not run.
+ # Thus we need to create separate shards for each crashed testcase,
+ # so that other tests can be run.
+ device_count = len(self._env.devices)
+ shards = []
+
+ # Add shards with only one suspect testcase.
+ shards += [[crash] for crash in self._crashes if crash in tests]
+
+ # Delete suspect testcase from tests.
+ tests = [test for test in tests if not test in self._crashes]
+
+ for i in xrange(0, device_count):
+ unbounded_shard = tests[i::device_count]
+ shards += [unbounded_shard[j:j+_MAX_SHARD_SIZE]
+ for j in xrange(0, len(unbounded_shard), _MAX_SHARD_SIZE)]
+ return shards
+
+ #override
+ def _GetTests(self):
+ if self._test_instance.extract_test_list_from_filter:
+ # When the exact list of tests to run is given via command-line (e.g. when
+ # locally iterating on a specific test), skip querying the device (which
+ # takes ~3 seconds).
+ tests = _ExtractTestsFromFilter(self._test_instance.gtest_filter)
+ if tests:
+ return tests
+
+ # Even when there's only one device, it still makes sense to retrieve the
+ # test list so that tests can be split up and run in batches rather than all
+ # at once (since test output is not streamed).
+ @local_device_environment.handle_shard_failures_with(
+ on_failure=self._env.BlacklistDevice)
+ def list_tests(dev):
+ timeout = 30
+ retries = 1
+ if self._test_instance.wait_for_java_debugger:
+ timeout = None
+
+ flags = [
+ f for f in self._test_instance.flags
+ if f not in ['--wait-for-debugger', '--wait-for-java-debugger']
+ ]
+ flags.append('--gtest_list_tests')
+
+ # TODO(crbug.com/726880): Remove retries when no longer necessary.
+ for i in range(0, retries+1):
+ logging.info('flags:')
+ for f in flags:
+ logging.info(' %s', f)
+
+ raw_test_list = crash_handler.RetryOnSystemCrash(
+ lambda d: self._delegate.Run(
+ None, d, flags=' '.join(flags), timeout=timeout),
+ device=dev)
+ tests = gtest_test_instance.ParseGTestListTests(raw_test_list)
+ if not tests:
+ logging.info('No tests found. Output:')
+ for l in raw_test_list:
+ logging.info(' %s', l)
+ logging.info('Logcat:')
+ for line in dev.adb.Logcat(dump=True):
+ logging.info(line)
+ dev.adb.Logcat(clear=True)
+ if i < retries:
+ logging.info('Retrying...')
+ else:
+ break
+ return tests
+
+ # Query all devices in case one fails.
+ test_lists = self._env.parallel_devices.pMap(list_tests).pGet(None)
+
+ # If all devices failed to list tests, raise an exception.
+ # Check that tl is not None and is not empty.
+ if all(not tl for tl in test_lists):
+ raise device_errors.CommandFailedError(
+ 'Failed to list tests on any device')
+ tests = list(sorted(set().union(*[set(tl) for tl in test_lists if tl])))
+ tests = self._test_instance.FilterTests(tests)
+ tests = self._ApplyExternalSharding(
+ tests, self._test_instance.external_shard_index,
+ self._test_instance.total_external_shards)
+ return tests
+
+ def _UploadTestArtifacts(self, device, test_artifacts_dir):
+ # TODO(jbudorick): Reconcile this with the output manager once
+ # https://codereview.chromium.org/2933993002/ lands.
+ if test_artifacts_dir:
+ with tempfile_ext.NamedTemporaryDirectory() as test_artifacts_host_dir:
+ device.PullFile(test_artifacts_dir.name, test_artifacts_host_dir)
+ with tempfile_ext.NamedTemporaryDirectory() as temp_zip_dir:
+ zip_base_name = os.path.join(temp_zip_dir, 'test_artifacts')
+ test_artifacts_zip = shutil.make_archive(
+ zip_base_name, 'zip', test_artifacts_host_dir)
+ link = google_storage_helper.upload(
+ google_storage_helper.unique_name(
+ 'test_artifacts', device=device),
+ test_artifacts_zip,
+ bucket='%s/test_artifacts' % (
+ self._test_instance.gs_test_artifacts_bucket))
+ logging.info('Uploading test artifacts to %s.', link)
+ return link
+ return None
+
+ #override
+ def _RunTest(self, device, test):
+ # Run the test.
+ timeout = (self._test_instance.shard_timeout
+ * self.GetTool(device).GetTimeoutScale())
+ if self._test_instance.wait_for_java_debugger:
+ timeout = None
+ if self._test_instance.store_tombstones:
+ tombstones.ClearAllTombstones(device)
+ test_perf_output_filename = next(self._test_perf_output_filenames)
+
+ with device_temp_file.DeviceTempFile(
+ adb=device.adb,
+ dir=self._delegate.ResultsDirectory(device),
+ suffix='.xml') as device_tmp_results_file:
+ with contextlib_ext.Optional(
+ device_temp_file.NamedDeviceTemporaryDirectory(
+ adb=device.adb, dir='/sdcard/'),
+ self._test_instance.gs_test_artifacts_bucket) as test_artifacts_dir:
+ with (contextlib_ext.Optional(
+ device_temp_file.DeviceTempFile(
+ adb=device.adb, dir=self._delegate.ResultsDirectory(device)),
+ test_perf_output_filename)) as isolated_script_test_perf_output:
+
+ flags = list(self._test_instance.flags)
+ if self._test_instance.enable_xml_result_parsing:
+ flags.append('--gtest_output=xml:%s' % device_tmp_results_file.name)
+
+ if self._test_instance.gs_test_artifacts_bucket:
+ flags.append('--test_artifacts_dir=%s' % test_artifacts_dir.name)
+
+ if test_perf_output_filename:
+ flags.append('--isolated_script_test_perf_output=%s'
+ % isolated_script_test_perf_output.name)
+
+ logging.info('flags:')
+ for f in flags:
+ logging.info(' %s', f)
+
+ stream_name = 'logcat_%s_%s_%s' % (
+ hash(tuple(test)),
+ time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()),
+ device.serial)
+
+ with self._env.output_manager.ArchivedTempfile(
+ stream_name, 'logcat') as logcat_file:
+ with logcat_monitor.LogcatMonitor(
+ device.adb,
+ filter_specs=local_device_environment.LOGCAT_FILTERS,
+ output_file=logcat_file.name) as logmon:
+ with contextlib_ext.Optional(
+ trace_event.trace(str(test)),
+ self._env.trace_output):
+ output = self._delegate.Run(
+ test, device, flags=' '.join(flags),
+ timeout=timeout, retries=0)
+ logmon.Close()
+
+ if logcat_file.Link():
+ logging.info('Logcat saved to %s', logcat_file.Link())
+
+ if self._test_instance.enable_xml_result_parsing:
+ try:
+ gtest_xml = device.ReadFile(
+ device_tmp_results_file.name,
+ as_root=True)
+ except device_errors.CommandFailedError as e:
+ logging.warning(
+ 'Failed to pull gtest results XML file %s: %s',
+ device_tmp_results_file.name,
+ str(e))
+ gtest_xml = None
+
+ if test_perf_output_filename:
+ try:
+ device.PullFile(isolated_script_test_perf_output.name,
+ test_perf_output_filename)
+ except device_errors.CommandFailedError as e:
+ logging.warning(
+ 'Failed to pull chartjson results %s: %s',
+ isolated_script_test_perf_output.name, str(e))
+
+ test_artifacts_url = self._UploadTestArtifacts(device,
+ test_artifacts_dir)
+
+ for s in self._servers[str(device)]:
+ s.Reset()
+ if self._test_instance.app_files:
+ self._delegate.PullAppFiles(device, self._test_instance.app_files,
+ self._test_instance.app_file_dir)
+ if not self._env.skip_clear_data:
+ self._delegate.Clear(device)
+
+ for l in output:
+ logging.info(l)
+
+ # Parse the output.
+ # TODO(jbudorick): Transition test scripts away from parsing stdout.
+ if self._test_instance.enable_xml_result_parsing:
+ results = gtest_test_instance.ParseGTestXML(gtest_xml)
+ else:
+ results = gtest_test_instance.ParseGTestOutput(
+ output, self._test_instance.symbolizer, device.product_cpu_abi)
+
+ tombstones_url = None
+ for r in results:
+ if logcat_file:
+ r.SetLink('logcat', logcat_file.Link())
+
+ if self._test_instance.gs_test_artifacts_bucket:
+ r.SetLink('test_artifacts', test_artifacts_url)
+
+ if r.GetType() == base_test_result.ResultType.CRASH:
+ self._crashes.add(r.GetName())
+ if self._test_instance.store_tombstones:
+ if not tombstones_url:
+ resolved_tombstones = tombstones.ResolveTombstones(
+ device,
+ resolve_all_tombstones=True,
+ include_stack_symbols=False,
+ wipe_tombstones=True)
+ stream_name = 'tombstones_%s_%s' % (
+ time.strftime('%Y%m%dT%H%M%S', time.localtime()),
+ device.serial)
+ tombstones_url = logdog_helper.text(
+ stream_name, '\n'.join(resolved_tombstones))
+ r.SetLink('tombstones', tombstones_url)
+
+ tests_stripped_disabled_prefix = set()
+ for t in test:
+ tests_stripped_disabled_prefix.add(
+ gtest_test_instance.TestNameWithoutDisabledPrefix(t))
+ not_run_tests = tests_stripped_disabled_prefix.difference(
+ set(r.GetName() for r in results))
+ return results, list(not_run_tests) if results else None
+
+ #override
+ def TearDown(self):
+ # By default, teardown will invoke ADB. When receiving SIGTERM due to a
+ # timeout, there's a high probability that ADB is non-responsive. In these
+ # cases, sending an ADB command will potentially take a long time to time
+ # out. Before this happens, the process will be hard-killed for not
+ # responding to SIGTERM fast enough.
+ if self._received_sigterm:
+ return
+
+ @local_device_environment.handle_shard_failures
+ @trace_event.traced
+ def individual_device_tear_down(dev):
+ for s in self._servers.get(str(dev), []):
+ s.TearDown()
+
+ tool = self.GetTool(dev)
+ tool.CleanUpEnvironment()
+
+ self._env.parallel_devices.pMap(individual_device_tear_down)
diff --git a/deps/v8/build/android/pylib/local/device/local_device_instrumentation_test_run.py b/deps/v8/build/android/pylib/local/device/local_device_instrumentation_test_run.py
new file mode 100644
index 0000000000..4332e74972
--- /dev/null
+++ b/deps/v8/build/android/pylib/local/device/local_device_instrumentation_test_run.py
@@ -0,0 +1,965 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import contextlib
+import copy
+import hashlib
+import json
+import logging
+import os
+import posixpath
+import re
+import sys
+import time
+
+from devil.android import crash_handler
+from devil.android import device_errors
+from devil.android import device_temp_file
+from devil.android import flag_changer
+from devil.android.sdk import shared_prefs
+from devil.android import logcat_monitor
+from devil.android.tools import system_app
+from devil.android.tools import webview_app
+from devil.utils import reraiser_thread
+from incremental_install import installer
+from pylib import constants
+from pylib import valgrind_tools
+from pylib.base import base_test_result
+from pylib.base import output_manager
+from pylib.constants import host_paths
+from pylib.instrumentation import instrumentation_test_instance
+from pylib.local.device import local_device_environment
+from pylib.local.device import local_device_test_run
+from pylib.output import remote_output_manager
+from pylib.utils import instrumentation_tracing
+from pylib.utils import shared_preference_utils
+
+from py_trace_event import trace_event
+from py_trace_event import trace_time
+from py_utils import contextlib_ext
+from py_utils import tempfile_ext
+import tombstones
+
+
+with host_paths.SysPath(
+ os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party'), 0):
+ import jinja2 # pylint: disable=import-error
+ import markupsafe # pylint: disable=import-error,unused-import
+
+
+_JINJA_TEMPLATE_DIR = os.path.join(
+ host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'pylib', 'instrumentation')
+_JINJA_TEMPLATE_FILENAME = 'render_test.html.jinja'
+
+_TAG = 'test_runner_py'
+
+TIMEOUT_ANNOTATIONS = [
+ ('Manual', 10 * 60 * 60),
+ ('IntegrationTest', 30 * 60),
+ ('External', 10 * 60),
+ ('EnormousTest', 10 * 60),
+ ('LargeTest', 5 * 60),
+ ('MediumTest', 3 * 60),
+ ('SmallTest', 1 * 60),
+]
+
+LOGCAT_FILTERS = ['*:e', 'chromium:v', 'cr_*:v', 'DEBUG:I',
+ 'StrictMode:D', '%s:I' % _TAG]
+
+EXTRA_SCREENSHOT_FILE = (
+ 'org.chromium.base.test.ScreenshotOnFailureStatement.ScreenshotFile')
+
+EXTRA_UI_CAPTURE_DIR = (
+ 'org.chromium.base.test.util.Screenshooter.ScreenshotDir')
+
+EXTRA_TRACE_FILE = ('org.chromium.base.test.BaseJUnit4ClassRunner.TraceFile')
+
+_EXTRA_TEST_LIST = (
+ 'org.chromium.base.test.BaseChromiumAndroidJUnitRunner.TestList')
+
+FEATURE_ANNOTATION = 'Feature'
+RENDER_TEST_FEATURE_ANNOTATION = 'RenderTest'
+
+# This needs to be kept in sync with formatting in |RenderUtils.imageName|
+RE_RENDER_IMAGE_NAME = re.compile(
+ r'(?P<test_class>\w+)\.'
+ r'(?P<description>[-\w]+)\.'
+ r'(?P<device_model_sdk>[-\w]+)\.png')
+
@contextlib.contextmanager
def _LogTestEndpoints(device, test_name):
  """Brackets a test with START/END markers in the device log.

  Emits a 'START <test_name>' logcat entry before yielding and always emits
  the matching 'END <test_name>' entry afterwards, even if the body raises.
  """
  log_cmd_prefix = ['log', '-p', 'i', '-t', _TAG]
  device.RunShellCommand(
      log_cmd_prefix + ['START %s' % test_name], check_return=True)
  try:
    yield
  finally:
    device.RunShellCommand(
        log_cmd_prefix + ['END %s' % test_name], check_return=True)
+
# TODO(jbudorick): Make this private once the instrumentation test_runner
# is deprecated.
def DidPackageCrashOnDevice(package_name, device):
  """Checks whether |package_name| crashed, dismissing any crash dialogs.

  Repeatedly dismisses crash/ANR dialogs on |device| (bounded at 10 in case
  of an error loop or a dialog that will not dismiss) and reports whether any
  dismissed dialog belonged to |package_name|.

  Args:
    package_name: Name of the test package. By convention it contains the
        application package as a prefix (e.g. 'foo' -> 'foo.test').
    device: The device to query for crash dialogs.

  Returns:
    True if a dismissed dialog's package matched |package_name|; False if no
    dialog was present, the dismissal limit was reached, or dismissing failed.
  """
  # Dismiss any error dialogs. Limit the number in case we have an error
  # loop or we are failing to dismiss.
  try:
    # range rather than the Python-2-only xrange: identical behavior for this
    # small bound and keeps the function runnable under Python 3.
    for _ in range(10):
      package = device.DismissCrashDialogIfNeeded(timeout=10, retries=1)
      if not package:
        return False
      # Assume test package convention of ".test" suffix
      if package in package_name:
        return True
  except device_errors.CommandFailedError:
    logging.exception('Error while attempting to dismiss crash dialog.')
  return False
+
+
# Matches window-manager 'mCurrentFocus' dump lines for application error or
# ANR ("Not Responding") dialogs, capturing the dialog type and package name.
_CURRENT_FOCUS_CRASH_RE = re.compile(
    r'\s*mCurrentFocus.*Application (Error|Not Responding): (\S+)}')
+
+
class LocalDeviceInstrumentationTestRun(
    local_device_test_run.LocalDeviceTestRun):
  """Runs Java instrumentation tests on locally attached devices."""

  def __init__(self, env, test_instance):
    super(LocalDeviceInstrumentationTestRun, self).__init__(
        env, test_instance)
    # Maps str(device) -> FlagChanger managing command-line flags on it.
    self._flag_changers = {}
    # Context manager replacing a system APK; entered in SetUp and exited in
    # TearDown (see comments there). None when no replacement was requested.
    self._replace_package_contextmanager = None
    # SharedPrefs snapshots taken before edits; restored in TearDown.
    self._shared_prefs_to_restore = []
    # Context manager switching the WebView provider; entered/exited like
    # _replace_package_contextmanager. None when unused.
    self._use_webview_contextmanager = None
+
+ #override
+ def TestPackage(self):
+ return self._test_instance.suite
+
  #override
  def SetUp(self):
    """Prepares every attached device for running instrumentation tests.

    Builds a list of per-device setup steps (system-package replacement,
    WebView provider switch, APK installs, debug-app selection, shared-pref
    edits, test data pushes, flag setup) and runs them on all devices in
    parallel, optionally running the steps themselves concurrently over ADB.
    """
    @local_device_environment.handle_shard_failures_with(
        self._env.BlacklistDevice)
    @trace_event.traced
    def individual_device_set_up(device, host_device_tuples):
      steps = []

      if self._test_instance.replace_system_package:
        @trace_event.traced
        def replace_package(dev):
          # We need the context manager to be applied before modifying any
          # shared preference files in case the replacement APK needs to be
          # set up, and it needs to be applied while the test is running.
          # Thus, it needs to be applied early during setup, but must still be
          # applied during _RunTest, which isn't possible using 'with' without
          # applying the context manager up in test_runner. Instead, we
          # manually invoke its __enter__ and __exit__ methods in setup and
          # teardown.
          self._replace_package_contextmanager = system_app.ReplaceSystemApp(
              dev, self._test_instance.replace_system_package.package,
              self._test_instance.replace_system_package.replacement_apk)
          # Pylint is not smart enough to realize that this field has
          # an __enter__ method, and will complain loudly.
          # pylint: disable=no-member
          self._replace_package_contextmanager.__enter__()
          # pylint: enable=no-member

        steps.append(replace_package)

      if self._test_instance.use_webview_provider:
        @trace_event.traced
        def use_webview_provider(dev):
          # We need the context manager to be applied before modifying any
          # shared preference files in case the replacement APK needs to be
          # set up, and it needs to be applied while the test is running.
          # Thus, it needs to be applied early during setup, but must still be
          # applied during _RunTest, which isn't possible using 'with' without
          # applying the context manager up in test_runner. Instead, we
          # manually invoke its __enter__ and __exit__ methods in setup and
          # teardown.
          self._use_webview_contextmanager = webview_app.UseWebViewProvider(
              dev, self._test_instance.use_webview_provider)
          # Pylint is not smart enough to realize that this field has
          # an __enter__ method, and will complain loudly.
          # pylint: disable=no-member
          self._use_webview_contextmanager.__enter__()
          # pylint: enable=no-member

        steps.append(use_webview_provider)

      # Returns a step that installs |apk| normally.
      def install_helper(apk, permissions):
        @instrumentation_tracing.no_tracing
        @trace_event.traced("apk_path")
        def install_helper_internal(d, apk_path=apk.path):
          # pylint: disable=unused-argument
          d.Install(apk, permissions=permissions)
        return install_helper_internal

      # Returns a step that installs |apk| via the incremental installer.
      def incremental_install_helper(apk, json_path, permissions):
        @trace_event.traced("apk_path")
        def incremental_install_helper_internal(d, apk_path=apk.path):
          # pylint: disable=unused-argument
          installer.Install(d, json_path, apk=apk, permissions=permissions)
        return incremental_install_helper_internal

      if self._test_instance.apk_under_test:
        permissions = self._test_instance.apk_under_test.GetPermissions()
        if self._test_instance.apk_under_test_incremental_install_json:
          steps.append(incremental_install_helper(
              self._test_instance.apk_under_test,
              self._test_instance.
              apk_under_test_incremental_install_json,
              permissions))
        else:
          steps.append(install_helper(self._test_instance.apk_under_test,
                                      permissions))

      permissions = self._test_instance.test_apk.GetPermissions()
      if self._test_instance.test_apk_incremental_install_json:
        steps.append(incremental_install_helper(
            self._test_instance.test_apk,
            self._test_instance.
            test_apk_incremental_install_json,
            permissions))
      else:
        steps.append(install_helper(self._test_instance.test_apk,
                                    permissions))

      steps.extend(install_helper(apk, None)
                   for apk in self._test_instance.additional_apks)

      @trace_event.traced
      def set_debug_app(dev):
        # Set debug app in order to enable reading command line flags on user
        # builds
        package_name = None
        if self._test_instance.apk_under_test:
          package_name = self._test_instance.apk_under_test.GetPackageName()
        elif self._test_instance.test_apk:
          package_name = self._test_instance.test_apk.GetPackageName()
        else:
          logging.error("Couldn't set debug app: no package name found")
          return
        cmd = ['am', 'set-debug-app', '--persistent']
        if self._test_instance.wait_for_java_debugger:
          cmd.append('-w')
        cmd.append(package_name)
        dev.RunShellCommand(cmd, check_return=True)

      @trace_event.traced
      def edit_shared_prefs(dev):
        for setting in self._test_instance.edit_shared_prefs:
          shared_pref = shared_prefs.SharedPrefs(
              dev, setting['package'], setting['filename'],
              use_encrypted_path=setting.get('supports_encrypted_path', False))
          # Snapshot the current values so TearDown can restore them.
          pref_to_restore = copy.copy(shared_pref)
          pref_to_restore.Load()
          self._shared_prefs_to_restore.append(pref_to_restore)

          shared_preference_utils.ApplySharedPreferenceSetting(
              shared_pref, setting)

      @trace_event.traced
      def set_vega_permissions(dev):
        # Normally, installation of VrCore automatically grants storage
        # permissions. However, since VrCore is part of the system image on
        # the Vega standalone headset, we don't install the APK as part of test
        # setup. Instead, grant the permissions here so that it can take
        # screenshots.
        if dev.product_name == 'vega':
          dev.GrantPermissions('com.google.vr.vrcore', [
              'android.permission.WRITE_EXTERNAL_STORAGE',
              'android.permission.READ_EXTERNAL_STORAGE'
          ])

      @instrumentation_tracing.no_tracing
      def push_test_data(dev):
        device_root = posixpath.join(dev.GetExternalStoragePath(),
                                     'chromium_tests_root')
        host_device_tuples_substituted = [
            (h, local_device_test_run.SubstituteDeviceRoot(d, device_root))
            for h, d in host_device_tuples]
        logging.info('instrumentation data deps:')
        for h, d in host_device_tuples_substituted:
          logging.info('%r -> %r', h, d)
        dev.PushChangedFiles(host_device_tuples_substituted,
                             delete_device_stale=True)
        # With no data deps, just make sure the root exists and is empty.
        if not host_device_tuples_substituted:
          dev.RunShellCommand(['rm', '-rf', device_root], check_return=True)
          dev.RunShellCommand(['mkdir', '-p', device_root], check_return=True)

      @trace_event.traced
      def create_flag_changer(dev):
        if self._test_instance.flags:
          self._CreateFlagChangerIfNeeded(dev)
          logging.debug('Attempting to set flags: %r',
                        self._test_instance.flags)
          self._flag_changers[str(dev)].AddFlags(self._test_instance.flags)

        valgrind_tools.SetChromeTimeoutScale(
            dev, self._test_instance.timeout_scale)

      steps += [
          set_debug_app, edit_shared_prefs, push_test_data, create_flag_changer,
          set_vega_permissions
      ]

      # Wrap each step so that transient system crashes trigger a retry.
      def bind_crash_handler(step, dev):
        return lambda: crash_handler.RetryOnSystemCrash(step, dev)

      steps = [bind_crash_handler(s, device) for s in steps]

      try:
        if self._env.concurrent_adb:
          reraiser_thread.RunAsync(steps)
        else:
          for step in steps:
            step()
        if self._test_instance.store_tombstones:
          tombstones.ClearAllTombstones(device)
      except device_errors.CommandFailedError:
        # A bugreport can be large and take a while to generate, so only capture
        # one if we're using a remote manager.
        if isinstance(
            self._env.output_manager,
            remote_output_manager.RemoteOutputManager):
          logging.error(
              'Error when setting up device for tests. Taking a bugreport for '
              'investigation. This may take a while...')
          report_name = '%s.bugreport' % device.serial
          with self._env.output_manager.ArchivedTempfile(
              report_name, 'bug_reports') as report_file:
            device.TakeBugReport(report_file.name)
          logging.error('Bug report saved to %s', report_file.Link())
        raise

    self._env.parallel_devices.pMap(
        individual_device_set_up,
        self._test_instance.GetDataDependencies())
    if self._test_instance.wait_for_java_debugger:
      apk = self._test_instance.apk_under_test or self._test_instance.test_apk
      logging.warning('*' * 80)
      logging.warning('Waiting for debugger to attach to process: %s',
                      apk.GetPackageName())
      logging.warning('*' * 80)
+
  #override
  def TearDown(self):
    """Restores device state modified in SetUp, on all devices in parallel."""
    # By default, teardown will invoke ADB. When receiving SIGTERM due to a
    # timeout, there's a high probability that ADB is non-responsive. In these
    # cases, sending an ADB command will potentially take a long time to time
    # out. Before this happens, the process will be hard-killed for not
    # responding to SIGTERM fast enough.
    if self._received_sigterm:
      return

    @local_device_environment.handle_shard_failures_with(
        self._env.BlacklistDevice)
    @trace_event.traced
    def individual_device_tear_down(dev):
      if str(dev) in self._flag_changers:
        self._flag_changers[str(dev)].Restore()

      # Remove package-specific configuration
      dev.RunShellCommand(['am', 'clear-debug-app'], check_return=True)

      valgrind_tools.SetChromeTimeoutScale(dev, None)

      # Restore any shared preference files that we stored during setup.
      # This should be run sometime before the replace package contextmanager
      # gets exited so we don't have to special case restoring files of
      # replaced system apps.
      for pref_to_restore in self._shared_prefs_to_restore:
        pref_to_restore.Commit(force_commit=True)

      # Context manager exit handlers are applied in reverse order
      # of the enter handlers
      if self._use_webview_contextmanager:
        # See pylint-related comment above with __enter__()
        # pylint: disable=no-member
        self._use_webview_contextmanager.__exit__(*sys.exc_info())
        # pylint: enable=no-member

      if self._replace_package_contextmanager:
        # See pylint-related comment above with __enter__()
        # pylint: disable=no-member
        self._replace_package_contextmanager.__exit__(*sys.exc_info())
        # pylint: enable=no-member

    self._env.parallel_devices.pMap(individual_device_tear_down)
+
+ def _CreateFlagChangerIfNeeded(self, device):
+ if str(device) not in self._flag_changers:
+ cmdline_file = 'test-cmdline-file'
+ if self._test_instance.use_apk_under_test_flags_file:
+ if self._test_instance.package_info:
+ cmdline_file = self._test_instance.package_info.cmdline_file
+ else:
+ logging.warning(
+ 'No PackageInfo found, falling back to using flag file %s',
+ cmdline_file)
+ self._flag_changers[str(device)] = flag_changer.FlagChanger(
+ device, cmdline_file)
+
+ #override
+ def _CreateShards(self, tests):
+ return tests
+
  #override
  def _GetTests(self):
    """Returns the list of tests for this external shard.

    Prefers listing tests by querying the on-device JUnit4 runner when the
    runner supports it; otherwise falls back to the test instance's static
    list. Either way, the result is filtered down to this shard.
    """
    if self._test_instance.junit4_runner_supports_listing:
      raw_tests = self._GetTestsFromRunner()
      tests = self._test_instance.ProcessRawTests(raw_tests)
    else:
      tests = self._test_instance.GetTests()
    tests = self._ApplyExternalSharding(
        tests, self._test_instance.external_shard_index,
        self._test_instance.total_external_shards)
    return tests
+
+ #override
+ def _GetUniqueTestName(self, test):
+ return instrumentation_test_instance.GetUniqueTestName(test)
+
+ #override
+ def _RunTest(self, device, test):
+ extras = {}
+
+ flags_to_add = []
+ test_timeout_scale = None
+ if self._test_instance.coverage_directory:
+ coverage_basename = '%s.ec' % ('%s_group' % test[0]['method']
+ if isinstance(test, list) else test['method'])
+ extras['coverage'] = 'true'
+ coverage_directory = os.path.join(
+ device.GetExternalStoragePath(), 'chrome', 'test', 'coverage')
+ coverage_device_file = os.path.join(
+ coverage_directory, coverage_basename)
+ extras['coverageFile'] = coverage_device_file
+ # Save screenshot if screenshot dir is specified (save locally) or if
+ # a GS bucket is passed (save in cloud).
+ screenshot_device_file = device_temp_file.DeviceTempFile(
+ device.adb, suffix='.png', dir=device.GetExternalStoragePath())
+ extras[EXTRA_SCREENSHOT_FILE] = screenshot_device_file.name
+
+ # Set up the screenshot directory. This needs to be done for each test so
+ # that we only get screenshots created by that test. It has to be on
+ # external storage since the default location doesn't allow file creation
+ # from the instrumentation test app on Android L and M.
+ ui_capture_dir = device_temp_file.NamedDeviceTemporaryDirectory(
+ device.adb,
+ dir=device.GetExternalStoragePath())
+ extras[EXTRA_UI_CAPTURE_DIR] = ui_capture_dir.name
+
+ if self._env.trace_output:
+ trace_device_file = device_temp_file.DeviceTempFile(
+ device.adb, suffix='.json', dir=device.GetExternalStoragePath())
+ extras[EXTRA_TRACE_FILE] = trace_device_file.name
+
+ if isinstance(test, list):
+ if not self._test_instance.driver_apk:
+ raise Exception('driver_apk does not exist. '
+ 'Please build it and try again.')
+ if any(t.get('is_junit4') for t in test):
+ raise Exception('driver apk does not support JUnit4 tests')
+
+ def name_and_timeout(t):
+ n = instrumentation_test_instance.GetTestName(t)
+ i = self._GetTimeoutFromAnnotations(t['annotations'], n)
+ return (n, i)
+
+ test_names, timeouts = zip(*(name_and_timeout(t) for t in test))
+
+ test_name = ','.join(test_names)
+ test_display_name = test_name
+ target = '%s/%s' % (
+ self._test_instance.driver_package,
+ self._test_instance.driver_name)
+ extras.update(
+ self._test_instance.GetDriverEnvironmentVars(
+ test_list=test_names))
+ timeout = sum(timeouts)
+ else:
+ test_name = instrumentation_test_instance.GetTestName(test)
+ test_display_name = self._GetUniqueTestName(test)
+ if test['is_junit4']:
+ target = '%s/%s' % (
+ self._test_instance.test_package,
+ self._test_instance.junit4_runner_class)
+ else:
+ target = '%s/%s' % (
+ self._test_instance.test_package,
+ self._test_instance.junit3_runner_class)
+ extras['class'] = test_name
+ if 'flags' in test and test['flags']:
+ flags_to_add.extend(test['flags'])
+ timeout = self._GetTimeoutFromAnnotations(
+ test['annotations'], test_display_name)
+
+ test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
+ test['annotations'])
+ if test_timeout_scale and test_timeout_scale != 1:
+ valgrind_tools.SetChromeTimeoutScale(
+ device, test_timeout_scale * self._test_instance.timeout_scale)
+
+ if self._test_instance.wait_for_java_debugger:
+ timeout = None
+ logging.info('preparing to run %s: %s', test_display_name, test)
+
+ render_tests_device_output_dir = None
+ if _IsRenderTest(test):
+ # TODO(mikecase): Add DeviceTempDirectory class and use that instead.
+ render_tests_device_output_dir = posixpath.join(
+ device.GetExternalStoragePath(),
+ 'render_test_output_dir')
+ flags_to_add.append('--render-test-output-dir=%s' %
+ render_tests_device_output_dir)
+
+ if flags_to_add:
+ self._CreateFlagChangerIfNeeded(device)
+ self._flag_changers[str(device)].PushFlags(add=flags_to_add)
+
+ time_ms = lambda: int(time.time() * 1e3)
+ start_ms = time_ms()
+
+ stream_name = 'logcat_%s_%s_%s' % (
+ test_name.replace('#', '.'),
+ time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()),
+ device.serial)
+
+ with ui_capture_dir:
+ with self._env.output_manager.ArchivedTempfile(
+ stream_name, 'logcat') as logcat_file:
+ try:
+ with logcat_monitor.LogcatMonitor(
+ device.adb,
+ filter_specs=local_device_environment.LOGCAT_FILTERS,
+ output_file=logcat_file.name,
+ transform_func=self._test_instance.MaybeDeobfuscateLines
+ ) as logmon:
+ with _LogTestEndpoints(device, test_name):
+ with contextlib_ext.Optional(
+ trace_event.trace(test_name),
+ self._env.trace_output):
+ output = device.StartInstrumentation(
+ target, raw=True, extras=extras, timeout=timeout, retries=0)
+ finally:
+ logmon.Close()
+
+ if logcat_file.Link():
+ logging.info('Logcat saved to %s', logcat_file.Link())
+
+ duration_ms = time_ms() - start_ms
+
+ with contextlib_ext.Optional(
+ trace_event.trace('ProcessResults'),
+ self._env.trace_output):
+ output = self._test_instance.MaybeDeobfuscateLines(output)
+ # TODO(jbudorick): Make instrumentation tests output a JSON so this
+ # doesn't have to parse the output.
+ result_code, result_bundle, statuses = (
+ self._test_instance.ParseAmInstrumentRawOutput(output))
+ results = self._test_instance.GenerateTestResults(
+ result_code, result_bundle, statuses, start_ms, duration_ms,
+ device.product_cpu_abi, self._test_instance.symbolizer)
+
+ if self._env.trace_output:
+ self._SaveTraceData(trace_device_file, device, test['class'])
+
+ def restore_flags():
+ if flags_to_add:
+ self._flag_changers[str(device)].Restore()
+
+ def restore_timeout_scale():
+ if test_timeout_scale:
+ valgrind_tools.SetChromeTimeoutScale(
+ device, self._test_instance.timeout_scale)
+
+ def handle_coverage_data():
+ if self._test_instance.coverage_directory:
+ device.PullFile(coverage_directory,
+ self._test_instance.coverage_directory)
+ device.RunShellCommand(
+ 'rm -f %s' % posixpath.join(coverage_directory, '*'),
+ check_return=True, shell=True)
+
+ def handle_render_test_data():
+ if _IsRenderTest(test):
+ # Render tests do not cause test failure by default. So we have to
+ # check to see if any failure images were generated even if the test
+ # does not fail.
+ try:
+ self._ProcessRenderTestResults(
+ device, render_tests_device_output_dir, results)
+ finally:
+ device.RemovePath(render_tests_device_output_dir,
+ recursive=True, force=True)
+
+ def pull_ui_screen_captures():
+ screenshots = []
+ for filename in device.ListDirectory(ui_capture_dir.name):
+ if filename.endswith('.json'):
+ screenshots.append(pull_ui_screenshot(filename))
+ if screenshots:
+ json_archive_name = 'ui_capture_%s_%s.json' % (
+ test_name.replace('#', '.'),
+ time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()))
+ with self._env.output_manager.ArchivedTempfile(
+ json_archive_name, 'ui_capture', output_manager.Datatype.JSON
+ ) as json_archive:
+ json.dump(screenshots, json_archive)
+ for result in results:
+ result.SetLink('ui screenshot', json_archive.Link())
+
+ def pull_ui_screenshot(filename):
+ source_dir = ui_capture_dir.name
+ json_path = posixpath.join(source_dir, filename)
+ json_data = json.loads(device.ReadFile(json_path))
+ image_file_path = posixpath.join(source_dir, json_data['location'])
+ with self._env.output_manager.ArchivedTempfile(
+ json_data['location'], 'ui_capture', output_manager.Datatype.PNG
+ ) as image_archive:
+ device.PullFile(image_file_path, image_archive.name)
+ json_data['image_link'] = image_archive.Link()
+ return json_data
+
+ # While constructing the TestResult objects, we can parallelize several
+ # steps that involve ADB. These steps should NOT depend on any info in
+ # the results! Things such as whether the test CRASHED have not yet been
+ # determined.
+ post_test_steps = [restore_flags, restore_timeout_scale,
+ handle_coverage_data, handle_render_test_data,
+ pull_ui_screen_captures]
+ if self._env.concurrent_adb:
+ post_test_step_thread_group = reraiser_thread.ReraiserThreadGroup(
+ reraiser_thread.ReraiserThread(f) for f in post_test_steps)
+ post_test_step_thread_group.StartAll(will_block=True)
+ else:
+ for step in post_test_steps:
+ step()
+
+ for result in results:
+ if logcat_file:
+ result.SetLink('logcat', logcat_file.Link())
+
+ # Update the result name if the test used flags.
+ if flags_to_add:
+ for r in results:
+ if r.GetName() == test_name:
+ r.SetName(test_display_name)
+
+ # Add UNKNOWN results for any missing tests.
+ iterable_test = test if isinstance(test, list) else [test]
+ test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
+ results_names = set(r.GetName() for r in results)
+ results.extend(
+ base_test_result.BaseTestResult(u, base_test_result.ResultType.UNKNOWN)
+ for u in test_names.difference(results_names))
+
+ # Update the result type if we detect a crash.
+ try:
+ if DidPackageCrashOnDevice(self._test_instance.test_package, device):
+ for r in results:
+ if r.GetType() == base_test_result.ResultType.UNKNOWN:
+ r.SetType(base_test_result.ResultType.CRASH)
+ except device_errors.CommandTimeoutError:
+ logging.warning('timed out when detecting/dismissing error dialogs')
+ # Attach screenshot to the test to help with debugging the dialog boxes.
+ self._SaveScreenshot(device, screenshot_device_file, test_display_name,
+ results, 'dialog_box_screenshot')
+
+ # Handle failures by:
+ # - optionally taking a screenshot
+ # - logging the raw output at INFO level
+ # - clearing the application state while persisting permissions
+ if any(r.GetType() not in (base_test_result.ResultType.PASS,
+ base_test_result.ResultType.SKIP)
+ for r in results):
+ self._SaveScreenshot(device, screenshot_device_file, test_display_name,
+ results, 'post_test_screenshot')
+
+ logging.info('detected failure in %s. raw output:', test_display_name)
+ for l in output:
+ logging.info(' %s', l)
+ if (not self._env.skip_clear_data
+ and self._test_instance.package_info):
+ permissions = (
+ self._test_instance.apk_under_test.GetPermissions()
+ if self._test_instance.apk_under_test
+ else None)
+ device.ClearApplicationState(self._test_instance.package_info.package,
+ permissions=permissions)
+ else:
+ logging.debug('raw output from %s:', test_display_name)
+ for l in output:
+ logging.debug(' %s', l)
+ if self._test_instance.store_tombstones:
+ tombstones_url = None
+ for result in results:
+ if result.GetType() == base_test_result.ResultType.CRASH:
+ if not tombstones_url:
+ resolved_tombstones = tombstones.ResolveTombstones(
+ device,
+ resolve_all_tombstones=True,
+ include_stack_symbols=False,
+ wipe_tombstones=True,
+ tombstone_symbolizer=self._test_instance.symbolizer)
+ tombstone_filename = 'tombstones_%s_%s' % (
+ time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()),
+ device.serial)
+ with self._env.output_manager.ArchivedTempfile(
+ tombstone_filename, 'tombstones') as tombstone_file:
+ tombstone_file.write('\n'.join(resolved_tombstones))
+ result.SetLink('tombstones', tombstone_file.Link())
+ if self._env.concurrent_adb:
+ post_test_step_thread_group.JoinAll()
+ return results, None
+
  def _GetTestsFromRunner(self):
    """Lists tests by asking the on-device JUnit4 runner.

    Results are cached in a pickle next to the test APK, invalidated by the
    APK's mtime (or, for incremental installs, the newest dex file's mtime).

    Returns:
      The runner's raw test list (parsed JSON).

    Raises:
      device_errors.CommandFailedError: If no device produced a test list.
    """
    test_apk_path = self._test_instance.test_apk.path
    pickle_path = '%s-runner.pickle' % test_apk_path
    # For incremental APKs, the code doesn't live in the apk, so instead check
    # the timestamp of the target's .stamp file.
    if self._test_instance.test_apk_incremental_install_json:
      with open(self._test_instance.test_apk_incremental_install_json) as f:
        data = json.load(f)
      out_dir = constants.GetOutDirectory()
      test_mtime = max(
          os.path.getmtime(os.path.join(out_dir, p)) for p in data['dex_files'])
    else:
      test_mtime = os.path.getmtime(test_apk_path)

    try:
      return instrumentation_test_instance.GetTestsFromPickle(
          pickle_path, test_mtime)
    except instrumentation_test_instance.TestListPickleException as e:
      logging.info('Could not get tests from pickle: %s', e)
    logging.info('Getting tests by having %s list them.',
                 self._test_instance.junit4_runner_class)
    def list_tests(d):
      def _run(dev):
        # The runner writes the test list as JSON into a temp file on the
        # device, which is then pulled and parsed on the host.
        with device_temp_file.DeviceTempFile(
            dev.adb, suffix='.json',
            dir=dev.GetExternalStoragePath()) as dev_test_list_json:
          junit4_runner_class = self._test_instance.junit4_runner_class
          test_package = self._test_instance.test_package
          extras = {
            'log': 'true',
            # Workaround for https://github.com/mockito/mockito/issues/922
            'notPackage': 'net.bytebuddy',
          }
          extras[_EXTRA_TEST_LIST] = dev_test_list_json.name
          target = '%s/%s' % (test_package, junit4_runner_class)
          timeout = 120
          if self._test_instance.wait_for_java_debugger:
            timeout = None
          test_list_run_output = dev.StartInstrumentation(
              target, extras=extras, retries=0, timeout=timeout)
          if any(test_list_run_output):
            logging.error('Unexpected output while listing tests:')
            for line in test_list_run_output:
              logging.error('  %s', line)
          with tempfile_ext.NamedTemporaryDirectory() as host_dir:
            host_file = os.path.join(host_dir, 'list_tests.json')
            dev.PullFile(dev_test_list_json.name, host_file)
            # Note: |host_file| is deliberately rebound here from a path
            # string to the open file object; the path is not needed again.
            with open(host_file, 'r') as host_file:
              return json.load(host_file)

      return crash_handler.RetryOnSystemCrash(_run, d)

    raw_test_lists = self._env.parallel_devices.pMap(list_tests).pGet(None)

    # If all devices failed to list tests, raise an exception.
    # Check that tl is not None and is not empty.
    if all(not tl for tl in raw_test_lists):
      raise device_errors.CommandFailedError(
          'Failed to list tests on any device')

    # Get the first viable list of raw tests
    raw_tests = [tl for tl in raw_test_lists if tl][0]

    instrumentation_test_instance.SaveTestsToPickle(pickle_path, raw_tests)
    return raw_tests
+
  def _SaveTraceData(self, trace_device_file, device, test_class):
    """Merges a device-side Java trace into the host trace output file.

    Reads the JSON trace the test wrote on the device (if any), rewrites its
    pids and timestamps to line up with the host-side Python trace, and
    appends the events to self._env.trace_output. No-op when the device file
    does not exist.
    """
    trace_host_file = self._env.trace_output

    if device.FileExists(trace_device_file.name):
      try:
        java_trace_json = device.ReadFile(trace_device_file.name)
      except IOError:
        raise Exception('error pulling trace file from device')
      finally:
        trace_device_file.close()

      # Use a short stable hash of "<class> (device <serial>)" as a fake pid
      # so events from different devices/classes don't collide when merged.
      process_name = '%s (device %s)' % (test_class, device.serial)
      process_hash = int(hashlib.md5(process_name).hexdigest()[:6], 16)

      java_trace = json.loads(java_trace_json)
      java_trace.sort(key=lambda event: event['ts'])

      # NOTE(review): relies on the device shell defining $EPOCHREALTIME
      # (wall-clock seconds with sub-second precision) -- confirm on very
      # old Android releases.
      get_date_command = 'echo $EPOCHREALTIME'
      device_time = device.RunShellCommand(get_date_command, single_line=True)
      device_time = float(device_time) * 1e6
      system_time = trace_time.Now()
      time_difference = system_time - device_time

      threads_to_add = set()
      for event in java_trace:
        # Ensure thread ID and thread name will be linked in the metadata.
        threads_to_add.add((event['tid'], event['name']))

        event['pid'] = process_hash

        # Adjust time stamp to align with Python trace times (from
        # trace_time.Now()).
        event['ts'] += time_difference

      for tid, thread_name in threads_to_add:
        thread_name_metadata = {'pid': process_hash, 'tid': tid,
                                'ts': 0, 'ph': 'M', 'cat': '__metadata',
                                'name': 'thread_name',
                                'args': {'name': thread_name}}
        java_trace.append(thread_name_metadata)

      process_name_metadata = {'pid': process_hash, 'tid': 0, 'ts': 0,
                               'ph': 'M', 'cat': '__metadata',
                               'name': 'process_name',
                               'args': {'name': process_name}}
      java_trace.append(process_name_metadata)

      java_trace_json = json.dumps(java_trace)
      # Drop the closing ']' so later merges can keep appending events to the
      # same JSON array in the host file.
      java_trace_json = java_trace_json.rstrip(' ]')

      with open(trace_host_file, 'r') as host_handle:
        host_contents = host_handle.readline()

      # If the host file already holds events, splice ours in as a
      # continuation ("...,<events>") instead of opening a new array.
      if host_contents:
        java_trace_json = ',%s' % java_trace_json.lstrip(' [')

      with open(trace_host_file, 'a') as host_handle:
        host_handle.write(java_trace_json)
+
  def _SaveScreenshot(self, device, screenshot_device_file, test_name, results,
                      link_name):
    """Archives a device-side screenshot and links it on every result.

    Pulls |screenshot_device_file| (if it exists) into the output manager and
    attaches its link to each result under |link_name|. The device-side temp
    file is deleted once pulled, even if the pull fails.
    """
    screenshot_filename = '%s-%s.png' % (
        test_name, time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()))
    if device.FileExists(screenshot_device_file.name):
      with self._env.output_manager.ArchivedTempfile(
          screenshot_filename, 'screenshot',
          output_manager.Datatype.PNG) as screenshot_host_file:
        try:
          device.PullFile(screenshot_device_file.name,
                          screenshot_host_file.name)
        finally:
          screenshot_device_file.close()
      for result in results:
        result.SetLink(link_name, screenshot_host_file.Link())
+
+ def _ProcessRenderTestResults(
+ self, device, render_tests_device_output_dir, results):
+
+ failure_images_device_dir = posixpath.join(
+ render_tests_device_output_dir, 'failures')
+ if not device.FileExists(failure_images_device_dir):
+ return
+
+ diff_images_device_dir = posixpath.join(
+ render_tests_device_output_dir, 'diffs')
+
+ golden_images_device_dir = posixpath.join(
+ render_tests_device_output_dir, 'goldens')
+
+ for failure_filename in device.ListDirectory(failure_images_device_dir):
+
+ with self._env.output_manager.ArchivedTempfile(
+ 'fail_%s' % failure_filename, 'render_tests',
+ output_manager.Datatype.PNG) as failure_image_host_file:
+ device.PullFile(
+ posixpath.join(failure_images_device_dir, failure_filename),
+ failure_image_host_file.name)
+ failure_link = failure_image_host_file.Link()
+
+ golden_image_device_file = posixpath.join(
+ golden_images_device_dir, failure_filename)
+ if device.PathExists(golden_image_device_file):
+ with self._env.output_manager.ArchivedTempfile(
+ 'golden_%s' % failure_filename, 'render_tests',
+ output_manager.Datatype.PNG) as golden_image_host_file:
+ device.PullFile(
+ golden_image_device_file, golden_image_host_file.name)
+ golden_link = golden_image_host_file.Link()
+ else:
+ golden_link = ''
+
+ diff_image_device_file = posixpath.join(
+ diff_images_device_dir, failure_filename)
+ if device.PathExists(diff_image_device_file):
+ with self._env.output_manager.ArchivedTempfile(
+ 'diff_%s' % failure_filename, 'render_tests',
+ output_manager.Datatype.PNG) as diff_image_host_file:
+ device.PullFile(
+ diff_image_device_file, diff_image_host_file.name)
+ diff_link = diff_image_host_file.Link()
+ else:
+ diff_link = ''
+
+ jinja2_env = jinja2.Environment(
+ loader=jinja2.FileSystemLoader(_JINJA_TEMPLATE_DIR),
+ trim_blocks=True)
+ template = jinja2_env.get_template(_JINJA_TEMPLATE_FILENAME)
+ # pylint: disable=no-member
+ processed_template_output = template.render(
+ test_name=failure_filename,
+ failure_link=failure_link,
+ golden_link=golden_link,
+ diff_link=diff_link)
+
+ with self._env.output_manager.ArchivedTempfile(
+ '%s.html' % failure_filename, 'render_tests',
+ output_manager.Datatype.HTML) as html_results:
+ html_results.write(processed_template_output)
+ html_results.flush()
+ for result in results:
+ result.SetLink(failure_filename, html_results.Link())
+
+ #override
+ def _ShouldRetry(self, test, result):
+ # We've tried to disable retries in the past with mixed results.
+ # See crbug.com/619055 for historical context and crbug.com/797002
+ # for ongoing efforts.
+ del test, result
+ return True
+
+ #override
+ def _ShouldShard(self):
+ return True
+
+ @classmethod
+ def _GetTimeoutScaleFromAnnotations(cls, annotations):
+ try:
+ return int(annotations.get('TimeoutScale', {}).get('value', 1))
+ except ValueError as e:
+ logging.warning("Non-integer value of TimeoutScale ignored. (%s)", str(e))
+ return 1
+
+ @classmethod
+ def _GetTimeoutFromAnnotations(cls, annotations, test_name):
+ for k, v in TIMEOUT_ANNOTATIONS:
+ if k in annotations:
+ timeout = v
+ break
+ else:
+ logging.warning('Using default 1 minute timeout for %s', test_name)
+ timeout = 60
+
+ timeout *= cls._GetTimeoutScaleFromAnnotations(annotations)
+
+ return timeout
+
+
def _IsRenderTest(test):
  """Determines if a test or list of tests has a RenderTest amongst them."""
  tests = test if isinstance(test, list) else [test]
  return any(
      RENDER_TEST_FEATURE_ANNOTATION in t['annotations'].get(
          FEATURE_ANNOTATION, {}).get('value', ())
      for t in tests)
diff --git a/deps/v8/build/android/pylib/local/device/local_device_instrumentation_test_run_test.py b/deps/v8/build/android/pylib/local/device/local_device_instrumentation_test_run_test.py
new file mode 100755
index 0000000000..fb96ee6bbd
--- /dev/null
+++ b/deps/v8/build/android/pylib/local/device/local_device_instrumentation_test_run_test.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env vpython
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for local_device_instrumentation_test_run."""
+
+# pylint: disable=protected-access
+
+import unittest
+
+from pylib.base import base_test_result
+from pylib.base import mock_environment
+from pylib.base import mock_test_instance
+from pylib.local.device import local_device_instrumentation_test_run
+
class LocalDeviceInstrumentationTestRunTest(unittest.TestCase):
  """Tests for LocalDeviceInstrumentationTestRun._ShouldRetry."""

  # TODO(crbug.com/797002): Decide whether the _ShouldRetry hook is worth
  # retaining and remove these tests if not.

  def setUp(self):
    super(LocalDeviceInstrumentationTestRunTest, self).setUp()
    # The mock environment/instance construction was previously triplicated
    # across the test methods.
    self._env = mock_environment.MockEnvironment()
    self._ti = mock_test_instance.MockTestInstance()
    self._obj = (local_device_instrumentation_test_run
                 .LocalDeviceInstrumentationTestRun(self._env, self._ti))

  @staticmethod
  def _MakeTest(method, annotations):
    """Builds a minimal test dict of the shape _ShouldRetry receives."""
    return {
        'annotations': annotations,
        'class': 'SadTest',
        'method': method,
        'is_junit4': True,
    }

  def testShouldRetry_failure(self):
    test = self._MakeTest('testFailure', {})
    result = base_test_result.BaseTestResult(
        'SadTest.testFailure', base_test_result.ResultType.FAIL)
    self.assertTrue(self._obj._ShouldRetry(test, result))

  def testShouldRetry_retryOnFailure(self):
    test = self._MakeTest('testRetryOnFailure', {'RetryOnFailure': None})
    result = base_test_result.BaseTestResult(
        'SadTest.testRetryOnFailure', base_test_result.ResultType.FAIL)
    self.assertTrue(self._obj._ShouldRetry(test, result))

  def testShouldRetry_notRun(self):
    test = self._MakeTest('testNotRun', {})
    result = base_test_result.BaseTestResult(
        'SadTest.testNotRun', base_test_result.ResultType.NOTRUN)
    self.assertTrue(self._obj._ShouldRetry(test, result))
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/deps/v8/build/android/pylib/local/device/local_device_linker_test_run.py b/deps/v8/build/android/pylib/local/device/local_device_linker_test_run.py
new file mode 100644
index 0000000000..2a1520e003
--- /dev/null
+++ b/deps/v8/build/android/pylib/local/device/local_device_linker_test_run.py
@@ -0,0 +1,75 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import sys
+import traceback
+
+from pylib.base import base_test_result
+from pylib.linker import test_case
+from pylib.local.device import local_device_environment
+from pylib.local.device import local_device_test_run
+
+
class LinkerExceptionTestResult(base_test_result.BaseTestResult):
  """Test result corresponding to a python exception in a host-custom test."""

  def __init__(self, test_name, exc_info):
    """Constructs a LinkerExceptionTestResult object.

    Args:
      test_name: name of the test which raised an exception.
      exc_info: exception info, ostensibly from sys.exc_info().
    """
    exc_type, exc_value, exc_traceback = exc_info
    trace_info = ''.join(
        traceback.format_exception(exc_type, exc_value, exc_traceback))
    log_msg = 'Exception:\n' + trace_info
    full_log = "%s %s" % (exc_type, log_msg)

    super(LinkerExceptionTestResult, self).__init__(
        test_name, base_test_result.ResultType.FAIL, log=full_log)
+
+
class LocalDeviceLinkerTestRun(local_device_test_run.LocalDeviceTestRun):
  """Runs linker test cases on locally attached devices."""

  def SetUp(self):
    """Installs the test APK on every healthy device in parallel."""
    @local_device_environment.handle_shard_failures_with(
        on_failure=self._env.BlacklistDevice)
    def individual_device_set_up(dev):
      dev.Install(self._test_instance.test_apk)

    self._env.parallel_devices.pMap(individual_device_set_up)

  def TearDown(self):
    pass

  def TestPackage(self):
    pass

  def _CreateShards(self, tests):
    # Each test case acts as its own shard.
    return tests

  def _GetTests(self):
    return self._test_instance.GetTests()

  def _GetUniqueTestName(self, test):
    return test.qualified_name

  def _RunTest(self, device, test):
    """Runs one linker test case, converting python exceptions into FAILs."""
    assert isinstance(test, test_case.LinkerTestCaseBase)

    try:
      result = test.Run(device)
    except Exception:  # pylint: disable=broad-except
      logging.exception('Caught exception while trying to run test: ' +
                        test.tagged_name)
      result = LinkerExceptionTestResult(test.tagged_name, sys.exc_info())

    return result, None

  def _ShouldShard(self):
    return True
diff --git a/deps/v8/build/android/pylib/local/device/local_device_monkey_test_run.py b/deps/v8/build/android/pylib/local/device/local_device_monkey_test_run.py
new file mode 100644
index 0000000000..fe178c8fdb
--- /dev/null
+++ b/deps/v8/build/android/pylib/local/device/local_device_monkey_test_run.py
@@ -0,0 +1,126 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+
+from devil.android import device_errors
+from devil.android.sdk import intent
+from pylib import constants
+from pylib.base import base_test_result
+from pylib.local.device import local_device_test_run
+
+
+_CHROME_PACKAGE = constants.PACKAGE_INFO['chrome'].package
+
class LocalDeviceMonkeyTestRun(local_device_test_run.LocalDeviceTestRun):
  """Runs the Android 'monkey' UI-fuzzing tool against a locally attached
  device and reports PASS only if the app process survived the run."""

  def __init__(self, env, test_instance):
    super(LocalDeviceMonkeyTestRun, self).__init__(env, test_instance)

  def TestPackage(self):
    return 'monkey'

  #override
  def SetUp(self):
    pass

  #override
  def _RunTest(self, device, test):
    """Runs the monkey tool once on |device|.

    Returns:
      A (BaseTestResult, None) tuple; PASS requires both the expected
      'Events injected' line in the output and an unchanged app PID.
    """
    device.ClearApplicationState(self._test_instance.package)

    # Chrome crashes are not always caught by Monkey test runner.
    # Launch Chrome and verify Chrome has the same PID before and after
    # the test.
    device.StartActivity(
        intent.Intent(package=self._test_instance.package,
                      activity=self._test_instance.activity,
                      action='android.intent.action.MAIN'),
        blocking=True, force_stop=True)
    before_pids = device.GetPids(self._test_instance.package)

    output = ''
    if before_pids:
      if len(before_pids.get(self._test_instance.package, [])) > 1:
        raise Exception(
            'At most one instance of process %s expected but found pids: '
            '%s' % (self._test_instance.package, before_pids))
      output = '\n'.join(self._LaunchMonkeyTest(device))
      after_pids = device.GetPids(self._test_instance.package)

    # NOTE: |after_pids| is only assigned when |before_pids| is truthy; the
    # elif chain below never reads it otherwise, since the first branch
    # triggers whenever the package is absent from |before_pids|.
    crashed = True
    if not self._test_instance.package in before_pids:
      logging.error('Failed to start the process.')
    elif not self._test_instance.package in after_pids:
      logging.error('Process %s has died.',
                    before_pids[self._test_instance.package])
    elif (before_pids[self._test_instance.package] !=
          after_pids[self._test_instance.package]):
      logging.error('Detected process restart %s -> %s',
                    before_pids[self._test_instance.package],
                    after_pids[self._test_instance.package])
    else:
      crashed = False

    # The monkey tool prints this line only when it injected the full
    # requested number of events.
    success_pattern = 'Events injected: %d' % self._test_instance.event_count
    if success_pattern in output and not crashed:
      result = base_test_result.BaseTestResult(
          test, base_test_result.ResultType.PASS, log=output)
    else:
      result = base_test_result.BaseTestResult(
          test, base_test_result.ResultType.FAIL, log=output)
      if 'chrome' in self._test_instance.package:
        logging.warning('Starting MinidumpUploadService...')
        # TODO(jbudorick): Update this after upstreaming.
        minidump_intent = intent.Intent(
            action='%s.crash.ACTION_FIND_ALL' % _CHROME_PACKAGE,
            package=self._test_instance.package,
            activity='%s.crash.MinidumpUploadService' % _CHROME_PACKAGE)
        try:
          device.RunShellCommand(
              ['am', 'startservice'] + minidump_intent.am_args,
              as_root=True, check_return=True)
        except device_errors.CommandFailedError:
          logging.exception('Failed to start MinidumpUploadService')

    return result, None

  #override
  def TearDown(self):
    pass

  #override
  def _CreateShards(self, tests):
    return tests

  #override
  def _ShouldShard(self):
    # TODO(mikecase): Run Monkey test concurrently on each attached device.
    return False

  #override
  def _GetTests(self):
    return ['MonkeyTest']

  def _LaunchMonkeyTest(self, device):
    """Invokes the monkey shell tool; always kills it again on the way out.

    Returns:
      The monkey command's output lines.
    """
    try:
      cmd = ['monkey',
             '-p', self._test_instance.package,
             '--throttle', str(self._test_instance.throttle),
             '-s', str(self._test_instance.seed),
             '--monitor-native-crashes',
             '--kill-process-after-error']
      for category in self._test_instance.categories:
        cmd.extend(['-c', category])
      for _ in range(self._test_instance.verbose_count):
        cmd.append('-v')
      cmd.append(str(self._test_instance.event_count))
      return device.RunShellCommand(
          cmd, timeout=self._test_instance.timeout, check_return=True)
    finally:
      try:
        # Kill the monkey test process on the device. If you manually
        # interrupt the test run, this will prevent the monkey test from
        # continuing to run.
        device.KillAll('com.android.commands.monkey')
      except device_errors.CommandFailedError:
        pass
diff --git a/deps/v8/build/android/pylib/local/device/local_device_perf_test_run.py b/deps/v8/build/android/pylib/local/device/local_device_perf_test_run.py
new file mode 100644
index 0000000000..bc828408a0
--- /dev/null
+++ b/deps/v8/build/android/pylib/local/device/local_device_perf_test_run.py
@@ -0,0 +1,538 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import io
+import json
+import logging
+import os
+import pickle
+import shutil
+import tempfile
+import threading
+import time
+import zipfile
+
+from devil.android import battery_utils
+from devil.android import device_errors
+from devil.android import device_list
+from devil.android import device_utils
+from devil.android import forwarder
+from devil.android.tools import device_recovery
+from devil.android.tools import device_status
+from devil.utils import cmd_helper
+from devil.utils import parallelizer
+from devil.utils import reraiser_thread
+from pylib import constants
+from pylib.base import base_test_result
+from pylib.constants import host_paths
+from pylib.local.device import local_device_environment
+from pylib.local.device import local_device_test_run
+from py_trace_event import trace_event
+from py_utils import contextlib_ext
+
+
class HeartBeat(object):
  """Periodically logs the test currently being run on a shard."""

  def __init__(self, shard, wait_time=60*10):
    """ HeartBeat Logger constructor.

    Args:
      shard: A perf test runner device shard.
      wait_time: time to wait between heartbeat messages.
    """
    self._shard = shard
    self._running = False
    self._timer = None
    self._wait_time = wait_time

  def Start(self):
    if not self._running:
      self._running = True
      self._ScheduleNext()

  def Stop(self):
    if self._running:
      # Clear the flag first so a _LogMessage racing with us won't re-arm.
      self._running = False
      if self._timer:
        self._timer.cancel()

  def _ScheduleNext(self):
    self._timer = threading.Timer(self._wait_time, self._LogMessage)
    # Daemonize so a stray heartbeat timer can never keep the process alive.
    self._timer.daemon = True
    self._timer.start()

  def _LogMessage(self):
    logging.info('Currently working on test %s', self._shard.current_test)
    # Only re-arm while running. Previously the timer re-armed
    # unconditionally, so a heartbeat that fired during Stop() would keep
    # logging (and keep a live timer thread) forever.
    if self._running:
      self._ScheduleNext()
+
+
class TestShard(object):
  """Runs a bucket of perf test steps and persists their results.

  Subclasses supply the execution context (a specific device or the host)
  via _LogTest / _ExtendPersistedResult / _ExtendCmd.
  """

  def __init__(self, env, test_instance, tests, retries=3, timeout=None):
    logging.info('Create shard for the following tests:')
    for t in tests:
      logging.info(' %s', t)
    self._current_test = None
    self._env = env
    self._heart_beat = HeartBeat(self)
    self._index = None
    self._output_dir = None
    self._retries = retries
    self._test_instance = test_instance
    self._tests = tests
    self._timeout = timeout

  def _TestSetUp(self, test):
    # An output dir is only needed when chartjson data is collected or the
    # step asks for its output dir to be archived.
    if (self._test_instance.collect_chartjson_data
        or self._tests[test].get('archive_output_dir')):
      self._output_dir = tempfile.mkdtemp()

    self._current_test = test
    self._heart_beat.Start()

  def _RunSingleTest(self, test):
    """Runs one test step in a subshell and classifies its outcome.

    Returns:
      The base_test_result.ResultType for this run of |test|.
    """
    self._test_instance.WriteBuildBotJson(self._output_dir)

    timeout = self._tests[test].get('timeout', self._timeout)
    cmd = self._CreateCmd(test)
    cwd = os.path.abspath(host_paths.DIR_SOURCE_ROOT)

    self._LogTest(test, cmd, timeout)

    try:
      start_time = time.time()

      with contextlib_ext.Optional(
          trace_event.trace(test),
          self._env.trace_output):
        exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout(
            cmd, timeout, cwd=cwd, shell=True)
      end_time = time.time()
      chart_json_output = self._test_instance.ReadChartjsonOutput(
          self._output_dir)
      if exit_code == 0:
        result_type = base_test_result.ResultType.PASS
      else:
        result_type = base_test_result.ResultType.FAIL
    except cmd_helper.TimeoutError as e:
      end_time = time.time()
      exit_code = -1
      output = e.output
      chart_json_output = ''
      result_type = base_test_result.ResultType.TIMEOUT
    return self._ProcessTestResult(test, cmd, start_time, end_time, exit_code,
                                   output, chart_json_output, result_type)

  def _CreateCmd(self, test):
    """Builds the shell command line for |test|."""
    cmd = []
    if self._test_instance.dry_run:
      cmd.append('echo')
    cmd.append(self._tests[test]['cmd'])
    if self._output_dir:
      cmd.append('--output-dir=%s' % self._output_dir)
    return ' '.join(self._ExtendCmd(cmd))

  def _ExtendCmd(self, cmd): # pylint: disable=no-self-use
    # Hook for subclasses to append context-specific flags.
    return cmd

  def _LogTest(self, _test, _cmd, _timeout):
    raise NotImplementedError

  def _LogTestExit(self, test, exit_code, duration):
    # pylint: disable=no-self-use
    logging.info('%s : exit_code=%d in %d secs.', test, exit_code, duration)

  def _ExtendPersistedResult(self, persisted_result):
    raise NotImplementedError

  def _ProcessTestResult(self, test, cmd, start_time, end_time, exit_code,
                         output, chart_json_output, result_type):
    """Assembles the persisted result dict, saves it, and returns the type."""
    if exit_code is None:
      exit_code = -1

    self._LogTestExit(test, exit_code, end_time - start_time)

    archive_bytes = (self._ArchiveOutputDir()
                     if self._tests[test].get('archive_output_dir')
                     else None)
    persisted_result = {
        'name': test,
        'output': [output],
        'chartjson': chart_json_output,
        'archive_bytes': archive_bytes,
        'exit_code': exit_code,
        'result_type': result_type,
        'start_time': start_time,
        'end_time': end_time,
        'total_time': end_time - start_time,
        'cmd': cmd,
    }
    self._ExtendPersistedResult(persisted_result)
    self._SaveResult(persisted_result)
    return result_type

  def _ArchiveOutputDir(self):
    """Archive all files in the output dir, and return as compressed bytes."""
    with io.BytesIO() as archive:
      with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as contents:
        num_files = 0
        for absdir, _, files in os.walk(self._output_dir):
          reldir = os.path.relpath(absdir, self._output_dir)
          for filename in files:
            src_path = os.path.join(absdir, filename)
            # We use normpath to turn './file.txt' into just 'file.txt'.
            dst_path = os.path.normpath(os.path.join(reldir, filename))
            contents.write(src_path, dst_path)
            num_files += 1
      if num_files:
        logging.info('%d files in the output dir were archived.', num_files)
      else:
        logging.warning('No files in the output dir. Archive is empty.')
      return archive.getvalue()

  @staticmethod
  def _SaveResult(result):
    """Pickles |result| under PERF_OUTPUT_DIR, merging in the output of any
    previous run of the same step."""
    pickled = os.path.join(constants.PERF_OUTPUT_DIR, result['name'])
    if os.path.exists(pickled):
      # Use open() in binary mode: pickle requires a binary stream, and the
      # file() builtin used previously does not exist in Python 3.
      with open(pickled, 'rb') as f:
        previous = pickle.load(f)
        result['output'] = previous['output'] + result['output']
    with open(pickled, 'wb') as f:
      pickle.dump(result, f)

  def _TestTearDown(self):
    if self._output_dir:
      shutil.rmtree(self._output_dir, ignore_errors=True)
      self._output_dir = None
    self._heart_beat.Stop()
    self._current_test = None

  @property
  def current_test(self):
    return self._current_test
+
+
class DeviceTestShard(TestShard):
  """TestShard that runs its bucket of steps against one attached device."""

  def __init__(
      self, env, test_instance, device, index, tests, retries=3, timeout=None):
    super(DeviceTestShard, self).__init__(
        env, test_instance, tests, retries, timeout)
    self._battery = battery_utils.BatteryUtils(device) if device else None
    self._device = device
    self._index = index

  @local_device_environment.handle_shard_failures
  def RunTestsOnShard(self):
    """Runs every test in the bucket, retrying failures and attempting
    device recovery after each non-PASS attempt.

    Returns:
      A base_test_result.TestRunResults with one result per test.
    """
    results = base_test_result.TestRunResults()
    for test in self._tests:
      tries_left = self._retries
      result_type = None
      while (result_type != base_test_result.ResultType.PASS
             and tries_left > 0):
        try:
          self._TestSetUp(test)
          result_type = self._RunSingleTest(test)
        except device_errors.CommandTimeoutError:
          result_type = base_test_result.ResultType.TIMEOUT
        except (device_errors.CommandFailedError,
                device_errors.DeviceUnreachableError):
          logging.exception('Exception when executing %s.', test)
          result_type = base_test_result.ResultType.FAIL
        finally:
          self._TestTearDown()
        # NOTE: tries_left is only decremented on a non-PASS attempt; a
        # PASS exits the while loop via its condition instead.
        if result_type != base_test_result.ResultType.PASS:
          try:
            device_recovery.RecoverDevice(self._device, self._env.blacklist)
          except device_errors.CommandTimeoutError:
            logging.exception(
                'Device failed to recover after failing %s.', test)
          tries_left -= 1

      results.AddResult(base_test_result.BaseTestResult(test, result_type))
    return results

  def _LogTestExit(self, test, exit_code, duration):
    logging.info('%s : exit_code=%d in %d secs on device %s',
                 test, exit_code, duration, str(self._device))

  @trace_event.traced
  def _TestSetUp(self, test):
    """Checks device health, battery charge/temperature and screen state
    before delegating to TestShard._TestSetUp."""
    if not self._device.IsOnline():
      msg = 'Device %s is unresponsive.' % str(self._device)
      raise device_errors.DeviceUnreachableError(msg)

    logging.info('Charge level: %s%%',
                 str(self._battery.GetBatteryInfo().get('level')))
    if self._test_instance.min_battery_level:
      self._battery.ChargeDeviceToLevel(self._test_instance.min_battery_level)

    logging.info('temperature: %s (0.1 C)',
                 str(self._battery.GetBatteryInfo().get('temperature')))
    if self._test_instance.max_battery_temp:
      self._battery.LetBatteryCoolToTemperature(
          self._test_instance.max_battery_temp)

    if not self._device.IsScreenOn():
      self._device.SetScreen(True)

    super(DeviceTestShard, self)._TestSetUp(test)

  def _LogTest(self, test, cmd, timeout):
    logging.debug("Running %s with command '%s' on shard %s with timeout %d",
                  test, cmd, str(self._index), timeout)

  def _ExtendCmd(self, cmd):
    # Tell the test command which device to target.
    cmd.extend(['--device=%s' % str(self._device)])
    return cmd

  def _ExtendPersistedResult(self, persisted_result):
    persisted_result['host_test'] = False
    persisted_result['device'] = str(self._device)

  @trace_event.traced
  def _TestTearDown(self):
    """Unmaps forwarded device ports, then runs the base teardown."""
    try:
      logging.info('Unmapping device ports for %s.', self._device)
      forwarder.Forwarder.UnmapAllDevicePorts(self._device)
    except Exception: # pylint: disable=broad-except
      logging.exception('Exception when resetting ports.')
    finally:
      super(DeviceTestShard, self)._TestTearDown()
+
class HostTestShard(TestShard):
  """TestShard that runs its bucket of steps directly on the host."""

  def __init__(self, env, test_instance, tests, retries=3, timeout=None):
    super(HostTestShard, self).__init__(
        env, test_instance, tests, retries, timeout)

  @local_device_environment.handle_shard_failures
  def RunTestsOnShard(self):
    """Runs every test in the bucket, retrying until it passes or the
    attempt budget is exhausted.

    Returns:
      A base_test_result.TestRunResults with one result per test.
    """
    results = base_test_result.TestRunResults()
    for test in self._tests:
      # NOTE(review): the host shard allows retries + 1 attempts while
      # DeviceTestShard allows only retries; it is unclear whether this
      # asymmetry is intentional — confirm before changing either.
      tries_left = self._retries + 1
      result_type = None
      while (result_type != base_test_result.ResultType.PASS
             and tries_left > 0):
        try:
          self._TestSetUp(test)
          result_type = self._RunSingleTest(test)
        finally:
          self._TestTearDown()
          tries_left -= 1
      results.AddResult(base_test_result.BaseTestResult(test, result_type))
    return results

  def _LogTest(self, test, cmd, timeout):
    logging.debug("Running %s with command '%s' on host shard with timeout %d",
                  test, cmd, timeout)

  def _ExtendPersistedResult(self, persisted_result):
    persisted_result['host_test'] = True
+
+
class LocalDevicePerfTestRun(local_device_test_run.LocalDeviceTestRun):
  """Runs perf test steps, affinitized to devices, plus host-only steps."""

  _DEFAULT_TIMEOUT = 5 * 60 * 60  # 5 hours.
  _CONFIG_VERSION = 1

  def __init__(self, env, test_instance):
    super(LocalDevicePerfTestRun, self).__init__(env, test_instance)
    self._devices = None
    self._env = env
    self._no_device_tests = {}
    self._test_buckets = []
    self._test_instance = test_instance
    self._timeout = None if test_instance.no_timeout else self._DEFAULT_TIMEOUT

  #override
  def SetUp(self):
    # Start each run with a clean perf output directory.
    if os.path.exists(constants.PERF_OUTPUT_DIR):
      shutil.rmtree(constants.PERF_OUTPUT_DIR)
    os.makedirs(constants.PERF_OUTPUT_DIR)

  #override
  def TearDown(self):
    pass

  def _GetStepsFromDict(self):
    """Returns the steps config, synthesized from single_step or loaded
    from the steps file.

    Raises:
      TestDictVersionError: if the steps file has an unsupported version.
      PerfTestRunGetStepsError: if neither single_step nor steps is set.
    """
    # From where this is called one of these two must be set.
    if self._test_instance.single_step:
      return {
        'version': self._CONFIG_VERSION,
        'steps': {
          'single_step': {
            'device_affinity': 0,
            'cmd': self._test_instance.single_step
          },
        }
      }
    if self._test_instance.steps:
      # Use open() rather than the file() builtin, which only exists in
      # Python 2.
      with open(self._test_instance.steps, 'r') as f:
        steps = json.load(f)
      if steps['version'] != self._CONFIG_VERSION:
        raise TestDictVersionError(
            'Version is expected to be %d but was %d' % (self._CONFIG_VERSION,
                                                         steps['version']))
      return steps
    raise PerfTestRunGetStepsError(
        'Neither single_step or steps set in test_instance.')

  def _SplitTestsByAffinity(self):
    # This splits tests by their device affinity so that the same tests always
    # run on the same devices. This is important for perf tests since different
    # devices might yield slightly different performance results.
    test_dict = self._GetStepsFromDict()
    for test, test_config in sorted(test_dict['steps'].iteritems()):
      try:
        affinity = test_config.get('device_affinity')
        if affinity is None:
          # Steps with no affinity run on the host.
          self._no_device_tests[test] = test_config
        else:
          if len(self._test_buckets) < affinity + 1:
            while len(self._test_buckets) != affinity + 1:
              self._test_buckets.append(collections.OrderedDict())
          self._test_buckets[affinity][test] = test_config
      except KeyError:
        logging.exception(
            'Test config for %s is bad.\n Config:%s', test, str(test_config))

  @staticmethod
  def _GetAllDevices(active_devices, devices_path):
    """Returns the sorted device list used for affinity, falling back to
    |active_devices| when the persistent list is missing or empty."""
    try:
      if devices_path:
        devices = [device_utils.DeviceUtils(s)
                   for s in device_list.GetPersistentDeviceList(devices_path)]
        if not devices and active_devices:
          logging.warning('%s is empty. Falling back to active devices.',
                          devices_path)
          devices = active_devices
      else:
        logging.warning('Known devices file path not being passed. For device '
                        'affinity to work properly, it must be passed.')
        devices = active_devices
    except IOError as e:
      logging.error('Unable to find %s [%s]', devices_path, e)
      devices = active_devices
    return sorted(devices)

  #override
  def RunTests(self, results):
    """Runs host-only and device-affinitized shards concurrently, extending
    |results| with each shard's TestRunResults."""
    def run_no_devices_tests():
      if not self._no_device_tests:
        return []
      s = HostTestShard(self._env, self._test_instance, self._no_device_tests,
                        retries=3, timeout=self._timeout)
      return [s.RunTestsOnShard()]

    def device_shard_helper(shard_id):
      if device_status.IsBlacklisted(
          str(self._devices[shard_id]), self._env.blacklist):
        logging.warning('Device %s is not active. Will not create shard %s.',
                        str(self._devices[shard_id]), shard_id)
        return None
      s = DeviceTestShard(self._env, self._test_instance,
                          self._devices[shard_id], shard_id,
                          self._test_buckets[shard_id],
                          retries=self._env.max_tries, timeout=self._timeout)
      return s.RunTestsOnShard()

    def run_devices_tests():
      if not self._test_buckets:
        return []
      if self._devices is None:
        self._devices = self._GetAllDevices(
            self._env.devices, self._test_instance.known_devices_file)

      device_indices = range(min(len(self._devices), len(self._test_buckets)))
      shards = parallelizer.Parallelizer(device_indices).pMap(
          device_shard_helper)
      return [x for x in shards.pGet(self._timeout) if x is not None]

    # Affinitize the tests.
    self._SplitTestsByAffinity()
    if not self._test_buckets and not self._no_device_tests:
      raise local_device_test_run.NoTestsError()
    host_test_results, device_test_results = reraiser_thread.RunAsync(
        [run_no_devices_tests, run_devices_tests])

    # Ideally, results would be populated as early as possible, so that in the
    # event of an exception or timeout, the caller will still have partially
    # populated results. This looks like it can be done prior to dispatching
    # tests, but will hold off on making this change unless it looks like it
    # might provide utility.
    results.extend(host_test_results + device_test_results)

  # override
  def TestPackage(self):
    return 'perf'

  # override
  def _CreateShards(self, _tests):
    raise NotImplementedError

  # override
  def _GetTests(self):
    return self._test_buckets

  # override
  def _RunTest(self, _device, _test):
    raise NotImplementedError

  # override
  def _ShouldShard(self):
    return False
+
+
class OutputJsonList(LocalDevicePerfTestRun):
  """Pseudo test run that only writes the json list of tests."""

  # override
  def SetUp(self):
    # No perf output directory is needed for this operation.
    pass

  # override
  def RunTests(self, results):
    run_results = base_test_result.TestRunResults()
    run_results.AddResult(base_test_result.BaseTestResult(
        'OutputJsonList', self._test_instance.OutputJsonList()))

    # Populate |results| immediately so that partial results survive an
    # exception or timeout in the caller.
    results.append(run_results)

  # override
  def _CreateShards(self, _tests):
    raise NotImplementedError

  # override
  def _RunTest(self, _device, _test):
    raise NotImplementedError
+
+
class PrintStep(LocalDevicePerfTestRun):
  """Pseudo test run that only prints the output of a single test step."""

  # override
  def SetUp(self):
    # No perf output directory is needed for this operation.
    pass

  # override
  def RunTests(self, results):
    run_results = base_test_result.TestRunResults()
    run_results.AddResult(base_test_result.BaseTestResult(
        'PrintStep', self._test_instance.PrintTestOutput()))

    # Populate |results| immediately so that partial results survive an
    # exception or timeout in the caller.
    results.append(run_results)

  # override
  def _CreateShards(self, _tests):
    raise NotImplementedError

  # override
  def _RunTest(self, _device, _test):
    raise NotImplementedError
+
+
class TestDictVersionError(Exception):
  """Raised when a steps config file has an unsupported version."""
  pass
+
class PerfTestRunGetStepsError(Exception):
  """Raised when neither single_step nor steps is set on the test instance."""
  pass
diff --git a/deps/v8/build/android/pylib/local/device/local_device_test_run.py b/deps/v8/build/android/pylib/local/device/local_device_test_run.py
new file mode 100644
index 0000000000..62adfabfad
--- /dev/null
+++ b/deps/v8/build/android/pylib/local/device/local_device_test_run.py
@@ -0,0 +1,251 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import fnmatch
+import logging
+import posixpath
+import signal
+import thread
+import threading
+
+from devil import base_error
+from devil.android import crash_handler
+from devil.android import device_errors
+from devil.android.sdk import version_codes
+from devil.android.tools import device_recovery
+from devil.utils import signal_handler
+from pylib import valgrind_tools
+from pylib.base import base_test_result
+from pylib.base import test_run
+from pylib.base import test_collection
+from pylib.local.device import local_device_environment
+
+
+_SIGTERM_TEST_LOG = (
+ ' Suite execution terminated, probably due to swarming timeout.\n'
+ ' Your test may not have run.')
+
+
def SubstituteDeviceRoot(device_path, device_root):
  """Resolves a device path spec against the default device root.

  Args:
    device_path: None/empty (use the root), a string (used verbatim), or a
        list of components where falsy entries are replaced by the root.
    device_root: the default device root path.

  Returns:
    The resolved device path string.
  """
  if not device_path:
    return device_root
  if isinstance(device_path, list):
    return posixpath.join(*[p or device_root for p in device_path])
  return device_path
+
+
class TestsTerminated(Exception):
  """Raised to abort test execution after SIGTERM is received."""
  pass
+
+
class InvalidShardingSettings(Exception):
  """Raised when external sharding parameters are inconsistent."""

  def __init__(self, shard_index, total_shards):
    message = ('Invalid sharding settings. shard_index: %d total_shards: %d'
               % (shard_index, total_shards))
    super(InvalidShardingSettings, self).__init__(message)
+
+
class LocalDeviceTestRun(test_run.TestRun):
  """Base class for test runs that execute on locally attached devices,
  handling retries, sharding, SIGTERM shutdown and device recovery."""

  def __init__(self, env, test_instance):
    super(LocalDeviceTestRun, self).__init__(env, test_instance)
    # Maps str(device) -> valgrind tool instance, lazily built by GetTool.
    self._tools = {}
    env.SetPreferredAbis(test_instance.GetPreferredAbis())

  #override
  def RunTests(self, results):
    """Runs all tests across the environment's devices.

    Retries failing tests up to max_tries times, appending one
    TestRunResults to |results| per try. A SIGTERM marks all still-unknown
    tests as TIMEOUT and stops execution.
    """
    tests = self._GetTests()

    exit_now = threading.Event()

    @local_device_environment.handle_shard_failures
    def run_tests_on_device(dev, tests, results):
      # |tests| here is either a plain list (unsharded) or a shared
      # TestCollection that all devices pull from.
      for test in tests:
        if exit_now.isSet():
          thread.exit()

        result = None
        rerun = None
        try:
          result, rerun = crash_handler.RetryOnSystemCrash(
              lambda d, t=test: self._RunTest(d, t),
              device=dev)
          if isinstance(result, base_test_result.BaseTestResult):
            results.AddResult(result)
          elif isinstance(result, list):
            results.AddResults(result)
          else:
            raise Exception(
                'Unexpected result type: %s' % type(result).__name__)
        except device_errors.CommandTimeoutError:
          # A group (list) of tests that timed out is marked TIMEOUT
          # member by member.
          if isinstance(test, list):
            results.AddResults(
                base_test_result.BaseTestResult(
                    self._GetUniqueTestName(t),
                    base_test_result.ResultType.TIMEOUT)
                for t in test)
          else:
            results.AddResult(
                base_test_result.BaseTestResult(
                    self._GetUniqueTestName(test),
                    base_test_result.ResultType.TIMEOUT))
        except Exception as e: # pylint: disable=broad-except
          if isinstance(tests, test_collection.TestCollection):
            rerun = test
          if (isinstance(e, device_errors.DeviceUnreachableError)
              or not isinstance(e, base_error.BaseError)):
            # If we get a device error but believe the device is still
            # reachable, attempt to continue using it. Otherwise, raise
            # the exception and terminate this run_tests_on_device call.
            raise
        finally:
          if isinstance(tests, test_collection.TestCollection):
            # Return the test to the shared pool so another device can pick
            # it up, and mark this unit of work done either way.
            if rerun:
              tests.add(rerun)
            tests.test_completed()

      logging.info('Finished running tests on this device.')

    def stop_tests(_signum, _frame):
      logging.critical('Received SIGTERM. Stopping test execution.')
      exit_now.set()
      raise TestsTerminated()

    try:
      with signal_handler.AddSignalHandler(signal.SIGTERM, stop_tests):
        tries = 0
        while tries < self._env.max_tries and tests:
          logging.info('STARTING TRY #%d/%d', tries + 1, self._env.max_tries)
          if tries > 0 and self._env.recover_devices:
            if any(d.build_version_sdk == version_codes.LOLLIPOP_MR1
                   for d in self._env.devices):
              logging.info(
                  'Attempting to recover devices due to known issue on L MR1. '
                  'See crbug.com/787056 for details.')
              self._env.parallel_devices.pMap(
                  device_recovery.RecoverDevice, None)
            elif tries + 1 == self._env.max_tries:
              logging.info(
                  'Attempting to recover devices prior to last test attempt.')
              self._env.parallel_devices.pMap(
                  device_recovery.RecoverDevice, None)
          logging.info('Will run %d tests on %d devices: %s',
                       len(tests), len(self._env.devices),
                       ', '.join(str(d) for d in self._env.devices))
          for t in tests:
            logging.debug(' %s', t)

          try_results = base_test_result.TestRunResults()
          test_names = (self._GetUniqueTestName(t) for t in tests)
          # Wildcard test names can expand to several results, so they are
          # not pre-seeded as NOTRUN.
          try_results.AddResults(
              base_test_result.BaseTestResult(
                  t, base_test_result.ResultType.NOTRUN)
              for t in test_names if not t.endswith('*'))

          # As soon as we know the names of the tests, we populate |results|.
          # The tests in try_results will have their results updated by
          # try_results.AddResult() as they are run.
          results.append(try_results)

          try:
            if self._ShouldShard():
              tc = test_collection.TestCollection(self._CreateShards(tests))
              self._env.parallel_devices.pMap(
                  run_tests_on_device, tc, try_results).pGet(None)
            else:
              self._env.parallel_devices.pMap(
                  run_tests_on_device, tests, try_results).pGet(None)
          except TestsTerminated:
            for unknown_result in try_results.GetUnknown():
              try_results.AddResult(
                  base_test_result.BaseTestResult(
                      unknown_result.GetName(),
                      base_test_result.ResultType.TIMEOUT,
                      log=_SIGTERM_TEST_LOG))
            raise

          tries += 1
          tests = self._GetTestsToRetry(tests, try_results)

          logging.info('FINISHED TRY #%d/%d', tries, self._env.max_tries)
          if tests:
            logging.info('%d failed tests remain.', len(tests))
          else:
            logging.info('All tests completed.')
    except TestsTerminated:
      pass

  def _GetTestsToRetry(self, tests, try_results):
    """Returns the subset of |tests| that failed in |try_results| and for
    which _ShouldRetry approves another attempt."""

    def is_failure_result(test_result):
      if isinstance(test_result, list):
        return any(is_failure_result(r) for r in test_result)
      # A missing result (never ran) also counts as a failure.
      return (
          test_result is None
          or test_result.GetType() not in (
              base_test_result.ResultType.PASS,
              base_test_result.ResultType.SKIP))

    all_test_results = {r.GetName(): r for r in try_results.GetAll()}

    tests_and_names = ((t, self._GetUniqueTestName(t)) for t in tests)

    tests_and_results = {}
    for test, name in tests_and_names:
      if name.endswith('*'):
        # Wildcard names match every expanded result via fnmatch.
        tests_and_results[name] = (
            test,
            [r for n, r in all_test_results.iteritems()
             if fnmatch.fnmatch(n, name)])
      else:
        tests_and_results[name] = (test, all_test_results.get(name))

    failed_tests_and_results = (
        (test, result) for test, result in tests_and_results.itervalues()
        if is_failure_result(result)
    )

    return [t for t, r in failed_tests_and_results if self._ShouldRetry(t, r)]

  def _ApplyExternalSharding(self, tests, shard_index, total_shards):
    """Returns the deterministic hash-based subset of |tests| assigned to
    this shard.

    Raises:
      InvalidShardingSettings: if the shard parameters are inconsistent.
    """
    logging.info('Using external sharding settings. This is shard %d/%d',
                 shard_index, total_shards)

    if total_shards < 0 or shard_index < 0 or total_shards <= shard_index:
      raise InvalidShardingSettings(shard_index, total_shards)

    return [
        t for t in tests
        if hash(self._GetUniqueTestName(t)) % total_shards == shard_index]

  def GetTool(self, device):
    """Returns (creating and caching on first use) the valgrind tool for
    |device|."""
    if str(device) not in self._tools:
      self._tools[str(device)] = valgrind_tools.CreateTool(
          self._env.tool, device)
    return self._tools[str(device)]

  def _CreateShards(self, tests):
    raise NotImplementedError

  def _GetUniqueTestName(self, test):
    # pylint: disable=no-self-use
    return test

  def _ShouldRetry(self, test, result):
    # pylint: disable=no-self-use,unused-argument
    return True

  def _GetTests(self):
    raise NotImplementedError

  def _RunTest(self, device, test):
    raise NotImplementedError

  def _ShouldShard(self):
    raise NotImplementedError
+
+
+class NoTestsError(Exception):
+ """Error for when no tests are found."""
diff --git a/deps/v8/build/android/pylib/local/device/local_device_test_run_test.py b/deps/v8/build/android/pylib/local/device/local_device_test_run_test.py
new file mode 100755
index 0000000000..525bf25200
--- /dev/null
+++ b/deps/v8/build/android/pylib/local/device/local_device_test_run_test.py
@@ -0,0 +1,174 @@
+#!/usr/bin/env vpython
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=protected-access
+
+import unittest
+
+from pylib.base import base_test_result
+from pylib.constants import host_paths
+from pylib.local.device import local_device_test_run
+
+with host_paths.SysPath(host_paths.PYMOCK_PATH):
+ import mock # pylint: disable=import-error
+
+
class SubstituteDeviceRootTest(unittest.TestCase):
  """Tests for local_device_test_run.SubstituteDeviceRoot.

  Per the cases below, SubstituteDeviceRoot resolves a device path spec
  (None, a plain string, or a list of path components whose head may be
  None) against a default device root.
  """
  # Uses assertEqual rather than the deprecated assertEquals alias.

  def testNoneDevicePath(self):
    # None falls back to the provided device root.
    self.assertEqual(
        '/fake/device/root',
        local_device_test_run.SubstituteDeviceRoot(
            None, '/fake/device/root'))

  def testStringDevicePath(self):
    # A concrete string path is used as-is; the root is ignored.
    self.assertEqual(
        '/another/fake/device/path',
        local_device_test_run.SubstituteDeviceRoot(
            '/another/fake/device/path', '/fake/device/root'))

  def testListWithNoneDevicePath(self):
    # A leading None component is replaced by the device root.
    self.assertEqual(
        '/fake/device/root/subpath',
        local_device_test_run.SubstituteDeviceRoot(
            [None, 'subpath'], '/fake/device/root'))

  def testListWithoutNoneDevicePath(self):
    # A component list without None is joined directly; the root is ignored.
    self.assertEqual(
        '/another/fake/device/path',
        local_device_test_run.SubstituteDeviceRoot(
            ['/', 'another', 'fake', 'device', 'path'],
            '/fake/device/root'))
+
+
class TestLocalDeviceTestRun(local_device_test_run.LocalDeviceTestRun):
  """Minimal concrete LocalDeviceTestRun for exercising base-class logic."""

  # pylint: disable=abstract-method

  def __init__(self):
    env = mock.MagicMock()
    test_instance = mock.MagicMock()
    super(TestLocalDeviceTestRun, self).__init__(env, test_instance)
+
+
class TestLocalDeviceNonStringTestRun(
    local_device_test_run.LocalDeviceTestRun):
  """Concrete LocalDeviceTestRun whose test objects are dicts, not strings."""

  # pylint: disable=abstract-method

  def __init__(self):
    env = mock.MagicMock()
    test_instance = mock.MagicMock()
    super(TestLocalDeviceNonStringTestRun, self).__init__(env, test_instance)

  def _GetUniqueTestName(self, test):
    # Each test dict carries its unique name under the 'name' key.
    return test['name']
+
+
class LocalDeviceTestRunTest(unittest.TestCase):
  """Tests for LocalDeviceTestRun._GetTestsToRetry.

  Fixes vs. original: uses assertEqual instead of the deprecated
  assertEquals alias, and factors the repeated TestRunResults setup into
  a single helper.
  """

  def _RetryList(self, test_run, tests, results):
    """Records |results| in a TestRunResults and returns the retry list."""
    try_results = base_test_result.TestRunResults()
    try_results.AddResults(results)
    return test_run._GetTestsToRetry(tests, try_results)

  def testGetTestsToRetry_allTestsPassed(self):
    results = [
        base_test_result.BaseTestResult(
            'Test1', base_test_result.ResultType.PASS),
        base_test_result.BaseTestResult(
            'Test2', base_test_result.ResultType.PASS),
    ]
    tests_to_retry = self._RetryList(
        TestLocalDeviceTestRun(), [r.GetName() for r in results], results)
    self.assertEqual(0, len(tests_to_retry))

  def testGetTestsToRetry_testFailed(self):
    results = [
        base_test_result.BaseTestResult(
            'Test1', base_test_result.ResultType.FAIL),
        base_test_result.BaseTestResult(
            'Test2', base_test_result.ResultType.PASS),
    ]
    tests_to_retry = self._RetryList(
        TestLocalDeviceTestRun(), [r.GetName() for r in results], results)
    self.assertEqual(1, len(tests_to_retry))
    self.assertIn('Test1', tests_to_retry)

  def testGetTestsToRetry_testUnknown(self):
    # A requested test with no recorded result at all should be retried.
    results = [
        base_test_result.BaseTestResult(
            'Test2', base_test_result.ResultType.PASS),
    ]
    tests = ['Test1'] + [r.GetName() for r in results]
    tests_to_retry = self._RetryList(TestLocalDeviceTestRun(), tests, results)
    self.assertEqual(1, len(tests_to_retry))
    self.assertIn('Test1', tests_to_retry)

  def testGetTestsToRetry_wildcardFilter_allPass(self):
    results = [
        base_test_result.BaseTestResult(
            'TestCase.Test1', base_test_result.ResultType.PASS),
        base_test_result.BaseTestResult(
            'TestCase.Test2', base_test_result.ResultType.PASS),
    ]
    tests_to_retry = self._RetryList(
        TestLocalDeviceTestRun(), ['TestCase.*'], results)
    self.assertEqual(0, len(tests_to_retry))

  def testGetTestsToRetry_wildcardFilter_oneFails(self):
    # One failure under a wildcard means the whole wildcard is retried.
    results = [
        base_test_result.BaseTestResult(
            'TestCase.Test1', base_test_result.ResultType.PASS),
        base_test_result.BaseTestResult(
            'TestCase.Test2', base_test_result.ResultType.FAIL),
    ]
    tests_to_retry = self._RetryList(
        TestLocalDeviceTestRun(), ['TestCase.*'], results)
    self.assertEqual(1, len(tests_to_retry))
    self.assertIn('TestCase.*', tests_to_retry)

  def testGetTestsToRetry_nonStringTests(self):
    # Retry entries should be the original test objects, not just names.
    results = [
        base_test_result.BaseTestResult(
            'TestCase.Test1', base_test_result.ResultType.PASS),
        base_test_result.BaseTestResult(
            'TestCase.Test2', base_test_result.ResultType.FAIL),
    ]
    tests = [
        {'name': 'TestCase.Test1'},
        {'name': 'TestCase.Test2'},
    ]
    tests_to_retry = self._RetryList(
        TestLocalDeviceNonStringTestRun(), tests, results)
    self.assertEqual(1, len(tests_to_retry))
    self.assertIsInstance(tests_to_retry[0], dict)
    self.assertEqual(tests[1], tests_to_retry[0])
+
+
if __name__ == '__main__':
  # Verbosity 2 prints each test method's name and status as it runs.
  unittest.main(verbosity=2)