#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Performance runner for d8.

Call e.g. with tools/run_perf.py --arch ia32 some_suite.json

The suite json format is expected to be:
{
  "path": <relative path chunks to perf resources and main file>,
  "name": <optional suite name, file name is default>,
  "archs": [<architecture name for which this suite is run>, ...],
  "binary": <name of binary to run, default "d8">,
  "flags": [<flag to d8>, ...],
  "test_flags": [<flag to the test file>, ...],
  "run_count": <how often will this suite run (optional)>,
  "run_count_XXX": <how often will this suite run for arch XXX (optional)>,
  "resources": [<js file to be moved to android device>, ...]
  "main": <main js perf runner file>,
  "results_regexp": <optional regexp>,
  "results_processor": <optional python results processor script>,
  "units": <the unit specification for the performance dashboard>,
  "tests": [
    {
      "name": <name of the trace>,
      "results_regexp": <optional more specific regexp>,
      "results_processor": <optional python results processor script>,
      "units": <the unit specification for the performance dashboard>,
    }, ...
  ]
}

The tests field can also nest other suites to arbitrary depth. A suite
with a "main" file is a leaf suite that can contain one more level of
tests.

A suite's results_regexp is expected to have one string placeholder
"%s" for the trace name. A trace's results_regexp overrides the suite
default.
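For example, a suite results_regexp of "^%s: (.+)$" is instantiated for a
trace named "Richards" as "^Richards: (.+)$".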

A suite's results_processor may point to a python script. If specified,
it is called after running the tests like this (with a path relative to
the suite level's path):
<results_processor file> <same flags as for d8> <suite level name> <output>

The <output> is a temporary file containing d8 output. The results_regexp will
be applied to the output of this script.

A suite without "tests" is considered a performance test itself.

Full example (suite with one runner):
{
  "path": ["."],
  "flags": ["--expose-gc"],
  "test_flags": ["5"],
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "run_count_ia32": 3,
  "main": "run.js",
  "results_regexp": "^%s: (.+)$",
  "units": "score",
  "tests": [
    {"name": "Richards"},
    {"name": "DeltaBlue"},
    {"name": "NavierStokes",
     "results_regexp": "^NavierStokes: (.+)$"}
  ]
}

Full example (suite with several runners):
{
  "path": ["."],
  "flags": ["--expose-gc"],
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "units": "score",
  "tests": [
    {"name": "Richards",
     "path": ["richards"],
     "main": "run.js",
     "run_count": 3,
     "results_regexp": "^Richards: (.+)$"},
    {"name": "NavierStokes",
     "path": ["navier_stokes"],
     "main": "run.js",
     "results_regexp": "^NavierStokes: (.+)$"}
  ]
}

Path pieces are concatenated. D8 is always run with the suite's path as cwd.

The test flags are passed to the js test file after '--'.
"""

from collections import OrderedDict
import json
import logging
import math
import optparse
import os
import re
import subprocess
import sys

from testrunner.local import commands
from testrunner.local import utils

ARCH_GUESS = utils.DefaultArch()
SUPPORTED_ARCHS = ["arm",
                   "ia32",
                   "mips",
                   "mipsel",
                   "nacl_ia32",
                   "nacl_x64",
                   "x64",
                   "arm64"]

GENERIC_RESULTS_RE = re.compile(r"^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$")
RESULT_STDDEV_RE = re.compile(r"^\{([^\}]+)\}$")
RESULT_LIST_RE = re.compile(r"^\[([^\]]+)\]$")
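# Example lines in the generic result format (names and values are
# illustrative only):
#   RESULT graph_name: trace_name= 1.234 score
#   RESULT graph_name: trace_name= [1.2,1.3,1.4] ms
#   RESULT graph_name: trace_name= {1.234,0.05} ms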
TOOLS_BASE = os.path.abspath(os.path.dirname(__file__))


def LoadAndroidBuildTools(path):  # pragma: no cover
  assert os.path.exists(path)
  sys.path.insert(0, path)

  from pylib.device import adb_wrapper  # pylint: disable=F0401
  from pylib.device import device_errors  # pylint: disable=F0401
  from pylib.device import device_utils  # pylint: disable=F0401
  from pylib.perf import cache_control  # pylint: disable=F0401
  from pylib.perf import perf_control  # pylint: disable=F0401
  global adb_wrapper
  global cache_control
  global device_errors
  global device_utils
  global perf_control


def GeometricMean(values):
  """Returns the geometric mean of a list of values.

  The mean is calculated using log to avoid overflow.
  """
  values = map(float, values)
  return str(math.exp(sum(map(math.log, values)) / len(values)))


class Results(object):
  """Place holder for result traces."""
  def __init__(self, traces=None, errors=None):
    self.traces = traces or []
    self.errors = errors or []

  def ToDict(self):
    return {"traces": self.traces, "errors": self.errors}

  def WriteToFile(self, file_name):
    with open(file_name, "w") as f:
      f.write(json.dumps(self.ToDict()))

  def __add__(self, other):
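    # Accumulates in place and returns self rather than a new Results object.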
    self.traces += other.traces
    self.errors += other.errors
    return self

  def __str__(self):  # pragma: no cover
    return str(self.ToDict())


class Measurement(object):
  """Represents a series of results of one trace.

  The results are from repetitive runs of the same executable. They are
  gathered by repeated calls to ConsumeOutput.
  """
  def __init__(self, graphs, units, results_regexp, stddev_regexp):
    self.name = graphs[-1]
    self.graphs = graphs
    self.units = units
    self.results_regexp = results_regexp
    self.stddev_regexp = stddev_regexp
    self.results = []
    self.errors = []
    self.stddev = ""

  def ConsumeOutput(self, stdout):
    try:
      result = re.search(self.results_regexp, stdout, re.M).group(1)
      self.results.append(str(float(result)))
    except ValueError:
      self.errors.append("Regexp \"%s\" returned a non-numeric for test %s."
                         % (self.results_regexp, self.name))
    except:
      self.errors.append("Regexp \"%s\" didn't match for test %s."
                         % (self.results_regexp, self.name))

    try:
      if self.stddev_regexp and self.stddev:
        self.errors.append("Test %s should only run once since a stddev "
                           "is provided by the test." % self.name)
      if self.stddev_regexp:
        self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
    except:
      self.errors.append("Regexp \"%s\" didn't match for test %s."
                         % (self.stddev_regexp, self.name))

  def GetResults(self):
    return Results([{
      "graphs": self.graphs,
      "units": self.units,
      "results": self.results,
      "stddev": self.stddev,
    }], self.errors)


class NullMeasurement(object):
  """Null object to avoid having extra logic for configurations that didn't
  run like running without patch on trybots.
  """
  def ConsumeOutput(self, stdout):
    pass

  def GetResults(self):
    return Results()


def Unzip(iterable):
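  """Splits an iterable of (left, right) pairs into two zero-argument
  callables, each returning a fresh iterator over one side of the pairs."""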
  left = []
  right = []
  for l, r in iterable:
    left.append(l)
    right.append(r)
  return lambda: iter(left), lambda: iter(right)


def AccumulateResults(
    graph_names, trace_configs, iter_output, trybot, no_patch, calc_total):
  """Iterates over the output of multiple benchmark reruns and accumulates
  results for a configured list of traces.

  Args:
    graph_names: List of names that configure the base path of the traces. E.g.
                 ['v8', 'Octane'].
    trace_configs: List of "TraceConfig" instances. Each trace config defines
                   how to perform a measurement.
    iter_output: Iterator over the standard output of each test run.
    trybot: Indicates that this is run in trybot mode, i.e. run twice, once
            with and once without the patch.
    no_patch: Indicates whether this is a trybot run without the patch.
    calc_total: Boolean flag to specify the calculation of a summary trace.
  Returns: A "Results" object.
  """
  measurements = [
    trace.CreateMeasurement(trybot, no_patch) for trace in trace_configs]
  for stdout in iter_output():
    for measurement in measurements:
      measurement.ConsumeOutput(stdout)

  res = reduce(lambda r, m: r + m.GetResults(), measurements, Results())

  if not res.traces or not calc_total:
    return res

  # Assume all traces have the same structure.
  if len(set(map(lambda t: len(t["results"]), res.traces))) != 1:
    res.errors.append("Not all traces have the same number of results.")
    return res

  # Calculate the geometric means for all traces. Above we made sure that
  # there is at least one trace and that the number of results is the same
  # for each trace.
  n_results = len(res.traces[0]["results"])
  total_results = [GeometricMean(t["results"][i] for t in res.traces)
                   for i in range(0, n_results)]
  res.traces.append({
    "graphs": graph_names + ["Total"],
    "units": res.traces[0]["units"],
    "results": total_results,
    "stddev": "",
  })
  return res


def AccumulateGenericResults(graph_names, suite_units, iter_output):
  """Iterates over the output of multiple benchmark reruns and accumulates
  generic results.

  Args:
    graph_names: List of names that configure the base path of the traces. E.g.
                 ['v8', 'Octane'].
    suite_units: Measurement default units as defined by the benchmark suite.
    iter_output: Iterator over the standard output of each test run.
  Returns: A "Results" object.
  """
  traces = OrderedDict()
  for stdout in iter_output():
    if stdout is None:
      # The None value is used as a null object to simplify logic.
      continue
    for line in stdout.strip().splitlines():
      match = GENERIC_RESULTS_RE.match(line)
      if match:
        stddev = ""
        graph = match.group(1)
        trace = match.group(2)
        body = match.group(3)
        units = match.group(4)
        match_stddev = RESULT_STDDEV_RE.match(body)
        match_list = RESULT_LIST_RE.match(body)
        errors = []
        if match_stddev:
          result, stddev = map(str.strip, match_stddev.group(1).split(","))
          results = [result]
        elif match_list:
          results = map(str.strip, match_list.group(1).split(","))
        else:
          results = [body.strip()]

        try:
          results = map(lambda r: str(float(r)), results)
        except ValueError:
          results = []
          errors = ["Found non-numeric in %s" %
                    "/".join(graph_names + [graph, trace])]

        trace_result = traces.setdefault(trace, Results([{
          "graphs": graph_names + [graph, trace],
          "units": (units or suite_units).strip(),
          "results": [],
          "stddev": "",
        }], errors))
        trace_result.traces[0]["results"].extend(results)
        trace_result.traces[0]["stddev"] = stddev

  return reduce(lambda r, t: r + t, traces.itervalues(), Results())


class Node(object):
  """Represents a node in the suite tree structure."""
  def __init__(self, *args):
    self._children = []

  def AppendChild(self, child):
    self._children.append(child)


class DefaultSentinel(Node):
  """Fake parent node with all default values."""
  def __init__(self, binary="d8"):
    super(DefaultSentinel, self).__init__()
    self.binary = binary
    self.run_count = 10
    self.timeout = 60
    self.path = []
    self.graphs = []
    self.flags = []
    self.test_flags = []
    self.resources = []
    self.results_regexp = None
    self.stddev_regexp = None
    self.units = "score"
    self.total = False


class GraphConfig(Node):
  """Represents a suite definition.

  Can either be a leaf or an inner node that provides default values.
  """
  def __init__(self, suite, parent, arch):
    super(GraphConfig, self).__init__()
    self._suite = suite

    assert isinstance(suite.get("path", []), list)
    assert isinstance(suite["name"], basestring)
    assert isinstance(suite.get("flags", []), list)
    assert isinstance(suite.get("test_flags", []), list)
    assert isinstance(suite.get("resources", []), list)

    # Accumulated values.
    self.path = parent.path[:] + suite.get("path", [])
    self.graphs = parent.graphs[:] + [suite["name"]]
    self.flags = parent.flags[:] + suite.get("flags", [])
    self.test_flags = parent.test_flags[:] + suite.get("test_flags", [])

    # Values independent of parent node.
    self.resources = suite.get("resources", [])

    # Discrete values (with parent defaults).
    self.binary = suite.get("binary", parent.binary)
    self.run_count = suite.get("run_count", parent.run_count)
    self.run_count = suite.get("run_count_%s" % arch, self.run_count)
    self.timeout = suite.get("timeout", parent.timeout)
    self.timeout = suite.get("timeout_%s" % arch, self.timeout)
    self.units = suite.get("units", parent.units)
    self.total = suite.get("total", parent.total)

    # A regular expression for results. If the parent graph provides a
    # regexp and the current suite has none, a string placeholder for the
    # suite name is expected.
    # TODO(machenbach): Currently that makes only sense for the leaf level.
    # Multiple place holders for multiple levels are not supported.
    if parent.results_regexp:
      regexp_default = parent.results_regexp % re.escape(suite["name"])
    else:
      regexp_default = None
    self.results_regexp = suite.get("results_regexp", regexp_default)

    # A similar regular expression for the standard deviation (optional).
    if parent.stddev_regexp:
      stddev_default = parent.stddev_regexp % re.escape(suite["name"])
    else:
      stddev_default = None
    self.stddev_regexp = suite.get("stddev_regexp", stddev_default)


class TraceConfig(GraphConfig):
  """Represents a leaf in the suite tree structure."""
  def __init__(self, suite, parent, arch):
    super(TraceConfig, self).__init__(suite, parent, arch)
    assert self.results_regexp

  def CreateMeasurement(self, trybot, no_patch):
    if not trybot and no_patch:
      # Use null object for no-patch logic if this is not a trybot run.
      return NullMeasurement()

    return Measurement(
        self.graphs,
        self.units,
        self.results_regexp,
        self.stddev_regexp,
    )


class RunnableConfig(GraphConfig):
  """Represents a runnable suite definition (i.e. has a main file).
  """
  @property
  def main(self):
    return self._suite.get("main", "")

  def ChangeCWD(self, suite_path):
    """Changes the cwd to to path defined in the current graph.

    The tests are supposed to be relative to the suite configuration.
    """
    suite_dir = os.path.abspath(os.path.dirname(suite_path))
    bench_dir = os.path.normpath(os.path.join(*self.path))
    os.chdir(os.path.join(suite_dir, bench_dir))

  def GetCommandFlags(self, extra_flags=None):
    suffix = ["--"] + self.test_flags if self.test_flags else []
    return self.flags + (extra_flags or []) + [self.main] + suffix

  def GetCommand(self, shell_dir, extra_flags=None):
    # TODO(machenbach): This requires +.exe if run on windows.
    extra_flags = extra_flags or []
    cmd = [os.path.join(shell_dir, self.binary)]
    if self.binary != 'd8' and '--prof' in extra_flags:
      print "Profiler supported only on a benchmark run with d8"
    return cmd + self.GetCommandFlags(extra_flags=extra_flags)

  def Run(self, runner, trybot):
    """Iterates over several runs and handles the output for all traces."""
    stdout_with_patch, stdout_no_patch = Unzip(runner())
    return (
        AccumulateResults(
            self.graphs,
            self._children,
            iter_output=stdout_with_patch,
            trybot=trybot,
            no_patch=False,
            calc_total=self.total,
        ),
        AccumulateResults(
            self.graphs,
            self._children,
            iter_output=stdout_no_patch,
            trybot=trybot,
            no_patch=True,
            calc_total=self.total,
        ),
    )


class RunnableTraceConfig(TraceConfig, RunnableConfig):
  """Represents a runnable suite definition that is a leaf."""
  def __init__(self, suite, parent, arch):
    super(RunnableTraceConfig, self).__init__(suite, parent, arch)

  def Run(self, runner, trybot):
    """Iterates over several runs and handles the output."""
    measurement_with_patch = self.CreateMeasurement(trybot, False)
    measurement_no_patch = self.CreateMeasurement(trybot, True)
    for stdout_with_patch, stdout_no_patch in runner():
      measurement_with_patch.ConsumeOutput(stdout_with_patch)
      measurement_no_patch.ConsumeOutput(stdout_no_patch)
    return (
        measurement_with_patch.GetResults(),
        measurement_no_patch.GetResults(),
    )


class RunnableGenericConfig(RunnableConfig):
  """Represents a runnable suite definition with generic traces."""
  def __init__(self, suite, parent, arch):
    super(RunnableGenericConfig, self).__init__(suite, parent, arch)

  def Run(self, runner, trybot):
    stdout_with_patch, stdout_no_patch = Unzip(runner())
    return (
        AccumulateGenericResults(self.graphs, self.units, stdout_with_patch),
        AccumulateGenericResults(self.graphs, self.units, stdout_no_patch),
    )


def MakeGraphConfig(suite, arch, parent):
  """Factory method for making graph configuration objects."""
  if isinstance(parent, RunnableConfig):
    # Below a runnable, there can only be traces.
    return TraceConfig(suite, parent, arch)
  elif suite.get("main") is not None:
    # A main file makes this graph runnable. Empty strings are accepted.
    if suite.get("tests"):
      # This graph has subgraphs (traces).
      return RunnableConfig(suite, parent, arch)
    else:
      # This graph has no subgraphs, it's a leaf.
      return RunnableTraceConfig(suite, parent, arch)
  elif suite.get("generic"):
    # This is a generic suite definition. It is either a runnable executable
    # or has a main js file.
    return RunnableGenericConfig(suite, parent, arch)
  elif suite.get("tests"):
    # This is neither a leaf nor a runnable.
    return GraphConfig(suite, parent, arch)
  else:  # pragma: no cover
    raise Exception("Invalid suite configuration.")


def BuildGraphConfigs(suite, arch, parent):
  """Builds a tree structure of graph objects that corresponds to the suite
  configuration.
  """

  # TODO(machenbach): Implement notion of cpu type?
  if arch not in suite.get("archs", SUPPORTED_ARCHS):
    return None

  graph = MakeGraphConfig(suite, arch, parent)
  for subsuite in suite.get("tests", []):
    BuildGraphConfigs(subsuite, arch, graph)
  parent.AppendChild(graph)
  return graph


def FlattenRunnables(node, node_cb):
  """Generator that traverses the tree structure and iterates over all
  runnables.
  """
  node_cb(node)
  if isinstance(node, RunnableConfig):
    yield node
  elif isinstance(node, Node):
    for child in node._children:
      for result in FlattenRunnables(child, node_cb):
        yield result
  else:  # pragma: no cover
    raise Exception("Invalid suite configuration.")


class Platform(object):
  def __init__(self, options):
    self.shell_dir = options.shell_dir
    self.shell_dir_no_patch = options.shell_dir_no_patch
    self.extra_flags = options.extra_flags.split()

  @staticmethod
  def GetPlatform(options):
    if options.android_build_tools:
      return AndroidPlatform(options)
    else:
      return DesktopPlatform(options)

  def _Run(self, runnable, count, no_patch=False):
    raise NotImplementedError()  # pragma: no cover

  def Run(self, runnable, count):
    """Execute the benchmark's main file.

    If options.shell_dir_no_patch is specified, the benchmark is run once with
    and once without patch.
    Args:
      runnable: A Runnable benchmark instance.
      count: The number of this (repeated) run.
    Returns: A tuple with the benchmark outputs with and without patch. The
             latter will be None if options.shell_dir_no_patch was not
             specified.
    """
    stdout = self._Run(runnable, count, no_patch=False)
    if self.shell_dir_no_patch:
      return stdout, self._Run(runnable, count, no_patch=True)
    else:
      return stdout, None


class DesktopPlatform(Platform):
  def __init__(self, options):
    super(DesktopPlatform, self).__init__(options)

  def PreExecution(self):
    pass

  def PostExecution(self):
    pass

  def PreTests(self, node, path):
    if isinstance(node, RunnableConfig):
      node.ChangeCWD(path)

  def _Run(self, runnable, count, no_patch=False):
    suffix = ' - without patch' if no_patch else ''
    shell_dir = self.shell_dir_no_patch if no_patch else self.shell_dir
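    # The escaped '%%s' survives this formatting as '%s' and is filled in
    # below with "Stdout", "Stderr" or "OSError".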
    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
    try:
      output = commands.Execute(
          runnable.GetCommand(shell_dir, self.extra_flags),
          timeout=runnable.timeout,
      )
    except OSError as e:  # pragma: no cover
      print title % "OSError"
      print e
      return ""
    print title % "Stdout"
    print output.stdout
    if output.stderr:  # pragma: no cover
      # Print stderr for debugging.
      print title % "Stderr"
      print output.stderr
    if output.timed_out:
      print ">>> Test timed out after %ss." % runnable.timeout
    if '--prof' in self.extra_flags:
      os_prefix = {"linux": "linux", "macos": "mac"}.get(utils.GuessOS())
      if os_prefix:
        tick_tools = os.path.join(TOOLS_BASE, "%s-tick-processor" % os_prefix)
        subprocess.check_call(tick_tools + " --only-summary", shell=True)
      else:  # pragma: no cover
        print "Profiler option currently supported on Linux and Mac OS."
    return output.stdout


class AndroidPlatform(Platform):  # pragma: no cover
  DEVICE_DIR = "/data/local/tmp/v8/"

  def __init__(self, options):
    super(AndroidPlatform, self).__init__(options)
    LoadAndroidBuildTools(options.android_build_tools)

    if not options.device:
      # Detect attached device if not specified.
      devices = adb_wrapper.AdbWrapper.Devices()
      assert devices and len(devices) == 1, (
          "None or multiple devices detected. Please specify the device on "
          "the command-line with --device")
      options.device = str(devices[0])
    self.adb_wrapper = adb_wrapper.AdbWrapper(options.device)
    self.device = device_utils.DeviceUtils(self.adb_wrapper)

  def PreExecution(self):
    perf = perf_control.PerfControl(self.device)
    perf.SetHighPerfMode()

    # Remember what we have already pushed to the device.
    self.pushed = set()

  def PostExecution(self):
    perf = perf_control.PerfControl(self.device)
    perf.SetDefaultPerfMode()
    self.device.RunShellCommand(["rm", "-rf", AndroidPlatform.DEVICE_DIR])

  def _PushFile(self, host_dir, file_name, target_rel=".",
                skip_if_missing=False):
    file_on_host = os.path.join(host_dir, file_name)
    file_on_device_tmp = os.path.join(
        AndroidPlatform.DEVICE_DIR, "_tmp_", file_name)
    file_on_device = os.path.join(
        AndroidPlatform.DEVICE_DIR, target_rel, file_name)
    folder_on_device = os.path.dirname(file_on_device)

    # Only attempt to push files that exist.
    if not os.path.exists(file_on_host):
      if not skip_if_missing:
        logging.critical('Missing file on host: %s' % file_on_host)
      return

    # Only push files not yet pushed in one execution.
    if file_on_host in self.pushed:
      return
    else:
      self.pushed.add(file_on_host)

    # Work-around for "text file busy" errors. Push the files to a temporary
    # location and then copy them with a shell command.
    output = self.adb_wrapper.Push(file_on_host, file_on_device_tmp)
    # Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)".
    # Errors look like this: "failed to copy  ... ".
    if output and not re.search('^[0-9]', output.splitlines()[-1]):
      logging.critical('PUSH FAILED: ' + output)
    self.adb_wrapper.Shell("mkdir -p %s" % folder_on_device)
    self.adb_wrapper.Shell("cp %s %s" % (file_on_device_tmp, file_on_device))

  def _PushExecutable(self, shell_dir, target_dir, binary):
    self._PushFile(shell_dir, binary, target_dir)

    # Push external startup data. Backwards compatible for revisions where
    # these files didn't exist.
    self._PushFile(
        shell_dir,
        "natives_blob.bin",
        target_dir,
        skip_if_missing=True,
    )
    self._PushFile(
        shell_dir,
        "snapshot_blob.bin",
        target_dir,
        skip_if_missing=True,
    )
    self._PushFile(
        shell_dir,
        "snapshot_blob_ignition.bin",
        target_dir,
        skip_if_missing=True,
    )

  def PreTests(self, node, path):
    suite_dir = os.path.abspath(os.path.dirname(path))
    if node.path:
      bench_rel = os.path.normpath(os.path.join(*node.path))
      bench_abs = os.path.join(suite_dir, bench_rel)
    else:
      bench_rel = "."
      bench_abs = suite_dir

    self._PushExecutable(self.shell_dir, "bin", node.binary)
    if self.shell_dir_no_patch:
      self._PushExecutable(
          self.shell_dir_no_patch, "bin_no_patch", node.binary)

    if isinstance(node, RunnableConfig):
      self._PushFile(bench_abs, node.main, bench_rel)
    for resource in node.resources:
      self._PushFile(bench_abs, resource, bench_rel)

  def _Run(self, runnable, count, no_patch=False):
    suffix = ' - without patch' if no_patch else ''
    target_dir = "bin_no_patch" if no_patch else "bin"
    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
    cache = cache_control.CacheControl(self.device)
    cache.DropRamCaches()
    binary_on_device = os.path.join(
        AndroidPlatform.DEVICE_DIR, target_dir, runnable.binary)
    cmd = [binary_on_device] + runnable.GetCommandFlags(self.extra_flags)

    # Relative path to benchmark directory.
    if runnable.path:
      bench_rel = os.path.normpath(os.path.join(*runnable.path))
    else:
      bench_rel = "."

    try:
      output = self.device.RunShellCommand(
          cmd,
          cwd=os.path.join(AndroidPlatform.DEVICE_DIR, bench_rel),
          timeout=runnable.timeout,
          retries=0,
      )
      stdout = "\n".join(output)
      print title % "Stdout"
      print stdout
    except device_errors.CommandTimeoutError:
      print ">>> Test timed out after %ss." % runnable.timeout
      stdout = ""
    return stdout


# TODO: Implement results_processor.
def Main(args):
  logging.getLogger().setLevel(logging.INFO)
  parser = optparse.OptionParser()
  parser.add_option("--android-build-tools",
                    help="Path to chromium's build/android. Specifying this "
                         "option will run tests using android platform.")
  parser.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect"),
                    default="x64")
  parser.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots",
                    default=False, action="store_true")
  parser.add_option("--device",
                    help="The device ID to run Android tests on. If not given "
                         "it will be autodetected.")
  parser.add_option("--extra-flags",
                    help="Additional flags to pass to the test executable",
                    default="")
  parser.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
  parser.add_option("--json-test-results-no-patch",
                    help="Path to a file for storing json results from run "
                         "without patch.")
  parser.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  parser.add_option("--outdir-no-patch",
                    help="Base directory with compile output without patch")
  parser.add_option("--binary-override-path",
                    help="JavaScript engine binary. By default, d8 under "
                    "architecture-specific build dir. "
                    "Not supported in conjunction with outdir-no-patch.")

  (options, args) = parser.parse_args(args)

  if len(args) == 0:  # pragma: no cover
    parser.print_help()
    return 1

  if options.arch in ["auto", "native"]:  # pragma: no cover
    options.arch = ARCH_GUESS

  if options.arch not in SUPPORTED_ARCHS:  # pragma: no cover
    print "Unknown architecture %s" % options.arch
    return 1

  if options.device and not options.android_build_tools:  # pragma: no cover
    print "Specifying a device requires Android build tools."
    return 1

  if (options.json_test_results_no_patch and
      not options.outdir_no_patch):  # pragma: no cover
    print("For writing json test results without patch, an outdir without "
          "patch must be specified.")
    return 1

  workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

  if options.buildbot:
    build_config = "Release"
  else:
    build_config = "%s.release" % options.arch

  if options.binary_override_path is None:
    options.shell_dir = os.path.join(workspace, options.outdir, build_config)
    default_binary_name = "d8"
  else:
    if not os.path.isfile(options.binary_override_path):
      print "binary-override-path must be a file name"
      return 1
    if options.outdir_no_patch:
      print "specify either binary-override-path or outdir-no-patch"
      return 1
    options.shell_dir = os.path.dirname(options.binary_override_path)
    default_binary_name = os.path.basename(options.binary_override_path)

  if options.outdir_no_patch:
    options.shell_dir_no_patch = os.path.join(
        workspace, options.outdir_no_patch, build_config)
  else:
    options.shell_dir_no_patch = None

  platform = Platform.GetPlatform(options)

  results = Results()
  results_no_patch = Results()
  for path in args:
    path = os.path.abspath(path)

    if not os.path.exists(path):  # pragma: no cover
      results.errors.append("Configuration file %s does not exist." % path)
      continue

    with open(path) as f:
      suite = json.loads(f.read())

    # If no name is given, default to the file name without .json.
    suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])

    # Setup things common to one test suite.
    platform.PreExecution()

    # Build the graph/trace tree structure.
    default_parent = DefaultSentinel(default_binary_name)
    root = BuildGraphConfigs(suite, options.arch, default_parent)

    # Callback to be called on each node on traversal.
    def NodeCB(node):
      platform.PreTests(node, path)

    # Traverse graph/trace tree and iterate over all runnables.
    for runnable in FlattenRunnables(root, NodeCB):
      print ">>> Running suite: %s" % "/".join(runnable.graphs)

      def Runner():
        """Output generator that reruns several times."""
        for i in xrange(0, max(1, runnable.run_count)):
          # TODO(machenbach): Allow timeout per arch like with run_count per
          # arch.
          yield platform.Run(runnable, i)

      # Let runnable iterate over all runs and handle output.
      result, result_no_patch = runnable.Run(
          Runner, trybot=options.shell_dir_no_patch)
      results += result
      results_no_patch += result_no_patch
    platform.PostExecution()

  if options.json_test_results:
    results.WriteToFile(options.json_test_results)
  else:  # pragma: no cover
    print results

  if options.json_test_results_no_patch:
    results_no_patch.WriteToFile(options.json_test_results_no_patch)
  else:  # pragma: no cover
    print results_no_patch

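  # Exit with 1 if any errors were collected, with 0 otherwise.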
  return min(1, len(results.errors))

if __name__ == "__main__":  # pragma: no cover
  sys.exit(Main(sys.argv[1:]))