File: benchmark_unittest.py

# Copyright 2014 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""For all the benchmarks that set options, test that the options are valid."""

from collections import defaultdict
import unittest

from core import path_util
from core import perf_benchmark

from telemetry import benchmark as benchmark_module
from telemetry import decorators
from telemetry.testing import options_for_unittests
from telemetry.testing import progress_reporter

from py_utils import discover


def _GetAllPerfBenchmarks():
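  """Returns all benchmark classes discovered under the official perf dir."""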
  return list(
      discover.DiscoverClasses(path_util.GetOfficialBenchmarksDir(),
                               path_util.GetPerfDir(),
                               benchmark_module.Benchmark,
                               index_by_class_name=True).values())


def _BenchmarkOptionsTestGenerator(benchmark):
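  """Returns a test method that verifies |benchmark|'s options are valid."""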
  def testBenchmarkOptions(self):
    """Tests whether benchmark options can be constructed without errors."""
    try:
      options_for_unittests.GetRunOptions(benchmark_cls=benchmark)
    except benchmark_module.InvalidOptionsError as exc:
      self.fail(str(exc))
  return testBenchmarkOptions


class TestNoBenchmarkNamesDuplication(unittest.TestCase):
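  """Checks that no two discovered benchmarks share the same name."""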

  def runTest(self):
    all_benchmarks = _GetAllPerfBenchmarks()
    names_to_benchmarks = defaultdict(list)
    for b in all_benchmarks:
      names_to_benchmarks[b.Name()].append(b)
    for n in names_to_benchmarks:
      self.assertEqual(
          1, len(names_to_benchmarks[n]),
          'Found multiple benchmarks with the name %s: %s' %
          (n, str(names_to_benchmarks[n])))


class TestBenchmarkNamingMobile(unittest.TestCase):
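  """Checks that 'mobile' benchmarks are Android-only or fully disabled."""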

  # TODO(rnephew): This needs to be fixed after we move to CanRunOnBrowser.
  @decorators.Disabled('all')
  def runTest(self):
    all_benchmarks = _GetAllPerfBenchmarks()
    names_to_benchmarks = {}
    for b in all_benchmarks:
      names_to_benchmarks[b.Name()] = b

    for n, bench in names_to_benchmarks.items():
      if 'mobile' in n:
        enabled_tags = decorators.GetEnabledAttributes(bench)
        disabled_tags = decorators.GetDisabledAttributes(bench)

        self.assertTrue('all' in disabled_tags or 'android' in enabled_tags,
                        ','.join([
                            str(bench), bench.Name(),
                            str(disabled_tags), str(enabled_tags)]))


class TestNoOverrideCustomizeOptions(unittest.TestCase):
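  """Checks that no benchmark overrides PerfBenchmark.CustomizeOptions."""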

  def runTest(self):
    all_benchmarks = _GetAllPerfBenchmarks()
    for benchmark in all_benchmarks:
      self.assertTrue(
          issubclass(benchmark, perf_benchmark.PerfBenchmark),
          'Benchmark %s must subclass PerfBenchmark' %
          benchmark.Name())
      self.assertEqual(
          benchmark.CustomizeOptions,
          perf_benchmark.PerfBenchmark.CustomizeOptions,
          'Benchmark %s should not override CustomizeOptions' %
          benchmark.Name())


class BenchmarkOptionsTest(unittest.TestCase):
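  """Empty container; test methods are attached by _AddBenchmarkOptionsTests."""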


def _AddBenchmarkOptionsTests(suite):
  # _GetAllPerfBenchmarks discovers with |index_by_class_name=True|, so a
  # module that defines several benchmarks contributes all of them.
  all_benchmarks = _GetAllPerfBenchmarks()
  for benchmark in all_benchmarks:
    if not benchmark.options:
      # No need to test benchmarks that have not defined options.
      continue

    setattr(BenchmarkOptionsTest, benchmark.Name(),
            _BenchmarkOptionsTestGenerator(benchmark))
    suite.addTest(BenchmarkOptionsTest(benchmark.Name()))


def load_tests(loader, standard_tests, pattern):
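  """Implements the unittest load_tests protocol to build the test suite."""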
  del loader, pattern  # unused
  suite = progress_reporter.TestSuite()
  for t in standard_tests:
    suite.addTests(t)
  _AddBenchmarkOptionsTests(suite)
  return suite