File: perf_data_generator_unittest.py

# Copyright 2017 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import json
import os
import tempfile
import unittest
from unittest import mock

import six

# This is necessary because io.StringIO in Python 2 does not accept str, only
# unicode. BytesIO works in Python 2, but then complains when given a str
# instead of bytes in Python 3.
if six.PY2:
  from cStringIO import StringIO  # pylint: disable=wrong-import-order,import-error
else:
  from io import StringIO  # pylint: disable=wrong-import-order

from core import perf_data_generator
from core.perf_data_generator import BenchmarkMetadata


class PerfDataGeneratorTest(unittest.TestCase):
  def setUp(self):
    # Test config can be big, so set maxDiff to None to see the full comparison
    # diff when assertEqual fails.
    self.maxDiff = None

  def test_get_scheduled_non_telemetry_benchmarks(self):
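    # NamedTemporaryFile is created with delete=False and closed immediately
    # so the file can be reopened for writing below (required on Windows) and
    # removed explicitly in the finally block.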
    tmpfile = tempfile.NamedTemporaryFile(delete=False)
    tmpfile.close()
    fake_perf_waterfall_file = tmpfile.name

    data = {
        'builder 1': {
          'isolated_scripts': [
            {'name': 'test_dancing'},
            {'name': 'test_singing'},
            {'name': 'performance_test_suite'},
          ],
          'scripts': [
            {'name': 'ninja_test'},
          ]
        },
        'builder 2': {
          'scripts': [
            {'name': 'gun_slinger'},
          ]
        }
    }
    try:
      with open(fake_perf_waterfall_file, 'w') as f:
        json.dump(data, f)
      benchmarks = perf_data_generator.get_scheduled_non_telemetry_benchmarks(
          fake_perf_waterfall_file)
      self.assertIn('ninja_test', benchmarks)
      self.assertIn('gun_slinger', benchmarks)
      self.assertIn('test_dancing', benchmarks)
      self.assertIn('test_singing', benchmarks)
    finally:
      os.remove(fake_perf_waterfall_file)
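

# For illustration only: a minimal sketch, inferred from the test above, of
# what get_scheduled_non_telemetry_benchmarks is assumed to do -- walk every
# builder in the waterfall JSON and collect the test names from its 'scripts'
# and 'isolated_scripts' entries. The real implementation lives in
# core/perf_data_generator.py and may additionally filter out telemetry
# suites such as 'performance_test_suite'; this hypothetical helper is not
# used by the tests.
def _sketch_get_scheduled_non_telemetry_benchmarks(waterfall_file):
  with open(waterfall_file) as f:
    waterfall = json.load(f)
  names = set()
  for builder in waterfall.values():
    for test in (builder.get('scripts', []) +
                 builder.get('isolated_scripts', [])):
      names.add(test['name'])
  return names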


class TestIsPerfBenchmarksSchedulingValid(unittest.TestCase):
  def setUp(self):
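    # Snapshot the module-level benchmark tables so each test can replace
    # them freely; tearDown puts the originals back. The waterfall scan is
    # mocked out so tests control exactly which benchmarks look 'scheduled'.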
    self.maxDiff = None
    self.original_GTEST_BENCHMARKS = copy.deepcopy(
        perf_data_generator.GTEST_BENCHMARKS)
    self.original_OTHER_BENCHMARKS = copy.deepcopy(
        perf_data_generator.OTHER_BENCHMARKS)
    self.test_stream = StringIO()
    self.mock_get_non_telemetry_benchmarks = mock.patch(
        'core.perf_data_generator.get_scheduled_non_telemetry_benchmarks')
    self.get_non_telemetry_benchmarks = (
        self.mock_get_non_telemetry_benchmarks.start())

  def tearDown(self):
    perf_data_generator.GTEST_BENCHMARKS = (
        self.original_GTEST_BENCHMARKS)
    perf_data_generator.OTHER_BENCHMARKS = (
        self.original_OTHER_BENCHMARKS)
    self.mock_get_non_telemetry_benchmarks.stop()

  def test_returnTrue(self):
    self.get_non_telemetry_benchmarks.return_value = {'honda'}

    perf_data_generator.GTEST_BENCHMARKS = {
        'honda': BenchmarkMetadata('baz@foo.com'),
    }
    perf_data_generator.OTHER_BENCHMARKS = {}
    valid = perf_data_generator.is_perf_benchmarks_scheduling_valid(
        'dummy', self.test_stream)

    self.assertEqual(self.test_stream.getvalue(), '')
    self.assertEqual(valid, True)

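  # 'Tracked' means listed in GTEST_BENCHMARKS or OTHER_BENCHMARKS; 'scheduled'
  # means returned by the (mocked) waterfall scan. The two tests below cover
  # each direction of mismatch.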
  def test_UnscheduledCppBenchmarks(self):
    self.get_non_telemetry_benchmarks.return_value = {'honda'}

    perf_data_generator.GTEST_BENCHMARKS = {
        'honda': BenchmarkMetadata('baz@foo.com'),
        'toyota': BenchmarkMetadata('baz@foo.com'),
    }
    perf_data_generator.OTHER_BENCHMARKS = {}
    valid = perf_data_generator.is_perf_benchmarks_scheduling_valid(
        'dummy', self.test_stream)

    self.assertEqual(valid, False)
    self.assertIn('Benchmark toyota is tracked but not scheduled',
        self.test_stream.getvalue())

  def test_UntrackedCppBenchmarks(self):
    self.get_non_telemetry_benchmarks.return_value = {'honda', 'tesla'}

    perf_data_generator.GTEST_BENCHMARKS = {
        'honda': BenchmarkMetadata('baz@foo.com'),
    }
    perf_data_generator.OTHER_BENCHMARKS = {}
    valid = perf_data_generator.is_perf_benchmarks_scheduling_valid(
        'dummy', self.test_stream)

    self.assertEqual(valid, False)
    self.assertIn(
        'Benchmark tesla is scheduled on perf waterfall but not tracked',
        self.test_stream.getvalue())
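

# For illustration only: the three tests above pin down the contract of
# is_perf_benchmarks_scheduling_valid -- it passes exactly when the tracked
# benchmarks (GTEST_BENCHMARKS plus OTHER_BENCHMARKS) match the scheduled
# ones, and reports each mismatch on the output stream. A hypothetical
# sketch consistent with the asserted substrings (the real function in
# core/perf_data_generator.py may word its messages differently):
def _sketch_is_perf_benchmarks_scheduling_valid(waterfall_file, outstream):
  scheduled = perf_data_generator.get_scheduled_non_telemetry_benchmarks(
      waterfall_file)
  tracked = (set(perf_data_generator.GTEST_BENCHMARKS) |
             set(perf_data_generator.OTHER_BENCHMARKS))
  for name in sorted(tracked - scheduled):
    outstream.write('Benchmark %s is tracked but not scheduled\n' % name)
  for name in sorted(scheduled - tracked):
    outstream.write(
        'Benchmark %s is scheduled on perf waterfall but not tracked\n' % name)
  return tracked == scheduled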