File: generate_legacy_perf_dashboard_json_unittest.py

package info (click to toggle)
chromium 138.0.7204.157-1
  • links: PTS, VCS
  • area: main
  • in suites: trixie
  • size: 6,071,864 kB
  • sloc: cpp: 34,936,859; ansic: 7,176,967; javascript: 4,110,704; python: 1,419,953; asm: 946,768; xml: 739,967; pascal: 187,324; sh: 89,623; perl: 88,663; objc: 79,944; sql: 50,304; cs: 41,786; fortran: 24,137; makefile: 21,806; php: 13,980; tcl: 13,166; yacc: 8,925; ruby: 7,485; awk: 3,720; lisp: 3,096; lex: 1,327; ada: 727; jsp: 228; sed: 36
file content (107 lines) | stat: -rwxr-xr-x 3,666 bytes parent folder | download | duplicates (5)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
#!/usr/bin/env vpython3
# Copyright 2016 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


import json
import os
import unittest

import six

import generate_legacy_perf_dashboard_json

class LegacyResultsProcessorUnittest(unittest.TestCase):
  def setUp(self):
    """Set up for all test method of each test method below."""
    super(LegacyResultsProcessorUnittest, self).setUp()
    if six.PY2:
      self.data_directory = os.path.join(
          os.path.dirname(os.path.abspath(__file__)), 'testdata')
    else:
      self.data_directory = os.path.join(
          os.path.dirname(os.path.abspath(__file__)), 'testdata', 'python3')

  def _ConstructDefaultProcessor(self):
    """Creates a LegacyResultsProcessor instance.

    Returns:
      An instance of LegacyResultsProcessor class
    """
    return generate_legacy_perf_dashboard_json.LegacyResultsProcessor()

  def _ProcessLog(self, log_processor, logfile):  # pylint: disable=R0201
    """Reads in a input log file and processes it.

    This changes the state of the log processor object; the output is stored
    in the object and can be gotten using the PerformanceLogs() method.

    Args:
      log_processor: An PerformanceLogProcessor instance.
      logfile: File name of an input performance results log file.
    """
    for line in open(os.path.join(self.data_directory, logfile)):
      log_processor.ProcessLine(line)

  def _CheckFileExistsWithData(self, logs, graph):
    """Asserts that |graph| exists in the |logs| dict and is non-empty."""
    self.assertTrue(graph in logs, 'File %s was not output.' % graph)
    self.assertTrue(logs[graph], 'File %s did not contain data.' % graph)

  def _ConstructParseAndCheckLogfiles(self, inputfiles, graphs):
    """Uses a log processor to process the given input files.

    Args:
      inputfiles: A list of input performance results log file names.
      logfiles: List of expected output ".dat" file names.

    Returns:
      A dictionary mapping output file name to output file lines.
    """
    parser = self._ConstructDefaultProcessor()
    for inputfile in inputfiles:
      self._ProcessLog(parser, inputfile)

    logs = json.loads(parser.GenerateGraphJson())
    for graph in graphs:
      self._CheckFileExistsWithData(logs, graph)

    return logs

  def _ConstructParseAndCheckJSON(
      self, inputfiles, logfiles, graphs):
    """Processes input with a log processor and checks against expectations.

    Args:
      inputfiles: A list of input performance result log file names.
      logfiles: A list of expected output ".dat" file names.
      subdir: Subdirectory containing expected output files.
      log_processor_class: A log processor class.
    """
    logs = self._ConstructParseAndCheckLogfiles(inputfiles, graphs)
    index = 0
    for filename in logfiles:
      graph_name = graphs[index]
      actual = logs[graph_name]
      path = os.path.join(self.data_directory, filename)
      expected = json.load(open(path))
      self.assertEqual(expected, actual, 'JSON data in %s did not match '
          'expectations.' % filename)

      index += 1


  def testSummary(self):
    graphs = ['commit_charge',
        'ws_final_total', 'vm_final_browser', 'vm_final_total',
        'ws_final_browser', 'processes', 'artificial_graph']
    # Tests the output of "summary" files, which contain per-graph data.
    input_files = ['graphing_processor.log']
    output_files = ['%s-summary.dat' % graph for graph in graphs]

    self._ConstructParseAndCheckJSON(input_files, output_files, graphs)


# Allow the suite to be run directly (e.g. `vpython3 <this file>`) rather
# than only through a test runner.
if __name__ == '__main__':
  unittest.main()