File: story_expectation_validator.py

#!/usr/bin/env vpython3
# Copyright 2017 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to check validity of StoryExpectations."""

import logging
import os
import sys

from core import benchmark_utils
from core import benchmark_finders
from core import path_util
path_util.AddTelemetryToPath()
path_util.AddAndroidPylibToPath()

from telemetry.story.typ_expectations import SYSTEM_CONDITION_TAGS

from typ import expectations_parser as typ_expectations_parser


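# Cluster Telemetry benchmarks are excluded from story-name validation in
# validate_story_names() below.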
CLUSTER_TELEMETRY_DIR = os.path.join(
    path_util.GetChromiumSrcDir(), 'tools', 'perf', 'contrib',
    'cluster_telemetry')
CLUSTER_TELEMETRY_BENCHMARKS = [
    ct_benchmark.Name() for ct_benchmark in
    benchmark_finders.GetBenchmarksInSubDirectory(CLUSTER_TELEMETRY_DIR)
]
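# Tag prefixes used to classify an expectation's condition tags as either
# mobile or desktop.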
MOBILE_PREFIXES = {'android', 'mobile'}
DESKTOP_PREFIXES = {
    'chromeos', 'desktop', 'linux', 'mac', 'win', 'sierra', 'highsierra'}


def is_desktop_tag(tag):
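  """Returns True if |tag| starts with a desktop platform prefix."""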
  return any(tag.lower().startswith(t) for t in DESKTOP_PREFIXES)


def is_mobile_tag(tag):
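  """Returns True if |tag| starts with a mobile platform prefix."""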
  return any(tag.lower().startswith(t) for t in MOBILE_PREFIXES)


def validate_story_names(benchmarks, test_expectations):
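  """Checks that every expectation pattern matches at least one story.

  Builds the full 'benchmark_name/story_name' list for all benchmarks
  (Cluster Telemetry benchmarks are skipped) and raises an AssertionError
  for any expectation pattern that applies to none of them.
  """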
  stories = []
  for benchmark in benchmarks:
    if benchmark.Name() in CLUSTER_TELEMETRY_BENCHMARKS:
      continue
    story_set = benchmark_utils.GetBenchmarkStorySet(benchmark(),
                                                     exhaustive=True)
    stories.extend(
        [benchmark.Name() + '/' + s.name for s in story_set.stories])
  broken_expectations = test_expectations.check_for_broken_expectations(stories)
  unused_patterns = ''
  for pattern in {e.test for e in broken_expectations}:
    unused_patterns += ("Expectations with pattern '%s'"
                        " do not apply to any stories\n" % pattern)
  assert not unused_patterns, unused_patterns


def validate_expectations_component_tags(test_expectations):
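  """Asserts that no expectation mixes mobile and desktop condition tags."""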
  expectations = []
  for exps in test_expectations.individual_exps.values():
    expectations.extend(exps)
  for exps in test_expectations.glob_exps.values():
    expectations.extend(exps)
  for e in expectations:
    if len(e.tags) > 1:
      has_mobile_tags = any(is_mobile_tag(t) for t in e.tags)
      has_desktop_tags = any(is_desktop_tag(t) for t in e.tags)
      assert not (has_mobile_tags and has_desktop_tags), (
          'Expectation on line %d is mixing mobile and desktop condition'
          ' tags' % e.lineno)


def validate_supported_platform_lists(benchmarks):
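  """Asserts that SUPPORTED_PLATFORM_TAGS only use known condition tags.

  Every tag a benchmark declares must be one of the SYSTEM_CONDITION_TAGS
  defined for the expectations file.
  """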
  for b in benchmarks:
    assert all(tag.lower() in SYSTEM_CONDITION_TAGS
               for tag in b.SUPPORTED_PLATFORM_TAGS), (
        "%s's SUPPORTED_PLATFORM_TAGS contains a tag not"
        " defined in expectations.config" % b.Name())


def main():
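  """Runs all expectation validations against the expectations file.

  Returns 0 on success, or the parser's error code if the expectations
  file cannot be parsed.
  """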
  benchmarks = benchmark_finders.GetAllBenchmarks()
  with open(path_util.GetExpectationsPath()) as fp:
    raw_expectations_data = fp.read()
  test_expectations = typ_expectations_parser.TestExpectations()
  ret, msg = test_expectations.parse_tagged_list(raw_expectations_data)
  if ret:
    logging.error(msg)
    return ret
  validate_supported_platform_lists(benchmarks)
  validate_story_names(benchmarks, test_expectations)
  validate_expectations_component_tags(test_expectations)
  return 0


if __name__ == '__main__':
  sys.exit(main())