#!/usr/bin/env vpython3
# Copyright 2018 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs tests to ensure annotation tests are working as expected.
"""

from __future__ import print_function

import argparse
import os
import sys
import tempfile

from annotation_tools import NetworkTrafficAnnotationTools

# If this test starts failing, please set TEST_IS_ENABLED to "False" and file a
# bug to get this reenabled, and cc the people listed in
# //tools/traffic_annotation/OWNERS.
TEST_IS_ENABLED = True

# If this test starts failing due to a critical bug in auditor.py, please set
# USE_PYTHON_AUDITOR to "False" and file a bug (see comment above).
USE_PYTHON_AUDITOR = True
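
# Lower bound on the number of annotations the auditor is expected to extract
# (checked in CheckOutputExpectations below).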
MINIMUM_EXPECTED_NUMBER_OF_ANNOTATIONS = 260


class TrafficAnnotationTestsChecker():
  def __init__(self,
               build_path=None,
               annotations_filename=None,
               errors_filename=None):
    """Initializes a TrafficAnnotationTestsChecker object.

    Args:
      build_path: str Absolute or relative path to a fully compiled build
          directory.
      annotations_filename: str Path to a file to write annotations to.
      errors_filename: str Path to a file to write errors to.
    """
    self.tools = NetworkTrafficAnnotationTools(build_path)
    self.last_result = None
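
    # When no output paths are given, write to temporary files. The
    # annotations file is deleted after each run unless an explicit path was
    # provided (see |self.persist_annotations| in _RunTest).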
    self.persist_annotations = bool(annotations_filename)
    if not annotations_filename:
      annotations_file, annotations_filename = tempfile.mkstemp()
      os.close(annotations_file)
    self.annotations_filename = annotations_filename

    if not errors_filename:
      errors_file, errors_filename = tempfile.mkstemp()
      os.close(errors_file)
    self.errors_filename = errors_filename

  def RunAllTests(self):
    """Runs all tests and returns the result."""
    return self.CheckAuditorResults() and self.CheckOutputExpectations()

  def CheckAuditorResults(self):
    """Runs the auditor with several configurations, expecting every run to be
    error free and to produce identical results in the exported TSV file. The
    TSV file provides a summary of all annotations and their content.

    Returns:
      bool True if all results are as expected.
    """
    configs = [
        # Similar to trybot.
        [
            "--test-only",
            "--error-resilient",
        ],
        # Failing on any runtime error.
        [
            "--test-only",
        ],
        # No heuristic filtering.
        [
            "--test-only",
            "--no-filtering",
        ],
    ]
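
    # Every configuration must produce identical annotations; each run's TSV
    # output is compared against the previous run's.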
    self.last_result = None
    for config in configs:
      result = self._RunTest(config, USE_PYTHON_AUDITOR)
      if not result:
        print("No output for config: %s" % config)
        return False
      if self.last_result and self.last_result != result:
        print("Unexpected different results for config: %s" % config)
        return False
      self.last_result = result
    return True

  def CheckOutputExpectations(self):
    # This test can be replaced by getting results from a diagnostic mode call
    # to traffic_annotation_auditor, and checking for an expected minimum number
    # of items for each type of pattern that it extracts. E.g., we should have
    # many annotations of each type (complete, partial, ...), functions that
    # need annotations, direct assignment to mutable annotations, etc.

    # |self.last_result| includes the content of the TSV file that the auditor
    # generates. Counting the number of end of lines in the text will give the
    # number of extracted annotations.
    annotations_count = self.last_result.count("\n")
    print("%i annotations found in auditor's output." % annotations_count)
    if annotations_count < MINIMUM_EXPECTED_NUMBER_OF_ANNOTATIONS:
      print("Expected at least %i annotations." %
            MINIMUM_EXPECTED_NUMBER_OF_ANNOTATIONS)
      return False
    return True

  def _RunTest(self, args, use_python_auditor):
    """Runs the auditor test with given |args|, and returns the extracted
    annotations.

    Args:
      args: list of str Arguments to be passed to auditor.
      use_python_auditor: If True, test auditor.py instead of
        traffic_annotation_auditor.exe.

    Returns:
      str Content of annotations.tsv file if successful, otherwise None.
    """
    if use_python_auditor:
      auditor_name = "auditor.py"
    else:
      auditor_name = "traffic_annotation_auditor"
    print("Running %s using config: %s" % (auditor_name, args))
    try:
      os.remove(self.annotations_filename)
    except OSError:
      pass

    stdout_text, stderr_text, return_code = self.tools.RunAuditor(
        args + [
            "--annotations-file", self.annotations_filename, "--errors-file",
            self.errors_filename
        ], use_python_auditor)
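
    # A run is considered successful only if the auditor produced an
    # annotations file and exited without errors.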
    annotations = None
    if os.path.exists(self.annotations_filename):
      # When tests are run on all files (without filtering), there might be some
      # compile errors in irrelevant files on Windows that can be ignored.
      if (return_code and "--no-filtering" in args and
          sys.platform.startswith(('win', 'cygwin'))):
        print("Ignoring return code: %i" % return_code)
        return_code = 0
      if not return_code:
        with open(self.annotations_filename) as annotations_file:
          annotations = annotations_file.read()
      if not self.persist_annotations:
        os.remove(self.annotations_filename)

    if annotations:
      print("Test PASSED.")
    else:
      print("Test FAILED.")

    if stdout_text:
      print(stdout_text)
    if stderr_text:
      print(stderr_text)

    return annotations


def main():
  if not TEST_IS_ENABLED:
    return 0

  parser = argparse.ArgumentParser(
      description="Traffic Annotation Tests checker.")
  parser.add_argument(
      '--build-path',
      help='Specifies a compiled build directory, e.g. out/Debug. If not '
      'specified, the script tries to guess it. Will not proceed if not '
      'found.')
  parser.add_argument(
      '--annotations-file',
      help='Optional path to a TSV output file with all annotations.')
  parser.add_argument('--errors-file',
                      help='Optional path to a JSON output file with errors.')

  args = parser.parse_args()
  checker = TrafficAnnotationTestsChecker(args.build_path,
                                          args.annotations_file,
                                          args.errors_file)
  return 0 if checker.RunAllTests() else 1


if __name__ == '__main__':
  sys.exit(main())