File: coverage_summary.py

#!/usr/bin/python
# Copyright (c) 2011 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os
import sys
import coverage_helper

"""
Coverage for Linux

On the Linux Coverage Bots, coverage information is stored in
   scons-out/coverage-<os>-<arch>/coverage.lcov

The data is in the form of:
  SF:<file_name>
  DA:<line,hits>
  end_of_record

Where SF specifies the full path to the source file.  DA specifies a line
number in that source and how many hits happened on that line. 'end_of_record'
marks the end of a source file.

The lcov data is generated by updating hit counts for any source file contained
in any instrumented executable.  The lcov data lists only lines which were
compiled into the executables.

The data is transformed into a dictionary containing per-file coverage data,
which is also mapped into per 'group' coverage data, where a group is a list
of paths of interest.
"""


#
# SortedOutput
#
# SortedOutput is an object for collecting lines of output which can be
# sorted in sections.  Lines are added via 'append' and sections are
# declared via the 'section' function, which can prepend a title and
# optionally sort the current section.
#
class SortedOutput(object):
  def __init__(self):
    self.out_lines = []
    self.q_lines = []

  def append(self, line):
    self.q_lines.append(line)

  def section(self, sort_it = True, title = None):
    # Insert title if one is provided
    if title: self.out_lines.append(title)

    # Sort the current lines if requested
    if sort_it: self.q_lines.sort()

    # Append the current set of lines to the output
    self.out_lines.extend(self.q_lines)
    self.q_lines = []

  def output(self, out = sys.stdout):
    # Append any lines in the outstanding unsorted list
    self.out_lines.extend(self.q_lines)
    for line in self.out_lines:
      out.write('%s\n' % line)

    # Clear the output
    self.out_lines = []
    self.q_lines = []
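
# A minimal usage sketch for SortedOutput (the output lines below are made-up
# examples, not real coverage data):
#
#   out = SortedOutput()
#   out.append(' 50.00  1/2 b.c')
#   out.append(' 25.00  1/4 a.c')
#   out.section(title='Per file:')  # sorts the two lines, title goes first
#   out.output()                    # writes the section to sys.stdout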

#
# CoverageParse
#
# Parse the 'lcov' data generated by the scons coverage target into a
# dictionary mapping each file to a tuple of (lines hit, total lines).
def CoverageParse(lines, ignore_set):
  # Starting with an empty dictionary
  out = {}
  filename = None

  # Scan each input line
  for line in lines:
    line = line.strip()
    words = line.split(':')

    # Ignore malformed lines
    if len(words) < 1 or len(words) > 2: continue

    # If we are starting a new file, clear running totals
    if words[0] == 'SF':
      filename = words[1]
      if sys.platform == 'win32':
        if filename.startswith('/cygdrive/c/'):
          filename = 'C:/' + filename[len('/cygdrive/c/'):]
        if filename.startswith('/cygdrive/e/'):
          filename = 'E:/' + filename[len('/cygdrive/e/'):]
        filename = os.path.normpath(filename)
      total = 0
      used  = 0
      continue

    # If we have line data, append it to the list of line hits
    if words[0] == 'DA':
      # Split the data into line number/hits
      info = words[1].split(',')

      # Check if there are any hits on this line
      if int(info[1]): used += 1
      total += 1
      continue

    # If this is the end of a file, store the running totals
    if line == 'end_of_record':
      # If this file is not ignored, add it to the dictionary
      path, name = os.path.split(filename)
      if name not in ignore_set:
        out[filename] = (used, total)

      # Reset filename to None to force an exception if something goes wrong
      # in the parsing (such as a missing start-of-file marker)
      filename = None
      continue
  return out
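
# Given the hypothetical lcov record sketched near the top of this file and an
# empty ignore_set, CoverageParse would return something like:
#   {'/b/build/native_client/src/shared/platform/nacl_log.c': (2, 3)}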


def CoveragePercent(used, total, name):
  if total:
    percent = float(used) * float(100) / float(total)
    return '%6.2f  %d/%d %s' % (percent, used, total, name)
  else:
    return '%6.2f  > Unused < %s' % (0.0, name)


def ShortGroup(path):
  parts = path.rsplit('/native_client/', 1)
  if len(parts) > 1:
    name = parts[1]
  else:
    name = path
  return name.replace('/', '_')


def CoverageResult(used, total, name):
  # Perf dashboard format results.
  return [
      'RESULT coverage_percent_%s: coverage_percent_%s= %f coverage%%' % (
          name, name, used * 100.0 / total),
      'RESULT lines_%s: lines_%s= %d lines' % (
          name, name, total),
      'RESULT covered_%s: covered_%s= %d lines' % (
          name, name, used),
  ]
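
# With the hypothetical numbers above (used=2, total=3) and a made-up group
# name 'platform', the first RESULT line would look roughly like:
#   RESULT coverage_percent_platform: coverage_percent_platform= 66.666667 coverage%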


def CoverageProcess(platform, verbose = False):
  helper = coverage_helper.CoverageHelper()
  groups  = helper.groups
  filters = helper.path_filter

  # Load and parse the 'lcov' file
  covfile = 'scons-out/coverage-%s/coverage/coverage.lcov' % platform
  with open(covfile, 'r') as covdata_file:
    covdata = covdata_file.readlines()
  cov = CoverageParse(covdata, helper.ignore_set)

  # Get a sorted list of sources
  filelist = sorted(cov.keys())

  # Construct output objects
  filtered = SortedOutput()
  perfile = SortedOutput()
  pergroup = SortedOutput()
  summary = SortedOutput()
  results = []

  # Build the group dictionary with an empty list for each key
  group_data = {}
  for g in groups: group_data[g] = []

  for f in filelist:
    # Ignore test files and standard includes
    if f.find('test') >= 0: continue
    if f.find('/usr/include') >= 0: continue

    # Find the first group this path matches
    group = None
    for g in groups:
      if f.find(g) >= 0:
        group = g
        break

    # Retrieve the used and total line information for this file
    used = cov[f][0]
    total = cov[f][1]

    # If this file matched a group, add it to that group's data
    if group: group_data[group].append((used, total))

    # In either case, add it to the per file data
    perfile.append(CoveragePercent(used, total, f))

  # Calculate the overall percentage by iterating through the non-filtered groups
  global_total = 0
  global_used = 0
  for group in groups:
    used = 0
    total = 0
    for u, t in group_data[group]:
      used += u
      total += t

    sgroup = ShortGroup(group)

    # If this is a filtered group, add it to the filtered output set
    if group in filters:
      filtered.append(CoveragePercent(used, total, sgroup))
    else:
      # Otherwise, add it to the per-group output, but only if the group has lines
      if total:
        pergroup.append(CoveragePercent(used, total, sgroup))
        results.extend(CoverageResult(used, total, sgroup))
        global_total += total
        global_used += used

  # Add titles and sort each output group
  perfile.section(title = '\nPer file:')
  pergroup.section(title = '\nGroups:')
  filtered.section(title = '\nFiltered files:')

  summary.append(CoveragePercent(global_used, global_total, 'Overall'))
  summary.section(title = '\nSummary')
  results.extend(CoverageResult(global_used, global_total, 'Overall'))

  summary.output()
  pergroup.output()
  filtered.output()
  if verbose: perfile.output()

  # Emit perf dashboard results.
  print ''
  print 'Dashboard Results'
  print '-----------------'
  for line in results:
    print line
  print ''

  return (global_used * 100) / global_total

def main(argv):
  platform = argv[0]

  # TODO: Raise the coverage requirement once we get coverage to a reasonable
  # number.
  if platform.startswith('linux-'):
    coverage_target = 45
  else:
    coverage_target = 12

  if CoverageProcess(platform) >= coverage_target:
    return 0
  print 'Coverage below %d%%, failed.' % coverage_target
  return -1

if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))