File: report_tests.py

#!/usr/bin/env python3
from __future__ import print_function
import glob, os, re, stat
import optparse
import inspect

"""
Quick script for parsing the output of the test system and summarizing the results.
"""

def inInstallDir():
  """
  When PETSc is installed then this file in installed in:
       <PREFIX>/share/petsc/examples/config/gmakegentest.py
  otherwise the path is:
       <PETSC_DIR>/config/gmakegentest.py
  We use this difference to determine if we are in installdir
  """
  thisscriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
  dirlist=thisscriptdir.split(os.path.sep)
  if len(dirlist)>4:
    lastfour=os.path.sep.join(dirlist[-4:])
    return lastfour==os.path.join('share','petsc','examples','config')
  else:
    return False

def summarize_results(directory,make,ntime,etime,show_results):
  ''' Loop over all of the results files and summarize the results'''
  startdir = os.getcwd()
  try:
    os.chdir(directory)
  except OSError:
    print('# No test results in ', directory)
    return
  summary={'total':0,'success':0,'failed':0,'failures':[],'todo':0,'skip':0,
           'time':0, 'cputime':0}
  timesummary={}
  cputimesummary={}
  timelist=[]
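  # Each *.counts file contains whitespace-separated key/value lines, e.g.
  # "total N", "success N", "failed N", "todo N", "skip N",
  # "time <test-seconds> <cpu-seconds>" and "failures <name> ...";
  # the loop below accumulates them into the summary dict.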
  for cfile in glob.glob('*.counts'):
    with open(cfile, 'r') as f:
      for line in f:
        l = line.split()
        if l[0] == 'failures':
           if len(l)>1:
             summary[l[0]] += l[1:]
        elif l[0] == 'time':
           if len(l)==1: continue
           summary[l[0]] += float(l[1])
           summary['cputime'] += float(l[2])
           timesummary[cfile]=float(l[1])
           cputimesummary[cfile]=float(l[2])
           timelist.append(float(l[1]))
        elif l[0] not in summary:
           continue
        else:
           summary[l[0]] += int(l[1])

  failstr=' '.join(summary['failures'])
  if show_results:
    print("\n# -------------")
    print("#   Summary    ")
    print("# -------------")
    if failstr.strip(): print("# FAILED " + failstr)

    for t in "success failed todo skip".split():
      percent=summary[t]/float(summary['total'])*100 if summary['total'] else 0.0
      print("# %s %d/%d tests (%3.1f%%)" % (t, summary[t], summary['total'], percent))
    print("#")
    if etime:
      print("# Wall clock time for tests: %s sec"% etime)
    print("# Approximate CPU time (not incl. build time): %s sec"% summary['cputime'])
  else:
    if failstr.strip(): print("\n\n# FAILED " + failstr)

  if failstr.strip():
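      # Failure names may carry a diff- or cmd- prefix; strip it so the
      # remaining tokens are the base test names.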
      fail_targets=(
          re.sub('cmd-','',
          re.sub('diff-','',failstr+' '))
          )
      # Strip off the subtest suffix (anything after '+')
      fail_list=[]
      for failure in fail_targets.split():
         fail_list.append(failure.split('+')[0])
      fail_list=list(set(fail_list))
      fail_targets=' '.join(fail_list)

      # create simple little script
      sfile=os.path.join(os.path.dirname(os.path.abspath(os.curdir)),'echofailures.sh')
      with open(sfile,'w') as f:
          f.write('echo '+fail_targets.strip())
      st = os.stat(sfile)
      os.chmod(sfile, st.st_mode | stat.S_IEXEC)
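      # echofailures.sh simply echoes the failed test names; it is written one
      # level above the counts directory and made executable, presumably so
      # the test harness can pick up the list when rerunning failures.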

      # Pick the makefile name to show in the rerun message
      makefile="gmakefile.test" if inInstallDir() else "gmakefile"

      print("#\n# To rerun failed tests: ")
      print("#     "+make+" -f "+makefile+" test test-fail=1")

  if ntime>0 and show_results:
      print("#\n# Timing summary (actual test time / total CPU time): ")
      timelist=list(set(timelist))
      timelist.sort(reverse=True)
      nlim=(ntime if ntime<len(timelist) else len(timelist))
      # Double loop so the report is printed in decreasing time order
      for timelimit in timelist[0:nlim]:
        for cf in timesummary:
          if timesummary[cf] == timelimit:
            print("#   %s: %.2f sec / %.2f sec" % (re.sub('.counts','',cf), timesummary[cf], cputimesummary[cf]))
  os.chdir(startdir)
  return

def get_test_data(directory):
    """
    Create dictionary structure with test data
    """
    startdir= os.getcwd()
    try:
        os.chdir(directory)
    except OSError:
        return
    # loop over *.counts files for all the problems tested in the test suite
    testdata = {}
    for cfile in glob.glob('*.counts'):
        # first we get rid of the .counts extension, then we split the name in two
        # to recover the problem name and the package it belongs to
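        # e.g. (assumed naming convention) "snes_tutorials-ex19_1.counts"
        # yields package "snes", test type "tutorials" and problem "ex19_1"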
        fname = cfile.split('.')[0]
        testname = fname.split('-')
        probname = ''.join(testname[1:])
        # we split the package name into its PETSc module (e.g. snes)
        # and its test type (e.g. tutorials)
        testname_list = testname[0].split('_')
        pkgname = testname_list[0]
        testtype = testname_list[-1]
        # in order to correctly assemble the folder path for problem outputs, we
        # iterate over any possible subpackage names and test suffixes
        testname_short = testname_list[:-1]
        prob_subdir = os.path.join('', *testname_short)
        probfolder = 'run%s'%probname
        probdir = os.path.join('..', prob_subdir, 'examples', testtype, probfolder)
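        # Subtests carry a numeric suffix (e.g. runex19_1); if that folder does
        # not exist, fall back to the base run folder by stripping the suffix.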
        if not os.path.exists(probdir):
            probfolder = probfolder.split('_')[0]
            probdir = os.path.join('..', prob_subdir, 'examples', testtype, probfolder)
        probfullpath=os.path.normpath(os.path.join(directory,probdir))
        # assemble the final full folder path for problem outputs and read the files
        try:
            with open('%s/diff-%s.out'%(probdir, probfolder),'r') as probdiff:
                difflines = probdiff.readlines()
        except IOError:
            difflines = []
        try:
            with open('%s/%s.err'%(probdir, probfolder),'r') as probstderr:
                stderrlines = probstderr.readlines()
        except IOError:
            stderrlines = []
        try:
            with open('%s/%s.tmp'%(probdir, probname), 'r') as probstdout:
                stdoutlines = probstdout.readlines()
        except IOError:
            stdoutlines = []
        # join the package, subpackage and test type names into a "class" name
        classname = pkgname
        for item in testname_list[1:]:
            classname += '.%s'%item
        # if this is the first time we see this package, initialize its dict
        if pkgname not in testdata.keys():
            testdata[pkgname] = {
                'total':0,
                'success':0,
                'failed':0,
                'errors':0,
                'todo':0,
                'skip':0,
                'time':0,
                'problems':{}
            }
        # add the dict for the problem into the dict for the package
        testdata[pkgname]['problems'][probname] = {
            'classname':classname,
            'time':0,
            'failed':False,
            'skipped':False,
            'diff':difflines,
            'stdout':stdoutlines,
            'stderr':stderrlines,
            'probdir':probfullpath,
            'fullname':fname
        }
        # process the *.counts file and increment problem status trackers
        with open(cfile, 'r') as f:
            for line in f:
                l = line.split()
                if l[0] == 'time':
                    if len(l)==1: continue
                    testdata[pkgname]['problems'][probname][l[0]] = float(l[1])
                    testdata[pkgname][l[0]] += float(l[1])
                elif l[0] in testdata[pkgname].keys():
                    # This block includes total, success, failed, skip, todo
                    num_int=int(l[1])
                    testdata[pkgname][l[0]] += num_int
                    if l[0] in ['failed']:
                        # If the test wrote anything to stderr, count it as a
                        # failure even if the reported failed count is zero
                        if len(testdata[pkgname]['problems'][probname]['stderr'])>0:
                            if not num_int: num_int=1
                        if num_int:
                            testdata[pkgname]['errors'] += 1
                            testdata[pkgname]['problems'][probname][l[0]] = True
                    if l[0] in ['skip'] and num_int:
                        testdata[pkgname]['problems'][probname][l[0]] = True
                else:
                    continue
    os.chdir(startdir)  # Keep function in good state
    return testdata

def show_fail(testdata):
    """ Show the failures and commands to run them
    """
    for pkg in testdata.keys():
        testsuite = testdata[pkg]
        for prob in testsuite['problems'].keys():
            p = testsuite['problems'][prob]
            cdbase='cd '+p['probdir']+' && '
            if p['skipped']:
                # if we got here, the TAP output shows a skipped test
                pass
            elif len(p['stderr'])>0:
                # if we got here, the test crashed with an error;
                # print the command needed to rerun it
                shbase=os.path.join(p['probdir'], p['fullname'])
                shfile=shbase+".sh"
                if not os.path.exists(shfile):
                    shfile=glob.glob(shbase+"*")[0]
                with open(shfile, 'r') as sh:
                    cmd = sh.read()
                print(p['fullname']+': '+cdbase+cmd.split('>')[0])
            elif len(p['diff'])>0:
                # if we got here, the test output did not match the stored output file;
                # print the diff command that reproduces the comparison
                shbase=os.path.join(p['probdir'], 'diff-'+p['fullname'])
                shfile=shbase+".sh"
                if not os.path.exists(shfile):
                    shfile=glob.glob(shbase+"*")[0]
                with open(shfile, 'r') as sh:
                    cmd = sh.read()
                print(p['fullname']+': '+cdbase+cmd.split('>')[0])
    return

def generate_xml(testdata,directory):
    """ write testdata information into a jUnit formatted XLM file
    """
    startdir= os.getcwd()
    try:
        os.chdir(directory)
    except OSError:
        return
    junit = open('../testresults.xml', 'w')
    junit.write('<?xml version="1.0" ?>\n')
    junit.write('<testsuites>\n')
    for pkg in testdata.keys():
        testsuite = testdata[pkg]
        junit.write('  <testsuite errors="%i" failures="%i" name="%s" tests="%i">\n'%(
            testsuite['errors'], testsuite['failed'], pkg, testsuite['total']))
        for prob in testsuite['problems'].keys():
            p = testsuite['problems'][prob]
            junit.write('    <testcase classname="%s" name="%s" time="%f">\n'%(
                p['classname'], prob, p['time']))
            if p['skipped']:
                # if we got here, the TAP output shows a skipped test
                junit.write('      <skipped/>\n')
            elif p['failed']:
                # if we got here, the test crashed with an error
                # we show the stderr output under <error>
                junit.write('      <error type="crash">\n')
                junit.write("<![CDATA[\n") # CDATA is necessary to preserve whitespace
                # error messages often also go to stdout, so we print both
                junit.write("stdout:\n")
                if len(p['stdout'])>0:
                    for line in p['stdout']:
                        junit.write("%s\n"%line.rstrip())
                junit.write("\nstderr:\n")
                for line in p['stderr']:
                    junit.write("%s\n"%line.rstrip())
                junit.write("]]>")
                junit.write('      </error>\n')
            elif len(p['diff'])>0:
                # if we got here, the test output did not match the stored output file
                # we show the diff between new output and old output under <failure>
                junit.write('      <failure type="output">\n')
                junit.write("<![CDATA[\n") # CDATA is necessary to preserve whitespace
                for line in p['diff']:
                    junit.write("%s\n"%line.rstrip())
                junit.write("]]>")
                junit.write('      </failure>\n')
            junit.write('    </testcase>\n')
        junit.write('  </testsuite>\n')
    junit.write('</testsuites>')
    junit.close()
    os.chdir(startdir)
    return

def main():
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option('-d', '--directory', dest='directory',
                      help='Directory containing results of PETSc test system',
                      default=os.path.join(os.environ.get('PETSC_ARCH',''),
                                           'tests','counts'))
    parser.add_option('-e', '--elapsed_time', dest='elapsed_time',
                      help='Report elapsed time in output',
                      default=None)
    parser.add_option('-m', '--make', dest='make',
                      help='make executable to report in summary',
                      default='make')
    parser.add_option('-t', '--time', dest='time',
                      help='-t n: Report on the n most expensive jobs',
                      default=0)
    parser.add_option('-f', '--fail', dest='show_fail', action="store_true",
                      help='Show the failed tests and how to run them')
    parser.add_option('-s', '--show', dest='show_results', action="store_true",
                      help='Summarize the test results')
    options, args = parser.parse_args()

    # Process arguments
    if len(args) > 0:
      parser.print_usage()
      return

    # gmakefile.test is invoked frequently for searches; in those cases we
    # still want to perform actions, but we do not want to generate XML or
    # show the summarized results.

    if not options.show_fail:
      summarize_results(options.directory,options.make,int(options.time),
                        options.elapsed_time,options.show_results)
    testresults=get_test_data(options.directory)

    if options.show_fail:
      show_fail(testresults)
    # Don't generate xml if doing searches
    elif options.show_results:
      generate_xml(testresults, options.directory)

if __name__ == "__main__":
    main()