File: benchmark.py

#!/usr/bin/env python 

##############################################################################
#
# Copyright (c) 2003-2016 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
#
##############################################################################

from __future__ import print_function, division

import os,csv

device_id = '0'  # index of the device to use

binary_filename = '../spmv'                  # command used to run the tests
output_file = 'benchmark_output.log'        # file where results are stored


# The unstructured matrices are available online:
#    http://www.nvidia.com/content/NV_Research/matrices.zip
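#
# a hypothetical way to fetch them (run once from a shell; the unpack destination
# below is an assumption, chosen to match the layout expected by unstructured_path):
#   wget http://www.nvidia.com/content/NV_Research/matrices.zip
#   unzip matrices.zip -d ~/scratch/Matrices/williams/mm/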

mats = []
unstructured_path = '~/scratch/Matrices/williams/mm/'
unstructured_mats = [('Dense','dense2.mtx'),
                     ('Protein','pdb1HYS.mtx'),
                     ('FEM/Spheres','consph.mtx'),
                     ('FEM/Cantilever','cant.mtx'),
                     ('Wind Tunnel','pwtk.mtx'),
                     ('FEM/Harbor','rma10.mtx'),
                     ('QCD','qcd5_4.mtx'),
                     ('FEM/Ship','shipsec1.mtx'),
                     ('Economics','mac_econ_fwd500.mtx'),
                     ('Epidemiology','mc2depi.mtx'),    
                     ('FEM/Accelerator','cop20k_A.mtx'),
                     ('Circuit','scircuit.mtx'),
                     ('Webbase','webbase-1M.mtx'),
                     ('LP','rail4284.mtx') ]
unstructured_mats = [ mat + (unstructured_path,) for mat in unstructured_mats]

structured_path = '~/scratch/Matrices/stencil/'
structured_mats = [('Laplacian_3pt_stencil',  '3pt_1000000.mtx'),
                   ('Laplacian_5pt_stencil',  '5pt_1000x1000.mtx'),
                   ('Laplacian_7pt_stencil',  '7pt_100x100x100.mtx'),
                   ('Laplacian_9pt_stencil',  '9pt_1000x1000.mtx'),
                   ('Laplacian_27pt_stencil', '27pt_100x100x100.mtx')]
structured_mats = [ mat + (structured_path,) for mat in structured_mats]

# assemble suite of matrices
trials = unstructured_mats  + structured_mats
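# each trials entry is a (name, filename, path) tuple,
# e.g. ('Wind Tunnel', 'pwtk.mtx', unstructured_path)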


def run_tests(value_type):
    # remove previous result (if present)
    open(output_file,'w').close()
    
    # run benchmark for each file
    for matrix,filename,path in trials:
        # expand '~' so the path is valid regardless of how the command is executed
        matrix_filename = os.path.join(os.path.expanduser(path), filename)

        # setup the command to execute
        cmd = binary_filename 
        cmd += ' ' + matrix_filename                  # e.g. pwtk.mtx
        cmd += ' --device=' + device_id               # e.g. 0 or 1
        cmd += ' --value_type=' + value_type          # e.g. float or double
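
        # with the defaults above, the assembled command looks something like
        #   ../spmv ~/scratch/Matrices/williams/mm/pwtk.mtx --device=0 --value_type=float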

        # execute the benchmark on this file (the exit status from os.system is not checked)
        os.system(cmd)
    
    # process output_file
    matrices = {}
    results = {}
    kernels = set()
    #
    fid = open(output_file)
    for line in fid.readlines():
        # each line consists of whitespace-separated key=value pairs
        tokens = dict( [tuple(part.split('=')) for part in line.split()] )

        # skip blank lines
        if not tokens:
            continue

        if 'file' in tokens:
            # a 'file=' line opens the results for a new matrix
            file = os.path.split(tokens['file'])[1]
            matrices[file] = tokens
            results[file] = {}
        else:
            # kernel lines belong to the most recently seen 'file=' line
            kernel = tokens['kernel']
            results[file][kernel] = tokens
            kernels.add(kernel)
    fid.close()
    
    ## put CPU results before GPU results
    #kernels = ['csr_serial'] + sorted(kernels - set(['csr_serial']))
    kernels = sorted(kernels)

    # write out CSV formatted results
    def write_csv(field):
        fid = open('bench_' + value_type + '_' + field + '.csv','w')
        writer = csv.writer(fid)
        writer.writerow(['matrix','file','rows','cols','nonzeros'] + kernels)
        
        for (matrix,file,path) in trials:
            line = [matrix, file, matrices[file]['rows'], matrices[file]['cols'], matrices[file]['nonzeros']]
        
            matrix_results = results[file]
            for kernel in kernels:
                if kernel in matrix_results:
                    line.append( matrix_results[kernel][field] )
                else:
                    line.append(' ')
            writer.writerow( line )
        fid.close()
    
    write_csv('gflops') #GFLOP/s
    write_csv('gbytes') #GBytes/s
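
    # the two write_csv calls above leave per-precision summaries on disk,
    # e.g. bench_float_gflops.csv and bench_float_gbytes.csv when value_type is 'float'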


if __name__ == '__main__':
    # benchmark both single and double precision
    run_tests('float')
    run_tests('double')