File: TestParallelNumpy.py

Package: vtk7 7.1.1+dfsg2-8

""" Test for various numpy_interface modules. Main goal is to test
parallel algorithms in vtk.numpy_interface.algorithms."""

from __future__ import print_function

import sys
try:
    import numpy
except ImportError:
    print("Numpy (http://numpy.scipy.org) not found.", end=' ')
    print("This test requires numpy!")
    sys.exit(0)

import vtk
from vtk.test import Testing
import vtk.numpy_interface.dataset_adapter as dsa
import vtk.numpy_interface.algorithms as algs
from mpi4py import MPI

c = vtk.vtkMPIController()
#c.SetGlobalController(None)
rank = c.GetLocalProcessId()
size = c.GetNumberOfProcesses()

def PRINT(text, values):
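    """Reduce the per-rank residual across all ranks, check that the
    global sum is still close to zero, and print it on rank 0."""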
    if values is dsa.NoneArray:
        values = numpy.array(0, dtype=numpy.float64)
    else:
        values = numpy.array(numpy.sum(values)).astype(numpy.float64)
    res = numpy.array(values)
    MPI.COMM_WORLD.Allreduce([values, MPI.DOUBLE], [res, MPI.DOUBLE], MPI.SUM)
    assert numpy.abs(res) < 1E-5
    if rank == 0:
        print(text, res)

def testArrays(rtData, rtData2, grad, grad2, total_npts):
    " Test various parallel algorithms."
    if rank == 0:
        print('-----------------------')
    PRINT( "SUM ones:", algs.sum(rtData / rtData) - total_npts )

    PRINT( "SUM sin:", (algs.sum(algs.sin(rtData) + 1) - numpy.sum(numpy.sin(rtData2) + 1)) / numpy.sum(numpy.sin(rtData2) + 1) )

    PRINT( "rtData min:", algs.min(rtData) - numpy.min(rtData2) )
    PRINT( "rtData max:", algs.max(rtData) - numpy.max(rtData2) )
    PRINT( "rtData sum:", (algs.sum(rtData) - numpy.sum(rtData2)) / (2*numpy.sum(rtData2)) )
    PRINT( "rtData mean:", (algs.mean(rtData) - numpy.mean(rtData2)) / (2*numpy.mean(rtData2)) )
    PRINT( "rtData var:", (algs.var(rtData) - numpy.var(rtData2)) / numpy.var(rtData2) )
    PRINT( "rtData std:", (algs.std(rtData) - numpy.std(rtData2)) / numpy.std(rtData2) )

    PRINT( "grad min:", algs.min(grad) - numpy.min(grad2) )
    PRINT( "grad max:", algs.max(grad) - numpy.max(grad2) )
    PRINT( "grad min 0:", algs.min(grad, 0) - numpy.min(grad2, 0) )
    PRINT( "grad max 0:", algs.max(grad, 0) - numpy.max(grad2, 0) )
    PRINT( "grad min 1:", algs.sum(algs.min(grad, 1)) - numpy.sum(numpy.min(grad2, 1)) )
    PRINT( "grad max 1:", algs.sum(algs.max(grad, 1)) - numpy.sum(numpy.max(grad2, 1)) )
    PRINT( "grad sum 1:", algs.sum(algs.sum(grad, 1)) - numpy.sum(numpy.sum(grad2, 1)) )
    PRINT( "grad var:", (algs.var(grad) - numpy.var(grad2)) / numpy.var(grad2) )
    PRINT( "grad var 0:", (algs.var(grad, 0) - numpy.var(grad2, 0)) / numpy.var(grad2, 0) )

w = vtk.vtkRTAnalyticSource()
# Update with ghost level because gradient needs it
# to be piece independent
w.UpdatePiece(rank, size, 1)

print(w.GetOutput())
print(w.GetOutputInformation(0))

# The parallel arrays that we care about
ds = dsa.WrapDataObject(w.GetOutput())
rtData = ds.PointData['RTData']
grad = algs.gradient(rtData)
ds.PointData.append(grad, 'gradient')
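# The gradient is computed while the ghost points are still present (so it
# is piece independent) and appended as point data, so that Crop() below
# trims it together with RTData.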

# Crop any ghost points out so that every point is owned by exactly one rank
org_ext = w.GetOutput().GetExtent()
ext = list(org_ext)
wext = w.GetOutputInformation(0).Get(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT())
for i in range(3):
    if ext[2*i] != wext[2*i]:
        ext[2*i] = ext[2*i] + 2
    if ext[2*i+1] != wext[2*i+1]:
        ext[2*i+1] = ext[2*i+1] - 1
if ext != list(org_ext):
    w.GetOutput().Crop(ext)

# Cropped arrays
rtData = ds.PointData['RTData']
grad = ds.PointData['gradient']

# The whole dataset so that we can compare
# against parallel algorithms.
w2 = vtk.vtkRTAnalyticSource()
w2.Update()

ds2 = dsa.WrapDataObject(w2.GetOutput())
rtData2 = ds2.PointData['RTData']
grad2 = algs.gradient(rtData2)

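# Global number of points over all ranks; testArrays() uses it as the
# reference value for sum(rtData / rtData).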
npts = numpy.array(numpy.int32(ds.GetNumberOfPoints()))
total_npts = numpy.array(npts)
MPI.COMM_WORLD.Allreduce([npts, MPI.INT], [total_npts, MPI.INT], MPI.SUM)

# Test simple distributed data.
testArrays(rtData, rtData2, grad, grad2, total_npts)

# Check that we can disable parallelism by using a dummy controller
# even when a global controller is set
assert algs.sum(rtData / rtData, controller=vtk.vtkDummyController()) != total_npts

# Test where arrays are NoneArray on one of the ranks.
if size > 1:
    if rank == 0:
        rtData3 = rtData2
        grad3 = grad2
    else:
        rtData3 = dsa.NoneArray
        grad3 = dsa.NoneArray

    testArrays(rtData3, rtData2, grad3, grad2, total_npts)

# Test composite arrays that contain NoneArray blocks.
rtData3 = dsa.VTKCompositeDataArray([rtData, dsa.NoneArray])
grad3 = dsa.VTKCompositeDataArray([dsa.NoneArray, grad])

testArrays(rtData3, rtData2, grad3, grad2, total_npts)

# Test where arrays are NoneArray on one of the ranks
# and composite on others.
if size > 1:
    if rank == 1:
        rtData3 = dsa.VTKCompositeDataArray([rtData2])
        grad3 = dsa.VTKCompositeDataArray([grad2])
    else:
        rtData3 = dsa.NoneArray
        grad3 = dsa.NoneArray

    testArrays(rtData3, rtData2, grad3, grad2, total_npts)

# Test composite arrays with multiple blocks.

# Split the local image into 2 pieces.
datasets = []
for i in range(2):
    image = vtk.vtkImageData()
    image.ShallowCopy(w.GetOutput())
    t = vtk.vtkExtentTranslator()
    wext = image.GetExtent()
    t.SetWholeExtent(wext)
    t.SetPiece(i)
    t.SetNumberOfPieces(2)
    t.PieceToExtent()
    ext = list(t.GetExtent())

    # Crop any ghost points out
    for j in range(3):
        if ext[2*j] != wext[2*j]:
            ext[2*j] = ext[2*j] + 1
    if ext != list(org_ext):
        image.Crop(ext)

    datasets.append(dsa.WrapDataObject(image))

rtData3 = dsa.VTKCompositeDataArray([datasets[0].PointData['RTData'], datasets[1].PointData['RTData']])
grad3 = dsa.VTKCompositeDataArray([datasets[0].PointData['gradient'], datasets[1].PointData['gradient']])

testArrays(rtData3, rtData2, grad3, grad2, total_npts)

# Test min/max per block
NUM_BLOCKS = 10

w = vtk.vtkRTAnalyticSource()
w.SetWholeExtent(0, 10, 0, 10, 0, 10)
w.Update()

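# Build a multiblock dataset whose image blocks are shallow copies of the
# small image produced above.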
c = vtk.vtkMultiBlockDataSet()
c.SetNumberOfBlocks(size*NUM_BLOCKS)

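# Every rank fills NUM_BLOCKS blocks.  Ranks other than 0 start three block
# indices early, so with more than one process ranks 0 and 1 both contribute
# to blocks 7-9 and the per-block reductions must combine data from two ranks.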
if rank == 0:
    start = 0
    end = NUM_BLOCKS
else:
    start = rank*NUM_BLOCKS - 3
    end = start + NUM_BLOCKS

for i in range(start, end):
    a = vtk.vtkImageData()
    a.ShallowCopy(w.GetOutput())
    c.SetBlock(i, a)

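# On rank 0, replace the last block with an empty vtkPolyData; in parallel
# runs rank 1 still provides image data for the same block index.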
if rank == 0:
    c.SetBlock(NUM_BLOCKS - 1, vtk.vtkPolyData())

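# Wrap the composite dataset.  abs() and two successive gradients yield
# point arrays with different numbers of components for the per-block
# reduction tests below.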
cdata = dsa.WrapDataObject(c)
rtdata = cdata.PointData['RTData']
rtdata = algs.abs(rtdata)
g = algs.gradient(rtdata)
g2 = algs.gradient(g)

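# Validate the per-block reductions: rank 0 halves its arrays and checks
# min/max/sum/mean per block against plain numpy; rank 2 (when present)
# passes NoneArray to exercise the no-data path; all other ranks simply
# participate in the collective reductions with their original arrays.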
res = True
dummy = vtk.vtkDummyController()
for axis in [None, 0]:
    for array in [rtdata, g, g2]:
        if rank == 0:
            array2 = array/2
            min = algs.min_per_block(array2, axis=axis)
            res &= numpy.all(min.Arrays[NUM_BLOCKS - 1] == numpy.min(array, axis=axis))
            all_min = algs.min(min, controller=dummy)
            all_min_true = numpy.min([algs.min(array, controller=dummy), algs.min(array2, controller=dummy)])
            res &= all_min == all_min_true
            max = algs.max_per_block(array2, axis=axis)
            res &= numpy.all(max.Arrays[NUM_BLOCKS - 1] == numpy.max(array, axis=axis))
            all_max = algs.max(max, controller=dummy)
            all_max_true = numpy.max([algs.max(array, controller=dummy), algs.max(array2, controller=dummy)])
            res &= all_max == all_max_true
            sum = algs.sum_per_block(array2, axis=axis)
            sum_true = numpy.sum(array2.Arrays[0]) * (NUM_BLOCKS-1)
            sum_true += numpy.sum(array.Arrays[0]) * 3
            res &= numpy.sum(algs.sum(sum, controller=dummy) - algs.sum(sum_true, controller=dummy)) == 0
            mean = algs.mean_per_block(array2, axis=axis)
            res &= numpy.sum(mean.Arrays[0] - numpy.mean(array2.Arrays[0], axis=axis)) < 1E-6
            if len(array.Arrays[0].shape) == 1:
                stk = numpy.hstack
            else:
                stk = numpy.vstack
            res &= numpy.sum(mean.Arrays[NUM_BLOCKS-2] - numpy.mean(stk((array.Arrays[0], array2.Arrays[0])), axis=axis)) < 1E-4
        elif rank == 2:
            min = algs.min_per_block(dsa.NoneArray, axis=axis)
            max = algs.max_per_block(dsa.NoneArray, axis=axis)
            sum = algs.sum_per_block(dsa.NoneArray, axis=axis)
            mean = algs.mean_per_block(dsa.NoneArray, axis=axis)
        else:
            min = algs.min_per_block(array, axis=axis)
            max = algs.max_per_block(array, axis=axis)
            sum = algs.sum_per_block(array, axis=axis)
            mean = algs.mean_per_block(array, axis=axis)
        if array is g and axis == 0:
            ug = algs.unstructured_from_composite_arrays(mean, [(mean, 'mean')])
            if mean is dsa.NoneArray:
                res &= ug.GetNumberOfPoints() == 0
            else:
                _array = ug.GetPointData().GetArray('mean')
                ntuples = _array.GetNumberOfTuples()
                for i in range(ntuples):
                    if rank == 1:
                        idx = i+3
                    else:
                        idx = i
                    res &= _array.GetTuple(i) == tuple(mean.Arrays[idx])

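# Finally, reductions over NoneArray must return NoneArray, and ranks that
# have no data still have to take part in the collective calls below.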
res &= algs.min_per_block(dsa.NoneArray) is dsa.NoneArray

if rank == 0:
    min = algs.min_per_block(rtdata.Arrays[0]/2)
elif rank == 2:
    min = algs.min_per_block(dsa.NoneArray)
    res &= min is dsa.NoneArray
else:
    min = algs.min_per_block(rtdata.Arrays[0])

if rank == 0:
    min = algs.min(rtdata.Arrays[0])
    res &= min == numpy.min(rtdata.Arrays[0])
else:
    min = algs.min(dsa.NoneArray)
    res &= min is dsa.NoneArray

res &= algs.min(dsa.NoneArray) is dsa.NoneArray

if rank == 0:
    res &= numpy.all(algs.min(g2, axis=0) == numpy.min(g2.Arrays[0], axis=0))
else:
    res &= algs.min(dsa.NoneArray, axis=0) is dsa.NoneArray

res = numpy.array(res, dtype=bool)
all_res = numpy.array(res)
mpitype = algs._lookup_mpi_type(bool)
MPI.COMM_WORLD.Allreduce([res, mpitype], [all_res, mpitype], MPI.LAND)
assert all_res