File: parallelwave-mpi.py

#!/usr/bin/env python
"""
A simple Python program that solves a 2D wave equation in parallel.
Domain partitioning and inter-processor communication
are done by an object of class MPIRectPartitioner2D
(which is a subclass of RectPartitioner2D and uses MPI via mpi4py)

An example of running the program is (8 processors, 4x2 partition,
400x100 grid cells)::

   $ ipcluster start --engines=MPIExec -n 8 # start 8 engines with mpiexec
   $ python parallelwave-mpi.py --grid 400 100 --partition 4 2

See also parallelwave.py, which runs the same program, but uses ipyparallel's
own ZeroMQ-based messaging instead of MPI for the inter-engine communication.

Authors
-------

 * Xing Cai
 * Min Ragan-Kelley

"""

import argparse
import time

from numpy import sqrt

import ipyparallel as ipp
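
# Note: the three helper functions below are never called in this client
# process; they are sent to the engines (via view.apply_sync / as arguments)
# and run in each engine's namespace.  MPIRectPartitioner2D and WaveSolver are
# defined there by RectPartitioner.py and wavesolver.py (executed on the
# engines via view.run() further down), which is why the names are flagged as
# undefined here (noqa: F821).  The `global` statements store the created
# objects in the engine's namespace so they can be referenced later with
# ipp.Reference.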


def setup_partitioner(index, num_procs, gnum_cells, parts):
    """create a partitioner in the engine namespace"""
    global partitioner
    p = MPIRectPartitioner2D(my_id=index, num_procs=num_procs)  # noqa: F821
    p.redim(global_num_cells=gnum_cells, num_parts=parts)
    p.prepare_communication()
    # put the partitioner into the global namespace:
    partitioner = p


def setup_solver(*args, **kwargs):
    """create a WaveSolver in the engine namespace"""
    global solver
    solver = WaveSolver(*args, **kwargs)  # noqa: F821


def wave_saver(u, x, y, t):
    """save the wave log"""
    global u_hist
    global t_hist
    t_hist.append(t)
    u_hist.append(1.0 * u)


# main program:
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    paa = parser.add_argument
    paa(
        '--grid',
        '-g',
        type=int,
        nargs=2,
        default=[100, 100],
        dest='grid',
        help="Cells in the grid, e.g. --grid 100 200",
    )
    paa(
        '--partition',
        '-p',
        type=int,
        nargs=2,
        default=None,
        help="Process partition grid, e.g. --partition 4 2 for 4x2",
    )
    paa('-c', type=float, default=1.0, help="Wave speed")
    paa('-Ly', type=float, default=1.0, help="system size (in y)")
    paa('-Lx', type=float, default=1.0, help="system size (in x)")
    paa('-t', '--tstop', type=float, default=1.0, help="Time units to run")
    paa(
        '--profile',
        type=str,
        default='default',
        help="Specify the ipcluster profile for the client to connect to.",
    )
    paa(
        '--save',
        action='store_true',
        help="Add this flag to save the time/wave history during the run.",
    )
    paa(
        '--scalar',
        action='store_true',
        help="Also run with scalar interior implementation, to see vector speedup.",
    )

    ns = parser.parse_args()
    # set up arguments
    grid = ns.grid
    partition = ns.partition
    Lx = ns.Lx
    Ly = ns.Ly
    c = ns.c
    tstop = ns.tstop
    if ns.save:
        user_action = wave_saver
    else:
        user_action = None

    num_cells = 1.0 * (grid[0] - 1) * (grid[1] - 1)
    final_test = True
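    # with final_test=True, each engine's solve() returns a value whose sum over
    # all engines, divided by num_cells and square-rooted, gives the RMS norm of
    # the final field (see the norm calculation below)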

    # create the Client
    rc = ipp.Client(profile=ns.profile)
    num_procs = len(rc.ids)

    if partition is None:
        partition = [1, num_procs]

    assert partition[0] * partition[1] == num_procs, (
        "can't map partition %s to %i engines"
        % (
            partition,
            num_procs,
        )
    )

    view = rc[:]
    print(f"Running {grid} system on {partition} processes until {tstop:f}")

    # functions defining initial/boundary/source conditions
    def I(x, y):
        from numpy import exp

        return 1.5 * exp(-100 * ((x - 0.5) ** 2 + (y - 0.5) ** 2))

    def f(x, y, t):
        return 0.0
        # from numpy import exp,sin
        # return 10*exp(-(x - sin(100*t))**2)

    def bc(x, y, t):
        return 0.0

    # initial imports, setup rank
    view.execute(
        '\n'.join(
            [
                "from mpi4py import MPI",
                "import numpy",
                "mpi = MPI.COMM_WORLD",
                "my_id = MPI.COMM_WORLD.Get_rank()",
            ]
        ),
        block=True,
    )
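    # each engine now holds its MPI rank as `my_id`; ipyparallel engine IDs are
    # not guaranteed to match MPI ranks, which is why the plotting code at the
    # end re-maps ranks to engine IDs before gathering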

    # initialize t_hist/u_hist for saving the state at each step (optional)
    view['t_hist'] = []
    view['u_hist'] = []

    # set vector/scalar implementation details
    impl = {}
    impl['ic'] = 'vectorized'
    impl['inner'] = 'scalar'
    impl['bc'] = 'vectorized'
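    # in WaveSolver, 'vectorized' routines use numpy array operations, while
    # 'scalar' loops over cells in pure Python; the timing comparison below
    # contrasts the two inner-loop implementations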

    # execute some files so that the classes we need will be defined on the engines:
    view.run('RectPartitioner.py')
    view.run('wavesolver.py')
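    # RectPartitioner.py and wavesolver.py are assumed to sit alongside this
    # script; running them on the engines defines MPIRectPartitioner2D and
    # WaveSolver in each engine's namespace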

    # setup remote partitioner
    # note that Reference means that the argument passed to setup_partitioner will be the
    # object named 'my_id' in the engine's namespace
    view.apply_sync(
        setup_partitioner, ipp.Reference('my_id'), num_procs, grid, partition
    )
    # wait for initial communication to complete
    view.execute('mpi.barrier()')
    # setup remote solvers
    view.apply_sync(
        setup_solver,
        I,
        f,
        c,
        bc,
        Lx,
        Ly,
        partitioner=ipp.Reference('partitioner'),
        dt=0,
        implementation=impl,
    )
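    # dt=0 is passed through to WaveSolver; presumably this tells the solver to
    # choose its own (stability-limited) time step rather than use a fixed one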

    # helper that runs on the engines and calls solver.solve; 'solver' is the
    # engine-global WaveSolver created by setup_solver:
    def _solve(*args, **kwargs):
        return solver.solve(*args, **kwargs)

    if ns.scalar:
        impl['inner'] = 'scalar'
        # run first with element-wise Python operations for each cell
        t0 = time.time()
        ar = view.apply_async(
            _solve,
            tstop,
            dt=0,
            verbose=True,
            final_test=final_test,
            user_action=user_action,
        )
        if final_test:
            # this sum is performed element-wise as results finish
            s = sum(ar)
            # the L2 norm (RMS) of the result:
            norm = sqrt(s / num_cells)
        else:
            norm = -1
        t1 = time.time()
        print(f'scalar inner-version, Wtime={t1 - t0:g}, norm={norm:g}')

    impl['inner'] = 'vectorized'
    # setup new solvers
    view.apply_sync(
        setup_solver,
        I,
        f,
        c,
        bc,
        Lx,
        Ly,
        partitioner=ipp.Reference('partitioner'),
        dt=0,
        implementation=impl,
    )
    view.execute('mpi.barrier()')

    # run again with numpy vectorized inner-implementation
    t0 = time.time()
    ar = view.apply_async(
        _solve,
        tstop,
        dt=0,
        verbose=True,
        final_test=final_test,
        user_action=user_action,
    )
    if final_test:
        # this sum is performed element-wise as results finish
        s = sum(ar)
        # the L2 norm (RMS) of the result:
        norm = sqrt(s / num_cells)
    else:
        norm = -1
    t1 = time.time()
    print(f'vector inner-version, Wtime={t1 - t0:g}, norm={norm:g}')

    # if ns.save is True, then u_hist stores the history of u as a list
    # If the partition scheme is Nx1, then u can be reconstructed via 'gather':
    if ns.save and partition[-1] == 1:
        import matplotlib.pyplot as plt

        view.execute('u_last=u_hist[-1]')
        # map mpi IDs to IPython IDs, which may not match
        ranks = view['my_id']
        targets = [ranks.index(rank) for rank in range(len(ranks))]
        u_last = rc[targets].gather('u_last', block=True)
        plt.pcolor(u_last)
        plt.show()