File: test_netpar.py

package info (click to toggle)
neuron 8.2.6-2
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid
  • size: 34,760 kB
  • sloc: cpp: 149,571; python: 58,465; ansic: 50,329; sh: 3,510; xml: 213; pascal: 51; makefile: 35; sed: 5
file content (127 lines) | stat: -rw-r--r-- 3,052 bytes parent folder | download | duplicates (3)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
# Error tests for netpar.cpp
# Some minor coverage increase for netpar.cpp when nhost > 1

# Prepend the location of the current script to the search path, so we can
# import from test_hoc_po.
import os, sys

sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))

from neuron import h

pc = h.ParallelContext()

from neuron.expect_hocerr import expect_err
from test_hoc_po import Ring

cvode = h.CVode()


def run(tstop: float) -> None:
    """Initialize the model at -65 mV and integrate up to *tstop* ms.

    Uses pc.psolve so spike exchange works under MPI; the 1 ms maxstep
    bounds the interprocessor spike-exchange interval.
    """
    pc.set_maxstep(1)
    h.finitialize(-65)
    pc.psolve(tstop)


def mpi_test1():  # pgvts_deliver and pc.spike_record
    """Coverage for pgvts_deliver and pc.spike_record when nhost > 1.

    Splits each HOC cell across ranks with multisplit, records spikes for
    every gid, and runs a short global-variable-time-step simulation, then
    restores the solver settings it changed.
    """
    saved_dt = h.dt
    ring = Ring(3, 2)
    # Only HOC cells (same type as the top-level h object) participate in
    # the multisplit decomposition; Python cells are left untouched.
    hoc_type = type(h)
    for gid, cell in ring.cells.items():
        if type(cell) == hoc_type:
            cell.axon.disconnect()
            pc.multisplit(cell.axon(0), gid)
            pc.multisplit(cell.soma(1), gid)

    pc.multisplit()  # finalize the multisplit setup across ranks
    gid_vec = h.Vector(list(ring.cells))
    pc.spike_record(gid_vec, ring.spiketime, ring.spikegid)
    cvode.active(1)
    cvode.condition_order(1)  # investigate why condition_order(2) fails
    cvode.debug_event(1)
    run(3.0)

    cvode.queue_mode(1, 0)
    pc.spike_compress(3, 1)

    # Restore defaults so subsequent tests start from a clean state.
    cvode.active(0)
    h.dt = saved_dt
    pc.gid_clear()
    cvode.debug_event(0)
    cvode.condition_order(1)


def mpi_test2():
    """Placeholder for additional nhost > 1 coverage; intentionally a no-op."""
    pass


def err_test1():
    """Exercise error paths in netpar.cpp gid registration and gid_connect.

    NOTE: the strings passed to expect_err are evaluated in this frame, so
    the local names ``r`` and ``nc`` must stay exactly as written.
    """
    r = Ring(3, 2)
    # gid 0 is already owned by the Ring, so re-registering it must fail.
    expect_err("pc.set_gid2node(0, pc.id())")
    # Connecting to gid 10000 marks it as in use on this rank ...
    nc = pc.gid_connect(10000, r.cells[0].syn)
    # ... so it can no longer be registered as a local gid, and the
    # owner-only queries below must also fail for it.
    expect_err("pc.set_gid2node(10000, pc.id())")
    expect_err("pc.threshold(10000)")
    expect_err("pc.cell(10000)")
    # gid 99999 has never been mentioned at all at this point.
    expect_err("pc.cell(99999)")
    expect_err("pc.cell(0, None)")
    expect_err("pc.gid_connect(0, None)")
    # Register 99999 locally but never attach a cell: connecting to a
    # gid with no output presyn is an error.
    pc.set_gid2node(99999, pc.id())
    expect_err("pc.gid_connect(99999, r.cells[0].syn)")
    # Third argument must be a NetCon, and its target must match the
    # requested synapse.
    expect_err("pc.gid_connect(1, r.cells[0].syn, h.Vector())")
    nc = h.NetCon(None, r.cells[1].syn)
    expect_err("pc.gid_connect(1, r.cells[0].syn, nc)")
    # A NetCon with the matching target is accepted.
    nc = h.NetCon(None, r.cells[0].syn)
    pc.gid_connect(1, r.cells[0].syn, nc)

    pc.gid_clear()
    del nc, r
    # NOTE(review): bare locals() call has no visible effect — presumably
    # retained to force cleanup of the deleted locals; confirm before removing.
    locals()


def err_test2():
    """Exercise mindelay/threading error paths and histogram/prcellstate coverage.

    NOTE: the strings passed to expect_err are evaluated in this frame, so
    the local name ``r`` must stay exactly as written.
    """
    r = Ring(3, 2)
    # A zero-delay NetCon is illegal with more than one thread.
    r.ncs[0].delay = 0
    pc.nthread(2)
    pc.set_maxstep(1)
    expect_err("h.finitialize(-65)")
    pc.nthread(1)
    # With delay == dt the initialization succeeds ...
    r.ncs[0].delay = h.dt
    pc.set_maxstep(h.dt)
    h.finitialize(-65)
    if "'NRN_ENABLE_MPI=OFF'" not in h.nrnversion(6):
        # Fixed step method does not allow a mindelay < dt + 1e-10
        expect_err("pc.psolve(1.0)")  # wonder if this is too stringent an error
    else:
        pc.psolve(1.0)
    r.ncs[0].delay = 0
    # With a zero-delay NetCon present, maxstep falls back to the 1 ms cap.
    assert pc.set_maxstep(1) == 1.0
    cvode.queue_mode(0, 1)
    pc.set_maxstep(1)
    h.finitialize(-65)
    pc.psolve(1)
    assert cvode.queue_mode() == 0
    # Cover max_histogram with and without a destination Vector.
    v1 = h.Vector(10)
    pc.max_histogram(v1)
    v2 = h.Vector(10)
    pc.max_histogram()
    # cover nrn_gid2outpresyn
    pc.prcellstate(10000, "tmp")
    pc.prcellstate(0, "tmp")

    pc.gid_clear()
    del r
    # NOTE(review): bare locals() call has no visible effect — presumably
    # retained to force cleanup of the deleted local; confirm before removing.
    locals()


def test_1():
    """Dispatch: MPI coverage tests when nhost > 1, error-path tests serially."""
    serial = pc.nhost() <= 1
    if serial:
        err_test1()
        err_test2()
    else:
        mpi_test1()
        mpi_test2()


if __name__ == "__main__":
    test_1()
    pc.barrier()
    h.quit()