File: test_wheels.sh

package info (click to toggle)
neuron 8.2.6-2
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid
  • size: 34,760 kB
  • sloc: cpp: 149,571; python: 58,465; ansic: 50,329; sh: 3,510; xml: 213; pascal: 51; makefile: 35; sed: 5
file content (354 lines) | stat: -rwxr-xr-x 12,368 bytes parent folder | download | duplicates (2)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
#!/usr/bin/env bash
# A simple set of tests checking if a wheel is working correctly
set -xe

# See CMake's CMAKE_HOST_SYSTEM_PROCESSOR documentation
# On the systems where we are building wheel we can rely
# on uname -m. Note that this is just wheel testing script.
ARCH_DIR=$(uname -m)

# the wheel tests rely on relative paths (setup.py, test/, share/...)
if [ ! -f setup.py ]; then
    echo "Error: Please launch $0 from the root dir"
    exit 1
fi

if [ "$#" -lt 2 ]; then
    echo "Usage: $(basename "$0") python_exe python_wheel [use_virtual_env]"
    exit 1
fi

# cli parameters
python_exe=$1   # python to be used for testing
python_wheel=$2 # python wheel to be tested
use_venv=$3     # if $3 is not "false" then use virtual environment

# There are some considerations to test coreneuron with gpu support:
# - if coreneuron support exist then we can always run all tests on cpu
# - if coreneuron gpu support exist then we can run always binaries like
#   nrniv and nrniv-core without existence of NVC/NVC++ compilers
# - if coreneuron gpu support exist and nvc compiler available then we
#   can compile mod files and run tests via special binary
# - Note that the tests that use coreneuron can not be launched using
#   python or nrniv -python because in gpu build coreneuron is created
#   as a static library and linked to special. Hence only special can
#   be used to launch GPU tests
has_coreneuron=false   # true if coreneuron support is available
has_gpu_support=false  # true if coreneuron gpu support is available
has_dev_env=true       # true if c/c++ dev environment exist to compile mod files
run_gpu_test=false     # true if test should be run on the gpu

# python version being used, e.g. "39" for Python 3.9
python_ver=$("$python_exe" -c "import sys; print('%d%d' % tuple(sys.version_info)[:2])")


run_mpi_test () {
  # Run MPI-based wheel tests with a given launcher.
  # Arguments:
  #   $1 - MPI launcher binary (e.g. mpirun, srun)
  #   $2 - human readable MPI name (also used to select MPICH/Intel-only tests)
  #   $3 - environment module to load/unload around the tests (may be empty)
  # Globals read: python_exe, ARCH_DIR, has_coreneuron, has_gpu_support,
  #               has_dev_env, run_gpu_test, SKIP_EMBEDED_PYTHON_TEST
  local mpi_launcher=${1}
  local mpi_name=${2}
  local mpi_module=${3}

  echo "======= Testing $mpi_name ========"
  if [ -n "$mpi_module" ]; then
     echo "Loading module $mpi_module"
     if [[ $(hostname -f) = *r*bbp.epfl.ch* ]]; then
        # plain echo (without -e) would print a literal "\t"; printf expands it
        printf '\tusing unstable on BB5\n'
        module load unstable
     fi
     module load "$mpi_module"
  fi

  # hoc and python based test
  "$mpi_launcher" -n 2 "$python_exe" src/parallel/test0.py -mpi --expected-hosts 2
  "$mpi_launcher" -n 2 nrniv src/parallel/test0.hoc -mpi --expected-hosts 2

  # TODO: run coreneuron binary shipped inside wheel. This can not be executed in platform
  #       independent manner because we need to pre-load libmpi library beforehand e.g. using
  #       LD_PRELOAD mechanism. For now, execute without --mpi argument
  if [[ "$has_coreneuron" == "true" ]] && [[ $mpi_name == *"MPICH"* || $mpi_name == *"Intel MPI"* ]]; then
      local site_package_dir corenrn_mpi_lib
      site_package_dir=$("$python_exe" -c 'import os, neuron; print(os.path.dirname(neuron.__file__))')
      # deliberate glob: resolve the versioned libcorenrnmpi_mpich* library name
      corenrn_mpi_lib=$(ls "$site_package_dir"/.data/lib/libcorenrnmpi_mpich*)
      "$mpi_launcher" -n 1 nrniv-core --datpath external/coreneuron/tests/integration/ring --mpi-lib="$corenrn_mpi_lib"
      diff -w out.dat external/coreneuron/tests/integration/ring/out.dat.ref
  fi

  # rest of the tests we need development environment. For GPU wheel
  # make sure we have necessary compiler.
  if [[ "$has_dev_env" == "false" ]]; then
      echo "WARNING: Development environment missing, skipping rest of the MPI tests!"
      return
  fi

  # build new special
  rm -rf "$ARCH_DIR"
  nrnivmodl tmp_mod

  # run python test via nrniv and special (except on azure pipelines)
  if [[ "$SKIP_EMBEDED_PYTHON_TEST" != "true" ]]; then
    "$mpi_launcher" -n 2 "./$ARCH_DIR/special" -python src/parallel/test0.py -mpi --expected-hosts 2
    "$mpi_launcher" -n 2 nrniv -python src/parallel/test0.py -mpi --expected-hosts 2
  fi

  # coreneuron execution via neuron
  if [[ "$has_coreneuron" == "true" ]]; then
    rm -rf "$ARCH_DIR"
    nrnivmodl -coreneuron "test/coreneuron/mod files/"

    # python as a launcher can be used only with non-gpu build
    if [[ "$has_gpu_support" == "false" ]]; then
      "$mpi_launcher" -n 1 "$python_exe" test/coreneuron/test_direct.py
    fi

    # using -python doesn't work on Azure CI
    if [[ "$SKIP_EMBEDED_PYTHON_TEST" != "true" ]]; then
      if [[ "$has_gpu_support" == "false" ]]; then
        "$mpi_launcher" -n 2 nrniv -python -mpi test/coreneuron/test_direct.py
      fi
      local run_on_gpu
      run_on_gpu=$([ "$run_gpu_test" == "true" ] && echo "1" || echo "0")
      NVCOMPILER_ACC_TIME=1 CORENRN_ENABLE_GPU=$run_on_gpu "$mpi_launcher" -n 2 "./$ARCH_DIR/special" -python -mpi test/coreneuron/test_direct.py
    fi
  fi

  if [ -n "$mpi_module" ]; then
     echo "Unloading module $mpi_module"
     module unload "$mpi_module"
  fi
  echo -e "----------------------\n\n"
}


run_serial_test () {
    # Run the serial (non-MPI) wheel test suite.
    # Globals read: python_exe, ARCH_DIR, has_coreneuron, has_gpu_support,
    #               has_dev_env, run_gpu_test, SKIP_EMBEDED_PYTHON_TEST
    # Expects tmp_mod/ with cacum.mod to exist (prepared by test_wheel).

    # Test 1: run base tests for within python
    "$python_exe" -c "import neuron; neuron.test(); neuron.test_rxd()"

    # Test 2: execute nrniv
    nrniv -c "print \"hello\""

    # Test 3: run coreneuron binary shipped inside wheel
    if [[ "$has_coreneuron" == "true" ]]; then
        nrniv-core --datpath external/coreneuron/tests/integration/ring
        diff -w out.dat external/coreneuron/tests/integration/ring/out.dat.ref
    fi

    # rest of the tests we need development environment
    if [[ "$has_dev_env" == "false" ]]; then
        echo "WARNING: Development environment missing, skipping rest of the serial tests!"
        return
    fi

    # Test 4: execute nrnivmodl
    rm -rf "$ARCH_DIR"
    nrnivmodl tmp_mod

    # Test 5: execute special hoc interpreter
    "./$ARCH_DIR/special" -c "print \"hello\""

    # Test 6: run basic tests via python while loading shared library
    "$python_exe" -c "import neuron; neuron.test(); neuron.test_rxd(); quit()"

    # Test 7: run basic test to use compiled mod file
    "$python_exe" -c "import neuron; from neuron import h; s = h.Section(); s.insert('cacum'); quit()"

    # Test 8: run basic tests via special : azure pipelines get stuck with their
    # own python from hosted cache (most likely security settings).
    if [[ "$SKIP_EMBEDED_PYTHON_TEST" != "true" ]]; then
      "./$ARCH_DIR/special" -python -c "import neuron; neuron.test(); neuron.test_rxd(); quit()"
      nrniv -python -c "import neuron; neuron.test(); neuron.test_rxd(); quit()"
    else
      "$python_exe" -c "import neuron; neuron.test(); neuron.test_rxd(); quit()"
    fi

    # Test 9: coreneuron execution via neuron
    if [[ "$has_coreneuron" == "true" ]]; then
      rm -rf "$ARCH_DIR"

      # first test vanilla coreneuron support, without nrnivmodl
      if [[ "$has_gpu_support" == "false" ]]; then
        "$python_exe" test/coreneuron/test_psolve.py
      fi

      nrnivmodl -coreneuron "test/coreneuron/mod files/"

      # coreneuron+gpu can be used via python but special only
      if [[ "$has_gpu_support" == "false" ]]; then
        "$python_exe" test/coreneuron/test_direct.py
      fi

      # using -python doesn't work on Azure CI
      if [[ "$SKIP_EMBEDED_PYTHON_TEST" != "true" ]]; then
        # we can run special with or without gpu wheel
        "./$ARCH_DIR/special" -python test/coreneuron/test_direct.py

        # python and nrniv can be used only for non-gpu wheel
        if [[ "$has_gpu_support" == "false" ]]; then
          nrniv -python test/coreneuron/test_direct.py
        fi

        if [[ "$run_gpu_test" == "true" ]]; then
          NVCOMPILER_ACC_TIME=1 CORENRN_ENABLE_GPU=1 "./$ARCH_DIR/special" -python test/coreneuron/test_direct.py
        fi
      fi

      rm -rf "$ARCH_DIR"
    fi


    # Test 10: run demo
    neurondemo -c 'demo(4)' -c 'run()' -c 'quit()'

    # Test 11: modlunit available (and can find nrnunits.lib)
    modlunit tmp_mod/cacum.mod
}


run_parallel_test() {
    # Dispatch MPI tests for the current CI platform, switching between the
    # MPI implementations available on each (MPICH, OpenMPI, vendor MPIs).

    # this is for MacOS system
    if [[ "$OSTYPE" == "darwin"* ]]; then
      # assume both MPIs are installed via brew.

      brew unlink openmpi
      brew link mpich
      BREW_PREFIX=$(brew --prefix)

      # TODO : latest mpich has issue on Azure OSX
      if [[ "$CI_OS_NAME" == "osx" ]]; then
          export DYLD_LIBRARY_PATH=${BREW_PREFIX}/opt/mpich/lib:$DYLD_LIBRARY_PATH
          run_mpi_test "${BREW_PREFIX}/opt/mpich/bin/mpirun" "MPICH" ""
      fi

      brew unlink mpich
      brew link openmpi
      export DYLD_LIBRARY_PATH=${BREW_PREFIX}/opt/open-mpi/lib:$DYLD_LIBRARY_PATH
      run_mpi_test "${BREW_PREFIX}/opt/open-mpi/bin/mpirun" "OpenMPI" ""

    # CI Linux or Azure Linux
    elif [[ "$CI_OS_NAME" == "linux" || "$AGENT_OS" == "Linux" ]]; then
      # make debugging easier
      sudo update-alternatives --get-selections | grep mpi
      sudo update-alternatives --list mpi-x86_64-linux-gnu
      # choose mpich
      sudo update-alternatives --set mpi-x86_64-linux-gnu /usr/include/x86_64-linux-gnu/mpich
      run_mpi_test "mpirun.mpich" "MPICH" ""
      # choose openmpi
      sudo update-alternatives --set mpi-x86_64-linux-gnu /usr/lib/x86_64-linux-gnu/openmpi/include
      run_mpi_test "mpirun.openmpi" "OpenMPI" ""

    # BB5 with multiple MPI libraries
    elif [[ $(hostname -f) = *r*bbp.epfl.ch* ]]; then
      run_mpi_test "srun" "HPE-MPT" "hpe-mpi"
      run_mpi_test "mpirun" "Intel MPI" "intel-oneapi-mpi"
      run_mpi_test "srun" "MVAPICH2" "mvapich2"

    # circle-ci build
    elif [[ "$CIRCLECI" == "true" ]]; then
      sudo update-alternatives --set mpi-aarch64-linux-gnu /usr/include/aarch64-linux-gnu/mpich
      run_mpi_test "mpirun.mpich" "MPICH" ""
      sudo update-alternatives --set mpi-aarch64-linux-gnu /usr/lib/aarch64-linux-gnu/openmpi/include
      run_mpi_test "mpirun.openmpi" "OpenMPI" ""

    # linux desktop or docker container used for wheel
    else
      export PATH=/opt/mpich/bin:$PATH
      export LD_LIBRARY_PATH=/opt/mpich/lib:$LD_LIBRARY_PATH
      run_mpi_test "mpirun" "MPICH" ""

      export PATH=/opt/openmpi/bin:$PATH
      export LD_LIBRARY_PATH=/opt/openmpi/lib:$LD_LIBRARY_PATH
      run_mpi_test "mpirun" "OpenMPI" ""
    fi
}


test_wheel () {
    # Full wheel test suite: prepare a sample mod file for nrnivmodl checks,
    # then run the serial tests followed by the MPI tests, and clean up.
    mkdir -p tmp_mod
    cp share/examples/nrniv/nmodl/cacum.mod tmp_mod/

    echo "Using $(command -v "$python_exe") : $("$python_exe" --version)"
    echo "=========== SERIAL TESTS ==========="
    run_serial_test

    echo "=========== MPI TESTS ============"
    run_parallel_test

    #clean-up
    rm -rf tmp_mod "$ARCH_DIR"
}


test_wheel_basic_python () {
    # Minimal smoke test: import neuron and run its built-in test suites
    # (used e.g. to re-check the wheel against the oldest supported NumPy).
    echo "=========== BASIC PYTHON TESTS ==========="
    "$python_exe" -c "import neuron; neuron.test(); neuron.test_rxd()"
}


echo "== Testing $python_wheel using $python_exe ($python_ver) =="


# create python virtual environment and use `python` as binary name
# because it will be correct one from venv.
if [[ "$use_venv" != "false" ]]; then
  echo " == Creating virtual environment == "
  venv_name="nrn_test_venv_${python_ver}"
  "$python_exe" -m venv "$venv_name"
  . "$venv_name/bin/activate"
  python_exe=$(command -v python)
else
  echo " == Using global install == "
fi


# gpu wheel needs updated pip
"$python_exe" -m pip install --upgrade pip

# we'll want setuptools installed and up-to-date
"$python_exe" -m pip install --upgrade setuptools


# install test requirements
"$python_exe" -m pip install -r packaging/python/test_requirements.txt
"$python_exe" -m pip install "$python_wheel"
# show whichever neuron package flavour got installed
"$python_exe" -m pip show neuron \
    || "$python_exe" -m pip show neuron-nightly \
    || "$python_exe" -m pip show neuron-gpu \
    || "$python_exe" -m pip show neuron-gpu-nightly


# check the existence of coreneuron support
compile_options=$(nrniv -nobanner -nogui -c 'nrnversion(6)')
if echo "$compile_options" | grep -q "NRN_ENABLE_CORENEURON=ON"; then
  has_coreneuron=true
fi

# check if the gpu support is enabled
if echo "$compile_options" | grep -q "CORENRN_ENABLE_GPU=ON"; then
  has_gpu_support=true
fi

# in case of gpu support, nvc/nvc++ compiler must exist to compile mod files
if [[ "$has_gpu_support" == "true" ]]; then
  if ! command -v nvc &> /dev/null; then
    has_dev_env=false
  fi

  # check if nvidia gpu exist (todo: not a robust check)
  if pgaccelinfo -nvidia | grep -q "Device Name"; then
    run_gpu_test=true
  fi
fi


# run tests with latest NumPy
echo " == Running tests with latest NumPy == "
test_wheel

# run basic python tests with oldest supported NumPy
echo " == Running basic python tests with oldest supported NumPy == "
"$python_exe" -m pip install -r packaging/python/oldest_numpy_requirements.txt
test_wheel_basic_python

# cleanup: deactivate and remove the venv only when we actually created one
# (previously "Removed $venv_name" was printed unconditionally while the
# rm itself was commented out, leaving the venv behind)
if [[ "$use_venv" != "false" ]]; then
  deactivate
  rm -rf "$venv_name"
  echo "Removed $venv_name"
fi