File: benchmark_spikequeue.py

Package: brian 2.9.0-2
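"""Benchmarks for brian2's SpikeQueue.

Times the ``compress`` and ``push`` operations of
``brian2.synapses.spikequeue.SpikeQueue`` for all-to-all and one-to-one
connectivity patterns (helpers imported from ``brian2.tests.test_spikequeue``).
"""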
import timeit
import itertools

import numpy as np

# Setup statements shared by all benchmarks; joined with ';' when passed
# to timeit.
GENERAL_SETUP = ['import numpy as np',
                 'from brian2.tests.test_spikequeue import create_all_to_all, create_one_to_one',
                 'from brian2.units.stdunits import ms',
                 'from brian2.synapses.spikequeue import SpikeQueue']

def get_setup_code(N, create_func):
    # Build the setup statements: create the synapses/delays with the given
    # helper function and construct a SpikeQueue with a 0.1 ms time step.
    return GENERAL_SETUP + [
        'synapses, delays = {}({})'.format(create_func, N),
        'queue = SpikeQueue(synapses, delays, 0.1*ms)']
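# For example, get_setup_code(10, 'create_one_to_one') returns GENERAL_SETUP
# followed by:
#   ['synapses, delays = create_one_to_one(10)',
#    'queue = SpikeQueue(synapses, delays, 0.1*ms)']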

def test_compress(N, create_func):
    setup_code = get_setup_code(N, create_func)
    # Scale the iteration count inversely with N; timeit requires an integer,
    # so use floor division (plain '/' yields a float on Python 3).
    number = 1000 // N
    results = timeit.repeat('queue.compress()', ';'.join(setup_code), repeat=5,
                            number=number)
    # Normalize to the time per single compress() call.
    return np.array(results) / number


def test_push(N, create_func):
    setup_code = get_setup_code(N, create_func) + ['queue.compress()']
    number = 5000 // N  # integer iteration count required by timeit
    results = timeit.repeat('queue.push(np.arange({}));queue.next()'.format(N),
                            ';'.join(setup_code), repeat=5,
                            number=number)
    # Normalize to the time per single push/next cycle.
    return np.array(results) / number


def run_benchmark(test_func, N, create_func):
    result = test_func(N, create_func)
    # Report the median over the five repeats to reduce the effect of outliers.
    print('{} -- {}({}) : {}'.format(test_func.__name__, create_func, N,
                                     np.median(result)))


if __name__ == '__main__':
    # Benchmark every combination of test, network size and connectivity.
    for test, N, create_func in itertools.product((test_compress, test_push),
                                                  (10, 100),
                                                  ('create_all_to_all',
                                                   'create_one_to_one')):
        run_benchmark(test, N, create_func)
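
# Each printed line has the form
#   <test function> -- <create function>(<N>) : <median seconds per iteration>
# Absolute timings are machine-dependent; compare values relatively.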