1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211
|
{#
USES_VARIABLES { _synaptic_pre, _synaptic_post, N,
N_pre, N_post, _source_offset, _target_offset }
#}
{# WRITES_TO_READ_ONLY_VARIABLES { _synaptic_pre, _synaptic_post, N}
#}
{# ITERATE_ALL { _idx } #}
{% extends 'common_group.py_' %}
{% block maincode %}
# The imports and the sampling helper below only need to be set up once per
# namespace. Trying the name lookup is cheap; only catching the exception is
# expensive, and that happens just the first time this code runs.
try:
    _done_initial_import
except NameError:  # name not yet defined, i.e. this is the first run
    _done_initial_import = True
    import sys as _sys
    from numpy.random import rand as _np_rand
    if _sys.version_info[0] == 2:
        range = xrange  # Python 2 compatibility: use the lazy range
    from numpy.random import binomial as _binomial
    import bisect as _bisect
    def _sample_without_replacement(low, high, step, p=None, size=None):
        # Return a sorted sample, drawn without replacement, from the values
        # in range(low, high, step). Exactly one of two modes is used:
        #   * size=... : draw a fixed number of elements (p must be None)
        #   * p=...    : include each element independently with probability p
        if p == 0:
            return _numpy.array([], dtype=_numpy.int32)
        if p == 1:
            return _numpy.arange(low, high, step)
        # n is the total size of the population range(low, high, step)
        if step > 0:
            n = (high-low-1)//step+1
        else:
            n = (low-high-1)//-step+1
        if n <= 0 or (size is not None and size == 0):
            return _numpy.array([], dtype=_numpy.int32)
        elif size is not None and size < 0:
        {% if skip_if_invalid %}
            return _numpy.array([], dtype=_numpy.int32)
        {% else %}
            raise IndexError(f"Requested sample size {size} is negative.")
        {% endif %}
        elif size is not None and size > n:
        {% if skip_if_invalid %}
            size = n
        {% else %}
            raise IndexError(f"Requested sample size {size} is bigger than the "
                             f"population size {n}.")
        {% endif %}
        if n == size:
            return _numpy.arange(low, high, step)
        if p is None:
            # Fixed-size sample
            if step == 1:
                _choice = _numpy.random.choice(n, size=size, replace=False) + low
            else:
                _chose_from = _numpy.arange(low, high, step)
                _choice = _numpy.random.choice(_chose_from, size=size, replace=False)
            return _numpy.sort(_choice)
        if n <= 1000 or p > 0.25:  # based on rough profiling
            # Direct method: one uniform random number per population element
            samples, = (_np_rand(n) < p).nonzero()
        else:
            # use jump method: draw the (geometrically distributed) gaps
            # between hits directly, instead of testing every element
            pconst = 1.0/_numpy.log(1-p)  # this will be fine as 0<p<=0.25
            start = 0  # start will be the smallest number we can produce (with a jump of 0)
            all_samples = []  # if we have to draw multiple times, we'll use this
            while 1:  # we potentially have to redraw random numbers multiple times
                # this constant was determined by fairly rough and ready profiling, and can
                # probably be improved. The idea is that the expected number of items we'll
                # need is n*p (or (n-start)*p if we've already done up to start). We add
                # a small proportion (this constant can probably be tweaked to be better)
                # to reduce the number of times we need to redraw samples. Finally, we add
                # 10 which is basically because once you get down to doing as few as this,
                # the main factor that determines how long it takes is the Python overheads,
                # so we should never do less than some small constant (again, another constant
                # that can probably be tweaked).
                m = int(_numpy.ceil((n-start)*p*1.05))+10
                # this formula gives the jump sizes, i.e. the space between hits (where
                # rand()<p). However it can't give a jump size of 0, but we could have
                # rand()<p for the first entry. Therefore we set start to the actual first
                # place we'd like to see a hit minus one, allowing us to have start as the
                # first hit.
                jump = _numpy.array(_numpy.ceil(_numpy.log(_np_rand(m))*pconst), dtype=_numpy.int32)
                jump[0] += start-1
                samples = _numpy.cumsum(jump)
                all_samples.append(samples)
                start = samples[-1]+1  # this is the starting point for the next round
                if start >= n:
                    break
            if len(all_samples) > 1:
                samples = _numpy.hstack(all_samples)
            samples = samples[:_bisect.bisect_left(samples, n)]  # only keep the ones in the range we want
        # Map the sampled positions back to the actual values of the range
        return samples*step+low
{# Record the synapse counts before creation and run the scalar (loop-invariant)
   parts of the generated code once, before the per-source loop below. #}
# number of synapses in the beginning
_old_num_synapses = {{N}}
# number of synapses during the creation process
_cur_num_synapses = _old_num_synapses
# scalar code (executed once, with a scalar vectorisation index)
_vectorisation_idx = 1
{{scalar_code['setup_iterator']|autoindent}}
{{scalar_code['generator_expr']|autoindent}}
{{scalar_code['create_cond']|autoindent}}
{{scalar_code['update']|autoindent}}
{# For a connect call j='k+i for k in range(0, N_post, 2) if k+i < N_post'
"j" is called the "result index" (and "_post_idx" the "result index array", etc.)
"i" is called the "outer index" (and "_pre_idx" the "outer index array", etc.)
"k" is called the inner variable #}
{# Loop over every source ("outer") index in turn; the candidate result
   indices for each source are generated and filtered vectorised. #}
for _{{outer_index}} in range({{outer_index_size}}):
{# Offset-corrected ("raw") index into the full array (relevant for subgroups) #}
_raw{{outer_index_array}} = _{{outer_index}} + {{outer_index_offset}}
{% if not result_index_condition %}
{# The connection condition does not refer to the result index, so it can be
   evaluated once per source index -- skip this source entirely if it fails. #}
{{vector_code['create_cond']|autoindent}}
if not _cond:
continue
{% endif %}
{# Set up the iterator bounds (_iter_low/_iter_high/_iter_step and, for
   'sample', _iter_p or _iter_size) #}
{{vector_code['setup_iterator']|autoindent}}
{% if iterator_func=='range' %}
{{inner_variable}} = _numpy.arange(_iter_low, _iter_high, _iter_step)
{% elif iterator_func=='sample' %}
{% if iterator_kwds['sample_size'] == 'fixed' %}
{{inner_variable}} = _sample_without_replacement(_iter_low, _iter_high, _iter_step, None, size=_iter_size)
{% else %}
{{inner_variable}} = _sample_without_replacement(_iter_low, _iter_high, _iter_step, _iter_p)
{% endif %}
{% endif %}
{# Evaluate the generator expression vectorised over all inner values; this
   produces the candidate result indices. #}
_vectorisation_idx = {{inner_variable}}
{{vector_code['generator_expr']|autoindent}}
_vectorisation_idx = _{{result_index}}
_raw{{result_index_array}} = _{{result_index}} + {{result_index_offset}}
{% if result_index_condition %}
{% if result_index_used %}
{# The condition could index outside of array range #}
{% if skip_if_invalid %}
{# Silently drop out-of-range result indices before evaluating the condition #}
if _numpy.isscalar(_{{result_index}}):
if _{{result_index}}<0 or _{{result_index}}>={{result_index_size}}:
continue
else:
_in_range = _numpy.logical_and(_{{result_index}}>=0, _{{result_index}}<{{result_index_size}})
_{{result_index}} = _{{result_index}}[_in_range]
{% else %}
{# Raise for out-of-range indices, since evaluating the condition would
   index a post-synaptic array with them #}
if _numpy.isscalar(_{{result_index}}):
_min_val = _max_val = _{{result_index}}
elif _{{result_index}}.shape == (0, ):
continue
else:
_min_val = _numpy.min(_{{result_index}})
_max_val = _numpy.max(_{{result_index}})
if _min_val < 0:
# Stick to % formatting, since curly braces are used by Jinja2 already
raise IndexError("index {{result_index}}=%d outside allowed range from 0 to %d, and the condition refers to a post-synaptic variable" % (_min_val, {{result_index_size}}-1))
elif _max_val >= {{result_index_size}}:
raise IndexError("index {{result_index}}=%d outside allowed range from 0 to %d, and the condition refers to a post-synaptic variable" % (_max_val, {{result_index_size}}-1))
{% endif %}
{% endif %}
{{vector_code['create_cond']|autoindent}}
{% endif %}
{% if if_expression!='True' and result_index_condition %}
{# Keep only the candidate result indices for which the condition holds #}
_{{result_index}}, _cond = _numpy.broadcast_arrays(_{{result_index}}, _cond)
_{{result_index}} = _{{result_index}}[_cond]
{% else %}
{# No condition filter -- just broadcast the result index against the inner
   values so it has the right array shape #}
_{{result_index}}, {{inner_variable}} = _numpy.broadcast_arrays(_{{result_index}}, {{inner_variable}})
{% endif %}
{# Bounds check on the result indices that will actually be stored #}
{% if skip_if_invalid %}
if _numpy.isscalar(_{{result_index}}):
if _{{result_index}}<0 or _{{result_index}}>={{result_index_size}}:
continue
else:
_in_range = _numpy.logical_and(_{{result_index}}>=0, _{{result_index}}<{{result_index_size}})
_{{result_index}} = _{{result_index}}[_in_range]
{% elif not result_index_used %}
{# Otherwise, we already checked before #}
if _numpy.isscalar(_{{result_index}}):
_min_val = _max_val = _{{result_index}}
elif _{{result_index}}.shape == (0, ):
continue
else:
_min_val = _numpy.min(_{{result_index}})
_max_val = _numpy.max(_{{result_index}})
if _min_val < 0:
raise IndexError("index _{{result_index}}=%d outside allowed range from 0 to %d" % (_min_val, {{result_index_size}}-1))
elif _max_val >= {{result_index_size}}:
raise IndexError("index _{{result_index}}=%d outside allowed range from 0 to %d" % (_max_val, {{result_index_size}}-1))
{% endif %}
_vectorisation_idx = _{{result_index}}
_raw{{result_index_array}} = _{{result_index}} + {{result_index_offset}}
{# Run the update code; among others, this computes the multiplicity _n and
   fills the index arrays (_pre_idx/_post_idx) #}
{{vector_code['update']|autoindent}}
if not _numpy.isscalar(_n):
# The "n" expression involved the target variable
{{result_index_array}} = {{result_index_array}}.repeat(_n[_j])
elif _n != 1:
# We have a target-independent number
{{result_index_array}} = {{result_index_array}}.repeat(_n)
{# Append the new synapses to the pre-/post-synaptic index arrays, growing
   the dynamic arrays as needed #}
_numnew = len({{result_index_array}})
_new_num_synapses = _cur_num_synapses + _numnew
{{_dynamic__synaptic_pre}}.resize(_new_num_synapses)
{{_dynamic__synaptic_post}}.resize(_new_num_synapses)
{{_dynamic__synaptic_pre}}[_cur_num_synapses:] = _pre_idx
{{_dynamic__synaptic_post}}[_cur_num_synapses:] = _post_idx
_cur_num_synapses += _numnew
# Resize all dependent dynamic arrays (synaptic weights, delays, etc.) and set
# the total number of synapses
_owner._resize(_cur_num_synapses)
# And update N_incoming, N_outgoing and synapse_number
_owner._update_synapse_numbers(_old_num_synapses)
{% endblock %}
|