"""
Experimental API for developing split-apply-combine style algorithms on
coolers.
"""
from functools import partial, reduce
from multiprocess import Lock
from .core import get
from .util import partition
__all__ = ["partition", "split", "lock"]
"""
Two possible reasons for using a lock:
(1) Prevent a concurrent process from opening an HDF5 file while the same
file is open for writing. In order for reading processes to obtain the correct
state, make sure the writing process finishes writing (flushes its buffers and
actually closes the file) before reading processes attempt to open it.
This explicit synchronization shouldn't be necessary if using the file in
SWMR mode.
See also:
* <https://support.hdfgroup.org/HDF5/hdf5-quest.html#grdwt>
* <https://support.hdfgroup.org/projects/SWMR>
(2) Synchronize file access when opened before a fork(). Fork-based (Unix)
multiprocessing and concurrent reading are compatible as long as the fork
happens before the child processes open the file. If an HDF5 file is already
open before forking, the child processes inherit the same global HDF5 state,
which leads to a race condition that causes simultaneous access to fail. One
can either use a lock to prevent the race condition, or close and re-open the
file in the workers after the fork.
See also:
* <https://groups.google.com/forum/#!topic/h5py/bJVtWdFtZQM>
* <https://github.com/h5py/h5py/issues/591#issuecomment-116785660>.
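
For example, a reader that holds the module-level ``lock`` while touching
the file (a minimal sketch; ``h5py.File`` is the standard h5py API, while the
path and dataset names here are hypothetical)::

    import h5py

    def read_counts(path, lo, hi):
        lock.acquire()
        try:
            with h5py.File(path, "r") as f:
                return f["pixels/count"][lo:hi]
        finally:
            lock.release()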
"""
lock = Lock()
def apply_pipeline(funcs, prepare, get, key):
    # Fetch the data chunk for ``key`` and thread it through the pipeline.
    chunk = get(key)
    if prepare is not None:
        # With an initializer, the original chunk stays pristine: each task
        # receives both the chunk and the data accumulated so far.
        data = prepare(chunk)
        for func in funcs:
            data = func(chunk, data)
    else:
        # Without an initializer, the chunk itself is passed through each
        # task in turn.
        data = chunk
        for func in funcs:
            data = func(data)
    return data
class MultiplexDataPipe:
"""
Create an extendable pipeline of callables to be applied independently to
each of a collection of inputs and produce a collection of outputs.
New tasks are appended with the ``pipe`` method. Pipeline execution can be
multiplexed using any ``map`` implementation, e.g. multiprocessing Pool.map
or ipyparallel view.map for distributed execution.
    Depending on the ``map`` implementation, results may be

    * yielded lazily, one at a time:
        the Python 3 built-in ``map``, Python 2 ``itertools.imap``,
        ``Pool.imap``, ``Pool.imap_unordered``
    * gathered and returned all at once, after every output is finished:
        Python 2 ``map``, ``Pool.map``, ``Pool.map_async``
The pipeline can be run using one of:
* ``gather``:
Results are gathered and combined after all pipelines complete.
* ``reduce``:
Results are sequentially folded using a binary operator. This can
save on memory when using a sequential or online ``map``
implementation.
Both methods are blocking, regardless of the blocking behavior of the
``map`` implementation (e.g., ``Pool.map_async``).
Notes
-----
    Python's ``multiprocessing`` module uses pickle for serialization, which
    has several limitations (for example, it cannot serialize lambdas or
    locally defined functions). Consider using a parallel map implementation
    with a more versatile serializer, such as ``dill`` or ``cloudpickle``.
See also
--------
http://stackoverflow.com/a/26521507 for a discussion of the differences
between multiprocessing Pool implementations.
Examples
--------
    >>> import numpy as np
    >>> X = np.arange(30)
>>> spans = [(0, 10), (10, 20), (20, 30)]
>>> dp = MultiplexDataPipe(lambda span: X[span[0]:span[1]], spans, map)
>>> dp = dp.pipe(lambda x: x - 1).pipe(sum)
>>> dp.gather()
[35, 135, 235]
>>> dp.reduce(lambda x, y: x + y, 0)
405
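
    To multiplex across processes, pass a pool's map method (a sketch, marked
    to be skipped by doctest; it relies on the dill-based ``multiprocess``
    package imported above, which can serialize the lambdas):

    >>> from multiprocess import Pool  # doctest: +SKIP
    >>> with Pool(3) as pool:  # doctest: +SKIP
    ...     dp = MultiplexDataPipe(lambda span: X[span[0]:span[1]], spans, pool.map)
    ...     dp.pipe(sum).gather()
    [45, 145, 245]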
"""
def __init__(self, get, keys, map):
"""
Parameters
----------
get : callable
            Callable used by workers to fetch the data corresponding to a
            given key
keys : iterable
Keys corresponding to input data
map : callable
Implementation of a map functor
"""
self.get = get
self.keys = list(keys)
self.map = map
self.funcs = []
self._prepare = None
def __copy__(self):
other = self.__class__(self.get, self.keys, self.map)
other.funcs = list(self.funcs)
other._prepare = self._prepare
return other
    def __getstate__(self):
        # Exclude the map implementation from the pickled state; it may not
        # be serializable (e.g., a bound method of a process pool).
        d = self.__dict__.copy()
        d.pop("map", None)
        return d
def __iter__(self):
return iter(self.run())
def prepare(self, func):
"""
Prepend a task that initializes the data for transformation.
This optional step allows one to keep the original data chunk pristine.
The callable ``func`` should return an initial object to pass along the
pipeline for transformation. Subsequent callables in the pipeline will
take two arguments instead of one:
* chunk: original data chunk
* data: transformed data passed along the pipeline
Parameters
----------
func : function/callable
Returns
-------
A new datapipe with the initializer set.
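
        Examples
        --------
        A small sketch: accumulate sums without mutating the chunks.

        >>> dp = MultiplexDataPipe(lambda span: list(range(*span)), [(0, 3), (3, 6)], map)
        >>> dp = dp.prepare(lambda chunk: 0)
        >>> dp.pipe(lambda chunk, acc: acc + sum(chunk)).gather()
        [3, 12]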
"""
        # Return a new datapipe, consistent with ``pipe`` and the docstring
        # above, rather than mutating this one in place.
        other = self.__copy__()
        other._prepare = func
        return other
def pipe(self, func, *args, **kwargs):
"""
Append new task(s) to the pipeline
Parameters
----------
        func : function/callable or sequence of callables
            If a single callable is given, any additional positional and
            keyword arguments are curried into it, bound before the data
            argument.
Returns
-------
A new datapipe with the additional task(s) appended.
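
        Examples
        --------
        Curried positional arguments are bound before the data argument:

        >>> dp = MultiplexDataPipe(lambda k: k, [1, 2, 3], map)
        >>> dp.pipe(lambda extra, x: x + extra, 100).gather()
        [101, 102, 103]
        >>> dp.pipe([lambda x: x + 1, lambda x: x * x]).gather()
        [4, 9, 16]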
"""
other = self.__copy__()
if args or kwargs:
addon = [partial(func, *args, **kwargs)]
else:
try:
addon = list(func)
except TypeError:
addon = [func]
other.funcs += addon
return other
def run(self):
"""
Run the pipeline
        The return value (a lazy iterator or a fully realized collection)
        depends on the ``map`` implementation.
"""
pipeline = partial(apply_pipeline, self.funcs, self._prepare, self.get)
return self.map(pipeline, self.keys)
def gather(self, combine=list, *args, **kwargs):
"""
Run the pipeline and gather outputs
Parameters
----------
combine : callable, optional
Callable to consume the output. Default is builtin list.
Returns
-------
Output of ``combine``
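
        Examples
        --------
        >>> MultiplexDataPipe(lambda k: k * 2, [1, 2, 3], map).gather(tuple)
        (2, 4, 6)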
"""
return combine(iter(self.run()), *args, **kwargs)
def reduce(self, binop, init):
"""
Run the pipeline and fold outputs cumulatively as they are returned
Parameters
----------
binop : binary operation
A function of two arguments that returns a single value.
init : object
The initial value of the accumulation.
Returns
-------
Reduced output
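
        Examples
        --------
        >>> MultiplexDataPipe(lambda k: k, [5, 1, 4], map).reduce(max, 0)
        5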
"""
return reduce(binop, iter(self.run()), init)
class chunkgetter:
    """
    Callable that fetches a (lo, hi) span of rows from a cooler's pixel table
    as a dict of column arrays, optionally bundling in the chrom and bin
    tables and serializing HDF5 access with the module-level lock.
    """
    def __init__(self, clr, include_chroms=False, include_bins=True, use_lock=False):
self.cooler = clr
self.include_chroms = include_chroms
self.include_bins = include_bins
self.use_lock = use_lock
def __call__(self, span):
lo, hi = span
chunk = {}
try:
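            # Guard HDF5 access when workers were forked after the file was
            # first opened; see the locking notes at the top of this module.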
if self.use_lock:
lock.acquire()
with self.cooler.open("r") as grp:
if self.include_chroms:
chunk["chroms"] = get(grp["chroms"], as_dict=True)
if self.include_bins:
chunk["bins"] = get(grp["bins"], as_dict=True)
chunk["pixels"] = get(grp["pixels"], lo, hi, as_dict=True)
finally:
if self.use_lock:
lock.release()
return chunk
def split(clr, map=map, chunksize=10_000_000, spans=None, **kwargs):
    """
    Split a cooler's pixel table into row spans and return a datapipe over
    the resulting chunks. Extra keyword arguments are passed to
    ``chunkgetter``.
    """
    if spans is None:
        spans = partition(0, clr.info["nnz"], chunksize)
return MultiplexDataPipe(chunkgetter(clr, **kwargs), spans, map)
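

# A minimal usage sketch (the file path is hypothetical; ``cooler.Cooler``
# and ``multiprocess.Pool`` are real APIs): compute the total contact count
# by summing the pixel table's "count" column chunk by chunk.
#
#     import cooler
#     from multiprocess import Pool
#
#     clr = cooler.Cooler("test.cool")
#     with Pool(4) as pool:
#         total = (
#             split(clr, map=pool.imap, chunksize=1_000_000)
#             .pipe(lambda chunk: chunk["pixels"]["count"].sum())
#             .reduce(lambda x, y: x + y, 0)
#         )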