File: valcov.py

"""
Created on Fri Aug 31 2012 14:05

Copyright (c) 2012, Martin S. Lindner and Maximilian Kollock, LindnerM@rki.de, 
Robert Koch-Institut, Germany,
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * The name of the author may not be used to endorse or promote products
      derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL MARTIN S. LINDNER OR MAXIMILIAN KOLLOCK BE LIABLE 
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""



import numpy as np
import scipy.stats as stats

from scipy.special import digamma, betainc
from scipy.optimize import newton


"""----------------------------------------------------------------------------
    Define the distributions
----------------------------------------------------------------------------"""

class Distribution:
	_p1 = None
	_p2 = None
	_name = "General Distribution"
	_dof = 0 # Number of degrees of freedom

	alpha = 1.
	def __str__(self):
		""" return the name of the distribution """
		return self._name
	
	def set_par(self, p1=None, p2=None):
		""" explicitly set a parameter """
		if p1 is not None:
			self._p1 = p1
		if p2 is not None:
			self._p2 = p2

	def pmf(self, x):
		""" return the value of the probability mass function at x """
		return x*0.

	def estimate_par(self, data=None, weights=None):
		""" estimate the distribution parameters from the data and the weights 
		(if provided) """
		pass

	def init_par(self, mean=None, var=None):
		""" estimate initial distribution parameters given the mean and 
		variance of the data"""
		pass

	def report_stats(self, width=20):
		""" return a string that reports information about the distribution """
		return str(self._name).ljust(width) + str(self.alpha).ljust(width) + \
	           str(self._p1).ljust(width) + str(self._p2).ljust(width)


class Zero(Distribution):
	_name = "Zero"
	_dof = 1

	def pmf(self,x):
		if isinstance(x,np.ndarray):
			return (x==0).astype(float)
		else:
			return float(x==0)


class NBinom(Distribution):
	_name = "NBinom"
	_dof = 3
	_use_MOM = False
	
	def pmf(self, x):
		return stats.nbinom.pmf(x,self._p1,self._p2)

	def estimate_par(self, data, weights=None):
		if weights is None:
			weights = data*0. + 1.
		norm = np.sum(weights)
		mean = np.sum(data*weights)/(norm + 10**(-25))
		var = np.sum((data - mean)**2 * weights) / (norm + 10**(-25))

		if self._use_MOM:
			# method of moments: match the weighted sample mean and variance
			if var < mean:
				var = 1.01*mean
			self._p1 = mean**2 / (var - mean)
			self._p2 = mean / var
		else:
			def dp1_llnbinom(param,obs,obs_w):
				# derivative of the weighted log-likelihood w.r.t. parameter 1,
				# with parameter 2 profiled out as p = param/(param+mean):
				# sum_i w_i*psi(x_i+r) - N*psi(r) + N*ln(r/(r+mean))
				# param: parameter 1
				# obs:   observed values
				# obs_w: weight of each value
				N = np.sum(obs_w)
				data_mean = np.sum(obs*obs_w)/N
				return np.sum(digamma(obs+param)*obs_w) - N*digamma(param) + \
			           N*np.log(param/(param+data_mean))
			try:
				self._p1 = newton(dp1_llnbinom,self._p1, args=(data,weights),
								  maxiter=10000)
				self._p2 = (self._p1)/(self._p1+mean)
			except Exception:
				print("Warning: MLE for negative binomial failed. Using MOM.")
				if var < mean:
					print("Warning: var < mean")
					var = 1.01*mean
				self._p1 = mean**2 / (var - mean)
				self._p2 = mean / var
				


	def report_stats(self, width=20):
		""" return a string that reports information about the distribution """
		return str(self._name).ljust(width) + str(self.alpha).ljust(width) + \
	           str(self._p1).ljust(width) + str(self._p2).ljust(width) + \
			   str(stats.nbinom.mean(self._p1,self._p2)).ljust(width)
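
# Hypothetical usage sketch (added for illustration, not part of the original
# module): fit an NBinom to synthetic scipy draws; the parameters and sample
# size here are arbitrary assumptions.
def _example_nbinom_fit():
	""" recover (p1, p2) from unweighted synthetic negative binomial draws """
	data = stats.nbinom.rvs(5, 0.3, size=2000).astype(float)
	nb = NBinom()
	nb.set_par(1., 0.5)    # rough starting point for the Newton solver
	nb.estimate_par(data)  # weights default to one per observation
	print(nb.report_stats())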


		
class Poisson(Distribution):
	_name = "Poisson"
	_dof = 2

	def pmf(self,x):
		return stats.poisson.pmf(x,self._p1)

	def estimate_par(self, data, weights=None):
		if weights is None:
			weights = data*0. + 1.
		mean = np.sum(data*weights) / np.sum(weights)
		self._p1 = mean



class TailDistribution(Distribution):
	_name = "Tail"
	_dof = 1
	_norm = False  # normalization; recalculate only if necessary
	_parent = None
	_active = True # switch tail on/off

	def set_par(self,p1=None, p2=None):
		""" explicitly set a parameter """
		if p1 is not None:
			self._p1 = p1
		if p2 is not None:
			self._p2 = p2
		self._norm = False

	def estimate_par(self, data=None, weights=None):
		""" Do not estimate parameters, but obtain parameters from parent 
		distribution """
		self._p1 = self._parent._p1
		self._p2 = self._parent._p2
		self._norm = False



class NbTail(TailDistribution):
	_name = "Tail"
	_dof = 1
	def __init__(self, nbinom):
		""" NbTail distribution must be connected to a negative binomial"""
		if isinstance(nbinom, NBinom):
			self._parent = nbinom
		else:
			raise(Exception("NbTail must be connected to a NBinom object"))
	
	def pmf(self, x):
		if np.isscalar(x) and x == 0:
			return 0.
		if stats.nbinom.mean(self._p1, self._p2) < 2.:
			self._active = False # switch tail permanently off
		if not self._active:
			return 0*x

		def betaincreg(x,p1,p2):
			# upper tail of the negative binomial: P(X > x)
			return 1-betainc(p1,x+1,p2)

		# calculate the normalization up to a certain precision and cache it
		if not self._norm:
			ks = int(max(2,stats.nbinom.ppf(0.999999,self._p1,self._p2)))
			norm = np.sum(betaincreg(np.arange(1,ks),self._p1,self._p2))
			self._norm = norm
		else:
			norm = self._norm
		# now return the value(s)
		ret = betaincreg(x,self._p1,self._p2) / norm
		if not np.isscalar(x):
			ret[np.where(x==0)] = 0.
		return ret



class PoissonTail(TailDistribution):
	_name = "Tail of Poisson"
	_dof = 1
	def __init__(self, poisson):
		""" PoissonTail distribution must be connected to a poisson 
		distribution """
		if isinstance(poisson, Poisson):
			self._parent = poisson
		else:
			raise(Exception("PoissonTail must be connected to a Poisson object"))
	
	def pmf(self, x):
		if self._p1 < 2.:
			self._active = False # switch tail permanently off
		if not self._active:
			return 0*x

		if np.isscalar(x) and x == 0:
			return 0.

		xmax = int(max(np.max(x),5,stats.poisson.ppf(0.999999,self._p1)))+1
		xs = np.arange(0,xmax, dtype=float)
		# backward cumulative sum of pmf(k)/k; skip k=0 to avoid division by zero
		ratio = np.zeros_like(xs)
		ratio[1:] = stats.poisson.pmf(xs[1:], self._p1)/xs[1:]
		tx = np.cumsum(ratio[::-1])[::-1]
		tx[0] = 0 # the tail assigns no mass to zero coverage
		tx /= np.sum(tx)
		return tx[x]
		


class Geom(Distribution):
	""" Geometric distribution for fitting distances between reads. """
	_name = "Geometric"
	_dof = 2

	def pmf(self,x):
		return np.power(1-self._p1,x)*self._p1

	def estimate_par(self, data, weights=None):
		if weights is None:
			weights = data*0. + 1.
		mean = np.sum(data*weights) / np.sum(weights)
		self._p1 = 1./(1+mean)

	def report_stats(self, width=20):
		""" return a string that reports information about the distribution """
		return str(self._name).ljust(width) + str(self.alpha).ljust(width) + \
		       str(self._p1).ljust(width) + '[None]'.ljust(width) + \
		       str(1./self._p1).ljust(width)


def build_mixture_model(dist_str):
	""" Build a mixture model from a string (one character per component) """
	num_dist = len(dist_str)
	# a plain list of fresh instances; note that [Zero()]*num_dist would make
	# every entry an alias of the same object
	mm = [Zero() for _ in range(num_dist)]
	it = 0
	for dist in dist_str:
		if dist == "z":
			it += 1
		elif dist=="n":
			mm[it] = NBinom()
			mm[it]._use_MOM = True
			it += 1
		elif dist=="N":
			mm[it] = NBinom()
			mm[it]._use_MOM = False
			it += 1
		elif dist == "t":
			if it > 0 and isinstance(mm[it-1], NBinom):
				mm[it] = NbTail(mm[it-1])
				it += 1
			elif it > 0 and isinstance(mm[it-1], Poisson):
				mm[it] = PoissonTail(mm[it-1])
				it += 1
			else:
				raise Exception("Error: wrong input '%s'. A Tail distribution "
								"(t) must follow a Negative Binomial (n|N) or "
								"Poisson (p)."%dist_str)
		elif dist == "p":
			mm[it] = Poisson()
			it+=1
		elif dist == "g":
			mm[it] = Geom()
			it+=1
		else:
			raise Exception("Input Error: distribution %s not recognized!"%dist)

	# initialize all distributions with equal weights
	alpha = 1./len(mm)
	for dist in mm:
		dist.alpha = alpha
		
	return mm
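
# Hedged usage sketch (not part of the original module): each character of
# the input string selects one component, e.g. "zNt" builds a zero component,
# an MLE negative binomial and its tail.
def _example_mixture():
	""" build a small mixture and show the (equal) initial proportions """
	mm = build_mixture_model("zNt")
	for dist in mm:
		print(dist, dist.alpha)  # every component starts with alpha = 1/3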
	

def init_gamma(mixture_model, dataset):
	""" create initial responsibilities gamma. The probability that a coverage 
	belongs to a distribution is equal for all distributions."""
	N_mm = len(mixture_model)
	N_cov = len(dataset.value)
	
	return np.array([[1./N_mm for d in mixture_model] for i in range(N_cov)])



"""----------------------------------------------------------------------------
    Define DataSet to store all relevant information (incl. GCP)
----------------------------------------------------------------------------"""
class DS_cov:
	def __init__(self,ref):
		""" fill dataset with data provided in Reference ref """
		t_lns = [t.length for t in ref.targets.values()]
		# r_info contains read position and length for each read
		r_info = [[[r[0],r[1]] for r in t.reads.values()] for t in ref.targets.values()]

		# pile up coverage for each target and summarize in one array
		cov = [np.zeros((l,),dtype=np.int16) for l in t_lns]
		rlen = 0
		rds = 0
		for c,ri in zip(cov,r_info):
			for p,l in ri:
				c[p:(p+l)] += 1
				rds += 1
				rlen += l
		one_cov = np.concatenate(cov)
		
		self.value = np.unique(one_cov)
		self.count = np.array( [np.sum(one_cov==v) for v in self.value] )
		self.total = rds
		self.rlen = rlen/float(rds)
		self.glen = sum(t_lns)

class DS_dst:
	def __init__(self,ref):
		""" fill dataset with data provided in Reference ref """
		t_lns = [t.length for t in ref.targets.values()]
		r_pos = [[r[0] for r in t.reads.values()] for t in ref.targets.values()]
		r_len = [[r[1] for r in t.reads.values()] for t in ref.targets.values()]
		r_len = np.concatenate([np.array(ls) for ls in r_len])
		positions = [np.sort(np.array(ps)) for ps in r_pos]
		distances = np.concatenate([p[1:] - p[:-1] for p in positions])
		
		self.value = np.unique(distances)
		self.count = np.array( [np.sum(distances==v) for v in self.value] )
		self.total = sum([len(p) for p in positions])
		self.rlen = np.mean(r_len)
		self.glen = sum(t_lns)
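
# Note (added, hedged): the DS_* constructors only assume a small duck-typed
# Reference interface: ref.targets maps names to objects carrying a .length
# and a .reads dict of (position, length) tuples. A minimal stand-in:
class _MockTarget:
	def __init__(self, length, reads):
		self.length = length  # target (contig) length in bp
		self.reads = reads    # {read_name: (position, read_length)}

class _MockReference:
	def __init__(self, targets):
		self.targets = targets  # {target_name: _MockTarget}

# e.g.: DS_cov(_MockReference({"t1": _MockTarget(100, {"r1": (0, 30)})}))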






"""----------------------------------------------------------------------------
    Iterative Method function definitions
----------------------------------------------------------------------------"""


def update_gamma(data_set, mixture_model, gamma):
	""" Update the probability gamma_i(x) that a position with coverage x
	belongs to distribution i (E-step):
	gamma_i(x) = alpha_i*pmf_i(x) / sum_j alpha_j*pmf_j(x)
	"""

	for it in range(len(data_set.value)):
		# probability that coverages[it] belongs to each distribution
		prob = [dist.alpha*dist.pmf(data_set.value[it]) for dist in mixture_model]
		total = sum(prob)
		if total <= 0:
			gamma[it,:] = 0.
		else:
			gamma[it,:] = np.array(prob)/total

	return 1


def update_alpha(data_set, mixture_model, gamma):
	""" Update alpha_i, the proportion of data that belongs to distribution i
	(M-step): alpha_i = sum_x gamma_i(x)*count(x) / sum_x count(x) """

	for i in range(len(mixture_model)-1):
		w_probs = gamma[:,i]*data_set.count
		mixture_model[i].alpha = np.sum(w_probs) / np.sum(data_set.count)

	# the last component takes the remainder so the proportions sum to one
	mixture_model[-1].alpha = 1 - sum([d.alpha for d in mixture_model[:-1]])

	return 1


def iterative_fitting(data_set, mixture_model, gamma, iterations):
	""" Generator function to run the iterative method. Operates directly on
	the data structures mixture_model and gamma. """
	
	for i in range(iterations):
		# Expectation-step: update gammas
		update_gamma(data_set, mixture_model, gamma)
		
		# temporarily store old parameters
		old_p1 = np.array([d._p1 or 0 for d in mixture_model])
		old_p2 = np.array([d._p2 or 0 for d in mixture_model])
		old_alpha = np.array([d.alpha for d in mixture_model])

		# Parameter estimation step
		for j,dist in enumerate(mixture_model):
			dist_weights = gamma[:,j]*data_set.count
			dist.estimate_par(data_set.value, dist_weights)
		update_alpha(data_set, mixture_model, gamma)

		# calculate relative change of the parameters
		new_p1 = np.array([d._p1 or 0 for d in mixture_model])
		new_p2 = np.array([d._p2 or 0 for d in mixture_model])
		new_alpha = np.array([d.alpha for d in mixture_model])
		
		rel_p1 = np.max(np.abs(new_p1-old_p1) / (new_p1 + 1e-20))
		rel_p2 = np.max(np.abs(new_p2-old_p2) / (new_p2 + 1e-20))
		rel_alpha = np.max(np.abs(new_alpha-old_alpha) / (new_alpha + 1e-20))

		max_change = np.max([rel_p1, rel_p2, rel_alpha])

		# maximum CDF difference
		xs = np.arange(np.max(data_set.value)+1,dtype=int)
		ref_pdf = np.zeros((np.max(data_set.value)+1,))
		for dist in mixture_model:
			ref_pdf += dist.pmf(xs)*dist.alpha
		
		obs_pdf = np.zeros((np.max(data_set.value)+1,))
		obs_pdf[data_set.value.astype(int)] = data_set.count/float(np.sum(data_set.count))
		max_cdf_diff = np.max(np.abs(np.cumsum(ref_pdf)-np.cumsum(obs_pdf)))

		yield i, max_change, max_cdf_diff
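
# Illustrative driver (a sketch, not part of the original module): the
# generator yields after every iteration, so a caller can monitor convergence
# and stop early.
def _example_run_em(data_set, mixture_model, threshold=0.01, iterations=50):
	""" run the fitting loop until the maximum relative parameter change
	drops below threshold; return the last (iteration, change, cdf_diff) """
	gamma = init_gamma(mixture_model, data_set)
	result = None
	for i, change, diff in iterative_fitting(data_set, mixture_model, gamma,
											 iterations):
		result = (i, change, diff)
		if change < threshold:
			break
	return result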
		

"""----------------------------------------------------------------------------
    Convenience functions to calculate the validity
----------------------------------------------------------------------------"""

def validity_from_coverage(DS, poisson=True, tail=True, max_iter=50, iter_threshold=0.01,
					  init_alpha=None, data_cutoff=0.95):

	# weight cutoff: coverages above this value get no weight in parameter estimation
	try:
		cutoff = max(int(np.max(DS.value[np.where(np.cumsum(DS.count)<= \
				 data_cutoff*DS.glen)])+1),10)
	except ValueError:
		# np.max raises ValueError when the selection above is empty
		cutoff = max(np.max(DS.value)+1,10)

	DS.count = DS.count[np.where(DS.value < cutoff)]
	DS.value = DS.value[np.where(DS.value < cutoff)]
	DS.glen = np.sum(DS.count)

	distributions = 'z'
	if poisson:
		distributions += 'p'
	else:
		distributions += 'n'
	if tail:
		distributions += 't'

	MM = build_mixture_model(distributions)
	
	# set initial proportions for each distribution
	if init_alpha:
		alpha = list(init_alpha)
		if len(alpha) < len(MM):
			# distribute the remaining weight evenly over the missing entries
			rest_alpha = 1. - sum(alpha)
			rest_models = len(MM) - len(alpha)
			alpha.extend([rest_alpha / rest_models] * rest_models)

		for dist,a in zip(MM,alpha):
			dist.alpha = float(a)
		
	# initialize distribution parameters
	data_mean = np.mean(DS.value[1:]*DS.count[1:]/np.mean(DS.count))
	data_var = np.sum((DS.value-data_mean)**2*DS.count)/DS.glen

	for dist in MM:
		if isinstance(dist,Zero) or isinstance(dist,NbTail) \
		or isinstance(dist,PoissonTail):
			dist.estimate_par()
			continue
		if isinstance(dist,NBinom):
			m,v = data_mean,data_var
			if m > v:
				v = 1.01*m
			dist.set_par(m**2 / (v - m) , m / v )
			continue
		if isinstance(dist,Poisson):
			dist.set_par(data_mean)

	# set initial values for the responsibilities gamma
	GAMMA = init_gamma(MM,DS)
	
	# run the iteration, repeatedly update the variables MM and GAMMA
	for i,change,diff in iterative_fitting(DS, MM, GAMMA, max_iter):	
		if change < iter_threshold:
			break
	
	# estimate the validity as one minus the alpha of the zero distribution
	val = 1. - MM[0].alpha
	
	if isinstance(MM[1],NBinom):
		cov = stats.nbinom.mean(MM[1]._p1,MM[1]._p2)
	elif isinstance(MM[1],Poisson):
		cov = MM[1]._p1
	else:
		cov = -1
		
	return val,cov
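
# Hypothetical end-to-end example (hedged; all numbers are made up): any
# object exposing value/count/total/rlen/glen can stand in for a dataset.
def _example_validity_from_coverage():
	class _DS:
		pass
	ds = _DS()
	ds.value = np.arange(0, 30)  # observed coverage values
	ds.count = (stats.poisson.pmf(ds.value, 8.)*1e4).astype(int)
	ds.count[0] += 5000          # excess positions with zero coverage
	ds.total, ds.rlen = 10000, 100.0
	ds.glen = int(np.sum(ds.count))
	return validity_from_coverage(ds)  # -> (validity, mean coverage)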
	



def validity_from_spaces(DS, num_dist=3, max_iter=50, iter_threshold=0.01,
		init_mean=(2, 1000, 100000), init_alpha=(0.5, 0.49, 0.01)):

	MM = build_mixture_model("g"*num_dist)

	# set initial proportions for each distribution
	if init_alpha:
		alpha = list(init_alpha)
		if len(alpha) < len(MM):
			# create the missing alpha values
			rest_alpha = 1. - sum(alpha)
			rest_models = len(MM) - len(alpha)
			alpha.extend([rest_alpha / rest_models] * rest_models)

		for dist,a in zip(MM,alpha):
			dist.alpha = float(a)
		
	# initialize distribution parameters
	factors = np.arange(num_dist)
	data_mean = np.power(10.,factors)

	if init_mean:
		# use mean values provided by the user, where possible
		for i,m in enumerate(init_mean):
			if i < num_dist:
				data_mean[i] = m

	for i,dist in enumerate(MM):
		dist.set_par(1./(1.+data_mean[i]))

	# set initial values for the responsibilities gamma
	GAMMA = init_gamma(MM,DS)

	# run the iteration, repeatedly update the variables MM and GAMMA
	for i,change,diff in iterative_fitting(DS, MM, GAMMA, max_iter):	
		if change < iter_threshold:
			break

	# calculate the validity
	if num_dist == 3:
		corr = 1.-MM[0].alpha
	else:
		corr = 1.
	val = corr*DS.total/(MM[num_dist-2]._p1*DS.glen)
	cov = MM[num_dist-2]._p1*DS.rlen

	return val,cov
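
# Companion sketch for the distance-based estimator (assumed inputs, hedged):
# here value/count hold the distances between consecutive read start
# positions and how often each distance occurs.
def _example_validity_from_spaces():
	class _DS:
		pass
	ds = _DS()
	ds.value = np.arange(0, 500)  # read-start distances in bp
	ds.count = (stats.geom.pmf(ds.value + 1, 0.05)*1e4).astype(int)
	ds.total, ds.rlen = int(np.sum(ds.count)), 100.0
	ds.glen = 200000
	return validity_from_spaces(ds)  # -> (validity, mean coverage)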