File: algorithms-lnmf.R

# Algorithm for Nonnegative Matrix Factorization: Local NMF (LNMF)
#
# @author Renaud Gaujoux
# @created 21 Jul 2009

#' @include registry-algorithms.R
NULL

###% \item{\sQuote{lnmf}}{ Local Nonnegative Matrix Factorization. Based on a 
###% regularized Kullback-Leibler divergence, it uses a modified version of 
###% Lee and Seung's multiplicative updates.
###% See \emph{Li et al. (2001)}.}

###% Algorithm for Nonnegative Matrix Factorization: Local NMF (LNMF).
###%
###% The local NMF algorithm minimizes the following Kullback-Leibler divergence based objective function:
###% $$ 
###% \sum_{i=1}^m\sum_{j=1}^n\left(X_{ij} \log\frac{X_{ij}}{(WH)_{ij}} - X_{ij} + (WH)_{ij} + \alpha U_{ij}\right) - \beta \sum_i V_{ii},
###% $$
###% where $\alpha, \beta > 0$ are some constants, $U = W^TW$ and $V = HH^T$.
###%
###% TODO: add an explanation for each term (see Wild 2002)
###%
###% @references Learning spatially localized, parts-based representation
###% , S.Z. Li, X.W. Hou, and H.J. Zhang.
###% , In Proceedings of IEEE International Conference on Computer Vision and Pattern Recognition
###% , December 2001
# Performs one multiplicative update iteration of the Local NMF algorithm
# (Li et al. 2001), plain R implementation.
#
# Arguments:
#   i    - current iteration number; triggers the periodic underflow
#          adjustment every 10 iterations
#   v    - target matrix being factorised
#   data - NMF model whose basis and coefficient matrices are updated
#   ...  - unused, kept for compatibility with the update-function API
#
# Returns the model `data` with its factors updated in place.
nmf_update_R.lnmf <- function(i, v, data, ...){
	
	# extract the current factor matrices from the model
	W <- .basis(data)
	H <- .coef(data)
	
	# multiplicative update of the coefficient matrix H
	H <- sqrt( H * crossprod(W, v / (W %*% H)) )
	
	# update the basis matrix W with the standard divergence-based rule
	W <- R_std.divergence.update.w(v, W, H, W %*% H)
	
	# normalise each column of W to sum to one
	W <- sweep(W, 2L, colSums(W), "/", check.margin=FALSE)
	
	# every 10 iterations: clamp tiny entries to avoid numerical underflow
	if( i %% 10 == 0 ){
		# machine precision used as the clamping threshold
		eps <- .Machine$double.eps
		H[H < eps] <- eps
		W[W < eps] <- eps
	}
	
	# store the updated factors back into the model and return it
	.basis(data) <- W
	.coef(data) <- H
	return(data)
}

# Performs one multiplicative update iteration of the Local NMF algorithm
# (Li et al. 2001), using the optimised divergence-based update for W.
#
# Arguments:
#   i    - current iteration number; triggers the periodic underflow
#          adjustment every 10 iterations
#   v    - target matrix being factorised
#   data - NMF model whose basis and coefficient matrices are updated
#   ...  - unused, kept for compatibility with the update-function API
#
# Returns the model `data` with its factors updated in place.
nmf_update.lnmf <- function(i, v, data, ...){
	
	# retrieve each factor
	w <- .basis(data); h <- .coef(data);
	
	# update H 
	h <- sqrt( h * crossprod(w, v / (w %*% h)) )
	
	# update W using the standard divergence based update
	w <- std.divergence.update.w(v, w, h)
	
	# scale columns of W so each sums to one.
	# NOTE: sweep() replaces apply(w, 2, function(x) x/sum(x)), which
	# drops the matrix structure (returning a plain numeric vector) when
	# w has a single row; sweep() always preserves the dimensions, is
	# vectorised, and matches the plain R implementation of this update.
	w <- sweep(w, 2L, colSums(w), "/", check.margin=FALSE)
	
	#every 10 iterations: adjust small values to avoid underflow 
	if( i %% 10 == 0 ){
		#precision threshold for numerical stability
		eps <- .Machine$double.eps
		h[h<eps] <- eps;
		w[w<eps] <- eps;
	}
	
	# return updated data	
	.basis(data) <- w; .coef(data) <- h
	return(data)
}

# register the LNMF algorithm
#nmfAlgorithm.lnmf_R <- setNMFMethod('.R#lnmf', objective='KL'
#		, Update=nmf_update_R.lnmf
#		, Stop='connectivity')
#
#nmfAlgorithm.lnmf <- setNMFMethod('lnmf', '.R#lnmf', Update=nmf_update.lnmf)