File: ifa-lmp.R

package info (click to toggle)
r-cran-openmx 2.21.1%2Bdfsg-1
  • links: PTS, VCS
  • area: main
  • in suites: bookworm
  • size: 14,412 kB
  • sloc: cpp: 36,577; ansic: 13,811; fortran: 2,001; sh: 1,440; python: 350; perl: 21; makefile: 5
file content (159 lines) | stat: -rw-r--r-- 5,679 bytes parent folder | download | duplicates (2)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
################################################################################
## Test estimation of LMP model from rpf package
## Author: Carl F. Falk

# library() errors immediately when a dependency is missing; require() only
# returns FALSE and would let the script fail later with a confusing error.
library(OpenMx)
library(rpf)

# Pin the pre-3.6.0 RNG sampler so set.seed(1) reproduces the original
# reference fits checked at the bottom of this file.
suppressWarnings(RNGversion("3.5"))
set.seed(1)

# LSAT6: classic 5-item dichotomous dataset shipped in pattern-frequency form.
# Expand to one row per respondent and declare 0/1 as ordered factors, which
# is the raw-data format mxExpectationBA81 expects.
data(LSAT6)
dat <- expandDataFrame(LSAT6, freqName = "Freq")
dat <- mxFactor(as.data.frame(dat), levels = 0:1)
N <- nrow(dat)   # number of respondents
ni <- ncol(dat)  # number of items
colnames(dat) <- paste0("X", seq_len(ni))


################################################################################
## k = 0 gives a 1st-order polynomial; the fit should match a 2PL exactly.

k <- 0
par <- c(.1, 0)

# One identical LMP item specification per item.
spec <- rep(list(rpf.lmp(k)), ni)

# Every item starts at the same parameter vector (items in columns).
startingValues <- matrix(par, nrow = length(par), ncol = length(spec))

imat <- mxMatrix(
  name = "item", values = startingValues, free = TRUE,
  dimnames = list(c("omega", "xi"), colnames(dat))
)

k0Model <- mxModel(
  model = "k0Model", imat,
  mxData(observed = dat, type = "raw"),
  mxExpectationBA81(spec, qwidth = 5, qpoints = 49),
  mxFitFunctionML(),
  mxComputeSequence(list(
    mxComputeEM('expectation', 'scores',
      mxComputeNewtonRaphson())
  ))
)

k0Model <- mxRun(k0Model)


## Fit the equivalent 2PL (graded response model with 2 categories).
par <- c(1, 0)
spec <- rep(list(rpf.grm(2)), ni)

startingValues <- matrix(par, nrow = length(par), ncol = length(spec))

imat <- mxMatrix(
  name = "item", values = startingValues, free = TRUE,
  dimnames = list(c("Slope", "Intercept"), colnames(dat))
)

twoplModel <- mxModel(
  model = "twoplModel", imat,
  mxData(observed = dat, type = "raw"),
  mxExpectationBA81(spec, qwidth = 5, qpoints = 49),
  mxFitFunctionML(),
  mxComputeSequence(list(
    mxComputeEM('expectation', 'scores',
      mxComputeNewtonRaphson())
  ))
)

twoplModel <- mxRun(twoplModel)

## Both deviances should equal 4933.307.
omxCheckCloseEnough(k0Model$output$fit, twoplModel$output$fit, .0001)

################################################################################
## k=1 is 3rd order polynomial. Test w/ priors vs. hard-coded param estimates
## obtained from CFF's own estimation code

k <- 1
par <- c(.1, 0, 0, -1)

# Prior means and (common) prior variance for alpha and tau.
pmalpha <- 0
pmtau <- -1
pvar <- 50

spec <- rep(list(rpf.lmp(k)), ni)

startingValues <- matrix(par, nrow = length(par), ncol = length(spec))

imat <- mxMatrix(
  name = "item", values = startingValues, free = TRUE,
  dimnames = list(c("omega", "xi", "alpha", "tau"), colnames(dat))
)

# Label the alpha/tau rows so the prior models below can share these
# parameters by label.
imat$labels['alpha', ] <- paste0('alpha', seq_len(ni))
imat$labels['tau', ] <- paste0('tau', seq_len(ni))

## Set up priors for alpha and tau; these are intended to be diffuse priors for stabilizing estimation
## e.g., pi(tau) ~ N(-1,50), pi(alpha) ~ N(0,50), or with "50" replaced with some large value
## TO DO: use ifatools for gaussian priors

# Free 1 x ni matrices whose labels match the alpha/tau rows of `item`, so
# each cell is the *same* free parameter as in the item model (OpenMx equates
# parameters by label).  Note: length(1:ni) was redundant; it is just ni.
gaussPriorAlpha <- mxMatrix(name="gaussPriorAlpha", nrow=1, ncol=ni,
  free=TRUE, labels=imat$labels['alpha',], values=imat$values['alpha',])
gaussPriorTau <- mxMatrix(name="gaussPriorTau", nrow=1, ncol=ni,
  free=TRUE, labels=imat$labels['tau',], values=imat$values['tau',])

# Fixed matrices holding the prior means and standard deviations.
gaussMalpha <- mxMatrix(name="gaussMalpha", nrow=1, ncol=ni, values=pmalpha)
gaussSDalpha <- mxMatrix(name="gaussSDalpha", nrow=1, ncol=ni, values=sqrt(pvar))
gaussMtau <- mxMatrix(name="gaussMtau", nrow=1, ncol=ni, values=pmtau)
gaussSDtau <- mxMatrix(name="gaussSDtau", nrow=1, ncol=ni, values=sqrt(pvar))

## How much -2logLik changes due to the priors and resulting derivatives
# Each prior contributes the Normal -2*log-density summed over items:
#   sum( log(2*pi) + 2*log(sd) + (x - m)^2 / sd^2 ),
# with analytic gradient 2*(x - m)/sd^2 and constant diagonal Hessian
# 2/sd^2.  The dimnames on the gradient/Hessian algebras map each entry to
# the shared parameter labels so the Newton-Raphson optimizer can consume
# the derivatives directly.
gaussFitAlpha <- mxAlgebra(sum(log(2*pi) + 2*log(gaussSDalpha) +
  (gaussPriorAlpha-gaussMalpha)^2/gaussSDalpha^2), name="gaussFitAlpha")
gaussGradAlpha <- mxAlgebra(2*(gaussPriorAlpha - gaussMalpha)/gaussSDalpha^2, name="gaussGradAlpha",
  dimnames=list(c(),gaussPriorAlpha$labels))
gaussHessAlpha <- mxAlgebra(vec2diag(2/gaussSDalpha^2), name="gaussHessAlpha",
  dimnames=list(gaussPriorAlpha$labels, gaussPriorAlpha$labels))

# Same three algebras for the tau parameters.
gaussFitTau <- mxAlgebra(sum(log(2*pi) + 2*log(gaussSDtau) +
  (gaussPriorTau-gaussMtau)^2/gaussSDtau^2), name="gaussFitTau")
gaussGradTau <- mxAlgebra(2*(gaussPriorTau - gaussMtau)/gaussSDtau^2, name="gaussGradTau",
  dimnames=list(c(),gaussPriorTau$labels))
gaussHessTau <- mxAlgebra(vec2diag(2/gaussSDtau^2), name="gaussHessTau",
  dimnames=list(gaussPriorTau$labels, gaussPriorTau$labels))

# Wrap each prior as a stand-alone model whose algebra fit function supplies
# the analytic gradient and Hessian declared above.
gaussModelAlpha <- mxModel(model="gaussModelAlpha", gaussPriorAlpha, gaussMalpha, gaussSDalpha,
  gaussFitAlpha, gaussGradAlpha, gaussHessAlpha,
  mxFitFunctionAlgebra("gaussFitAlpha", gradient="gaussGradAlpha", hessian="gaussHessAlpha"))

gaussModelTau <- mxModel(model="gaussModelTau", gaussPriorTau, gaussMtau, gaussSDtau,
  gaussFitTau, gaussGradTau, gaussHessTau,
  mxFitFunctionAlgebra("gaussFitTau", gradient="gaussGradTau", hessian="gaussHessTau"))

# Likelihood component: BA81 expectation with an ML fit for the item data.
itemModel <- mxModel(
  model = "itemModel", imat,
  mxData(observed = dat, type = "raw"),
  mxExpectationBA81(spec, qwidth = 5, qpoints = 49),
  mxFitFunctionML()
)

# Total fit = data -2logLik plus the two prior contributions (MAP estimation).
fit <- mxFitFunctionMultigroup(
  groups = c('itemModel', 'gaussModelAlpha', 'gaussModelTau')
)

k1PriorModel <- mxModel(
  model = "k1PriorModel", itemModel, gaussModelAlpha, gaussModelTau, fit,
  mxComputeSequence(list(
    mxComputeEM(
      'itemModel.expectation', 'scores',
      mxComputeNewtonRaphson(maxIter = 10000),
      maxIter = 2000
    )
  ))
)

k1PriorModel <- mxRun(k1PriorModel)

## Log likelihood if Bayesian estimates are substituted into the ML fit function.
omxCheckCloseEnough(
  k1PriorModel$output$algebras$itemModel.fitfunction, 4929.896, 1e-2
)

est <- k1PriorModel$output$estimate

## Reference estimates from CFF's independent implementation.
myEst <- c(
  -0.27386336,  2.69911765, -0.06118075, -3.03394846,
  -0.10517433,  0.66718611, -0.72263680, -2.99031027,
  -0.60439972,  0.65458936,  1.23987805, -1.55414690,
  -2.09965081,  1.09230606, -2.87804350, -0.82664116,
  -0.91245939,  1.74431613, -1.42863857, -2.27822768
)

## Should be close, but maybe not exact due to the scaling of some parameters
omxCheckCloseEnough(est, myEst, .1)