% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MxCompute.R
\name{mxComputeGradientDescent}
\alias{mxComputeGradientDescent}
\alias{MxComputeGradientDescent-class}
\title{Optimize parameters using a gradient descent optimizer}
\usage{
mxComputeGradientDescent(
  freeSet = NA_character_,
  ...,
  engine = NULL,
  fitfunction = "fitfunction",
  verbose = 0L,
  tolerance = NA_real_,
  useGradient = deprecated(),
  warmStart = NULL,
  nudgeZeroStarts = mxOption(NULL, "Nudge zero starts"),
  maxMajorIter = NULL,
  gradientAlgo = deprecated(),
  gradientIterations = deprecated(),
  gradientStepSize = deprecated()
)
}
\arguments{
\item{freeSet}{names of matrices containing free parameters.}
\item{...}{Not used. Forces remaining arguments to be specified by name.}
\item{engine}{which optimizer to use: one of 'CSOLNP', 'SLSQP', or 'NPSOL'}
\item{fitfunction}{name of the fitfunction (defaults to 'fitfunction')}
\item{verbose}{integer. Level of run-time diagnostic output. Set to zero to disable.}
\item{tolerance}{how close to the optimum is close enough (also known as the optimality tolerance)}
\item{useGradient}{\lifecycle{soft-deprecated}}
\item{warmStart}{a Cholesky factored Hessian to use as the NPSOL Hessian starting value (preconditioner)}
\item{nudgeZeroStarts}{whether to nudge any zero starting values prior to optimization (default TRUE)}
\item{maxMajorIter}{maximum number of major iterations}
\item{gradientAlgo}{\lifecycle{soft-deprecated}}
\item{gradientIterations}{\lifecycle{soft-deprecated}}
\item{gradientStepSize}{\lifecycle{soft-deprecated}}
}
\description{
This optimizer does not require analytic derivatives of the fit
function. The fully open-source CRAN version of OpenMx offers two
choices: CSOLNP and SLSQP (from the NLOPT collection). The OpenMx
Team's version of OpenMx offers three optimizers: CSOLNP, SLSQP,
and NPSOL.
}
\details{
All three optimizers can use analytic gradients, but only NPSOL
uses \code{warmStart}. To customize other optimizer options, see
\link{mxOption}; a brief sketch follows.
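
For example, a sketch of switching the default optimizer for the
whole session via \code{mxOption} (assuming the 'Default optimizer'
option key):

\preformatted{
# Use SLSQP for subsequent mxRun() calls in this session
mxOption(NULL, "Default optimizer", "SLSQP")
}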
}
\examples{
data(demoOneFactor)
factorModel <- mxModel(name="One Factor",
  mxMatrix(type="Full", nrow=5, ncol=1, free=FALSE, values=0.2, name="A"),
  mxMatrix(type="Symm", nrow=1, ncol=1, free=FALSE, values=1, name="L"),
  mxMatrix(type="Diag", nrow=5, ncol=5, free=TRUE, values=1, name="U"),
  mxAlgebra(expression=A \%*\% L \%*\% t(A) + U, name="R"),
  mxExpectationNormal(covariance="R", dimnames=names(demoOneFactor)),
  mxFitFunctionML(),
  mxData(observed=cov(demoOneFactor), type="cov", numObs=500),
  mxComputeSequence(steps=list(
    mxComputeGradientDescent(),
    mxComputeNumericDeriv(),
    mxComputeStandardError(),
    mxComputeHessianQuality()
  )))
factorModelFit <- mxRun(factorModel)
factorModelFit$output$conditionNumber # 29.5
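
# A sketch of requesting a specific engine and a tighter optimality
# tolerance for a single compute plan (values shown are illustrative,
# not defaults):
slsqpPlan <- mxComputeGradientDescent(engine="SLSQP", tolerance=1e-9)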
}
\references{
Luenberger, D. G. & Ye, Y. (2008). \emph{Linear and nonlinear programming.} Springer.
}