File: genD.Rd

package info (click to toggle)
r-cran-numderiv 2012.9-1-1
  • links: PTS
  • area: main
  • in suites: jessie, jessie-kfreebsd
  • size: 184 kB
  • sloc: makefile: 1
file content (102 lines) | stat: -rw-r--r-- 4,270 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
\name{genD}
\alias{genD}
\alias{genD.default}

\title{Generate Bates and Watts D Matrix}
\description{Generate a matrix of function derivative information.}
\usage{
    genD(func, x, method="Richardson",
                   method.args=list(), ...)
    \method{genD}{default}(func, x, method="Richardson",
      method.args=list(), ...)
}
\arguments{
    \item{func}{a function for which the first (vector) argument 
        is used as a parameter vector.}
    \item{x}{The parameter vector first argument to \code{func}.}
    \item{method}{one of \code{"Richardson"} or \code{"simple"} indicating 
    the method to use for the approximation.}
    \item{method.args}{arguments passed to method.  See \code{\link{grad}}. 
       (Arguments not specified remain with their default values.)}
    \item{...}{any additional arguments passed to \code{func}.
          WARNING: None of these should have names matching other arguments of this function.}
}
\value{
A list with elements as follows:
   \code{D} is a matrix of first and second order partial
      derivatives organized in the same manner as Bates and 
      Watts, the number of rows is equal to the length of the result of
      \code{func}, the first p columns are the Jacobian, and the 
      next p(p+1)/2 columns are the lower triangle of the second derivative
      (which is the Hessian for a scalar valued \code{func}).
   \code{p} is the length of \code{x} (dimension of the parameter space).
   \code{f0} is the function value at the point where the matrix \code{D} 
        was calculated. 
   The  \code{genD} arguments \code{func}, \code{x}, \code{d}, \code{method},
   and  \code{method.args} also are returned in the list.
}
\details{
   The derivatives are calculated numerically using Richardson improvement.
   Methods "simple" and "complex" are not supported in this function.
   The "Richardson" method calculates a numerical approximation of the first 
   and second derivatives of \code{func} at the point \code{x}. 
   For a scalar valued function these are the gradient vector and 
   Hessian matrix. (See \code{\link{grad}} and \code{\link{hessian}}.)
   For a vector valued function the first derivative is the Jacobian matrix 
   (see \code{\link{jacobian}}). 
   For the Richardson method 
   \code{method.args=list(eps=1e-4, d=0.0001, zero.tol=sqrt(.Machine$double.eps/7e-7), 
   r=4, v=2)} is set as the default.
   See \code{\link{grad}}
   for more details on Richardson's extrapolation parameters. 

   The first order derivative with respect to \eqn{x_i}{x_i} is 

   \deqn{f'_{i}(x) = <f(x_{1},\dots,x_{i}+d,\dots,x_{n}) -
               f(x_{1},\dots,x_{i}-d,\dots,x_{n})>/(2*d)}{%
 	 f'_{i}(x) = <f(x_{1},\dots,x_{i}+d,\dots,x_{n}) -
               f(x_{1},\dots,x_{i}-d,\dots,x_{n})>/(2*d)}

   The second order derivative with respect to \eqn{x_i}{x_i} is 

  \deqn{f''_{i}(x) = <f(x_{1},\dots,x_{i}+d,\dots,x_{n}) -
                   2 *f(x_{1},\dots,x_{n}) +
                    f(x_{1},\dots,x_{i}-d,\dots,x_{n})>/(d^2) }{%
	f''_{i}(x) = <f(x_{1},\dots,x_{i}+d,\dots,x_{n}) -
                   2 *f(x_{1},\dots,x_{n}) +
                    f(x_{1},\dots,x_{i}-d,\dots,x_{n})>/(d^2) }	    

   The second order derivative with respect to \eqn{x_i, x_j}{x_i, x_j} is 

  \deqn{f''_{i,j}(x) = <f(x_{1},\dots,x_{i}+d,\dots,x_{j}+d,\dots,x_{n}) -
                    2 *f(x_{1},\dots,x_{n}) + }{%
        f''_{i,j}(x) = <f(x_{1},\dots,x_{i}+d,\dots,x_{j}+d,\dots,x_{n}) -
                    2 *f(x_{1},\dots,x_{n}) + }

  \deqn{f(x_{1},\dots,x_{i}-d,\dots,x_{j}-d,\dots,x_{n})>/(2*d^2) -
		      (f''_{i}(x) + f''_{j}(x))/2 }{%
        f(x_{1},\dots,x_{i}-d,\dots,x_{j}-d,\dots,x_{n})>/(2*d^2) -
		      (f''_{i}(x) + f''_{j}(x))/2 }

}

\references{ 
   Linfield, G.R. and Penny, J.E.T. (1989) "Microcomputers in Numerical Analysis."
    Halsted Press.
   
   Bates, D.M. & Watts, D. (1980), "Relative Curvature Measures of Nonlinearity."
   J. Royal Statistics Soc. series B, 42:1-25

   Bates, D.M. and Watts, D. (1988) "Non-linear Regression Analysis and Its Applications."
   Wiley.
}

\seealso{
   \code{\link{hessian}}, 
   \code{\link{grad}}
}
\examples{
    func <- function(x){c(x[1], x[1], x[2]^2)}
    z <- genD(func, c(2,2,5))
}
\keyword{multivariate}