#
# fields is a package for analysis of spatial data written for
# the R software environment.
# Copyright (C) 2024 Colorado School of Mines
# 1500 Illinois St., Golden, CO 80401
# Contact: Douglas Nychka, douglasnychka@gmail.com,
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the R software environment; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# or see http://www.r-project.org/Licenses/GPL-2
##END HEADER
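#
# sim.Krig: conditional simulation of the spatial process fit by Krig.
# The algorithm implemented below is the usual one for conditional simulation:
#   1. simulate an unconditional Gaussian field at the (collapsed) data and
#      prediction locations using a Cholesky square root of the covariance,
#   2. form synthetic observations by adding measurement error at the data
#      locations,
#   3. predict at xp from the synthetic data and take the difference from the
#      simulated 'true' field as the prediction error,
#   4. add this error to the conditional mean from the original fit.
# The result is an m x M matrix: one conditional draw per column at the rows of xp.
#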
"sim.Krig" <- function(object, xp, M = 1,
    verbose = FALSE, ...) {
    tau2 <- object$best.model[2]
    sigma <- object$best.model[3]
    #
    # check for unique rows of xp
    if (any(duplicated(xp))) {
        stop("prediction locations should be unique")
    }
    #
    # set up various sizes of arrays
    m <- nrow(xp)
    n <- nrow(object$xM)
    N <- length(object$y)
    if (verbose) {
        cat(" m,n,N", m, n, N, fill = TRUE)
    }
    # transform the new points
    xc <- object$transform$x.center
    xs <- object$transform$x.scale
    xpM <- scale(xp, xc, xs)
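    # (scale() here just centers xp by xc and divides by xs, so the new points are
    # expressed in the same transformed coordinates as the stored data locations
    # object$xM.)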
    # complete set of points for prediction.
    # check for replicates and adjust
    x <- rbind(object$xM, xpM)
    if (verbose) {
        cat("full x ", fill = TRUE)
        print(x)
    }
    #
    # find the indices of all rows of xp that correspond to rows of
    # xM and then collapse x to its unique rows.
    rep.x.info <- fields.duplicated.matrix(x)
    x <- as.matrix(x[!duplicated(rep.x.info), ])
    if (verbose) {
        cat("full x without duplicates ", fill = TRUE)
        print(x)
    }
    N.full <- nrow(x)
    if (verbose) {
        cat("N.full", N.full, fill = TRUE)
    }
    # these give the locations in the x matrix used to reconstruct the xp matrix
    xp.ind <- rep.x.info[(1:m) + n]
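    # (rep.x.info maps each row of the stacked matrix to the index of its unique
    # row, so entries n + 1, ..., n + m recover the xp rows from the collapsed x.)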
    if (verbose) {
        print(N.full)
        print(x)
    }
    if (verbose) {
        cat("reconstruction of xp from collapsed locations",
            fill = TRUE)
        print(x[xp.ind, ])
    }
    #
    # Sigma is the full covariance at the data locations and at the prediction points.
    #
    Sigma <- sigma * do.call(object$cov.function.name, c(object$args,
        list(x1 = x, x2 = x)))
    #
    # square root of Sigma for simulating the field.
    # Cholesky is fast but not very stable.
    #
    # the following line is similar to chol(Sigma) -> Schol
    # but adds any additional arguments controlling the Cholesky
    # decomposition that are carried in the Krig object.
    #
    Schol <- do.call("chol", c(list(x = Sigma), object$chol.args))
    #
    # output matrix to hold the results
    out <- matrix(NA, nrow = m, ncol = M)
    #
    # find the conditional mean field from the initial fit.
    # don't multiply by the sd or add the mean if this is
    # a correlation model fit
    # (these are added at the predict step).
    h.hat <- predict(object, xp, ...)
    # marginal standard deviation of the field.
    temp.sd <- 1
    #
    # this is not 1 if the Krig object is a correlation model.
    if (object$correlation.model) {
        if (!is.na(object$sd.obj[1])) {
            temp.sd <- predict(object$sd.obj, xp)
        }
    }
    #
    # define W2i for simulating the measurement errors.
    #
    W2i <- Krig.make.Wi(object)$W2i
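    # W2i here is taken to be the inverse square root of the observation weight
    # matrix returned by Krig.make.Wi, so sqrt(tau2) * W2i %d*% rnorm(N) below
    # generates measurement errors whose variances are tau2 scaled by the
    # (inverse) observation weights.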
    for (k in 1:M) {
        # simulate the full field
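        # (chol() returns the upper triangular factor R with t(R) %*% R == Sigma,
        # so t(Schol) %*% z, with z a vector of iid standard normals, has
        # covariance Sigma.)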
        h <- t(Schol) %*% rnorm(N.full)
        # values of the simulated field at the observation locations
        #
        # NOTE: the fixed part of the model (null space) need not be simulated
        # because the estimator is unbiased for this part.
        # its variability is still captured because the fixed part
        # is still estimated as part of the predict step below.
        h.data <- h[1:n]
        # expand the values according to the replicate pattern
        h.data <- h.data[object$rep.info]
        # create synthetic data
        y.synthetic <- h.data + sqrt(tau2) * W2i %d*% rnorm(N)
        # predict at xp using these data
        # and subtract from the 'true' simulated value.
        # note that the true values of the field have to be expanded in the
        # case of common locations between xM and xp.
        h.true <- h[xp.ind]
        temp.error <- predict(object, xp, y = y.synthetic,
            eval.correlation.model = FALSE, ...) - h.true
        # add the error to the actual estimate (conditional mean)
        # and adjust by the marginal standard deviation
        out[, k] <- h.hat + temp.error * temp.sd
    }
    out
}
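#
# Example usage (a sketch, kept commented out so it is not run with the package
# code): 'ChicagoO3' is the small ozone dataset shipped with fields; the aRange
# value and the number of draws are illustrative assumptions only.
#
# fit <- Krig(ChicagoO3$x, ChicagoO3$y, aRange = 100)
# xp <- ChicagoO3$x                    # simulate at the observation locations
# draws <- sim.Krig(fit, xp, M = 50)   # 50 conditional draws, one per column
# predSE <- apply(draws, 1, sd)        # Monte Carlo prediction standard errors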