File: PPC-loo.Rd

package info (click to toggle)
r-cran-bayesplot 1.14.0-1
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid
  • size: 7,288 kB
  • sloc: sh: 13; makefile: 2
file content (361 lines) | stat: -rw-r--r-- 13,584 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ppc-loo.R
\name{PPC-loo}
\alias{PPC-loo}
\alias{ppc_loo_pit_overlay}
\alias{ppc_loo_pit_data}
\alias{ppc_loo_pit_qq}
\alias{ppc_loo_pit_ecdf}
\alias{ppc_loo_pit}
\alias{ppc_loo_intervals}
\alias{ppc_loo_ribbon}
\title{LOO predictive checks}
\usage{
ppc_loo_pit_overlay(
  y,
  yrep,
  lw = NULL,
  ...,
  psis_object = NULL,
  pit = NULL,
  samples = 100,
  size = 0.25,
  alpha = 0.7,
  boundary_correction = TRUE,
  grid_len = 512,
  bw = "nrd0",
  trim = FALSE,
  adjust = 1,
  kernel = "gaussian",
  n_dens = 1024
)

ppc_loo_pit_data(
  y,
  yrep,
  lw = NULL,
  ...,
  psis_object = NULL,
  pit = NULL,
  samples = 100,
  bw = "nrd0",
  boundary_correction = TRUE,
  grid_len = 512
)

ppc_loo_pit_qq(
  y,
  yrep,
  lw = NULL,
  ...,
  psis_object = NULL,
  pit = NULL,
  compare = c("uniform", "normal"),
  size = 2,
  alpha = 1
)

ppc_loo_pit_ecdf(
  y,
  yrep,
  lw = NULL,
  ...,
  psis_object = NULL,
  pit = NULL,
  K = NULL,
  prob = 0.99,
  plot_diff = FALSE,
  interpolate_adj = NULL
)

ppc_loo_pit(
  y,
  yrep,
  lw,
  pit = NULL,
  compare = c("uniform", "normal"),
  ...,
  size = 2,
  alpha = 1
)

ppc_loo_intervals(
  y,
  yrep,
  psis_object,
  ...,
  subset = NULL,
  intervals = NULL,
  prob = 0.5,
  prob_outer = 0.9,
  alpha = 0.33,
  size = 1,
  fatten = 2.5,
  linewidth = 1,
  order = c("index", "median")
)

ppc_loo_ribbon(
  y,
  yrep,
  psis_object,
  ...,
  subset = NULL,
  intervals = NULL,
  prob = 0.5,
  prob_outer = 0.9,
  alpha = 0.33,
  size = 0.25
)
}
\arguments{
\item{y}{A vector of observations. See \strong{Details}.}

\item{yrep}{An \code{S} by \code{N} matrix of draws from the posterior (or prior)
predictive distribution. The number of rows, \code{S}, is the size of the
posterior (or prior) sample used to generate \code{yrep}. The number of columns,
\code{N} is the number of predicted observations (\code{length(y)}). The columns of
\code{yrep} should be in the same order as the data points in \code{y} for the plots
to make sense. See the \strong{Details} and \strong{Plot Descriptions} sections for
additional advice specific to particular plots.}

\item{lw}{A matrix of (smoothed) log weights with the same dimensions as
\code{yrep}. See \code{\link[loo:psis]{loo::psis()}} and the associated \code{weights()} method as well as
the \strong{Examples} section, below. If \code{lw} is not specified then
\code{psis_object} can be provided and log weights will be extracted.}

\item{...}{Currently unused.}

\item{psis_object}{If using \strong{loo} version \verb{2.0.0} or greater, an
object returned by the \code{psis()} function (or by the \code{loo()} function
with argument \code{save_psis} set to \code{TRUE}).}

\item{pit}{For \code{ppc_loo_pit_overlay()}, \code{ppc_loo_pit_qq()}, and
\code{ppc_loo_pit_ecdf()}, optionally a vector of precomputed PIT values that
can be specified instead of \code{y}, \code{yrep}, and \code{lw} (these are all ignored
if \code{pit} is specified). If not specified, the PIT values are computed
internally before plotting.}

\item{samples}{For \code{ppc_loo_pit_overlay()}, the number of data sets (each
the same size as \code{y}) to simulate from the standard uniform
distribution. The default is 100. The density estimate of each dataset is
plotted as a thin line in the plot, with the density estimate of the LOO
PITs overlaid as a thicker dark line.}

\item{alpha, size, fatten, linewidth}{Arguments passed to geoms to control
plot aesthetics. For \code{ppc_loo_pit_qq()} and \code{ppc_loo_pit_overlay()}, \code{size}
and \code{alpha} are passed to \code{\link[ggplot2:geom_point]{ggplot2::geom_point()}} and
\code{\link[ggplot2:geom_density]{ggplot2::geom_density()}}, respectively. For \code{ppc_loo_intervals()}, \code{size},
\code{linewidth} and \code{fatten} are passed to \code{\link[ggplot2:geom_linerange]{ggplot2::geom_pointrange()}}. For
\code{ppc_loo_ribbon()}, \code{alpha} and \code{size} are passed to
\code{\link[ggplot2:geom_ribbon]{ggplot2::geom_ribbon()}}.}

\item{boundary_correction}{For \code{ppc_loo_pit_overlay()}, when set to \code{TRUE}
(the default) the function will compute boundary corrected density values
via convolution and a Gaussian filter, also known as the reflection method
(Boneva et al., 1971). As a result, parameters controlling the standard
kernel density estimation such as \code{adjust}, \code{kernel} and \code{n_dens} are
ignored. NOTE: The current implementation only works well for continuous
observations.}

\item{grid_len}{For \code{ppc_loo_pit_overlay()}, when \code{boundary_correction} is
set to \code{TRUE} this parameter specifies the number of points used to
generate the estimations. This is set to 512 by default.}

\item{bw, adjust, kernel, n_dens}{Optional arguments passed to
\code{\link[stats:density]{stats::density()}} to override default kernel density estimation
parameters. \code{n_dens} defaults to \code{1024}.}

\item{trim}{Passed to \code{\link[ggplot2:geom_density]{ggplot2::stat_density()}}.}

\item{compare}{For \code{ppc_loo_pit_qq()}, a string that can be either
\code{"uniform"} or \code{"normal"}. If \code{"uniform"} (the default) the Q-Q plot
compares computed PIT values to the standard uniform distribution. If
\code{compare="normal"}, the Q-Q plot compares standard normal quantiles
calculated from the PIT values to the theoretical standard normal
quantiles.}

\item{K}{For \code{ppc_loo_pit_ecdf()}, an optional integer defining the number
of equally spaced evaluation points for the PIT-ECDF. Reducing \code{K} when
using \code{interpolate_adj = FALSE} makes computing the confidence bands
faster. If \code{pit} is supplied, defaults to \code{length(pit)}; otherwise
\code{yrep} determines the maximum accuracy of the estimated PIT values and
\code{K} is set to \code{min(nrow(yrep) + 1, 1000)}.}

\item{prob, prob_outer}{Values between \code{0} and \code{1} indicating the desired
probability mass to include in the inner and outer intervals. The defaults
are \code{prob = 0.5} and \code{prob_outer = 0.9} for \code{ppc_loo_intervals()} and
\code{prob = 0.99} for \code{ppc_loo_pit_ecdf()}.}

\item{plot_diff}{For \code{ppc_loo_pit_ecdf()}, a boolean defining whether to
plot the difference between the observed PIT-ECDF and the theoretical
expectation for uniform PIT values rather than plotting the regular ECDF.
The default is \code{FALSE}, but for large samples we recommend setting
\code{plot_diff = TRUE} to better use the plot area.}

\item{interpolate_adj}{For \code{ppc_loo_pit_ecdf()}, a boolean defining if the
simultaneous confidence bands should be interpolated based on precomputed
values rather than computed exactly. Computing the bands may be
computationally intensive and the approximation gives a fast method for
assessing the ECDF trajectory. The default is to use interpolation if \code{K}
is greater than 200.}

\item{subset}{For \code{ppc_loo_intervals()} and \code{ppc_loo_ribbon()}, an optional
integer vector indicating which observations in \code{y} (and \code{yrep}) to
include. Dropping observations from \code{y} and \code{yrep} manually before passing
them to the plotting function will not work because the dimensions will not
match up with the dimensions of \code{psis_object}, but if all of \code{y} and \code{yrep}
are passed along with \code{subset} then \strong{bayesplot} can do the subsetting
internally for \code{y}, \code{yrep} \emph{and} \code{psis_object}. See the \strong{Examples}
section for a demonstration.}

\item{intervals}{For \code{ppc_loo_intervals()} and \code{ppc_loo_ribbon()}, optionally
a matrix of pre-computed LOO predictive intervals that can be specified
instead of \code{yrep} (\code{yrep} is ignored if \code{intervals} is specified). If not
specified, the intervals are computed internally before plotting. If
specified, \code{intervals} must be a matrix with number of rows equal to the
number of data points and five columns in the following order: lower outer
interval, lower inner interval, median (50\%), upper inner interval and
upper outer interval (column names are ignored).}

\item{order}{For \code{ppc_loo_intervals()}, a string indicating how to arrange
the plotted intervals. The default (\code{"index"}) is to plot them in the
order of the observations. The alternative (\code{"median"}) arranges them
by median value from smallest (left) to largest (right).}
}
\value{
A ggplot object that can be further customized using the \strong{ggplot2} package.
}
\description{
Leave-One-Out (LOO) predictive checks. See the \strong{Plot Descriptions} section,
below, and \href{https://github.com/jgabry/bayes-vis-paper#readme}{Gabry et al. (2019)}
for details.
}
\section{Plot Descriptions}{

\describe{
\item{\code{ppc_loo_pit_overlay()}, \code{ppc_loo_pit_qq()}, \code{ppc_loo_pit_ecdf()}}{
The calibration of marginal predictions can be assessed using probability
integral transformation (PIT) checks. LOO improves the check by avoiding the
double use of data. See the section on marginal predictive checks in Gelman
et al. (2013, p. 152--153) and section 5 of Gabry et al. (2019) for an
example of using \strong{bayesplot} for these checks.

The LOO PIT values are asymptotically uniform (for continuous data) if the
model is calibrated. The \code{ppc_loo_pit_overlay()} function creates a plot
comparing the density of the LOO PITs (thick line) to the density estimates
of many simulated data sets from the standard uniform distribution (thin
lines). See Gabry et al. (2019) for an example of interpreting the shape of
the miscalibration that can be observed in these plots.

The \code{ppc_loo_pit_qq()} function provides an alternative visualization of
the miscalibration with a quantile-quantile (Q-Q) plot comparing the LOO
PITs to the standard uniform distribution. Comparing to the uniform
distribution is not good for extreme probabilities close to 0 and 1, so it
can sometimes be useful to set the \code{compare} argument to \code{"normal"}, which
will produce a Q-Q plot comparing standard normal quantiles calculated from
the PIT values to the theoretical standard normal quantiles. This can help
see the (mis)calibration better for the extreme values. However, in most
cases we have found that the overlaid density plot made by
\code{ppc_loo_pit_overlay()} will provide a clearer picture of calibration
problems than the Q-Q plot.

The \code{ppc_loo_pit_ecdf()} function visualizes the empirical cumulative
distribution function (ECDF) of the LOO PITs overlaid with simultaneous
confidence intervals for a standard uniform sample. For large samples,
these confidence intervals are visually very narrow. Setting the
\code{plot_diff} argument to \code{TRUE} transforms the plot to display the
difference of the ECDF and the theoretical expectation, which can aid in
the visual assessment of calibration.
}
\item{\code{ppc_loo_intervals()}, \code{ppc_loo_ribbon()}}{
Similar to \code{\link[=ppc_intervals]{ppc_intervals()}} and \code{\link[=ppc_ribbon]{ppc_ribbon()}} but the intervals are for
the LOO predictive distribution.
}
}
}

\examples{
\dontrun{
library(rstanarm)
library(loo)

head(radon)
fit <- stan_lmer(
  log_radon ~ floor + log_uranium + floor:log_uranium
    + (1 + floor | county),
  data = radon,
  iter = 100,
  chains = 2,
  cores = 2
)
y <- radon$log_radon
yrep <- posterior_predict(fit)

loo1 <- loo(fit, save_psis = TRUE, cores = 4)
psis1 <- loo1$psis_object
lw <- weights(psis1) # normalized log weights

# marginal predictive check using LOO probability integral transform
color_scheme_set("orange")
ppc_loo_pit_overlay(y, yrep, lw = lw)

ppc_loo_pit_qq(y, yrep, lw = lw)
ppc_loo_pit_qq(y, yrep, lw = lw, compare = "normal")

# predictive calibration check using LOO probability integral transform
ppc_loo_pit_ecdf(y, yrep, lw)

# With `plot_diff = TRUE` it is easier to assess the calibration.
ppc_loo_pit_ecdf(y, yrep, lw, plot_diff = TRUE)

# can use the psis object instead of lw
ppc_loo_pit_qq(y, yrep, psis_object = psis1)

# loo predictive intervals vs observations
keep_obs <- 1:50
ppc_loo_intervals(y, yrep, psis_object = psis1, subset = keep_obs)

color_scheme_set("gray")
ppc_loo_intervals(y, yrep,
  psis_object = psis1, subset = keep_obs,
  order = "median"
)
}

}
\references{
Gelman, A., Carlin, J. B., Stern, H. S., Dunson, D. B., Vehtari,
A., and Rubin, D. B. (2013). \emph{Bayesian Data Analysis.} Chapman & Hall/CRC
Press, London, third edition. (p. 152--153)

Gabry, J. , Simpson, D. , Vehtari, A. , Betancourt, M. and
Gelman, A. (2019), Visualization in Bayesian workflow.
\emph{J. R. Stat. Soc. A}, 182: 389-402. doi:10.1111/rssa.12378.
(\href{https://rss.onlinelibrary.wiley.com/doi/full/10.1111/rssa.12378}{journal version},
\href{https://arxiv.org/abs/1709.01449}{arXiv preprint},
\href{https://github.com/jgabry/bayes-vis-paper}{code on GitHub})

Vehtari, A., Gelman, A., and Gabry, J. (2017). Practical
Bayesian model evaluation using leave-one-out cross-validation and WAIC.
\emph{Statistics and Computing}. 27(5), 1413--1432.
doi:10.1007/s11222-016-9696-4. arXiv preprint:
\url{https://arxiv.org/abs/1507.04544}

Boneva, L. I., Kendall, D., & Stefanov, I. (1971). Spline
transformations: Three new diagnostic aids for the statistical
data-analyst. \emph{J. R. Stat. Soc. B} (Methodological), 33(1), 1--71.
\url{https://www.jstor.org/stable/2986005}
}
\seealso{
Other PPCs: 
\code{\link{PPC-censoring}},
\code{\link{PPC-discrete}},
\code{\link{PPC-distributions}},
\code{\link{PPC-errors}},
\code{\link{PPC-intervals}},
\code{\link{PPC-overview}},
\code{\link{PPC-scatterplots}},
\code{\link{PPC-test-statistics}}
}
\concept{PPCs}