%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Copyright (c) 2003-2018 by The University of Queensland
% http://www.uq.edu.au
%
% Primary Business: Queensland, Australia
% Licensed under the Apache License, version 2.0
% http://www.apache.org/licenses/LICENSE-2.0
%
% Development until 2012 by Earth Systems Science Computational Center (ESSCC)
% Development 2012-2013 by School of Earth Sciences
% Development from 2014 by Centre for Geoscience Computing (GeoComp)
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\chapter{Non-Linear Partial Differential Equations}
\label{APP: NEWTON}
The solution $u_i$ is given as a solution of
the nonlinear equation
\begin{equation} \label{APP NEWTON EQU 40}
\int_{\Omega} v_{i,j} \cdot X_{ij} + v_{i} \cdot Y_{i} \; dx
+ \int_{\partial \Omega} v_{i} \cdot y_{i} \; ds = 0
\end{equation}
for all smooth $v_i$ with $v_i=0$ where $q_i>0$ and
\begin{equation} \label{APP NEWTON EQU 40b}
u_i=r_i \mbox{ where } q_i>0
\end{equation}
where $X_{ij}$ and $Y_i$ are non-linear functions of the solution $u_k$ and its gradient $u_{k,l}$
and $y_i$ is a function of the solution $u_k$. For further convenience we will use the
notation
\begin{equation} \label{APP NEWTON EQU 40c}
<F(u),v> :=\int_{\Omega} v_{i,j} \cdot X_{ij} + v_{i} \cdot Y_{i} \; dx
+ \int_{\partial \Omega} v_{i} \cdot y_{i} \; ds
\end{equation}
for all smooth $v$ on $\Omega$. If one interprets $F(u)$ as defined above as a functional
over the set of admissible functions $v$,
equation~(\ref{APP NEWTON EQU 40}) can be written in the compact form
\begin{equation} \label{APP NEWTON EQU 40d}
F(u)= 0
\end{equation}
\section{Newton-Raphson Scheme}
This equation is iteratively solved by the Newton-Raphson method\index{Newton-Raphson method}, see \cite{Kelley2004a}.
Starting with the initial guess
$u^{(0)}$ the sequence
\begin{equation} \label{APP NEWTON EQU 43}
u^{(\nu)}= u^{(\nu-1)} - \delta^{(\nu-1)}
\end{equation}
for $\nu=1,2,\ldots \;$ generates the (general) Newton-Raphson iteration for the
solution $u$. The correction $\delta^{(\nu-1)}$ is the solution of the linear problem
\begin{equation} \label{APP NEWTON EQU 43b0}
< \fracp{F}{u^{(\nu-1)}} \delta^{(\nu-1)} ,v > = <F(u^{(\nu-1)}),v>
\end{equation}
for all smooth $v$ on $\Omega$ with $v_i=0$ where $q_i>0$,
where
\begin{equation} \label{APP NEWTON EQU 43b}
< \fracp{F}{u} \cdot \delta ,v > =
\int_{\Omega} \left( \fracp{X_{ij}}{u_{k,l}} v_{i,j}\delta_{k,l} +
\fracp{X_{ij}}{u_{k}} v_{i,j}\delta_{k} + \fracp{Y_{i}}{u_{k,l}} v_{i}\delta_{k,l} +
\fracp{Y_{i}}{u_{k}} v_{i}\delta_{k} \right) \; dx
+ \int_{\partial \Omega}
\fracp{y_{i}}{u_{k}} v_{i}\delta_{k} \; ds
\end{equation}
It is assumed that the initial guess $u^{(0)}$ fulfills the constraint~(\ref{APP NEWTON EQU 40b}).
The correction $\delta^{(\nu-1)}$ then has to fulfill the homogeneous constraint $\delta^{(\nu-1)}_i=0$ where $q_i>0$.
Notice that the calculation of $\delta^{(\nu-1)}$ requires the solution of a linear PDE
as presented in section~\ref{SEC LinearPDE}.
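The following Python sketch illustrates the bare iteration~(\ref{APP NEWTON EQU 43}); the
functions \texttt{eval\_F} and \texttt{solve\_linearised} are hypothetical placeholders for the
evaluation of the defect $F(u)$ and the solution of the linear problem~(\ref{APP NEWTON EQU 43b0}),
and the simple solution-level test~(\ref{APP NEWTON EQU 61}) is used for stopping; the refined
criteria are discussed below.
\begin{verbatim}
import numpy as np

def newton_raphson(u0, eval_F, solve_linearised, rtol=1.e-8, max_steps=50):
    # u0 must fulfill the constraint u_i = r_i where q_i > 0
    u = u0.copy()
    for nu in range(1, max_steps + 1):
        F = eval_F(u)                      # defect F(u^(nu-1))
        delta = solve_linearised(u, F)     # correction from the linear PDE,
                                           # delta_i = 0 where q_i > 0
        u = u - delta                      # update (APP NEWTON EQU 43)
        # naive stopping test (APP NEWTON EQU 61) with delta as error estimate:
        if np.max(abs(delta)) <= rtol * np.max(abs(u)):
            return u
    raise RuntimeError("Newton-Raphson did not converge")
\end{verbatim}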
The Newton iteration should be stopped in the $\nu$-th step if, for
all components of the solution, the error of the Newton
approximation is lower than the given relative tolerance {\it rtol}:
\begin{equation}\label{APP NEWTON EQU 61}
\| u_{i} - u_{i}^{(\nu)} \|_{\infty} \le \mbox{rtol} \cdot \|u_{i} \|_{\infty} \; ,
\end{equation}
for all components $i$
where $\| \cdot \|_{\infty}$ denotes the supremum norm. To measure the quality of the solution approximation
on the level of the equation we introduce the weak norm
\begin{equation}\label{APP NEWTON EQU 62}
\| F(u) \|_{i} := \sup_{v , v=0 \mbox{ where } q_{i}>0 } \frac{<F(u), ve_{i}>}{\|v\|_1}
\end{equation}
where $(e_{i})_{j}=\delta_{ij}$ and $\|v\|_1=\int_{\Omega} |v| \,dx$ is the $L^1$ norm of $v$
\footnote{In practice a discretization method is applied to solve the update $\delta^{(\nu-1)}$.
In this case also an approximation of $\| F(u) \|_{i}$ is calculated by taking the maximum over all
basis functions used to represent the solution $u$.}.
The stopping criterion (\ref{APP NEWTON EQU 61}) is therefore transferred to the level of the
equation. We use the reasonable heuristic, but mathematically
incorrect, argument that the change on the level of the solution and
the change on the level of the equation are proportional:
\begin{equation}\label{APP NEWTON EQU 64}
\frac{ \| u_i - u^{(\nu)}_i \|_{\infty} }{ \| 0 - F(u^{(\nu)}) \|_{i} } =
\frac{ \| \delta^{(\nu)}_i \|_{\infty} }{ \| F(u^{(\nu)}) - F(u^{(\nu-1)}) \|_{i} } \; .
\end{equation}
where we assume that component $i$ of $F(u)$ is mainly controlled by component $i$ of the solution.
We assume that the term $F(u^{(\nu)})$ can be neglected versus
$F(u^{(\nu-1)})$ since $u^{(\nu)}$ is a better approximation,
and use the stopping criterion in the formulation:
\begin{equation} \label{APP NEWTON EQU 65}
\| F(u^{(\nu)}) \|_i \le
\frac{ \| F(u^{(\nu-1)})\|_{i} \cdot \|u_{i} \|_{\infty} } { \| \delta^{(\nu)}_i \| _{\infty} }
\,\mbox{\it rtol}\, =:\, \mbox{\it qtol}_i \; ,
\end{equation}
which has to hold for all components $i$.
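A small Python sketch of this test, assuming the per-component norms are available as
\texttt{numpy} arrays indexed by $i$ (all names are illustrative only):
\begin{verbatim}
import numpy as np

def qtol(F_norm_prev, u_sup, delta_sup, rtol):
    # qtol_i = rtol * ||F(u^(nu-1))||_i * ||u_i||_inf / ||delta^(nu)_i||_inf
    # (APP NEWTON EQU 65)
    return rtol * F_norm_prev * u_sup / delta_sup

def equation_level_converged(F_norm, F_norm_prev, u_sup, delta_sup, rtol):
    # accept if ||F(u^(nu))||_i <= qtol_i for every component i
    return np.all(F_norm <= qtol(F_norm_prev, u_sup, delta_sup, rtol))
\end{verbatim}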
Now {\it qtol} defines a tolerance on the level of the equation. This stopping criterion is not free of problems, because a
decrease of the defect $F(u^{(\nu)})$ coupled with a constant
correction $\delta^{(\nu)}$ suggests a good approximation.
But the quality of the approximation $u^{(\nu)}$ is ensured if the
Newton iteration converges quadratically. This convergence behavior
is given by the error estimation:
\begin{equation}
\| u - u^{(\nu)} \|_{\infty} \le C \;
\| u - u^{(\nu-1)} \|_{\infty}^2
\end{equation}
with a positive value $C$, see \cite{Kelley2004a}. Therefore a quadratic
convergence of the Newton iteration can be assumed if the corrections
of the current and the last step fulfill the following condition:
\begin{equation} \label{APP NEWTON EQU 66}
\max_{i}
\frac{\| \delta^{(\nu)}_i \|_{\infty} }{\| \delta^{(\nu-1)}_i \|_{\infty} }
< \frac{1}{5} \;,
\end{equation}
where the limit $\frac{1}{5}$ was found by a large number of experiments.
The approximation $u^{(\nu)}$ is accepted if the conditions
(\ref{APP NEWTON EQU 65}) and (\ref{APP NEWTON EQU 66}) hold. Consequently a
safe approximation requires at least two Newton steps.
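In code the acceptance test could be written as follows, again with illustrative
\texttt{numpy} arrays of per-component norms:
\begin{verbatim}
import numpy as np

def converges_quadratically(delta_sup, delta_sup_prev):
    # condition (APP NEWTON EQU 66): every component of the correction
    # shrinks by at least a factor of five between consecutive steps
    return np.max(delta_sup / delta_sup_prev) < 0.2

def accept(F_norm, qtol, delta_sup, delta_sup_prev):
    # accept u^(nu) only if both (APP NEWTON EQU 65) and (APP NEWTON EQU 66)
    # hold; this requires at least two Newton steps
    return np.all(F_norm <= qtol) and \
           converges_quadratically(delta_sup, delta_sup_prev)
\end{verbatim}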
To stop a divergent iteration, which may occur for a bad initial guess,
the norms of the defects for the $(\nu-1)$-th and $\nu$-th
Newton step are compared. Here we use the estimation
\begin{equation} \label{APP NEWTON EQU 67}
\| F(u^{(\nu)}) \|_i \le
\gamma \| F(u^{(\nu-1)}) \|_i
\end{equation}
for the defects. The value $0<\gamma<1$ depends on $F$
and the distance of the initial guess $u^{(0)}$ to the true solution $u$.
Since the constant $\gamma$ is unknown and can be close to one,
convergence is assumed if the following (weaker) condition holds
for all components $i$:
\begin{equation} \label{APP NEWTON EQU 68}
\| F(u^{(\nu)}) \|_i
< \| F(u^{(\nu-1)}) \|_i \; .
\end{equation}
If condition (\ref{APP NEWTON EQU 68}) fails, divergence may begin. Therefore
under-relaxation is started. Beginning with $\omega=1$
the Newton-Raphson iteration is computed by
\begin{equation} \label{APP NEWTON EQU 69}
u^{(\nu)} = u^{(\nu-1)} - \omega \, \delta^{(\nu)}
\end{equation}
instead of (\ref{APP NEWTON EQU 43}). If this new iteration fulfills the
condition (\ref{APP NEWTON EQU 68}) of decreasing defect, we accept it. Otherwise
we put $\omega \rightarrow \frac{\omega}{2}$, recompute $u^{(\nu)}$ from equation
(\ref{APP NEWTON EQU 69}) and retry condition (\ref{APP NEWTON EQU 68}) for the
new $u^{(\nu)}$, and so on until either condition (\ref{APP NEWTON EQU 68})
holds or $\omega$ becomes too small ($\omega < \omega_{lim}=0.01$). In the latter case the
iteration gives up. The under-relaxation
converges only linearly for $\omega<1$, but it is a rather robust
procedure which switches back to $\omega=1$ as soon as possible.
The price for the robustness is the additional computation of the
defects.
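A sketch of this damping strategy in Python; \texttt{eval\_F\_norm} is a hypothetical helper
returning the per-component weak norms $\|F(u)\|_i$, and $\omega$ is carried over between Newton
steps and doubled again (up to $1$) at the start of each step, as in Figure~\ref{APP NEWTON PIC 61}:
\begin{verbatim}
import numpy as np

def damped_update(u_prev, delta, F_norm_prev, eval_F_norm,
                  omega, omega_lim=0.01):
    omega = min(2.0 * omega, 1.0)             # try to grow omega back towards 1
    while omega >= omega_lim:
        u_new = u_prev - omega * delta        # under-relaxed update (APP NEWTON EQU 69)
        F_norm_new = eval_F_norm(u_new)
        if np.all(F_norm_new < F_norm_prev):  # defect decreased (APP NEWTON EQU 68)
            return u_new, F_norm_new, omega
        omega *= 0.5                          # halve the step and retry
    raise RuntimeError("Newton-Raphson fails: omega < omega_lim")
\end{verbatim}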
Due to the quadratic convergence near the solution the error decreases
rapidly. Then the solution will not change much and it will not be
necessary to mount a new coefficient matrix in each iteration step.
This algorithm is called the simplified Newton method. It converges
linearly by
\begin{equation}
\| u_i - u^{(\nu)}_i \|_{\infty} \le \gamma^{\nu}
\| u_i - u ^{(0)}_i \|_{\infty} \; ,
\end{equation}
where $\gamma$ equals the $\gamma$ in the estimation (\ref{APP NEWTON EQU 67}).
If the general iteration converges quadratically
(condition (\ref{APP NEWTON EQU 66}) holds) and we have
\begin{equation}
\| F(u^{(\nu)}) \|_i < 0.1
\| F(u^{(\nu-1)}) \|_i
\end{equation}
for all components $i$,
we can expect $\gamma \le 0.1$. Then the simplified iteration produces
one digit in every step and so we change from the general to the
simplified method. The `slow' convergence requires more iteration steps,
but the expensive mounting of the coefficient matrix is saved.
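The switching rule can be summarised as in the following sketch (per-component norms again as
illustrative \texttt{numpy} arrays):
\begin{verbatim}
import numpy as np

def switch_to_simplified(F_norm, F_norm_prev, delta_sup, delta_sup_prev):
    # freeze the coefficient matrix once the general iteration converges
    # quadratically (APP NEWTON EQU 66) and the defect drops by at least a
    # factor of ten in every component
    quadratic = np.max(delta_sup / delta_sup_prev) < 0.2
    fast_drop = np.all(F_norm < 0.1 * F_norm_prev)
    return quadratic and fast_drop
\end{verbatim}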
The linear PDE~(\ref{APP NEWTON EQU 43b}) is solved with a certain tolerance, namely when
the defect of the current
approximation of $\delta^{(\nu)}$ relative to the defect of
the current Newton iteration is lower than {LINTOL}. To avoid
wasting CPU time the iterative linear solver must be controlled by an
efficient stopping criterion. We set
\begin{equation}
\mbox{LINTOL} = 0.1 \cdot \max ( \left( \frac{\|\delta^{(\nu)}_i\|_{\infty}}{\|u_i^{(\nu)}\|_{\infty}} \right)^2
,\min_{i} \frac{\mbox{qtol}_i}{\|F(u^{(\nu)})\|_{i}} )
\end{equation}
but restrict {LINTOL} by
\begin{equation}
10^{-4} \le \mbox{LINTOL} \le 0.1 \quad \mbox{ and } \quad
\mbox{LINTOL}=0.1 \; \mbox{ for }\nu=0 \;.
\end{equation}
The first term means that it would be useless to compute digits by the
linear solver, which are overwritten by the next Newton step. In the
region of quadratic convergence the number of significant digits is
doubled in each Newton step, i.e.~later digits are overwritten by the
following Newton-Raphson correction, see \cite{Schoenauer1981a}. The second
term means that no digits should be computed whose significance lies
below the prescribed tolerance {\it rtol}. The number $0.1$ is a `safety factor' to take care
of the coarse norm estimations. Figure~\ref{APP NEWTON PIC 61} shows the workflow of
the Newton-Raphson update algorithm.
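A direct transcription of this heuristic, with the maximum over the components taken for the first
term (the text leaves that index implicit) and all names illustrative:
\begin{verbatim}
import numpy as np

def lintol(delta_sup, u_sup, qtol, F_norm, nu):
    # relative tolerance for the inner linear solve
    if nu == 0:
        return 0.1                            # first step: a coarse solve suffices
    val = 0.1 * max(np.max(delta_sup / u_sup) ** 2,
                    np.min(qtol / F_norm))
    return min(max(val, 1.e-4), 0.1)          # restrict to 1e-4 <= LINTOL <= 0.1
\end{verbatim}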
\begin{figure}
\begin{center}
{
\unitlength0.92mm
\begin{picture}(200,235) \thicklines
\put(-10,-065){\begin{picture}(170,280) \thicklines
\newsavebox{\BZar}
\savebox{\BZar}(0,0) {
\thicklines
\put(0,10){\line(-3,-1){30}}
\put(0,10){\line(3,-1){30}}
\put(0,-10){\line(-3,1){30}}
\put(0,-10){\line(3,1){30}}
\put(0,20){\vector(0,-1){10}}
\put(0,-10){\vector(0,-1){10}}
\put(30,0){\vector(1,0){25}} }
\newsavebox{\BZal}
\savebox{\BZal}(0,0) {
\thicklines
\put(0,10){\line(-3,-1){30}}
\put(0,10){\line(3,-1){30}}
\put(0,-10){\line(-3,1){30}}
\put(0,-10){\line(3,1){30}}
\put(0,20){\vector(0,-1){10}}
\put(0,-10){\vector(0,-1){10}}
\put(-30,0){\vector(-1,0){10}} }
\put(20,285){\framebox(60,20){\parbox{48mm}
{Start: \\ $\nu=0$ , $\omega=1$ \\
calculate $F(u^{(0)})$ }} }
\put(50,285){\vector(0,-1){10}}
\put(20,265){\framebox(60,10){\parbox{48mm}
{next iteration: $\nu \leftarrow \nu+1$}} }
\put(50,265){\vector(0,-1){10}}
\put(20,240){\framebox(60,15){\parbox{54mm}
{\hspace*{2.00mm} Solve \\
$\fracp{F}{ u^{(\nu-1)}} \delta^{(\nu)} =
F(u^{(\nu-1)})$}} }
\put(50,240){\vector(0,-1){10}}
\put(20,220){\framebox(60,10){\parbox{48mm}
{$\omega = \min(2\,\omega,1)$}} }
\put(50,220){\vector(0,-1){10}}
\put(120,225){\line(-3,-1){30}}
\put(120,225){\line(3,-1){30}}
\put(120,205){\line(-3,1){30}}
\put(120,205){\line(3,1){30}}
\put(110,214){$\omega \le \omega_{lim}$ ?}
\put(83,219){no}
\put(153,219){yes}
\put(090,215){\vector(-1,0){40}}
\put(150,215){\line(1,0){05}}
\put(20,200){\framebox(60,10){\parbox{48mm}
{$u^{(\nu)} = u^{(\nu-1)} - \omega\delta^{(\nu)}$}} }
\put(50,180){\usebox{\BZar}}
\put(30,179){$F(u^{(\nu)}) < F(u^{(\nu-1)})$ ?}
\put(83,184){no}
\put(55,165){yes}
\put(105,175){\framebox(30,10){\parbox{23mm}
{$\omega=\omega/2$}} }
\put(120,185){\vector(0,1){20}}
\put(20,145){\framebox(60,15){\parbox{48mm}
{evaluate stopping criteria~(\ref{APP NEWTON EQU 65}) \\
and if not simplified mode~(\ref{APP NEWTON EQU 66}) } } }
\put(50,145){\vector(0,-1){10}}
\put(50,125){\usebox{\BZar}}
\put(34,120){\shortstack{stopping criterion \\ satisfied ?}}
\put(83,129){yes}
\put(55,110){no}
\put(105,117.5){\framebox(30,15){\parbox{23mm}
{Newton ends \\ successfully!}} }
\put(135,125){\vector(1,0){20}}
\put(50,095){\usebox{\BZal}}
\put(26,093.5){$F(u^{(\nu)}) < 0.1 F(u^{(\nu-1)})$ ?}
\put(12,099){no}
\put(55,080){yes}
\put(20,065){\framebox(60,10){\parbox{48mm}
{switch to simplified Newton!}} }
\put(20,070){\vector(-1,0){10}}
\put(155,215){\vector(0,-1){60}}
\put(140,145){\framebox(30,10){\parbox{23mm}
{Newton fails!}} }
\put(155,145){\vector(0,-1){070}}
\put(155,070){\oval(40,10)\makebox(0,0){END}}
\put(010,280){\line(0,-1){210}}
\put(010,280){\vector(1,0){40}}
\end{picture} }
\end{picture} }
\end{center}
\caption{\label{APP NEWTON PIC 61}Flow diagram of the Newton-Raphson algorithm}
\end{figure}
\section{Local Sensitivity Analysis}
If the coefficients
$X_{ij}$, $Y_i$ and $y_{i}$ in equation~(\ref{APP NEWTON EQU 40})
depend on a vector of input factors $f_i$\index{input factor} and its gradient, one is interested in how the solution $u_i$
changes if the input factors are changed. This problem is called a local sensitivity
analysis\index{local sensitivity analysis}. Let $u(f)$ denote the
solution of equation~(\ref{APP NEWTON EQU 40}) for the input factor $f$
and $u(f+\alpha \cdot g)$ the solution
for the perturbed value $f+\alpha \cdot g$ of the input factor, where
$g$ denotes the direction of perturbation and $\alpha$ is a small scaling factor.
The derivative of the solution in the direction $g$ is defined as
\begin{equation}
\fracp{u}{g} : = \lim_{\alpha \rightarrow 0} \frac{ u(f+\alpha \cdot g) - u(f)}{\alpha}
\end{equation}
In practice one needs to distinguish between the cases of a spatially constant
and a spatially variable input factor. In the first case $g$ is set to a unit vector while
in the second case an appropriate function needs to be given for $g$.
The function $\fracp{u}{g}$ is calculated by solving the equation
\begin{align} \label{APP NEWTON EQU 100}
\int_{\Omega} \left( \fracp{X_{ij}}{u_{k,l}} v_{i,j} \left(\fracp{u_k}{g}\right)_{,l} +
\fracp{X_{ij}}{u_{k}} v_{i,j}\fracp{u_k}{g} + \fracp{Y_{i}}{u_{k,l}} v_{i}\left(\fracp{u_k}{g}\right)_{,l} +
\fracp{Y_{i}}{u_{k}} v_{i}\fracp{u_k}{g}\right) \; dx
+ \int_{\partial \Omega}
\fracp{y_{i}}{u_{k}} v_{i}\fracp{u_k}{g}\; ds \\
+ \int_{\Omega} v_{i,j} \left( \fracp{X_{ij}}{f_{k,l}} g_{k,l} + \fracp{X_{ij}}{f_{k}} g_k \right)
+ v_{i} \left( \fracp{Y_{i}}{f_{k,l}} g_{k,l} + \fracp{Y_{i}}{f_{k}} g_k \right) \; dx
+ \int_{\partial \Omega} v_{i}
\fracp{y_{i}}{f_{k}} g_k\; ds = 0
\end{align}
for all smooth $v$ on $\Omega$ with $v_i=0$ where $q_i>0$,
for the unknown sensitivity $\fracp{u}{g}$.
Notice that this equation is similar to the equation which needs to be solved for
the Newton-Raphson correction $\delta_k$, see equation~(\ref{APP NEWTON EQU 43b}).
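As a sketch, the sensitivity solve could reuse the machinery of the Newton correction;
\texttt{assemble\_sensitivity\_system} is a hypothetical helper returning the linearised operator
$\fracp{F}{u}$ evaluated at the solution $u$ together with the right-hand side built from the
derivatives with respect to the input factor applied to the direction $g$ (the sign convention
below is an assumption consistent with equation~(\ref{APP NEWTON EQU 100})):
\begin{verbatim}
def directional_sensitivity(u, g, assemble_sensitivity_system, solve_linear):
    # equation (APP NEWTON EQU 100):  dF/du * du/dg = -(dF/df applied to g)
    # with the homogeneous constraint (du/dg)_i = 0 where q_i > 0
    A, b = assemble_sensitivity_system(u, g)
    return solve_linear(A, -b)        # sensitivity du/dg in the direction g
\end{verbatim}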