File: p_laplacian.tex

package info (click to toggle)
rheolef 7.1-6
  • links: PTS, VCS
  • area: main
  • in suites: bullseye
  • size: 77,392 kB
  • sloc: cpp: 105,337; sh: 16,014; makefile: 5,293; python: 1,359; xml: 221; yacc: 218; javascript: 202; awk: 61; sed: 5
file content (1277 lines) | stat: -rw-r--r-- 49,504 bytes parent folder | download | duplicates (4)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
%\section{The $p$-Laplacian problem}

% ---------------------------------- 
\subsection{Problem statement}
% ---------------------------------- 
\label{sec-p-laplacian}%
\pbindex{p-Laplacian}%
  Let us consider the classical $p$-Laplacian problem
  with homogeneous Dirichlet boundary conditions
  in a bounded domain $\Omega \subset \mathbb{R}^d$,
  $d=1,2,3$:

  {\it (P): find $u$, defined in $\Omega$ such that:}
  \begin{eqnarray*}
      -{\rm div}\left(\eta\left(|\bnabla u|^{2}\right) \bnabla u\right)&=& f \ {\rm in}\ \Omega \\
      u &=& 0 \ {\rm on}\ \partial \Omega
  \end{eqnarray*}
  where $\eta: z\in\mathbb{R}^+ \longmapsto z^\frac{p-2}{2}\in\mathbb{R}^+$.
  Several variants of the function $\eta$ can be considered:
  see~\citet{Sar-2016-cfma} for practical and useful examples:
\cindex{benchmark!pipe flow}%
  this problem represents a pipe flow of a non-Newtonian power-law fluid.
  Here $p\in ]1,+\infty[$ and $f$ are known.
  For the computational examples, we choose $f=1$.
\cindex{boundary condition!Dirichlet}%
\pbindex{Poisson}%
  When $p=2$, this problem reduces to the linear Poisson problem
  with homogeneous Dirichlet boundary conditions.
  Otherwise, for any $p>1$, the nonlinear problem is equivalent to the following minimization problem:

  {\it (MP): find $u\in W^{1,p}_0(\Omega)$ such that:}
  $$
          u
          \ \ = \ \ 
          \argmin_{v\in W^{1,p}_0(\Omega)}
          \ \ 
          \Frac{1}{2}
          \int_\Omega H\left(|\nabla v |^2\right) \, {\rm d}x
          -
          \int_\Omega f\,v \, {\rm d}x,
  $$
  where $H$ denotes the primitive of $\eta$:
  \[
     H(z) = \int_0^z \eta(s) \, {\rm d}s = \Frac{2 z^{p/2}}{p}
  \]
\cindex{space!$W^{1,p}$}%
\cindex{space!$W^{1,p}_0$}%
\cindex{space!$W^{-1,p}$}%
  Here $W^{1,p}_0(\Omega)$ denotes the usual
  Sobolev space of functions in $W^{1,p}(\Omega)$
  that vanish on the boundary~\citep[p.~118]{Bre-1983}.
  We also assume that $f\in W^{-1,p}(\Omega)$,
  where $W^{-1,p}(\Omega)$ denotes the dual space of $W^{1,p}_0(\Omega)$.
  The variational formulation of this problem expresses:
  
  {\it (VF): find $u\in W^{1,p}_0(\Omega)$ such that:}
  $$
      a(u;u,v) = l(v),
      \ \forall v \in W^{1,p}_0(\Omega)
  $$
  where $a(.,.)$ and $l(.)$ are defined 
  for any $u_0,u,v\in  W^{1,p}(\Omega)$ by
  \begin{eqnarray}
          a(u_0;u,v) &=& \int_\Omega \eta\left(|\nabla u_0 |^{2}\right) \nabla u . \nabla v \, {\rm d}x,
	  \ \ \forall u,v \in W^{1,p}_0(\Omega)
	\label{eq-p-laplacian-a}
	  \\
          l(v) &=& \int_\Omega f\, v \, {\rm d}x,
	  \ \ \forall v \in L^2(\Omega)
	\label{eq-p-laplacian-l}
  \end{eqnarray}
\cindex{norm!in $W^{1,p}$}%
\cindex{norm!in $W^{1,p}_0$}%
  The quantity $a(u;u,u)^{1/p} = \|\bnabla u\|_{0,p,\Omega}$ 
  induces a norm in  $W^{1,p}_0$, equivalent to the standard norm.
\cindex{form!energy}%
  The form $a(.;.,.)$ is bilinear with respect to its two last variables
  and is related to the {\it energy} form.
 
% ---------------------------------- 
\subsection{The fixed-point algorithm}
% ---------------------------------- 
\subsubsection*{Principle of the algorithm}
\cindex{method!fixed-point}%
  This nonlinear problem is then reduced to a sequence
  of linear subproblems by using the fixed-point algorithm.
  The sequence $\left(u^{(n)}\right)_{n\geq 0}$ is defined
  by recurrence as:
  \begin{itemize}
  \item $n=0$:
  let $u^{(0)}\in W^{1,p}_0(\Omega)$ be known.
  \item $n\geq 0$:
  suppose that $u^{(n)}\in W^{1,p}_0(\Omega)$ is known and
  find $u^{*}\in W^{1,p}_0(\Omega)$ such that:
  $$
      a\left(u^{(n)} ;u^{*},v\right) = l(v),
      \ \forall v \in W^{1,p}_0(\Omega)
  $$
  and then set
  $$ 
      u^{(n+1)} = \omega u^* + (1-\omega)\,u^{(n)}
  $$
  \end{itemize}
\cindex{method!fixed-point!relaxation}%
  Here $\omega>0$ is the relaxation parameter: when $\omega=1$ we obtain
  the usual un-relaxed fixed point algorithm. For stiff nonlinear problems, we will
  consider the under-relaxed case $0<\omega<1$.
  Let $u^{(n+1)}=G\left(u^{(n)}\right)$ denote the operator
  that solves the previous linear subproblem for a given $u^{(n)}$.
  Since the solution $u$ satisfies $u=G(u)$, it is a fixed-point of $G$.

  Let us introduce a mesh ${\cal T}_h$ of $\Omega$
  and the finite dimensional space  $X_h$ of continuous
  piecewise polynomial functions
  and $V_h$, the subspace of $X_h$
  containing elements that vanish on the boundary of $\Omega$:
  \begin{eqnarray*}
      X_h &=& \{ v_h \in C^{0}\left(\overline{\Omega}\right); \ 
          v_{h/K} \in P_k, \ 
	  \forall K \in {\cal T}_h \}
      \\
      V_h &=& \{ v_h \in X_h; \ v_h = 0 \mbox{ on } \partial\Omega \} 
  \end{eqnarray*}
  where $k=1$ or $2$.
  The approximate problem expresses:
  suppose that $u^{(n)}_h\in V_h$ is known and
  find $u^{*}_h\in V_h$ such that:
  $$
      a\left(u^{(n)}_h ;u^{*}_h,v_h\right) = l(v_h),
      \ \forall v_h \in V_h
  $$
  By developing $u_h^*$ on a basis of $V_h$, this 
  problem reduces to a linear system.

% --------------------------------------
\myexamplelicense{p_laplacian_fixed_point.cc}
% --------------------------------------

\subsubsection*{Comments}
\pbindex{Poisson}%
\cindex{form!weighted}%
\cindex{form!{$\eta\nabla u.\nabla v$}}%
\findex{integrate}%
\findex{compose}%
\findex{norm2}%
\findex{grad}%
The implementation with \Rheolef\ involves a weighted form:
the tensor-valued weight $\eta\left(\left|\nabla u^{(n)}_h\right|^2\right)$ is inserted 
in the variational expression passed to the \code{integrate} function.
The construction of the weighted form $a(.;.,.)$ writes:
\begin{lstlisting}[numbers=none,frame=none]
  form a = integrate(compose(eta(p),norm2(grad(uh)))*dot(grad(u),grad(v)));
\end{lstlisting}
Remark the usage of the \code{compose}, \code{norm2} and \code{grad} library functions.
The weight $\eta\left(\left|\nabla u^{(n)}_h\right|^2\right)$ is represented by
the \code{compose(eta(p),norm2(grad(uh)))} sub-expression.
This weight is evaluated on the
fly at the quadrature nodes during the assembly process
implemented by the \code{integrate} function.
%
Also, notice the distinction between $u_h$, that represents the value of the
solution at step $n$, and the trial $u$ and test $v$ functions, that represent
any element of the function space $X_h$.
These functions appear in the \code{dot(grad(u),grad(v))} sub-expression.
%% \cindex{quadrature formula}%
%% \cindex{form!weighted!quadrature formula}%
%% As the integrals involved by this weighted form cannot be computed exactly
%% for a general $\eta$ function, a quadrature formula is used:
%%   \[
%%      \int_K f(x) \, {\rm d}x
%%       =
%%      \sum_{q=0}^{n_K-1} f(x_{K,q})\,\omega_{K,q}
%%      +
%%      {\cal O}(h^{k'+1})
%%   \]
%% where $(x_{K,q},\omega_{K,q})_{0\leq q < n_K}$ are the quadrature nodes and weights
%% on $K$ and $k'$ is the order of the quadrature:
%% when $f$ is a polynomial of degree less than $k'$, the integral is exact.
%% The bilinear form $a(.,.)$ introduced in \eqref{eq-p-laplacian-a}
%% is then re-defined for all $u_0,u,v \in X_h$ by:
%%   \begin{equation}
%%      a(u_0;u,v)
%%      = \sum_{K\in {\cal T}_h}
%%        \sum_{q=0}^{n_K-1}
%%        \eta\left(|\nabla u_0 (x_{K,q}) |^{2}\right)
%%        \ \nabla u (x_{K,q}) . \nabla v (x_{K,q})
%%        \ \, \omega_{K,q}
%%      \label{eq-p-laplacian-a-quad}
%%   \end{equation}
%% \clindex{integrate_option}%
%%   We choose the Gauss quadrature formula and the order $k'$ is choosen as $k'=2k-1$:
%%   the number $n_K$ of nodes and weights in $K$ is adjusted correspondingly.
%%   This choice writes:
%% \begin{lstlisting}[numbers=none,frame=none]
%%   integrate_option iopt;
%%   iopt.set_family (integrate_option::gauss);
%%   iopt.set_order  (2*Xh.degree()-1);
%% \end{lstlisting}
%%   while the \code{iopt} variable is send as an optional argument to the
%%   weighted form $a(.,.)$ declaration.
%%   Remark that the integral would be exact for a constant weight.
%%   For a general weight, this choice also guarantee that
%%   the approximate solution $u_h$ converges optimally with mesh refinements
%%   to the exact solution $u$ (see~\citealp[p.~129]{RavTho-1983}).
%%   Note also that the Gauss quadrature formula is convenient here,
%%   as quadrature nodes are internal to the elements:
%%   evaluation of $\eta$ does not occurs at the domain boundaries,
%%   where the weight function could be singular when $p<2$ and
%%   where the gradient vanishes, e.g. at corners.

% --------------------------------------
\myexamplelicense{eta.h}
% --------------------------------------

  The $\eta$ function is implemented separately, in file
  named \code{eta.h} in order to easily change its definition.
  The \code{derivative} member function is not yet used here:
  it is implemented for a forthcoming application (the Newton method).
  Note the guards that check for division by zero and send a message
  related to the mesh: this will be commented in the next paragraph.
%
  Finally, the fixed-point algorithm is initiated with $u^{(0)}$ as
  the solution of the linear problem associated to $p=2$,
  i.e. the standard Poisson problem with Dirichlet boundary conditions.
% --------------------------------------
\myexamplelicense{dirichlet.icc}
% --------------------------------------

% -------------------------------
\subsubsection*{Running the program}
% -------------------------------
  Compile the program, as usual:
\begin{verbatim}
   make p_laplacian_fixed_point
\end{verbatim}
  and enter the commands:
\begin{verbatim}
  mkgeo_ugrid -t 50 > square.geo
  geo square.geo
\end{verbatim}
  The triangular mesh has a boundary domain named \code{boundary}.
\begin{verbatim}
  ./p_laplacian_fixed_point square.geo P1 1.5 > square.field
\end{verbatim}

  \begin{figure}[htb]
    %\begin{center}
       \mbox{}\hspace{-2.5cm}
       \begin{tabular}{cccc}
          \includegraphics[height=4.5cm]{p-laplacian-square-p=1,25-elevation.png} &
          \includegraphics[height=4.5cm]{p-laplacian-square-p=2-elevation.png} &
          \includegraphics[height=4.5cm]{p-laplacian-square-p=2,5-elevation.png} &
          \includegraphics[width=1cm]{lunettes-stereo.png}
       \end{tabular}
    %\end{center}
    \caption{The $p$-Laplacian for $d=2$:
	elevation view for $p=1.25$ (left), $p=2$ (center) and $p=2.5$ (right).}
    \label{fig-p-laplacian}
  \end{figure}
  Run the field visualization:
\pindexopt{field}{-cut}%
\pindexopt{field}{-origin}%
\pindexopt{field}{-normal}%
\pindexopt{field}{-elevation}%
\pindexopt{field}{-gnuplot}%
\begin{verbatim}
  field square.field -elevation -stereo
  field square.field -cut -origin 0.5 0.5 -normal 1 1 -gnuplot
\end{verbatim}
\cindex{visualization!elevation view}%
  The first command shows an elevation view of the
  solution (see Fig.~\ref{fig-p-laplacian}) while the second one
  shows a cut along the first bisector $x_0=x_1$.
  Observe that the solution becomes flat at the center when $p$ decreases.
  The $p=2$ case, corresponding to the linear case, is shown for the
  purpose of comparison.

  There is a technical issue concerning the mesh:
  the computation could fail on any mesh that presents
  at least one triangle with two edges on the boundary: 
\begin{verbatim}
  mkgeo_grid -t 50 > square-bedge.geo
  geo square-bedge.geo
  ./p_laplacian_fixed_point square-bedge.geo P1 1.5 > square-bedge.field
\end{verbatim}
  The computation stops and claims a division by zero: the three nodes of such a
  triangle are on the boundary, where $u_h=0$ is prescribed:
  thus $\bnabla u_h=0$ uniformly inside this element.
  Note that this failure occurs only for linear approximations: the computation
  works well on such meshes for $P_k$ approximations with $k\geq 2$.
\pindex{mkgeo_grid}%
\pindex{mkgeo_ugrid}%
\pindex{gmsh}%
\pindexopt{bamg}{-splitpbedge}%
  While the \code{mkgeo_grid} generates uniform meshes that have such triangles,
  the \code{mkgeo_ugrid} calls the \code{gmsh} generator that automatically
  splits the triangles with two boundary edges.
  When using \code{bamg}, you should consider the \code{-splitpbedge} option.

% ---------------------------------------------------------------
\subsubsection*{Convergence properties of the fixed-point algorithm}
% ---------------------------------------------------------------
\cindex{residual term}%
\cindex{convergence!residue!rate}%
  The fixed-point algorithm also prints
  $r_n$, the norm of the residual term, at each iteration $n$,
  and the convergence rate $v_n=\log_{10}(r_n/r_0)/n$.
  The residual term of the non-linear variational formulation is defined by:
  $$	
     r_h^{(n)} \in V_h
     \ \ \mbox{ and } \ \  
     m\left( r_h^{(n)}, v_h \right)
     =
     a\left(u_h^{(n)}; \, u_h^{(n)}, v_h \right)
     -
     l(v_h)
     , \ \ \forall v_h \in V_h
  $$
  where $m(.,.)$ denotes the $L^2$ scalar product.
  Clearly, $u_h^{(n)}$ is a solution if and only if $r_h^{(n)}=0$.

  For clarity, let us drop temporarily the $n$ index of the current iteration.
  The field $r_h\in V_h$ can be extended as a field $r_h \in X_h$ with
  vanishing components on the boundary.
  The previous relation writes, after expansion of the bilinear forms and
  fields on the unknown and blocked parts (see page~\pageref{field-u-b}
  for the notations):
\begin{verbatim}
               m.uu*rh.u = a.uu*uh.u + a.ub*ub.b - lh.u
                    rh.b = 0
\end{verbatim}
  This relation expresses that the residual term $r_h$ is obtained by
  solving a linear system involving the mass matrix.

\cindex{space!dual}%
\cindex{space!$W^{-1,p}$, dual of $W^{1,p}_0$}%
\cindex{norm!in $W^{-1,p}$}%
  It remains to choose a good norm for estimating this residual term.
  For the corresponding continuous formulation, we have:
  \[
     r 
     =
     -{\rm div}\left(\eta\left(|\bnabla u|^{2}\right) \bnabla u\right) - f
     \ \in W^{-1,p}(\Omega)
  \]
  Thus, for the continuous formulation, the residual term may be measured
  with the $W^{-1,p}(\Omega)$ norm.
  It is defined, for all $\varphi\in W^{-1,p}(\Omega)$, by duality:
  $$
    \|\varphi\|_{-1,p,\Omega}
	=
	\sup_{\stackrel{v\in W^{1,p}_0(\Omega)}{v\neq 0}}
	\Frac{\langle \varphi,v \rangle}{\|v\|_{1,p,\Omega}}
	=
	\sup_{\stackrel{v\in W^{1,p}_0(\Omega)}{\|v\|_{1,p,\Omega}=1}}
	\langle \varphi,v \rangle
  $$
\cindex{space!duality bracket $\langle .,. \rangle$}%
  where $\langle .,. \rangle$ denotes the duality bracket
  between $W^{1,p}_0(\Omega)$ and $W^{-1,p}(\Omega)$.

\cindex{norm!in $W^{-1,p}$!discrete version}%
  By analogy, let us introduce the discrete $W^{-1,p}(\Omega)$ norm,
  denoted as $\|.\|_{-1,h}$,  defined by duality for all $\varphi_h\in V_h$ by:
  $$
    \|\varphi_h\|_{-1,h}
    	\ = \ 
	\sup_{\stackrel{v_h\in V_h}{\|v_h\|_{1,p,\Omega}=1}}
	\langle \varphi_h,v_h \rangle
  $$
  The dual space of the finite element space $V_h$ is identified with $V_h$
  and the duality bracket is the Euclidean scalar product of
  $\mathbb{R}^{{\rm dim}(V_h)}$.
  Then, $\|\varphi_h\|_{-1,h}$ is the largest absolute value of components
  of $\varphi_h$ considered as a vector of $\mathbb{R}^{{\rm dim}(V_h)}$.
  With the notations of the \Rheolef\  library, it simply writes:
  \begin{center}
            \verb+Float r = rh.u().max_abs()+
  \end{center}

  \begin{figure}[htb]
     %\begin{center}
     \mbox{}\hspace{-0cm}
       \begin{tabular}{rrr}
          \includegraphics[height=6cm]{p-laplacian-fixed-point-p=1,5-res.pdf} &
          \includegraphics[height=6cm]{p-laplacian-fixed-point-p=1,5-Pk.pdf} \\
          \includegraphics[height=6cm]{p-laplacian-square-r1.pdf} &
          \includegraphics[height=6cm]{p-laplacian-square-r2.pdf}
       \end{tabular}
    %\end{center}
     \caption{The fixed-point algorithm on the $p$-Laplacian for $d=2$:
	when $p=3/2$,
	independence of the convergence properties of the residue
		(top-left) with mesh refinement;
		(top-right) with polynomial order $P_k$;
        when $h=1/50$ and $k=1$, convergence
	(bottom-left) for $p>2$ and
	(bottom-right) for $p<2$.
     }
     \label{fig-p-laplacian-rate-1}
  \end{figure}
  Fig~\ref{fig-p-laplacian-rate-1}.top-left shows that 
  the residual term decreases exponentially versus $n$,
  since the plot in semi-log scale tends to a straight line.
  Moreover, observe that the slope is independent of the mesh size $h$.
  Also, by virtue of the previous careful definition of the residual term
  and its corresponding norm, all the slopes fall onto a master curve.

  These invariance properties apply also to the
  polynomial approximation $P_k$~:
  Fig~\ref{fig-p-laplacian-rate-1}.top-right
  shows that all the curves tend to collapse when $k$ increases.
  Thus, the convergence properties of the algorithm are now investigated
  on a fixed mesh $h=1/50$ and for a fixed polynomial approximation $k=1$.

  Fig~\ref{fig-p-laplacian-rate-1}.bottom-left
  and~\ref{fig-p-laplacian-rate-1}.bottom-right
  show the convergence versus the power-law index $p$:
  observe that the convergence becomes easier when $p$ approaches
  $p=2$, where the problem is linear.
  In that case, the convergence occurs in one iteration.
  Nevertheless, two limitations appear.
  On the one hand, when $p \rightarrow 3$ the convergence starts to slow down
  and $p\geq 3$ cannot be solved by this algorithm (it will be solved later
  in this chapter).
  On the other hand, when $p \rightarrow 1$, the convergence slows down too
  and numerical rounding effects limit the convergence: the machine precision
  cannot be reached.
  \begin{figure}[htb]
     %\begin{center}
       \begin{tabular}{rr}
          \includegraphics[height=6cm]{p-laplacian-rate.pdf} &
          \includegraphics[height=6cm]{p-laplacian-rate-log.pdf}
       \end{tabular}
    %\end{center}
     \caption{The fixed-point algorithm on the $p$-Laplacian for $d=2$:
	(left) convergence rate versus $p$;
	(right) convergence rate versus $p$ in semi-log scale.
     }
     \label{fig-p-laplacian-rate-2}
  \end{figure}
  Let us introduce the convergence rate $v_n=\log_{10}(r_n/r_0)/n$:
  it tends to a constant, denoted as $\bar{v}$, and:
  \mbox{$
	r_n \approx r_0\times 10^{-\bar{v}\,n}
  $}.
  Observe on Fig~\ref{fig-p-laplacian-rate-2}.left
  that $\bar{v}$ tends to $+\infty$ when $p=2$, since
  the system becomes linear and the algorithm converges in one iteration. 
  Observe also that $\bar{v}$ tends to zero for $p=1$ and $p=3$
  since the algorithm diverges.
  Fig~\ref{fig-p-laplacian-rate-2}.right shows the same plot in semi-log
  scale and shows that $\bar{v}$ behaves as:
  \mbox{$
	\bar{v} \approx -\log_{10}\,|p-2|
  $}.
\cindex{convergence!residue!rate}%
  This study shows that the residual term of the fixed point
  algorithm behaves as:
  $$
	r_n \approx r_0\,|p-2|^n
  $$
% ---------------------------------------------------------------
\subsubsection*{Improvement by relaxation}
% ---------------------------------------------------------------
  \begin{figure}[htb]
     %\begin{center}
       \begin{tabular}{rr}
          \includegraphics[height=7cm]{p-laplacian-relax-1.pdf} &
          \includegraphics[height=7cm]{p-laplacian-relax-2.pdf} \\
          \includegraphics[height=7cm]{p-laplacian-relax-opt.pdf} &
          \includegraphics[height=7cm]{p-laplacian-relax-opt-rate.pdf}
       \end{tabular}
     %\end{center}
     \caption{The fixed-point algorithm on the $p$-Laplacian for $d=2$:
	effect of the relaxation parameter $\omega$
	(top-left) when $p<2$;
	(top-right) when $p>2$;
	(bottom-left) optimal $\omega_{\rm opt}$;
	(bottom-right) optimal $\bar{v}_{\rm opt}$.
       }
     \label{fig-p-laplacian-relax}
  \end{figure}
\cindex{method!fixed-point!relaxation}%
  The relaxation parameter can improve the fixed-point algorithm:
  for instance, for $p=3$ and $\omega=0.5$ we get a convergent sequence:
\begin{verbatim}
  ./p_laplacian_fixed_point square.geo P1 3 0.5 > square.field
\end{verbatim}
  Observe on Fig.~\ref{fig-p-laplacian-relax} the effect of the relaxation
  parameter $\omega$ upon the convergence rate $\bar{v}$: for $p<2$ it can
  improve the convergence and, for $p>2$, it allows convergence even when $p\geq 3$.
  For each $p$, there is clearly an optimal relaxation parameter, denoted
  by $\omega_{\rm opt}$.
  A simple fit shows that (see Fig.~\ref{fig-p-laplacian-relax}.bottom-left):
  \[
    \omega_{\rm opt} = 2/p
  \]
  Let us denote $\bar{v}_{\rm opt}$ the corresponding rate of convergence.
  Fig.~\ref{fig-p-laplacian-relax}.top-right shows that the convergence is
  dramatically improved when $p>2$ while the gain is less pronounced when $p<2$.
  Conveniently, replacing the extra parameter $\omega$ on the command line
  by \code{-} leads to compute automatically $\omega=\omega_{\rm opt}$:
  the fixed-point algorithm is always convergent with
  an optimal convergent rate, e.g.:
\begin{verbatim}
  ./p_laplacian_fixed_point square.geo P1 4.0 - > square.field
\end{verbatim}
  There is no way to further improve the fixed-point algorithm:
  the next paragraph shows a different algorithm that dramatically
  accelerates the computation of the solution.

\clearpage
% ---------------------------------- 
\subsection{The Newton algorithm}
% ---------------------------------- 
\label{sec-newton-method}%
\subsubsection*{Principle of the algorithm}
\cindex{method!Newton}%
  An efficient alternative to the fixed-point algorithm is
  to solve the nonlinear problem $(P)$
  by using the Newton algorithm.
  Let us consider the following operator:
  $$
    \begin{array}{ccccl}
    F &:& W^{1,p}_0(\Omega) & \longrightarrow & W^{-1,p}(\Omega)
 	\\
      & & u                 & \longmapsto & 
      F(u) = -{\rm div}\left(\eta\left(|\bnabla u|^{2}\right) \bnabla u\right) - f
    \end{array}
  $$
\cindex{residual term}%
  The $F$ operator computes simply the residual term and
  the problem expresses now as:
    find $u\in W^{1,p}_0(\Omega)$ such that $F(u)=0$.

  The Newton algorithm reduces the nonlinear problem into a sequence
  of linear subproblems:
  the sequence $\left(u^{(n)}\right)_{n\geq 0}$ is 
  classically defined by recurrence as:
  \begin{itemize}
  \item $n=0$:
  let $u^{(0)}\in W^{1,p}_0(\Omega)$ be known.
  \item $n\geq 0$:
  suppose that $u^{(n)}$ is known,
  find $\delta u^{(n)}$, defined in $\Omega$, such that:
  $$
      F'\left(u^{(n)}\right) \ \delta u^{(n)}
      =
      -F\left(u^{(n)}\right)
  $$ 
  and then compute explicitly:
  $$
      u^{(n+1)} := u^{(n)} + \delta u^{(n)}
  $$
  \end{itemize}
\pbindex{linear tangent}%
\cindex{Fr\'echet derivative}%
  The notation $F'(u)$ stands for the Fr\'echet derivative of $F$,
  as an operator from $W^{1,p}_0(\Omega)$ into $W^{-1,p}(\Omega)$.
  For any $r\in W^{-1,p}(\Omega)$, the linear tangent
  problem writes:\\
  \ \ \ find $\delta u\in W^{1,p}_0(\Omega)$ such that:\\
  \[
    F'(u)\,\delta u = -r
  \]
  After the computation of the Fr\'echet derivative,
  we obtain the strong form of this problem:\\
  \ \ $(LT)$: find $\delta u$, defined in $\Omega$, such that
  \begin{eqnarray*}
      -{\rm div}\left(\eta\left(|\bnabla u|^{2}\right) \bnabla(\delta u)
        + 2\eta'\left(|\bnabla u|^{2}\right) \left\{\bnabla u.\bnabla(\delta u)\right\}
          \bnabla u \right)
      &=&
      -r
      \ \ \ {\rm in}\ \Omega \\
      \delta u &=& 0
      \ \ \ \ \ {\rm on}\ \partial \Omega
  \end{eqnarray*}
  where 
  $$
      \eta'(z) = \Frac{1}{2} (p-2)z^\frac{p-4}{2}, \ \forall z > 0
  $$
\cindex{problem!Poisson!non-constant tensorial coefficients}%
\cindex{boundary condition!Dirichlet}%
  This is a Poisson-like
  problem with homogeneous Dirichlet boundary conditions
  and a non-constant tensorial coefficient.
  The variational form of the linear tangent problem writes:\\
  \ \ $(VLT)$: find $\delta u\in W^{1,p}_0(\Omega)$ such that
  $$
	a_1(u;\delta u,\delta v) = l_1(\delta v),
	\ \ \forall \delta v \in W^{1,p}_0(\Omega)
  $$
  where the $a_1(.;.,.)$ is defined
  for any $u, \delta u, \delta v\in W^{1,p}_0(\Omega)$  by:
  \begin{eqnarray*}
	a_1(u;\delta u,\delta v) 
	&=&
	\int_\Omega 
          \left(
              \eta\left(|\bnabla u|^{2}\right)
              \bnabla(\delta u).\bnabla(\delta v)
            + 2\eta'\left(|\bnabla u|^{2}\right) 
              \left\{\bnabla u.\bnabla(\delta u)\right\}
              \left\{\bnabla u.\bnabla(\delta v)\right\}
          \right)
          \, {\rm d}x
      \\
	l_1(v) 
	&=&
	-
	\int_\Omega 
          r \, v
          \, {\rm d}x
  \end{eqnarray*}
  For any $\boldsymbol{\xi}\in\mathbb{R}^d$
  let us denote by $\nu(\boldsymbol{\xi})$
  the following $d\times d$ matrix:
  $$
     \nu(\boldsymbol{\xi})
     =
     \eta\left(|\boldsymbol{\xi}|^{2}\right) \, I
     +
     2\eta'\left(|\boldsymbol{\xi}|^{2}\right)
     \ \boldsymbol{\xi}\otimes \boldsymbol{\xi}
  $$
  where $I$ stands for the $d$-order identity matrix.
  Then the $a_1$ expresses in a more compact form:
  $$
	a_1(u;\delta u,\delta v) 
	=
	\int_\Omega 
     	  \left(
            \nu(\bnabla u)
            \bnabla(\delta u)
          \right)
          . \bnabla(\delta v)
          \, {\rm d}x
  $$
  Clearly $a_1$ is linear and symmetric with respect to its two
  last variables.

%% \subsubsection*{Ellipticity of $a_1(u;.,.)$ in $H^1_0(\Omega)$ (TODO)}
%% \cindex{ellipticity!bilinear form}%
%%   The ellipticity~\citep[p.~37]{RavTho-1983}
%%   of the bilinear form $a(u;.,.)$ ensures that
%%   the linear tangent subproblem admits exactly one solution,
%%   i.e. that $F'(u)$ is non-singular.
%%   This properties is essential for the Newton algorithm to be well-posed.
%% \cindex{space!$L^p$}%
%% \cindex{norm!in $L^p$}%
%%   Let $\|.\|_{p,\Omega}$ be the standard norm 
%%   of $L^p(\Omega)$ for any $p>1$.
%%   Let also $\|.\|^2=\|.\|_{2,\Omega}$ denotes the $L^2(\Omega)$ norm.
%%   When $p>2$ we clearly have
%%   \begin{eqnarray*}
%%      a_0(u;\delta u,\delta u)
%%      &\geq&
%%      \int_\Omega
%%         |\bnabla u|^{p-2} |\bnabla(\delta u)|^2
%%         \, {\rm d}x
%%   \end{eqnarray*}
%% \cindex{norm!in $H_0^{1}$}%
%% \cindex{norm!in $H^{1}$}%
%% \cindex{inequality!Poincarr\'e}%
%%   From the Poincarr\'e inequality~\citep[p.~18]{RavTho-1983}
%%   we know that $\|\bnabla(.)\|$ induces in $H^1_0(\Omega)$
%%   a norm equivalent to the standard $H^1(\Omega)$ norm,
%%   then $a_0$ is $H^1_0$-elliptic with a constant equal
%%   to $\|\bnabla u\|^{p-2}_{\infty,\Omega}$.
%%   When $p<2$, we
%%   have 
%%   $$
%%     a_0(u;\delta u,\delta u) 
%%     =
%%     \|\bnabla u\|^{p-2} \|\bnabla(\delta u)\|^2
%%     -
%%     (2-p)\|\bnabla u\|^{p-4} m(\bnabla u,\bnabla(\delta u))^2
%%   $$
%% \cindex{inequality!Cauchy-Schwartz}%
%%   From the Cauchy-Schwartz inequality~\citep[p.~78]{Bre-1983}, we have
%%   $m(\bnabla u,\delta u) \leq \|\bnabla u\|\,\|\bnabla(\delta u)\|$
%%   and then:
%%   $$
%%     a_0(u;\delta u,\delta u) 
%%     \geq
%%     \|\bnabla u\|^{p-2} \|\bnabla(\delta u)\|^2
%%     -
%%     (2-p)\|\bnabla u\|^{p-2} \|\bnabla(\delta u)\|^2
%%     =
%%     (p-1)\|\bnabla u\|^{p-2} \|\bnabla(\delta u)\|^2
%%   $$
%%   and then $a_0$ is elliptic with a constant equal
%%   to $(p-1)\|\bnabla u\|^{p-2}$.
%% \cindex{Lax-Milgram theorem}
%%   Thus, from the Lax-Milgram theorem~\citep[p.~84]{Bre-1983}, the linear tangent 
%%   subproblem is always well-posed under the condition
%%   $\bnabla u \neq 0$.
%% 
%%   \begin{quote}
%%   \begin{bf}
%%    TODO: The proof this bad~!
%%    See more bibliography on the subject.
%%    The $H^1_0$ ellipticity is not sufficient:
%%    the $W^{1,p}_0$ ellipticity is required here.
%%    See with the Holder inequality~?
%%   \end{bf}
%%   \end{quote}
%%   

%---------------------------------
\myexamplelicense{p_laplacian_newton.cc}
\myexamplelicense{p_laplacian.h}
%---------------------------------
\subsubsection*{Comments}
\findex{newton}%
  The Newton algorithm is implemented in a generic way,
  for any $F$ function,
  by the \code{newton} function of the \Rheolef\  library.
  The reference manual for the \code{newton} generic function is available online:
\clindex{reference manual}%
\clindex{man}%
\begin{verbatim}
  man newton
\end{verbatim}
  The function $F$ and its derivative $F'$ are provided by a template class argument.
  Here, the~\code{p_laplacian} class describes our $F$ function, i.e. our problem
  to solve: its interface is defined in the file \reffile{p_laplacian.h}
  and its implementation in~\reffile{p_laplacian1.icc}
  and~\reffile{p_laplacian2.icc}.
  The introduction of the class~\code{p_laplacian} will allow an easy 
  exploration of some variants of the Newton algorithm for this problem,
  as we will see in the next section.
%---------------------------------
\myexamplelicense{p_laplacian1.icc}
%---------------------------------

The residual term $F(u_h)$ is computed by the member
function \code{residual} while the resolution
of $F'(u_h)\delta u_h = Mr_h$ is performed by the function \code{derivative_solve}.
The derivative $F'(u_h)$ is computed separately by the function \code{update_derivative}:
\findex{integrate}%
\findex{compose}%
\findex{grad}%
\cindex{form!weighted!quadrature formula}%
\cindex{form!weighted!tensorial weight}%
\cindex{form!{$(\eta\nabla u).\nabla v$}}%
\begin{lstlisting}[numbers=none,frame=none]
  a1 = integrate(dot(compose(nu<eta>(eta(p),d),grad(uh))*grad(u),grad(v)));
\end{lstlisting}
  Note that the $a_1(u;.,.)$ bilinear form is a tensorial weighted form, where 
  $\nu=\nu(\bnabla u)$ is the weight tensor.
  The tensorial weight $\nu$ is inserted as $(\nu \nabla u).\nabla v$ in the
  variational expression for the \code{integrate} function.
  As the tensor $\nu$ is symmetric, the bilinear form $a_1(.,.)$ is also symmetric.
%%   As the weight is non-polynomial for general $\eta$ function
%%   and a quadrature formula is used:
%%   \begin{equation}
%%      a_1(u_0;u,v)
%%      = \sum_{K\in {\cal T}_h}
%%        \sum_{q=0}^{n_K-1}
%%        \left( 
%%          \nu\left(\nabla u_0 (x_{K,q})\right)
%%          \ \nabla u (x_{K,q}) . \nabla v (x_{K,q})
%%        \right)
%%        \ \, \omega_{K,q}
%%      \label{eq-p-laplacian-a1-quad}
%%   \end{equation}
%%   By using exactly the same quadrature for computing
%%   both $a_1(.,.)$ and $a(.,.)$ in \eqref{eq-p-laplacian-a1-quad},
%%   then we have that $F'$ is always the derivative of $F$ at the discrete level:
%%   while, in general, the derivation and the discretization of problems does not
%%   commute, it is the case when using the same quadrature formulae on both problems.
%%   This is an important aspect of the Newton method at discrete level,
%%   for obtaining an optimal convergence rate of the residual terms versus $n$.
  
  The linear system involving the derivative $F'(u_h)$ is solved by the \code{p_laplacian}
  member function \code{derivative_solve}. 
  Finally, applying the generic Newton method requires
  a stopping criterion on the residual term: this is the aim of the
  member function \code{dual_space_norm}.
  The three last member functions are not used by the Newton algorithm, but
  by its extension, the damped Newton method, that will be presented later.

%---------------------------------
\myexamplelicense{p_laplacian2.icc}
%---------------------------------

\cindex{function!class-function object}%
  The $\nu$ function is implemented for a generic $\eta$ function, as a class-function
  that accepts as template argument another class-function.

%---------------------------------
\myexamplelicense{nu.h}
%---------------------------------

\subsubsection*{Running the program}

  \begin{figure}[htb]
     \begin{center}
       \begin{tabular}{c}
          \includegraphics[height=7cm]{p-laplacian-newton-p=3.pdf} 
       \end{tabular}
     \end{center}
     \caption{The Newton algorithm on the $p$-laplacian for $d=2$:
		comparison with the fixed-point algorithm.
     }
     \label{fig-p-laplacian-newton-cmp}
  \end{figure}
  Enter:
\begin{verbatim}
  make p_laplacian_newton
  mkgeo_ugrid -t 50 > square.geo
  ./p_laplacian_newton square.geo P1 3 > square.field
  field square.field -elevation -stereo
\end{verbatim}
  The program prints at each iteration $n$,
  the residual term $r_n$ in discrete $L^2(\Omega)$ norm.
  Convergence occurs in less than ten iterations: it dramatically improves
  the previous algorithm (see Fig.~\ref{fig-p-laplacian-newton-cmp}).
\cindex{convergence!residue!super-linear}%
  Observe that the slope is no more constant in semi-log scale:
  the convergence rate accelerates and the slope tends to 
  be vertical, the so-called super-linear convergence. 
  This is the major advantage of the Newton method.
  \begin{figure}[htb]
     %\begin{center}
       \mbox{}\hspace{-1cm}
       \begin{tabular}{cc}
          \includegraphics[height=7cm]{p-laplacian-square-newton-p=3-n.pdf} &
          \includegraphics[height=7cm]{p-laplacian-square-newton-p=3-Pk.pdf} \\
          \includegraphics[height=7cm]{p-laplacian-newton-p=1,5-n.pdf} &
          \includegraphics[height=7cm]{p-laplacian-newton-square-r2.pdf}
       \end{tabular}
     %\end{center}
     \caption{The Newton algorithm on the $p$-Laplacian for $d=2$:
		(top-left) comparison with the fixed-point algorithm;
	when $p=3$, independence of the convergence properties of the residue
		(top-left) with mesh refinement;
		(top-right) with polynomial order $P_k$;
	(bottom-left) mesh-dependence convergence when $p<2$;
	(bottom-right) overshoot when $p>2$.}
     \label{fig-p-laplacian-newton-rate}
  \end{figure}
  Figs.~\ref{fig-p-laplacian-newton-rate}.top-left
  and~\ref{fig-p-laplacian-newton-rate}.top-right
  show that the algorithm converges when $p\geq 3$ and that
  the convergence properties are independent of the mesh size $h$ and the 
  polynomial order $k$.
\cindex{method!fixed-point}%
  There are still two limitations of the method.
  On one hand, the Newton algorithm is no longer independent of $h$ and $k$ when $p \leq 3/2$
  and tends to diverge in that case when $h$ tends to zero
     (see Fig.~\ref{fig-p-laplacian-newton-rate}.bottom-left).
  On the other hand, when $p$ becomes large
     (see Fig.~\ref{fig-p-laplacian-newton-rate}.bottom-right),
  an overshoot in the convergence
  tends to increase and destroy the convergence, due to rounding problems.
  In order to circumvent these limitations, another strategy is considered
  in the next section: the damped Newton algorithm.

\clearpage
% ---------------------------------- 
\subsection{The damped Newton algorithm}
% ---------------------------------- 
\subsubsection*{Principe of the algorithm}
\cindex{method!Newton!damped}%
  The Newton algorithm diverges when 
  the initial $u^{(0)}$ is too far from a solution, e.g. when
  $p$ is not in the vicinity of $2$.
  Our aim is to modify the Newton algorithm and to obtain
  a {\em globally convergent algorithm},
  i.e. to converge to a solution for any initial $u^{(0)}$.
  By this way, the algorithm should converge for any value of $p\in ]1,+\infty [$.
  The basic idea is to decrease the step length while
  maintaining the direction
  of the original Newton algorithm:
  \[
  	u^{(n+1)} := u^{(n)} + \lambda_n \, \delta u^{(n)}
  \]
  where $\lambda_n \in ]0,1]$ and
  $\delta u^{(n)}$ is the direction from the Newton algorithm, given by:
  \[
  	F'\left(u^{(n)}\right)\  \delta u^{(n)} = -F\left(u^{(n)}\right)
  \]
  Let $V$ be a Banach space and
  let $T: V \rightarrow \mathbb{R}$ be defined for any $v\in V$ by:
  \[
  	T(v) = \Frac{1}{2} \|C^{-1}F(v)\|_{V}^2,
  \]
  where $C$ is some non-singular operator, easy to invert,
  used as a non-linear preconditioner.
  The simplest case, without preconditioner, is $C=I$.
  The $T$ function furnishes a measure of the residual term in $L^2$ norm.
  The convergence is global when for any initial $u^{(0)}$, we have for any $n\geq 0$:
  \begin{equation}
  	T\left(u^{(n+1)}\right) 
  	\leq
  	T\left(u^{(n)}\right)
  	+
  	\alpha
  	\left\langle 
               T'\left(u^{(n)}\right)
               ,\ 
               u^{(n+1)}-u^{(n)}
   	\right\rangle_{V',V}
  	\label{eq-backtracking}
  \end{equation}
  where $\langle .,.\rangle_{V',V}$ is the duality product
  between $V$ and its dual $V'$,
  and $\alpha\in ]0,1[$ is a small parameter.
  Note that
  \begin{eqnarray*}
    T'(u) &=& \{C^{-1}F'(u)\}^* C^{-1}F(u)
  \end{eqnarray*}
  where the superscript $^*$ denotes the adjoint operator,
  i.e. the transpose matrix in the finite dimensional case.
  % For a purpose of simplicity, we consider here the finite dimensional case.
  In practice we consider $\alpha=10^{-4}$ and
  we also use a minimal step length $\lambda_{\rm min} = 1/10$ 
  in order to avoid too small steps.
  Let us consider a fixed step $n\geq 0$:
  for convenience the $n$ superscript is dropped
  in $u^{(n)}$ and $\delta u^{(n)}$.
  Let $g: \mathbb{R} \rightarrow \mathbb{R}$ be defined
  for any $\lambda \in  \mathbb{R}$ by:
  \[
  	g(\lambda) = T\left(u+\lambda \delta u\right)
  \]
  Then~:
  \begin{eqnarray*}
  	g'(\lambda)
  	&=& \langle T'(u+\lambda \delta u) ,\, \delta u\rangle_{V',V}
		\\
  	&=& \langle C^{-1}F(u+\lambda \delta u) ,\
		    C^{-1}F'(u+\lambda \delta u)\,\delta u \rangle_{V}
  \end{eqnarray*}
\cindex{operator!adjoint}%
  where the superscript $^*$ denotes the adjoint operator,
  i.e. the transpose matrix in the finite dimensional case.
  The practical algorithm for obtaining $\lambda$
  was introduced first in~\citep{DenSch-1983} and
  is also presented in~\citep[p.~385]{PreTeuVetFla-1997-C-2ed}.
  The step length $\lambda$ that satisfies \eqref{eq-backtracking}
  is computed by using a finite sequence
  $\lambda_k$, $k=0,1\ldots$ with a second order recurrence:
  \begin{itemize}
  \item $k=0$~: initialization $\lambda_0=1$.
  If \eqref{eq-backtracking} is satisfied 
  with $u + \lambda_0 \, d$ then 
  let $\lambda:=\lambda_0$ and the sequence stop here.
  
  \item $k=1$~: first order recursion.
  The quantities $g(0)=T(u)$ and $g'(0)=\langle T'(u),\, d\rangle$
  are already computed at initialization.
  Also, we already have computed $g(1)=T(u+d)$ when
  verifying whether~\eqref{eq-backtracking} was satisfied.
  Thus, we consider the following approximation
  of $g(\lambda)$ by a second order polynomial:
  \[
      \tilde{g}_1(\lambda)
  	=
   	\{ g(1) - g(0) - g'(0)\} \lambda^2
   	+ g'(0) \lambda
   	+ g(0)
  \]
  After a short computation,
  we find that the minimum of this polynomial is:
  \[
  	\tilde{\lambda}_1
  	=
  	\Frac{-g'(0)}{2 \{ g(1) - g(0) - g'(0)\} }
  \]
  Since the initialization at $k=0$ does not satisfy
  \eqref{eq-backtracking}, it is possible to show that,
  when $\alpha$ is small enough, we have $\tilde{\lambda}_1 \leq 1/2$
  and $\tilde{\lambda}_1 \approx 1/2$.
  Let $ \lambda_1 := \max(\lambda_{\rm min},\tilde{\lambda}_1)$.
  If \eqref{eq-backtracking} is satisfied 
  with $u + \lambda_1 \, d$ then 
  let $\lambda:=\lambda_1$ and the sequence stop here.
  
  \item $k\geq 2$~: second order recurrence.
  The quantities $g(0)=T(u)$ and $g'(0)=\langle T'(u),\, d\rangle$
  are available, together with
  $\lambda_{k-1}$, $g(\lambda_{k-1})$,
  $\lambda_{k-2}$ and $g(\lambda_{k-2})$.
  Then, $g(\lambda)$ is approximated by the following third order
  polynomial:
  \[
      \tilde{g}_{k}(\lambda)
  	=
   	  a \lambda^3
   	+ b \lambda^2
   	+ g'(0) \lambda
   	+ g(0)
  \]
  where $a$ and $b$ are expressed by:
  \[
  	\left( \begin{array}{c}
  		a
  		\\
  		b
  	\end{array} \right)
  	=
  	\Frac{1}{\lambda_{k-1}- \lambda_{k-2}}
  	\left( \begin{array}{cc}
  		  \Frac{1}{\lambda_{k-1}^2} &
  		- \Frac{1}{\lambda_{k-2}^2} \\
  		- \Frac{\lambda_{k-2}}{\lambda_{k-1}^2} &
  		  \Frac{\lambda_{k-1}}{\lambda_{k-2}^2}
  	\end{array} \right)
  	\left( \begin{array}{c}
   		g(\lambda_{k-1}) - g'(0) \lambda_{k-1} - g(0)
  		\\
   		g(\lambda_{k-2}) - g'(0) \lambda_{k-2} - g(0)
  	\end{array} \right)
  \]
  The minimum of $\tilde{g}_{k}(\lambda)$ is
  \[
  	\tilde{\lambda}_k
  	=
  	\Frac{-b + \sqrt{b^2 - 3ag'(0)}}{3a}
  \]
  Let $\lambda_k := \min\left(\lambda_{k-1}/2,\ \max\left(\lambda_{k-1}/10,\ \tilde{\lambda}_{k}\right)\right)$
  in order for $\lambda_k$ to be 
  at the same order of magnitude as $\lambda_{k-1}$.
  If \eqref{eq-backtracking} is satisfied 
  with $u + \lambda_k \, d$ then 
  let $\lambda:=\lambda_k$ and the sequence stop here.
  \end{itemize}

The sequence $(\lambda_k)_{k\geq 0}$ is strictly
decreasing: when the stopping criterion is not satisfied
until $\lambda_k$ reaches the machine precision $\varepsilon_{\rm mach}$
then the algorithm stops with an error.

% ---------------------------------------
\myexamplelicense{p_laplacian_damped_newton.cc}
% ---------------------------------------

\subsubsection*{Comments}
\findex{damped_newton}%
  The \code{damped_newton} function implements the
  damped Newton algorithm for a generic $T(u)$ function,
  i.e. a generic nonlinear preconditioner.
  This algorithm uses a backtrack strategy implemented 
  in the file \file{newton-backtrack.h} of the \Rheolef\  library.
  The simplest choice of the identity preconditioner $C=I$,
  i.e. $T(u) = \|F(u)\|^2_{V'}/2$,
  is shown in the file~\code{damped-newton.h}.
  The gradient at $\lambda=0$ is
  \begin{eqnarray*}
  	T'(u) = F'(u)^*F(u)
  \end{eqnarray*}
  and the slope at $\lambda=0$ is:
  \begin{eqnarray*}
  	g'(0)
  	&=& \langle T'(u) ,\, \delta u\rangle_{V',V}
		\\
  	&=& \langle F(u) ,\
		    F'(u)\delta u \rangle_{V',V'}
		\\
  	&=& -\| F(u) \|_{V'}^2
  \end{eqnarray*}
  The \reffile{p_laplacian_damped_newton.cc}
  is the application program to the $p$-Laplacian problem
  together with the $\|.\|_{L^2(\Omega)}$ discrete norm for the function $T$.

%ICI
\subsubsection*{Running the program}
  \begin{figure}[htb]
    %\begin{center}
       \mbox{}\hspace{-1cm}
       \begin{tabular}{ccc}
          \includegraphics[height=6.0cm]{p-laplacian-square-p=1,15-elevation.png} &
          \includegraphics[height=6.0cm]{p-laplacian-square-p=7-elevation.png} &
          \includegraphics[width=1cm]{lunettes-stereo.png}
       \end{tabular}
    %\end{center}
    \caption{The $p$-Laplacian for $d=2$:
	elevation view for $p=1.15$ (left) and $p=7$ (right).}
    \label{fig-p-laplacian-cont}
  \end{figure}
  As usual, enter:
\begin{verbatim}
  make p_laplacian_damped_newton
  mkgeo_ugrid -t 50 > square.geo
  ./p_laplacian_damped_newton square.geo P1 1.15 | field -stereo -elevation -
  ./p_laplacian_damped_newton square.geo P1 7    | field -stereo -elevation -
\end{verbatim}
  See Fig.~\ref{fig-p-laplacian-cont} for the elevation view of the solution.
  The algorithm is now quite robust:
  the convergence occurs for a quite large range of $p>1$ values
  and extends the range previously presented on Fig.~\ref{fig-p-laplacian}.
  The only limitation is now due to machine roundoff on some architectures.

  \begin{figure}[htb]
     %\begin{center}
       \begin{tabular}{cc}
          \includegraphics[height=7cm]{p-laplacian-damped-newton-p=1,5-n.pdf} &
          \includegraphics[height=7cm]{p-laplacian-damped-newton-p=1,5-Pk.pdf} \\
          \includegraphics[height=7cm]{p-laplacian-damped-newton-n=50-P1-p1.pdf} &
          \includegraphics[height=7cm]{p-laplacian-damped-newton-n=50-P1-p2.pdf} 
       \end{tabular}
    %\end{center}
     \caption{The damped Newton algorithm on the $p$-Laplacian for $d=2$:
	when $p=1.5$ and $h=1/50$,
        convergence properties of the residue
		(top-left) with mesh refinement;
		(top-right) with polynomial order $P_k$;
	(bottom-left) convergence when $p<2$;
	(bottom-right) when $p>2$.}
     \label{fig-p-laplacian-damped-newton-rate}
  \end{figure}
Figs.~\ref{fig-p-laplacian-damped-newton-rate}.top show
that the convergence properties seem to slightly 
depend on the mesh refinement. Nevertheless, they are quite good and support
both mesh refinement and high order polynomial degree.
When $p$ is far from $p=2$, i.e. either close to one or large,
Figs.~\ref{fig-p-laplacian-damped-newton-rate}.bottom shows
that the convergence becomes slower and that the first linear regime,
corresponding to the line search, becomes longer. This first regime
finishes by a brutal super-linear regime, where the residual terms
fall in few iterations to the machine precision.

% ---------------------------------- 
\subsection{Error analysis}
% ---------------------------------- 
\cindex{error analysis}%
\cindex{convergence!error!versus mesh}%
\cindex{convergence!error!versus polynomial degree}%
  \begin{figure}[htb]
    %\begin{center}
       \mbox{}\hspace{-1cm}
       \begin{tabular}{cc}
          \includegraphics[height=7.0cm]{p-laplacian-fixed-point-p=1,5-err-lp.pdf} &
          \includegraphics[height=7.0cm]{p-laplacian-fixed-point-p=1,5-err-linf.pdf}  \\
          \includegraphics[height=7.0cm]{p-laplacian-fixed-point-p=1,5-err-w1p.pdf} &
       \end{tabular}
    %\end{center}
    \caption{The $p$-Laplacian for $d=2$:
	error analysis.
    }
    \label{fig-p-laplacian-err}
  \end{figure}

While there is no simple explicit expression
for the exact solution in the square $\Omega=]0,1[^2$,
there is one when considering $\Omega$ as the unit circle:
\[
    u(x) = 
	\Frac{(p-1)\ 2^{-\frac{1}{p-1}}}
             {p}
	\left(
	  1
	  -
          \left( x_0^2+x_1^2 \right)^\frac{p}{2(p-1)}
        \right)
\]
% ---------------------------------------
\myexamplelicense{p_laplacian_circle.h}
% ---------------------------------------

% ---------------------------------------
\myexamplelicense{p_laplacian_error.cc}
% ---------------------------------------
\findex{integrate}%
\cindex{functor}%
\clindex{integrate_option}%
Note, in the file \reffile{p_laplacian_error.cc},
the usage of the \code{integrate} function,
together with a quadrature formula specification,
for computing the errors in $L^p$ norm and $W^{1,p}$ semi-norm.
Note also the flexibility of expressions, mixing together \code{field}s
as \code{uh} and functors, as \code{u_exact}.
The whole expression is evaluated by the \code{integrate}
function at quadrature points inside each element of the mesh.

By this way, the error analysis investigation becomes easy:
\begin{verbatim}
  make p_laplacian_error
  mkgeo_ball -t 10 -order 2 > circle-10-P2.geo
  ./p_laplacian_damped_newton circle-10-P2.geo P2 1.5 | ./p_laplacian_error
\end{verbatim}
We can vary both the mesh size and the polynomial order
and the error plots are showed on Fig.~\ref{fig-p-laplacian-err}
for both the $L^2$, $L^\infty$ norms and the $W^{1,p}$ semi-norm.
Observe the optimal error behavior:
the slopes in the log-log scale are the same as those obtained
by a direct Lagrange interpolation of the exact solution.

%% % ----------------------------------------------------
%% \subsection{The affine-invariant damped Newton algorithm}
%% % ----------------------------------------------------
%% \subsubsection*{Principe of the algorithm}
%% \cindex{method!affine-invariant damped Newton}%
%%   
%%   The so-called {\em natural monotonicity criterion}~\citep{Deu-2004}
%%   corresponds to the choice of the nonlinear preconditioner
%%   $C=F'\left(u^{(n)}\right)$ at iteration $n$.
%%   Remark that $T$ now depends also upon the iteration $n$,
%%   i.e. for any $u$:
%%   \begin{eqnarray*}
%% 	T_n(u) 
%% 	&=& \Frac{1}{2} \left\|F'\left(u^{(n)}\right)^{-1}F(u)\right\|^2_{V}
%% 		\\
%% 	&=& \Frac{1}{2} \left\|\,\overline{\delta u}\,\right\|^2_{V}
%%   \end{eqnarray*}
%%   where $\overline{\delta u}$ satisfies:
%%   \[
%% 	F'\left(u^{(n)}\right)
%% 	\overline{\delta u}
%% 	=
%% 	F(u)
%%   \]
%%   Remark that at $u=u^{(n)}$:
%%   \begin{eqnarray*}
%% 	T_n\left(u^{(n)}\right)
%% 	&=& \Frac{1}{2} \left\|F'\left(u^{(n)}\right)^{-1}F\left(u^{(n)}\right)\right\|^2_{V}
%% 		\\
%%  	&=&  \Frac{1}{2} \left\| \delta u^{(n)} \right\|^2_{V}
%%   \end{eqnarray*}
%%   The gradient at $u=u^{(n)}$ is:
%%   \begin{eqnarray*}
%% 	T_n'\left(u^{(n)}\right)
%% 	&=& -\delta u^{(n)}
%%   \end{eqnarray*}
%%   and the slope at $\lambda=0$ is:
%%   \begin{eqnarray*}
%%   	g_n'(0)
%%   	&=& -\| \delta u^{(n)} \|_{V}^2
%%   \end{eqnarray*}
%%   The file \reffile{deuflhard-newton.h} implements this preconditioner
%%   while the \reffile{p-laplacian-deuflhard-newton.cc}
%%   is the application program to the $p$-Laplacian problem
%%   together with the $\|.\|_{L^2(\Omega)}$ discrete norm for the function $T$.
%% 
%% \myexamplelicense{deuflhard-newton.h}
%% \myexamplelicense{p-laplacian-deuflhard-newton.cc}
%% 
%% \subsubsection*{Running the program}
%%   The results for $p=1.2$ or $p=1.1$ are still worst than for
%%   the unpreconditioned damped Newton method.
%%   Thus, these results are not presented here.
%% 
%% % --------------------------
%% \subsection{Mesh adaptation}
%% % --------------------------
%%   Let us now turn to the adaptive mesh procedure.
%% 
%% \myexamplelicense{p-laplacian-damped-newton-adapt.cc}
%% 
%% % --------------------------
%% \subsection{Conclusion}
%% % --------------------------
%%   Positive points:
%%   \begin{itemize}
%%   \item super-linear convergence~: very fast
%%   \item mesh invariance~: large meshes could be used
%%   \end{itemize}
%%   Limitation:
%%   \begin{itemize}
%%   \item with P2 elements, convergence is very bad for $p<1.5$
%%   \item Deuflhard preconditioner is worst than no preconditioning:
%%   \begin{verbatim}
%%     ./p-laplacian-deuflhard-newton square-10 P2 1.5
%%   \end{verbatim}
%%   while the corresponding unpreconditioning version
%%   \begin{verbatim}
%%     ./p-laplacian-damped-newton square-10 P2 1.5
%%   \end{verbatim}
%%   is convergent.
%%   Since both methods diverges for P2 and $p=1.6$, it is perhaps a rounding
%%   problem with the \code{pow} function. Trying with high precision floats~?
%%   \end{itemize}