File: gnmOverview.Rnw

%\VignetteIndexEntry{Generalized nonlinear models in R: An overview of the gnm package}
%\VignetteKeywords{Generalized Nonlinear Models}
%\VignettePackage{gnm}

\documentclass[a4paper]{article}

\usepackage[english]{babel} % to avoid et~al with texi2pdf
\usepackage{Sweave}
%\usepackage{alltt} % now replaced by environments Sinput, Soutput, Scode
\usepackage{amsmath}
%\usepackage{times}
%\usepackage[scaled]{courier}
\usepackage{txfonts} % Times, with Belleek math font and txtt for monospaced
\usepackage[scaled=0.92]{helvet}
%\usepackage[T1]{fontenc}
%\usepackage[expert,altbullet,lucidasmallerscale]{lucidabr}
\usepackage{booktabs}
\usepackage[round,authoryear]{natbib}
\usepackage[left=2cm,top=2.5cm,nohead]{geometry}
\usepackage{hyperref}
\usepackage{array} % for paragraph columns in tables
%\usepackage{moreverb}

\setkeys{Gin}{width=0.6\textwidth}


%% The next few definitions from "Writing Vignettes for Bioconductor Packages"
%% by R Gentleman
\newcommand{\Robject}[1]{{\emph{\texttt{#1}}}}
\newcommand{\Rfunction}[1]{{\emph{\texttt{#1}}}}
\newcommand{\Rcode}[1]{{\emph{\texttt{#1}}}}
\newcommand{\Rpackage}[1]{{\textsf{#1}}}
\newcommand{\Rclass}[1]{{\emph{#1}}}
\newcommand{\Rmethod}[1]{{\emph{\texttt{#1}}}}
\newcommand{\Rfunarg}[1]{{\emph{\texttt{#1}}}}

\newcommand{\R}{\textsf{R}}

\newcommand\twiddle{{\char'176}}

%\setlength{\oddsidemargin}{0.5in}
%\setlength{\evensidemargin}{0.5in}
%\setlength{\textwidth}{5.5in}

\setlength{\itemindent}{1cm}

\title{Generalized nonlinear models in \R: An overview of the
\Rpackage{gnm} package}

\author{Heather Turner and David Firth\footnote{
This work was supported by the Economic and Social Research Council (UK)
through Professorial Fellowship RES-051-27-0055.}\\
\emph{University of Warwick, UK}
}

\date{For \Rpackage{gnm} version \Sexpr{packageDescription("gnm")[["Version"]]}, \Sexpr{Sys.Date()}}

\begin{document}
\maketitle

{\small
 \tableofcontents
}

<<echo=false,results=hide>>=
options(SweaveHooks = list(eval = function() options(show.signif.stars = FALSE)))
@

\section{Introduction}

The \Rpackage{gnm} package provides facilities for fitting
\emph{generalized nonlinear models}, i.e., regression models in which the
link-transformed mean is described as a sum of predictor terms, some of
which may be nonlinear in the unknown parameters.  Linear and generalized
linear models,
as handled by the \Rfunction{lm} and \Rfunction{glm} functions in \R, are
included in the class
of generalized nonlinear models, as the special case in which there is no
nonlinear term.

This document gives an extended overview of the \Rpackage{gnm} package, with
some examples of applications.  The primary package documentation in the
form of standard help pages, as viewed in \R\ by, for example, \Rcode{?gnm} or
\Rcode{help(gnm)}, is supplemented rather than replaced by the present
document.

We begin below with a preliminary note (Section \ref{sec:glms}) on some
ways in which
the \Rpackage{gnm} package extends \R's facilities for specifying, fitting
and working with generalized \emph{linear} models.  Then (Section
\ref{sec:nonlinear} onwards) the facilities for nonlinear terms are introduced,
explained and exemplified.

The \Rpackage{gnm} package is installed in the standard way for CRAN packages,
for example by using \Rfunction{install.packages}.  Once installed, the
package is loaded into an \R\ session by
<<Load_gnm>>=
library(gnm)
@

\section{Generalized linear models}
\label{sec:glms}

\subsection{Preamble}

Central to the facilities provided by the \Rpackage{gnm} package is the
model-fitting function \Rfunction{gnm}, which interprets a model formula
and returns a model object.  The user interface of \Rfunction{gnm} is patterned
after \Rfunction{glm} (which is included in \R's standard \Rpackage{stats}
package), and indeed
\Rfunction{gnm} can be viewed as a replacement for
\Rfunction{glm} for specifying and fitting generalized linear models.
In general there is no reason to prefer \Rfunction{gnm} to \Rfunction{glm} for
fitting generalized linear models, except perhaps when the model involves a
large number of incidental parameters which are treatable
by \Rfunction{gnm}'s \emph{eliminate} mechanism (see Section \ref{sec:eliminate}).

While the main purpose of the \Rpackage{gnm} package is to extend the class of
models to include nonlinear terms, some of the new functions and methods
can be used also with the familiar \Rfunction{lm} and \Rfunction{glm}
model-fitting functions.  These are: three new data-manipulation functions
\Rfunction{Diag}, \Rfunction{Symm} and \Rfunction{Topo}, for setting up structured interactions between factors; a new \Rclass{family} function,
\Rfunction{wedderburn},
for modelling a continuous
response variable in $[0,1]$ with the variance function
$V(\mu) = \mu^2(1-\mu)^2$ as in \citet{Wedd74};
and a new generic function \Rfunction{termPredictors} which
extracts the contribution of each term to the predictor from a fitted model
object.  These functions are briefly introduced here, before we move on to
the main purpose of the package,
nonlinear models, in Section \ref{sec:nonlinear}.

\subsection{\Rfunction{Diag} and \Rfunction{Symm}}

When dealing with \emph{homologous} factors, that is, categorical variables
whose levels are the same, statistical models often involve structured
interaction terms which exploit the inherent symmetry.  The functions
\Rfunction{Diag} and \Rfunction{Symm} facilitate the specification of such
structured interactions.

As a simple example of their use, consider the log-linear models of
\emph{quasi-independence}, \emph{quasi-symmetry} and \emph{symmetry}
for a square contingency table.  \citet{Agre02}, Section 10.4, gives data on
migration between regions of the USA between 1980 and 1985:
<<migrationData>>=
count <- c(11607,   100,   366,   124,
              87, 13677,   515,   302,
             172,   225, 17819,   270,
              63,   176,   286, 10192 )
region <- c("NE", "MW", "S", "W")
row <-  gl(4, 4, labels = region)
col <-  gl(4, 1, length = 16, labels = region)
@
The comparison of models reported by Agresti can be achieved as follows:
<<squareTableModels>>=
independence <- glm(count ~ row + col, family = poisson)
quasi.indep <- glm(count ~ row + col + Diag(row, col), family = poisson)
symmetry <- glm(count ~ Symm(row, col), family = poisson)
quasi.symm <- glm(count ~ row + col + Symm(row, col), family = poisson)
comparison1 <- anova(independence, quasi.indep, quasi.symm)
print(comparison1, digits = 7)
comparison2 <- anova(symmetry, quasi.symm)
print(comparison2)
@

The \Rfunction{Diag} and \Rfunction{Symm} functions also generalize the notions
of diagonal and symmetric interaction to cover situations involving more than
two homologous factors.
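
For example, with three hypothetical homologous factors \Robject{F1},
\Robject{F2} and \Robject{F3}, diagonal and fully symmetric interactions
might be specified as
\begin{Scode}
## common diagonal effects for observations with F1 = F2 = F3
count ~ F1 + F2 + F3 + Diag(F1, F2, F3)
## fully symmetric three-way interaction
count ~ Symm(F1, F2, F3)
\end{Scode}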

\subsection{\Rfunction{Topo}}

More general structured interactions than those provided by \Rfunction{Diag}
and \Rfunction{Symm} can be specified using the function \Rfunction{Topo}.
(The name of this function is short for `topological interaction',
which is the nomenclature
often used in sociology for factor interactions with structure derived from
subject-matter theory.)

The \Rfunction{Topo} function operates on any number ($k$, say)
of input factors, and
requires an argument named \Rfunarg{spec} which must be an array of
dimension $L_1 \times \ldots \times L_k$, where $L_i$ is the number of
levels for the $i$th factor.  The \Rfunarg{spec} argument specifies
the interaction level corresponding to every possible combination of
the input factors, and the result is a new factor representing the specified
interaction.

As an example, consider fitting the `log-multiplicative layer effects' models
described in \citet{Xie92}.  The data are 7 by 7 versions of social mobility
tables from \citet{Erik82}:
<<EriksonData>>=
### Collapse to 7 by 7 table as in Erikson et al. (1982)
erikson <- as.data.frame(erikson)
lvl <- levels(erikson$origin)
levels(erikson$origin) <- levels(erikson$destination) <-
    c(rep(paste(lvl[1:2], collapse = " + "), 2), lvl[3],
      rep(paste(lvl[4:5], collapse = " + "), 2), lvl[6:9])
erikson <- xtabs(Freq ~ origin + destination + country, data = erikson)
@
From sociological theory --- for which see \citet{Erik82} or \citet{Xie92} ---
the log-linear interaction between origin and destination is assumed to have
a particular structure:
\begin{Sinput}
> levelMatrix <- matrix(c(2, 3, 4, 6, 5, 6, 6,
+                         3, 3, 4, 6, 4, 5, 6,
+                         4, 4, 2, 5, 5, 5, 5,
+                         6, 6, 5, 1, 6, 5, 2,
+                         4, 4, 5, 6, 3, 4, 5,
+                         5, 4, 5, 5, 3, 3, 5,
+                         6, 6, 5, 3, 5, 4, 1), 7, 7, byrow = TRUE)
\end{Sinput}
The models of table 3 of \citet{Xie92} can now be fitted as follows:
\begin{Sinput}
> ## Null association between origin and destination
> nullModel <- gnm(Freq ~ country:origin + country:destination,
+                  family = poisson, data = erikson, verbose = FALSE)
>
> ## Interaction specified by levelMatrix, common to all countries
> commonTopo <- update(nullModel, ~ . +
+                      Topo(origin, destination, spec = levelMatrix),
+                      verbose = FALSE)
>
> ## Interaction specified by levelMatrix, different multiplier for each country
> multTopo <- update(nullModel, ~ . +
+                    Mult(Exp(country), Topo(origin, destination, spec = levelMatrix)),
+                    verbose = FALSE)
>
> ## Interaction specified by levelMatrix, different effects for each country
> separateTopo <- update(nullModel, ~ . +
+                        country:Topo(origin, destination, spec = levelMatrix),
+                        verbose = FALSE)
>
> anova(nullModel, commonTopo, multTopo, separateTopo)
\end{Sinput}
\begin{Soutput}
Analysis of Deviance Table

Model 1: Freq ~ country:origin + country:destination
Model 2: Freq ~ Topo(origin, destination, spec = levelMatrix) + country:origin +
    country:destination
Model 3: Freq ~ Mult(country, Topo(origin, destination, spec = levelMatrix)) +
    country:origin + country:destination
Model 4: Freq ~ country:origin + country:destination + country:Topo(origin,
    destination, spec = levelMatrix)
  Resid. Df Resid. Dev  Df Deviance
1       108     4860.0
2       103      244.3   5   4615.7
3       101      216.4   2     28.0
4        93      208.5   8      7.9
\end{Soutput}
Here we have used \Rfunction{gnm} to fit all of these log-link models; the
first, second and fourth are log-linear and could equally well have been fitted
using \Rfunction{glm}.

\subsection{The \Rfunction{wedderburn} family}

In \citet{Wedd74} it was suggested to represent the mean of
a continuous response variable in
$[0,1]$ using a quasi-likelihood model with logit link and the
variance function $\mu^2(1-\mu)^2$.  This is not one of the variance
functions made available as standard in \R's \Rfunction{quasi} family.  The
\Rfunction{wedderburn} family provides it.  As an example, Wedderburn's
analysis of data on leaf blotch on barley can be reproduced as follows:
<<wedderburn>>=
##  data from Wedderburn (1974), see ?barley
logitModel <- glm(y ~ site + variety, family = wedderburn, data = barley)
fit <- fitted(logitModel)
print(sum((barley$y - fit)^2 / (fit * (1-fit))^2))
@
This agrees with the chi-squared value reported on page 331 of \citet{McCu89},
which differs slightly from Wedderburn's own reported value.

\subsection{\Rfunction{termPredictors}}
\label{sec:termPredictors}

The generic function \Rfunction{termPredictors} extracts a term-by-term
decomposition of the predictor function in a linear, generalized linear or
generalized nonlinear model.

As an illustrative example, we can decompose the linear predictor in the above
quasi-symmetry model as follows:
<<termPredictors>>=
print(temp <- termPredictors(quasi.symm))
rowSums(temp) - quasi.symm$linear.predictors
@

Such a decomposition might be useful, for example, in assessing the relative
contributions of different terms or groups of terms.

\section{Nonlinear terms}
\label{sec:nonlinear}

The main purpose of the \Rpackage{gnm} package is to provide
a flexible framework for the specification and
estimation of generalized models with nonlinear terms. The facility provided
with \Rfunction{gnm} for the specification of nonlinear terms is designed to be
compatible with the symbolic language used in \Rclass{formula}
objects. Primarily, nonlinear terms are specified in the model formula as calls
to functions of the class \Rclass{nonlin}.
There are a number of \Rclass{nonlin} functions included in the
\Rpackage{gnm} package. Some of these specify simple mathematical functions of
predictors: \Rfunction{Exp}, \Rfunction{Mult}, and \Rfunction{Inv}.
%\Rfunction{Log}, \Rfunction{Raise} (to raise to a constant power), and \Rfunction{Logit}.
Others
specify more specialized nonlinear terms, in particular \Rfunction{MultHomog}
specifies homogeneous multiplicative interactions and \Rfunction{Dref} specifies
diagonal reference terms. Users may also define their own \Rclass{nonlin}
functions.

\subsection{Basic mathematical functions of predictors}
\label{sec:Basic}

Most of the \Rclass{nonlin} functions included in \Rpackage{gnm} are basic
mathematical functions of predictors:
\begin{description}
\setlength{\itemindent}{-0.5cm}
\item[\Rfunction{Exp}:] the exponential of a predictor
\item[\Rfunction{Inv}:] the reciprocal of a predictor
%\item[\Rfunction{Log}:] the natural logarithm of a predictor
%\item[\Rfunction{Logit}:] the logit of a predictor
\item[\Rfunction{Mult}:] the product of predictors
%\item[\Rfunction{Raise}:] a predictor raised to a constant power
\end{description}
Predictors are specified by symbolic expressions that are interpreted as the
right-hand side of a \Rclass{formula} object, except that an intercept is
\textbf{not} added by default.

The predictors may contain nonlinear terms, allowing more complex functions to
be built up. For example, suppose we wanted to specify a logistic predictor with
the same form as that used by \Rfunction{SSlogis} (a
  selfStart model for use with \Rfunction{nls} --- see
  section~\ref{sec:gnmVnls} for
  more on \Rfunction{gnm} vs.\ \Rfunction{nls}):
\[\frac{\text{Asym}}{1 + \exp((\text{xmid} - x)/\text{scal})}.\]
This expression could be simplified by re-parameterizing in terms of xmid/scal
and 1/scal; however, we shall continue with this form for illustration.
We could express this predictor symbolically as follows
\begin{Scode}
~ -1 + Mult(1, Inv(Const(1) + Exp(Mult(1 + offset(-x), Inv(1)))))
\end{Scode}
where \Rfunction{Const} is a convenience function to specify a constant in a
\Rclass{nonlin} term, equivalent to \Rcode{offset(rep(1, nObs))} where
\Robject{nObs} is the number of observations. However, this is rather convoluted
and it may be preferable to define a specialized \Rclass{nonlin} function in
such a case. Section \ref{sec:nonlin.functions} explains how users can define
custom \Rclass{nonlin} functions, with a function to specify logistic terms as
an example.

One family of models usefully specified with the basic functions is the
family of models with multiplicative interactions. For example, the row-column
association model
\[
\log \mu_{rc} = \alpha_r + \beta_c + \gamma_r\delta_c,
\]
also known as the Goodman RC model \citep{Good79}, would be specified
as a log-link model (for response variable \Robject{resp}, say), with formula
\begin{Scode}
resp ~ R + C + Mult(R, C)
\end{Scode}
where \Robject{R} and \Robject{C} are row and column factors respectively. In
some contexts, it may be desirable to constrain one or more of the constituent
multipliers\footnote{
A note on terminology: the rather cumbersome phrase `constituent multiplier', or
sometimes the abbreviation `multiplier', will
be used throughout this document in preference to the more elegant and standard
mathematical term `factor'.  This will avoid possible confusion with the
completely different meaning of the word `factor' --- that is, a categorical
variable --- in \R.
} in a multiplicative interaction to be nonnegative.  This may be achieved by
specifying the multiplier as an exponential, as in the following `uniform
difference' model \citep{Xie92, Erik92}
\[
\log \mu_{rct} = \alpha_{rt} + \beta_{ct} + e^{\gamma_t}\delta_{rc},
\]
which would be represented by a formula of the form
\begin{Scode}
resp ~ R:T + C:T + Mult(Exp(T), R:C)
\end{Scode}
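
For illustration, a complete call for this uniform difference model might take
the following form, where the data frame \Robject{tab} and the factors
\Robject{R}, \Robject{C} and \Robject{T} are hypothetical:
\begin{Scode}
unidiff <- gnm(resp ~ R:T + C:T + Mult(Exp(T), R:C),
               family = poisson, data = tab)
\end{Scode}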

\subsection{\Rfunction{MultHomog}}

\Rfunction{MultHomog} is a \Rclass{nonlin} function to specify
multiplicative interaction terms in which the constituent
multipliers are the effects of two or more factors and the effects of these
factors are constrained to be equal when the factor levels are equal. The
arguments of \Rfunction{MultHomog} are the factors in the interaction,
which are
assumed to be objects of class \Rclass{factor}.

As an example, consider the following association model with homogeneous
row-column effects:
\[\log \mu_{rc} = \alpha_r + \beta_c + \theta_{r}I(r=c) + \gamma_r\gamma_c.\]
To fit this model, with response variable named \Robject{resp}, say,
the formula argument to \Rfunction{gnm} would be
\begin{Scode}
resp ~ R + C + Diag(R, C) + MultHomog(R, C)
\end{Scode}

If the factors passed to \Rfunction{MultHomog} do not have
exactly the same levels,
a common set of levels is obtained by taking the union of the levels of each
factor, sorted into increasing order.
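
As a concrete sketch, this model is fitted to the
\Robject{occupationalStatus} data in Section \ref{sec:constrain}, using a call
of the form
\begin{Scode}
gnm(Freq ~ origin + destination + Diag(origin, destination) +
    MultHomog(origin, destination), family = poisson,
    data = occupationalStatus)
\end{Scode}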

\subsection{\Rfunction{Dref}}
\label{sec:Dref function}

\Rfunction{Dref} is a \Rclass{nonlin} function to fit diagonal reference terms
 \citep{Sobe81, Sobe85} involving
two or more factors with a common set of levels. A diagonal reference term
comprises an additive component for each factor. The component for factor $f$
is given by
\[
w_f\gamma_l
\]
for an observation with level $l$ of factor $f$, where $w_f$ is the weight for
factor $f$ and $\gamma_l$ is the ``diagonal effect'' for level $l$.

The weights are constrained to be nonnegative and to sum to one so that a
``diagonal effect'', say $\gamma_l$, is the value of the diagonal reference term
for data points with level $l$ across the factors. \Rfunction{Dref} specifies
the constraints on the weights by defining them as
\[
w_f = \frac{e^{\delta_f}}{\sum_i e^{\delta_i}}
\]
where the $\delta_f$ are the parameters to be estimated.

Factors defining the diagonal reference term
are passed as unspecified arguments to
\Rfunction{Dref}.  For example, the following diagonal reference model for
a contingency table classified by the row factor \Robject{R}
and the column factor \Robject{C},
\[
\mu_{rc} =\frac{e^{\delta_1}}{e^{\delta_1} + e^{\delta_2}}\gamma_r +
\frac{e^{\delta_2}}{e^{\delta_1} + e^{\delta_2}}\gamma_c,
\]
would be specified by a formula of the form
\begin{Scode}
resp ~ -1 + Dref(R, C)
\end{Scode}

The \Rfunction{Dref} function has one specified argument, \Rfunarg{delta},
which is a formula with no left-hand side, specifying the dependence (if any)
of $\delta_f$ on  covariates.
For example, the formula
\begin{Scode}
resp ~ -1 + x + Dref(R, C, delta = ~ 1 + x)
\end{Scode}
specifies the generalized diagonal reference model
\[
\mu_{rci} = \beta x_i + \frac{e^{\xi_{01} + \xi_{11}x_i}}{e^{\xi_{01} + \xi_{11}x_i} + e^{\xi_{02} + \xi_{12}x_i}}\gamma_r +
\frac{e^{\xi_{02} + \xi_{12}x_i}}{e^{\xi_{01} + \xi_{11}x_i} + e^{\xi_{02} + \xi_{12}x_i}}\gamma_c.
\]
The default value of \Rfunarg{delta} is \Robject{\twiddle 1}, so that
constant weights are estimated. The coefficients returned by \Rfunction{gnm} are
those that are directly estimated, i.e. the $\delta_f$ or the $\xi_{.f}$, rather
than the implied weights $w_f$. However, these weights may be obtained from a
fitted model using the \Rfunction{DrefWeights} function, which computes the
corresponding standard errors using the delta method.
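
As a minimal sketch, assuming a hypothetical response \Robject{resp} and
factors \Robject{R} and \Robject{C}, the constant-weight model could be fitted
and its weights extracted as follows:
\begin{Scode}
drefModel <- gnm(resp ~ -1 + Dref(R, C))
DrefWeights(drefModel)
\end{Scode}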

\subsection{\Rfunction{instances}}
\label{sec:instances}

Multiple instances of a linear term will be aliased with each other, but this is
not necessarily the case for nonlinear terms. Indeed, there are certain types of
model where adding further instances of a nonlinear term is a natural way to extend
the model. For example, Goodman's RC model, introduced in section \ref{sec:Basic}
\[
\log \mu_{rc} = \alpha_r + \beta_c + \gamma_r\delta_c,
\]
is naturally extended to the RC(2) model, with a two-component interaction
\[
\log \mu_{rc} = \alpha_r + \beta_c + \gamma_r\delta_c + \theta_r\phi_c.
\]

Currently all of the \Rclass{nonlin} functions in \Rpackage{gnm} except
\Rfunction{Dref} have an \Rfunarg{inst} argument to allow the specification
of multiple instances. So the
RC(2) model could be specified as follows
\begin{Scode}
resp ~ R + C + Mult(R, C, inst = 1) + Mult(R, C, inst = 2)
\end{Scode}
The convenience function \Rfunction{instances} allows multiple instances of a
term to be specified at once
\begin{Scode}
resp ~ R + C + instances(Mult(R, C), 2)
\end{Scode}
The formula is expanded by \Rfunction{gnm}, so that the instances are treated as
separate terms. The \Rfunction{instances} function may be used with any function
with an \Rfunarg{inst} argument.
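
For instance, since \Rfunction{MultHomog} also has an \Rfunarg{inst} argument,
a two-component homogeneous interaction could be sketched as
\begin{Scode}
resp ~ R + C + instances(MultHomog(R, C), 2)
\end{Scode}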

\subsection{Custom \Rclass{nonlin} functions}
\label{sec:nonlin.functions}

\subsubsection{General description}

Users may write their own \Rclass{nonlin} functions to specify nonlinear terms
which can not (easily) be specified using the \Rclass{nonlin} functions in the
\Rpackage{gnm} package. A function of class \Rclass{nonlin} should return a list
of arguments for the internal function \Rfunction{nonlinTerms}. The following
arguments must be specified in all cases:
\begin{description}
\setlength{\itemindent}{-0.5cm}
\item[\Robject{predictors}:] a list of symbolic expressions or formulae with no
    left hand side which represent (possibly nonlinear)
    predictors that form part of the term.
\item[\Robject{term}:] a function that takes the arguments \Rfunarg{predLabels}
    and \Rfunarg{varLabels}, which are labels generated by \Rfunction{gnm} for
    the specified predictors and variables (see below), and returns a deparsed
    mathematical expression of the nonlinear term.  Only functions recognised by
    \Rfunction{deriv} should be used in the expression, e.g. \Rfunction{+}
    rather than \Rfunction{sum}.
\end{description}
If predictors are named, these names are used as a prefix for parameter labels
or as the parameter label itself in the single-parameter case.

The following arguments of \Rfunction{nonlinTerms} must be specified whenever
applicable to the nonlinear term:
\begin{description}
\setlength{\itemindent}{-0.5cm}
\item[\Robject{variables}:] a list of expressions representing variables in the term
    (variables with a coefficient of 1).
\item[\Robject{common}:] a numeric index of \Rfunarg{predictors} with
    duplicated indices identifying single factor predictors for which
    homologous effects are to be estimated.
\end{description}
The arguments below are optional:
\begin{description}
\setlength{\itemindent}{-0.5cm}
\item[\Robject{call}:] a call to be used as a prefix for parameter labels.
\item[\Robject{match}:] (if \Robject{call} is non-\Rcode{NULL}) a numeric index
    of \Robject{predictors} specifying which arguments of \Robject{call} the
    predictors match to --- zero indicating no match. If \Rcode{NULL},
    predictors will not be matched to the arguments of \Robject{call}.
\item[\Robject{start}:] a function which takes a named vector of parameters
    corresponding to the predictors and returns a vector of
    starting values for those parameters. This function is ignored if the
    term is nested within another nonlinear term.
\end{description}

Predictors which are matched to a specified argument of \Robject{call} should be
given the same name as the argument. Matched predictors are labelled using ``dot-style''
labelling, e.g. the label for the intercept in the first
constituent multiplier of the term \Rcode{Mult(A, B)} would be \Rcode{"Mult(.\ + A, 1 +
B).(Intercept)"}. It is recommended that matches are specified wherever
possible, to ensure parameter labels are well-defined.

The arguments of a \Rclass{nonlin} function are chosen to suit the particular
term, but will usually include symbolic representations of predictors in the term
and/or the names of variables in the term. The function may also have an
\Rfunarg{inst} argument to allow specification of multiple instances (see
Section~\ref{sec:instances}).

\subsubsection{Example: a logistic function}

As an example, consider writing a \Rclass{nonlin} function for
the logistic term discussed in Section~\ref{sec:Basic}:
\[\frac{\text{Asym}}{1 + \exp((\text{xmid} - x)/\text{scal})}.\]
We can consider \emph{Asym}, \emph{xmid} and \emph{scal}
as the parameters of three separate predictors,
each with a single intercept term. Thus we specify the \Rfunarg{predictors}
argument to \Rfunction{nonlinTerms} as
\begin{Scode}
predictors = list(Asym = 1, xmid = 1, scal = 1)
\end{Scode}
The term also depends on the variable $x$, which would need to be specified by
the user. Suppose this is specified to our \Rclass{nonlin} function through an
argument named \Rfunarg{x}. Then our \Rclass{nonlin} function would specify the
following \Rfunarg{variables} argument
\begin{Scode}
variables = list(substitute(x))
\end{Scode}
We need to use \Rfunction{substitute} here to list the variable specified by the user
rather than the variable named \Rcode{``x''} (if it exists).

Our \Rclass{nonlin} function must also specify the \Rfunarg{term} argument to
\Rfunction{nonlinTerms}. This is a function that will paste together an
expression for the term, given labels for the predictors and the variables:
\begin{Scode}
    term = function(predLabels, varLabels) {
      paste(predLabels[1], "/(1 + exp((", predLabels[2], "-",
      varLabels[1], ")/", predLabels[3], "))")
    }
\end{Scode}

We now have all the necessary ingredients of a \Rclass{nonlin} function to specify
the logistic term. Since the parameterization does not depend on user-specified
values, it does not make sense to use call-matched labelling in this case. The
labels for our parameters will be taken from the labels of the
\Rfunarg{predictors} argument. Since we do not anticipate fitting models with
multiple logistic terms, our \Rclass{nonlin} function will not specify a
\Rfunarg{call} argument with which to prefix the parameter labels. We do,
however, have some idea of useful starting values, so we will specify the
\Rfunarg{start} argument as
\begin{Scode}
start = function(theta){
    theta[3] <- 1
    theta
}
\end{Scode}
which sets the initial scale parameter to one.

Putting all these ingredients together we have
\begin{Scode}
Logistic <- function(x){
    list(predictors = list(Asym = 1, xmid = 1, scal = 1),
         variables = list(substitute(x)),
         term = function(predLabels, varLabels) {
             paste(predLabels[1], "/(1 + exp((", predLabels[2], "-",
             varLabels[1], ")/", predLabels[3], "))")
         },
         start = function(theta){
             theta[3] <- 1
             theta
         })
}
class(Logistic) <- "nonlin"
\end{Scode}
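
The new function can then be used in model formulae passed to
\Rfunction{gnm}; a hypothetical usage sketch, for a response \Robject{y} and
covariate \Robject{conc}, would be
\begin{Scode}
gnm(y ~ -1 + Logistic(conc))
\end{Scode}
where the intercept is removed so that the predictor is exactly the logistic
function of \Robject{conc}.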

\subsubsection{Example: \Rfunction{MultHomog}}

The \Rfunction{MultHomog} function included in the \Rpackage{gnm} package
provides a further example of a \Rclass{nonlin}
function, showing how to specify a term with quite different features from the
preceding example.  The definition is
\begin{Scode}
MultHomog <- function(..., inst = NULL){
    dots <- match.call(expand.dots = FALSE)[["..."]]
    list(predictors = dots,
         common = rep(1, length(dots)),
         term = function(predLabels, ...) {
             paste("(", paste(predLabels, collapse = ")*("), ")", sep = "")},
         call = as.expression(match.call()))
}
class(MultHomog) <- "nonlin"
\end{Scode}
Firstly, the interaction may be based on any number of factors, hence the use
of the special ``\Rfunarg{...}'' argument. The use of \Rfunction{match.call} is
analogous to the use of \Rfunction{substitute} in the \Rfunction{Logistic}
function: to obtain expressions for the factors as specified by the user.

The returned \Rfunarg{common} argument specifies that homogeneous effects are to
be estimated across all the specified factors. The term only depends on these
factors, but the \Rfunarg{term} function allows for the empty
\Robject{varLabels} vector that will be passed to it, by having a
``\Rfunarg{...}'' argument.

Since the user may wish to specify multiple instances, the \Rfunarg{call}
argument to \Rfunction{nonlinTerms} is specified, so that parameters in
different instances of the term will have unique labels (due to the
\Rfunarg{inst} argument in the call). However as the expressions passed to
``\Rfunarg{...}'' may only represent single factors, rather than
general predictors, it is not necessary to use call-matched labelling, so the
\Rfunarg{match} argument is not specified here.

% Dref starting values as example of ensuring the arbitrariness of the final
% parameterization is emphasised (see old plug-in section)?

\section{Controlling the fitting procedure}

The \Rfunction{gnm} function has a number of arguments which affect the way a
model will be fitted.  Basic control parameters can be set using the arguments
%\Rfunarg{checkLinear},
\Rfunarg{lsMethod}, \Rfunarg{ridge}, \Rfunarg{tolerance},
\Rfunarg{iterStart} and \Rfunarg{iterMax}. Starting values for the parameter
estimates can be set by \Rfunarg{start} or they can be generated from starting
values for the predictors on the link or response scale via \Rfunarg{etastart} or
\Rfunarg{mustart} respectively. Parameters can be constrained via
\Rfunarg{constrain} and \Rfunarg{constrainTo} arguments, while parameters of a
stratification factor can be handled more efficiently by specifying the factor
in an \Rfunarg{eliminate} argument. These options are described in more detail below.

\subsection{Basic control parameters}

%By default, \Rfunction{gnm} will use \Rfunction{glm.fit} to fit models where the
%predictor is linear and \Rfunarg{eliminate} is \Rcode{NULL}. This behaviour can
%be overridden by setting \Rfunarg{checkLinear} to \Rcode{FALSE}.
%%% At present there is no advantage to doing this! Parameterization would be
%%% the same.
The arguments \Rfunarg{iterStart} and \Rfunarg{iterMax} control respectively the
number of starting iterations (where applicable) and the number of main
iterations used by the fitting algorithm. The progress of these iterations can
be followed by setting either \Rfunarg{verbose} or \Rfunarg{trace} to \Robject{TRUE}.
If \Rfunarg{verbose} is \Robject{TRUE} and \Rfunarg{trace} is \Robject{FALSE},
which is the default setting, progress is indicated by printing the character
``.'' at the beginning of each iteration.  If \Rfunarg{trace} is \Robject{TRUE},
the deviance is printed at the beginning of each iteration (over-riding the
printing of ``.'' if necessary). Whenever \Rfunarg{verbose} is \Robject{TRUE},
additional messages indicate each stage of the fitting process and diagnose any
errors that cause the algorithm to restart.
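
For example, the progress of a fit could be followed, and the iteration limit
raised, with a call of the following form (the model and data here are
hypothetical):
\begin{Scode}
gnm(resp ~ R + C + Mult(R, C), family = poisson, data = tab,
    trace = TRUE, iterMax = 1000)
\end{Scode}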

Prior to solving the (typically rank-deficient) least squares problem at the
heart of the \Rfunction{gnm} fitting algorithm, the design matrix is
standardized and regularized (in the Levenberg-Marquardt sense); the
\Rfunarg{ridge} argument provides a degree of control over the regularization
performed (smaller values may sometimes give faster convergence
but can lead to numerical instability).

The fitting algorithm will terminate before the number of main iterations has
reached \Rfunarg{iterMax} if the convergence criteria have been met, with
tolerance specified by \Rfunarg{tolerance}. Convergence is judged by comparing
the squared components of the score vector with corresponding elements of the
diagonal of the Fisher information matrix. If, for all components of the score
vector, the ratio is less than \Robject{tolerance\^{}2},
or the corresponding diagonal
element of the Fisher information matrix is less than 1e-20, the algorithm is
deemed to have converged.
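
In symbols, writing $u_j$ for the $j$th component of the score vector and
$I_{jj}$ for the corresponding diagonal element of the Fisher information
matrix, the algorithm stops when, for every $j$, either
\[
\frac{u_j^2}{I_{jj}} < \text{tolerance}^2
\quad\text{or}\quad
I_{jj} < 10^{-20}.
\]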

\subsection{Specifying starting values}
\label{sec:start}

\subsubsection{Using \Rfunarg{start}}

In some contexts, the default starting values may not be appropriate and the
fitting algorithm will fail to converge, or perhaps only converge after a large number
of iterations. Alternative starting values may be passed on to \Rfunction{gnm}
by specifying a \Rfunarg{start} argument. This should be a numeric vector of
length equal to the number of parameters (or possibly the non-eliminated
parameters, see Section \ref{sec:eliminate}); however, missing starting values
(\Robject{NA}s) are allowed.

If there is no user-specified starting value for a parameter, the
default value is used. This feature is particularly useful when adding terms to
a model, since the estimates from the original model can be used as starting
values, as in this example:
\begin{Scode}
model1 <- gnm(mu ~ R + C + Mult(R, C))
model2 <- gnm(mu ~ R + C + instances(Mult(R, C), 2),
              start = c(coef(model1), rep(NA, 10)))
\end{Scode}
The \Rfunction{gnm} call can be made with \Rcode{method = "coefNames"} to
identify the parameters of a model prior to estimation, to assist with the
specification of arguments such as \Rfunarg{start}.  For example, to get the number \Rcode{10} for the value of \Rfunarg{start} above, we could have done
\begin{Scode}
gnm(mu ~ R + C + instances(Mult(R, C), 2), method = "coefNames")
\end{Scode}
from whose output it would be seen that there are 10 new coefficients in
\Robject{model2}.  When called with \Rcode{method = "coefNames"},
\Rfunction{gnm} makes no attempt to fit the specified model;
instead it returns just the
names that the coefficients in the fitted model object would have.

The starting procedure used by \Rfunction{gnm} is as follows:
\begin{enumerate}
\item
Begin with all parameters set to \Rcode{NA}.
\item
\label{i:nonlin}
Replace \Rcode{NA} values with any starting values set by
\Rclass{nonlin} functions.
\item
\label{i:start}
Replace current values with any (non-\Rcode{NA}) starting values specified by
the \Rfunarg{start} argument of \Rfunction{gnm}.
\item
\label{i:constrain}
Set any values specified by the \Rfunarg{constrain} argument to the values
specified by the \Rfunarg{constrainTo} argument (see Section \ref{sec:constrain}).
\item
\label{i:gnmStart}
Categorise remaining \Rcode{NA} parameters as linear or nonlinear, treating
non-\Rcode{NA} parameters as fixed. Initialise the nonlinear parameters by
generating values $\theta_i$ from the Uniform($-0.1$, $0.1$) distribution and
shifting these values away from zero as follows
\begin{equation*}
\theta_i = \begin{cases}
    \theta_i - 0.1 &  \text{if } \theta_i < 0 \\
    \theta_i + 0.1 & \text{otherwise}
\end{cases}
\end{equation*}
\item
Compute the \Rfunction{glm} estimate of the linear parameters, offsetting the
contribution to the predictor of any terms fully determined by steps
\ref{i:nonlin} to \ref{i:gnmStart}.
\item
\label{i:iter}
Run starting iterations: update nonlinear parameters one at a time, jointly
re-estimating linear parameters after each round of updates.
\end{enumerate}
Note that no starting iterations (step \ref{i:iter}) will be run if all parameters are
linear, or if all nonlinear parameters are specified by \Rfunarg{start},
\Rfunarg{constrain} or a \Rclass{nonlin} function.

\subsubsection{Using \Rfunarg{etastart} or \Rfunarg{mustart}}

An alternative way to set starting values for the parameters is to specify
starting values for the predictors.

If there are linear parameters in the model, the predictor starting values are
first used to fit a model with only the linear terms (offsetting any terms fully
specified by starting values given by \Rfunarg{start}, \Rfunarg{constrain} or a
\Rclass{nonlin} function). In this case the parameters corresponding to the predictor
starting values can be computed analytically. If the fitted model reproduces
the predictor starting values, then these values contain no further information
and they are replaced using the \Rfunction{initialize} function of the specified
\Rfunarg{family}.

The predictor starting values or their replacement are then used as the response
variable in a nonlinear least squares model with only the unspecified nonlinear terms,
offsetting the contribution of any other terms. Since the model is over-parameterized,
the model is approximated using \Rfunarg{iterStart} iterations of the
``L-BFGS-B'' algorithm of \Rfunction{optim}, assuming parameters lie in the
range (-10, 10).

Starting values for the predictors can be specified explicitly via
\Rfunarg{etastart} or implicitly by passing starting values for the fitted
means to \Rfunarg{mustart}. For example, when extending a model, the fitted
predictors from the first model can be used to find starting values for the
parameters of the second model:
\begin{Scode}
model1 <- gnm(mu ~ R + C + Mult(R, C))
model2 <- gnm(mu ~ R + C + instances(Mult(R, C), 2), etastart = model1$predictors)
\end{Scode}
%$
Using \Rfunarg{etastart} avoids the one-parameter-at-a-time starting
iterations, so is quicker than using \Rfunarg{start} to pass on information
from a nested model. However, \Rfunarg{start} will generally produce better
starting values, so should be used when feasible. For multiplicative terms, the
\Rfunction{residSVD} function provides a better way to avoid starting iterations.
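
A sketch of the \Rfunction{residSVD} approach, extending a hypothetical
main-effects model, is
\begin{Scode}
model0 <- gnm(mu ~ R + C, family = poisson)
model1 <- gnm(mu ~ R + C + Mult(R, C), family = poisson,
              start = c(coef(model0), residSVD(model0, R, C)))
\end{Scode}
where \Rfunction{residSVD} computes starting values for the parameters of the
multiplicative term from a singular value decomposition based on the residuals
of \Robject{model0}.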

\subsection{Using \Rfunarg{constrain}}
\label{sec:constrain}

By default, \Rfunction{gnm} only imposes identifiability constraints according
to the general conventions used by \R\ to handle linear aliasing. Therefore
models that have any nonlinear terms will typically be over-parameterized,
and \Rfunction{gnm} will return a random parameterization for unidentified
coefficients (determined by the randomly chosen starting values for the
iterative algorithm, step \ref{i:gnmStart} above).

To illustrate this point, consider the following application of \Rfunction{gnm},
discussed later in Section \ref{sec:RCmodels}:
<<RC_homogeneous_model_1>>=
set.seed(1)
RChomog1 <- gnm(Freq ~ origin + destination + Diag(origin, destination) +
               MultHomog(origin, destination), family = poisson,
               data = occupationalStatus, verbose = FALSE)
@
Running the analysis again from a different seed
<<RC_homogeneous_model_2>>=
set.seed(2)
RChomog2 <- update(RChomog1)
@
gives a different representation of the same model:
<<Compare_coefficients>>=
compareCoef <- cbind(coef(RChomog1), coef(RChomog2))
colnames(compareCoef) <- c("RChomog1", "RChomog2")
round(compareCoef, 4)
@
Even though the linear terms are constrained, the parameter estimates for the
main effects of \Robject{origin} and \Robject{destination} still change, because
these terms are aliased with the higher order multiplicative interaction, which
is unconstrained.

Standard errors are only meaningful for identified parameters and hence the
output of \Rmethod{summary.gnm} will show clearly which coefficients are
estimable:
<<Summarize_model>>=
summary(RChomog2)
@

Additional constraints may be specified through the \Rfunarg{constrain} and
\Rfunarg{constrainTo} arguments of \Rfunction{gnm}. These arguments specify
respectively parameters that are to be constrained in the fitting process and
the values to which they should be constrained. Parameters may be specified by a
regular expression to match against the parameter names, a numeric vector of
indices, a character vector of names, or, if \Rcode{constrain = "[?]"}, they can
be selected through a \emph{Tk} dialog. The values to constrain to should be
specified by a numeric vector; if \Rfunarg{constrainTo} is missing, constrained
parameters will be set to zero.

In the case above, constraining one level of the homogeneous multiplicative
factor is sufficient to make the parameters of the nonlinear term
identifiable, and hence all parameters in the model
identifiable. Figure~\ref{fig:Tk} illustrates how the coefficient to be constrained
may be specified via a \emph{Tk} dialog, an approach which can be helpful in
interactive R sessions.

% here illustrate TclTk dialog, but explain other methods better for reproducibility
\begin{figure}[tp]
    \centering
    \begin{tabular}[!h]{m{0.6\linewidth}m{0.4\linewidth}}
        \scalebox{0.9}{\includegraphics{screenshot1.png}} &
        When \Rfunction{gnm} is called with  \Rcode{constrain = "[?]"},
          a \emph{Tk} dialog is shown listing the coefficients in the model.\\
        \scalebox{0.9}{\includegraphics{screenshot2.png}} &
        Scroll through the coefficients and click to select
          a single coefficient to constrain. To select multiple coefficients,
          hold down the \texttt{Ctrl} key whilst clicking. The \texttt{Add}
          button will become active when coefficient(s) have been selected.\\
        \scalebox{0.9}{\includegraphics{screenshot3.png}} &
        Click the \texttt{Add} button to add the selected coefficients
          to the list of coefficients to be constrained. To remove coefficients
          from the list, select the coefficients in the right pane and click
          \texttt{Remove}. Click \texttt{OK} when you have finalised the list.\\
    \end{tabular}
    \caption{Selecting coefficients to constrain with the \emph{Tk} dialog.}
    \label{fig:Tk}
\end{figure}

However for reproducible code, it is best to specify the constrained
coefficients directly. For example, the following code specifies that the last
level of the homogeneous multiplicative factor should be constrained to zero,
<<RC_homogeneous_constrained_model1>>=
set.seed(1)
RChomogConstrained1 <- update(RChomog1, constrain = length(coef(RChomog1)))
@
Since all the parameters are now constrained, re-fitting the model will give the
same results, regardless of the random seed set beforehand:
<<RC_homogeneous_constrained_model2>>=
set.seed(2)
RChomogConstrained2 <- update(RChomogConstrained1)
identical(coef(RChomogConstrained1), coef(RChomogConstrained2))
@

It is not usually so straightforward to constrain all the parameters in a
generalized nonlinear model. However, use of \Rfunarg{constrain} in conjunction
with \Rfunarg{constrainTo} is usually sufficient to make coefficients of
interest identifiable.  The functions \Rfunction{checkEstimable} and
\Rfunction{getContrasts}, described in Section \ref{sec:Methods},
may be used to check whether particular combinations of parameters
are estimable.

\subsection{Using \Rfunarg{eliminate}}
\label{sec:eliminate}

When a model contains the additive effect of a factor which has a large
number of levels, the iterative algorithm by which maximum likelihood estimates
are computed can usually
be accelerated by use of the \Rfunarg{eliminate} argument to \Rfunction{gnm}. A factor passed to \Rfunarg{eliminate} specifies the first term in the
model, replacing any intercept term. So, for example
\begin{Scode}
gnm(mu ~ A + B + Mult(A, B), eliminate = strata1:strata2)
\end{Scode}
is equivalent, in terms of the structure of the model, to
\begin{Scode}
gnm(mu ~ -1 + strata1:strata2 + A + B + Mult(A, B))
\end{Scode}
However, specifying a factor through \Rfunarg{eliminate} has two
advantages over the standard specification. First, the structure of the
eliminated factor is exploited so that computational speed is
improved ---
substantially so if the number of eliminated parameters is large.
Second, eliminated parameters are returned separately from non-eliminated
parameters (as an attribute of the \Robject{coefficients} component of the
returned object). Thus eliminated parameters are excluded from printed model
summaries by default and disregarded by \Rclass{gnm} methods that would not be
relevant to such parameters (see Section \ref{sec:Methods}).

The \Rfunarg{eliminate} feature is useful, for example, when
multinomial-response models are fitted by using the well known equivalence
between multinomial and (conditional) Poisson likelihoods.  In such situations
the sufficient statistic involves a potentially large number of fixed
multinomial row totals, and the corresponding parameters are of no
substantive interest.  For an application see Section \ref{sec:Stereotype} below.
Here we give an artificial illustration: 1000 randomly-generated trinomial
responses, and a single predictor variable (whose effect on the data generation
is null):
<<Eliminate_Eg>>=
set.seed(1)
n <- 1000
x <- rep(rnorm(n), rep(3, n))
counts <- as.vector(rmultinom(n, 10, c(0.7, 0.1, 0.2)))
rowID <- gl(n, 3, 3 * n)
resp <- gl(3, 1, 3 * n)
@
The logistic model for dependence on \Robject{x} can be fitted as a Poisson
log-linear model\footnote{For this particular example, of course, it would be
more economical to fit the model directly using \Rfunction{multinom} (from the
recommended package \Rpackage{nnet}).  But fitting as here via the
`Poisson trick' allows the model to be elaborated within the \Rpackage{gnm}
framework using \Rfunction{Mult} or other \Rclass{nonlin} terms.}, using
either \Rfunction{glm} or \Rfunction{gnm}:
\begin{Sinput}
> ## Timings on a Xeon 2.33GHz, under Linux
> system.time(temp.glm <- glm(counts ~ rowID + resp + resp:x,
                              family = poisson))[1]
\end{Sinput}
\begin{Soutput}
user.self
   37.126
\end{Soutput}
\begin{Sinput}
> system.time(temp.gnm <- gnm(counts ~ resp + resp:x, eliminate = rowID,
                              family = poisson, verbose = FALSE))[1]
\end{Sinput}
\begin{Soutput}
user.self
     0.04
\end{Soutput}
\begin{Sinput}
> c(deviance(temp.glm), deviance(temp.gnm))
\end{Sinput}
\begin{Soutput}
[1] 2462.556 2462.556
\end{Soutput}
Here the use of \Rfunarg{eliminate} causes the \Rfunction{gnm} calculations to
run much more quickly than \Rfunction{glm}.  The speed advantage increases with
the number of eliminated parameters (here 1000). By default, the eliminated
parameters do not appear in printed model summaries, as here:
\begin{Sinput}
> summary(temp.gnm)
\end{Sinput}
\begin{Soutput}
Call:

gnm(formula = counts ~ resp + resp:x, eliminate = rowID, family = poisson,
    verbose = FALSE)

Deviance Residuals:
      Min         1Q     Median         3Q        Max
-2.852038  -0.786172  -0.004534   0.645278   2.755013

Coefficients of interest:
         Estimate Std. Error z value Pr(>|z|)
resp2   -1.961448   0.034007 -57.678   <2e-16
resp3   -1.255846   0.025359 -49.523   <2e-16
resp1:x -0.007726   0.024517  -0.315    0.753
resp2:x -0.023340   0.037611  -0.621    0.535
resp3:x  0.000000         NA      NA       NA

(Dispersion parameter for poisson family taken to be 1)

Std. Error is NA where coefficient has been constrained or is unidentified

Residual deviance: 2462.6 on 1996 degrees of freedom
AIC: 12028

Number of iterations: 4
\end{Soutput}
although the \Rmethod{summary} method has a logical \Rfunarg{with.eliminate}
argument that can be toggled so that the eliminated parameters are included if
desired.
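For example, the following (not run) would include the 1000 eliminated
\Robject{rowID} parameters in the printed summary; the second call sketches
one way to extract them directly, assuming they are stored in an attribute
named \Rcode{"eliminated"}:
\begin{Scode}
summary(temp.gnm, with.eliminate = TRUE)
## eliminated parameters, held as an attribute of the coefficients
## component (the attribute name here is an assumption)
attr(temp.gnm$coefficients, "eliminated")
\end{Scode}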

The \Rfunarg{eliminate} feature as implemented in \Rpackage{gnm} extends the
earlier work of \cite{Hatz04} to a broader class of models and to
over-parameterized model representations.

\section{Methods and accessor functions}
\label{sec:Methods}

\subsection{Methods}
\label{sec:specificMethods}

The \Rfunction{gnm} function returns
an object of class \Robject{c("gnm", "glm", "lm")}. There
are several methods that have been written for objects of class \Rclass{glm}
or \Rclass{lm} to facilitate inspection of fitted models.
Of the generic functions in the \Rpackage{base}, \Rpackage{stats} and
\Rpackage{graphics} packages for which methods have been written for
\Rclass{glm} or \Rclass{lm} objects, Figure \ref{fig:glm.lm} shows those that
can be used to analyse \Rclass{gnm} objects, whilst Figure \ref{fig:!glm.lm}
shows those that are not implemented for \Rclass{gnm} objects.

\begin{figure}[!tbph]
    \centering
    \begin{fbox}
        {
          \begin{tabular*}{7.5cm}{@{\extracolsep{\fill}}lll@{\extracolsep{\fill}}}
              add1$^*$	&	family		&		print	\\
              anova	&	formula	&	profile	\\
              case.names		&	hatvalues	&	residuals	\\
              coef		&	labels	&	rstandard	\\
              cooks.distance		&	logLik	&	summary	\\
              confint		&	model.frame	&	variable.names	\\
              deviance		&	model.matrix	&	vcov	\\
              drop1$^*$	&	plot	&	weights	\\
              extractAIC		&	predict		&		\\
          \end{tabular*}
        }
    \end{fbox}
    \caption{Generic functions in the \Rpackage{base}, \Rpackage{stats} and
      \Rpackage{graphics} packages that can be used to analyse
      \Rclass{gnm} objects. Starred functions are implemented for models with
      linear terms only.}
    \label{fig:glm.lm}
\end{figure}

\begin{figure}[!tbph]
    \centering
    \begin{fbox}
        {
          \begin{tabular*}{4.5cm}{@{\extracolsep{\fill}}ll@{\extracolsep{\fill}}}
              alias		&		effects		\\
              dfbeta		&		influence		\\
              dfbetas		&		kappa		\\
              dummy.coef		&		proj		\\
          \end{tabular*}
        }
    \end{fbox}
    \caption{Generic functions in the \Rpackage{base}, \Rpackage{stats} and
      \Rpackage{graphics} packages for which methods have been written for
      \Rclass{glm} or \Rclass{lm} objects, but which are \emph{not}
      implemented for \Rclass{gnm} objects.}
    \label{fig:!glm.lm}
\end{figure}

In addition to the accessor functions shown in Figure \ref{fig:glm.lm}, the
\Rpackage{gnm} package provides a new generic function called
\Rfunction{termPredictors}
that has methods for objects of class \Rclass{gnm}, \Rclass{glm} and
\Rclass{lm}. This function returns the additive contribution of each term to
the predictor.  See Section \ref{sec:termPredictors} for an example of its use.

Most of the functions listed in Figure \ref{fig:glm.lm} can be used as they
would be for \Rclass{glm} or \Rclass{lm} objects; however, care must be taken
with \Rmethod{vcov.gnm}, as the variance-covariance matrix will depend on the
parameterization of the model. In particular, standard errors calculated using
the variance-covariance matrix will only be valid for parameters or contrasts
that are estimable.

Similarly, \Rmethod{profile.gnm} and \Rmethod{confint.gnm} are only applicable
to estimable parameters. The deviance function of a generalized nonlinear model
can sometimes be far from quadratic and \Rmethod{profile.gnm} attempts to detect
asymmetry or asymptotic behaviour in order to return a sufficient profile for a
given parameter. As an example, consider the following model, described later in
Section \ref{sec:Unidiff}:

\begin{Scode}
unidiff <- gnm(Freq ~ educ*orig + educ*dest + Mult(Exp(educ), orig:dest),
               constrain = "[.]educ1", family = poisson, data = yaish,
               subset = (dest != 7))
prof <- profile(unidiff, which = 61:65, trace = TRUE)
\end{Scode}

If the deviance is quadratic in a given parameter, the profile trace will be
linear. We can plot the profile traces as follows (a sketch, assuming the
default \Rfunction{plot} method for profile objects; the result is shown in
Figure~\ref{fig:profilePlot}):
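\begin{Scode}
plot(prof)
\end{Scode}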

\begin{figure}[!tbph]
\begin{center}
\scalebox{1.1}{\includegraphics{fig-profilePlot.pdf}}
\end{center}
\caption{Profile traces for the multipliers of the orig:dest association}
\label{fig:profilePlot}
\end{figure}

From these plots we can see that the deviance is approximately quadratic
in \Robject{Mult(Exp(.), orig:dest).educ2}, asymmetric in
\Robject{Mult(Exp(.), orig:dest).educ3} and \Robject{Mult(Exp(.),
  orig:dest).educ4} and asymptotic in \Robject{Mult(Exp(.), orig:dest).educ5}.
When the deviance is approximately quadratic in a given parameter,
\Rmethod{profile.gnm} uses the same stepsize for profiling above and below the
original estimate:

\begin{Sinput}
> diff(prof[[2]]$par.vals[, "Mult(Exp(.), orig:dest).educ2"])
\end{Sinput}
\begin{Soutput}
 [1] 0.1053072 0.1053072 0.1053072 0.1053072 0.1053072 0.1053072 0.1053072
 [8] 0.1053072 0.1053072 0.1053072
\end{Soutput}

When the deviance is asymmetric, \Rmethod{profile.gnm} uses different
step sizes to accommodate the skew:
\begin{Sinput}
> diff(prof[[4]]$par.vals[, "Mult(Exp(.), orig:dest).educ4"])
\end{Sinput}
\begin{Soutput}
 [1] 0.2018393 0.2018393 0.2018393 0.2018393 0.2018393 0.2018393 0.2018393
 [8] 0.2018393 0.2018393 0.2243673 0.2243673 0.2243673 0.2243673 0.2243673
\end{Soutput}

Finally, the presence of an asymptote is recorded in the \Robject{"asymptote"}
attribute of the returned profile:
\begin{Sinput}
> attr(prof[[5]], "asymptote")
\end{Sinput}
\begin{Soutput}
[1]  TRUE FALSE
\end{Soutput}

This information is used by \Rmethod{confint.gnm} to return infinite limits for
confidence intervals, as appropriate:
\begin{Sinput}
> confint(prof, level = 0.95)
\end{Sinput}
\begin{Soutput}
                                   2.5 %     97.5 %
Mult(Exp(.), orig:dest).educ1         NA         NA
Mult(Exp(.), orig:dest).educ2 -0.5978901  0.1022447
Mult(Exp(.), orig:dest).educ3 -1.4836854 -0.2362378
Mult(Exp(.), orig:dest).educ4 -2.5792398 -0.2953420
Mult(Exp(.), orig:dest).educ5       -Inf -0.7006889
\end{Soutput}

\subsection{\Rfunction{ofInterest} and \Rfunction{pickCoef}}
\label{sec:ofInterest}

It is quite common for a statistical model to have a large number of
parameters, but for only a subset of these parameters to be of interest when
it comes to interpreting the model.

The \Rfunarg{ofInterest} argument to \Rfunction{gnm} allows the user to specify
a subset of the parameters which are of interest, so that \Rclass{gnm} methods
will focus on these parameters. In particular, printed model summaries will
only show the parameters of interest, whilst methods for which a subset of
parameters may be selected will by default select the parameters of interest
or, where this may not be appropriate, provide a \emph{Tk} dialog for
selection from the parameters of interest. Parameters may be specified to the
\Rfunarg{ofInterest} argument by a regular expression to match against
parameter names, by a numeric vector of indices, or by a character vector of
names; alternatively, if \Rcode{ofInterest = "[?]"}, they can be selected
through a \emph{Tk} dialog.

The information regarding the parameters of interest is held in the
\Robject{ofInterest} component of \Rclass{gnm} objects, which is a named vector
of numeric indices, or \Robject{NULL} if all parameters are of interest. This
component may be accessed or replaced using \Rfunction{ofInterest} or
\Rfunction{ofInterest<-} respectively.
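For example, given a hypothetical fitted model \Robject{model}:
\begin{Scode}
ofInterest(model)          # extract the indices of the parameters of interest
ofInterest(model) <- NULL  # reset, so that all parameters are of interest
\end{Scode}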

The \Rfunction{pickCoef} function provides a simple way to obtain the indices of
coefficients from any model object. It takes the model object as its first
argument and has an optional \Rfunarg{regexp} argument. If a regular expression
is passed to \Rfunarg{regexp}, the coefficients are selected by matching this
regular expression against the coefficient names.
Otherwise, coefficients may be selected via a \emph{Tk} dialog.

So, returning to the example from the last section, if we had set
\Robject{ofInterest} to index the education multipliers as follows
\begin{Scode}
ofInterest(unidiff) <- pickCoef(unidiff, "[.]educ")
\end{Scode}
then it would not have been necessary to specify the \Rfunarg{which} argument of
\Rfunction{profile} as these parameters would have been selected by default.
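That is, the earlier profiling call could have been reduced to
\begin{Scode}
prof <- profile(unidiff, trace = TRUE)
\end{Scode}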


\subsection{\Rfunction{checkEstimable}}
\label{sec:checkEstimable}

The \Rfunction{checkEstimable} function can be used to check the
estimability of a linear combination of parameters.  For non-linear
combinations the same function can be used to check estimability based on
the (local) vector of partial derivatives.  The \Rfunction{checkEstimable}
function provides a numerical version of the sort of algebraic test
described in \citet{CatcMorg97}.

Consider the following model, which is described later in Section
\ref{sec:Unidiff}:
<<Double_UNIDIFF_model>>=
doubleUnidiff <- gnm(Freq ~ election:vote + election:class:religion
                     + Mult(Exp(election), religion:vote) +
                     Mult(Exp(election), class:vote), family = poisson,
                     data = cautres)
@
The effects of the first constituent multiplier in the first multiplicative
interaction are identified when the parameter for one of the levels --- say for the first level --- is
constrained to zero. The parameters to be
estimated are then the differences between each other level and the
first. These differences can be represented by a contrast matrix as
follows:
<<Contrast_matrix>>=
coefs <- names(coef(doubleUnidiff))
contrCoefs <- coefs[grep(", religion:vote", coefs)]
nContr <- length(contrCoefs)
contrMatrix <- matrix(0, length(coefs), nContr,
                      dimnames = list(coefs, contrCoefs))
contr <- contr.sum(contrCoefs)
# switch round to contrast with first level
contr <- rbind(contr[nContr, ], contr[-nContr, ])
contrMatrix[contrCoefs, 2:nContr] <- contr
contrMatrix[contrCoefs, 2:nContr]
@
Then their estimability can be checked using \Rfunction{checkEstimable}
<<Check_estimability_1>>=
checkEstimable(doubleUnidiff, contrMatrix)
@
which confirms that the effects for the other three levels are estimable when
the parameter for the first level is set to zero.

However, applying the equivalent constraint to the second constituent
multiplier in the interaction is not sufficient to make the parameters in that
multiplier estimable:
<<Check_estimability_2>>=
coefs <- names(coef(doubleUnidiff))
contrCoefs <- coefs[grep("[.]religion", coefs)]
nContr <- length(contrCoefs)
contrMatrix <- matrix(0, length(coefs), length(contrCoefs),
                      dimnames = list(coefs, contrCoefs))
contr <- contr.sum(contrCoefs)
contrMatrix[contrCoefs, 2:nContr] <- rbind(contr[nContr, ], contr[-nContr, ])
checkEstimable(doubleUnidiff, contrMatrix)
@

\subsection{\Rfunction{getContrasts}, \Rfunction{se}}
\label{sec:getContrasts}

To investigate simple ``sum to zero'' contrasts such as those above, it
is easiest to use the \Rfunction{getContrasts} function, which checks the
estimability of possibly scaled contrasts and returns the parameter estimates
with their standard errors. Returning to the example of the first constituent
multiplier in the first multiplicative interaction term, the differences between
each election and the first can be obtained as follows:
<<Get_contrasts_1>>=
myContrasts <- getContrasts(doubleUnidiff,
                            pickCoef(doubleUnidiff, ", religion:vote"))
myContrasts
@ %def
Visualization of estimated contrasts using
`quasi standard errors' \citep{Firt03,FirtMene04} is achieved by plotting
the resulting object:
<<qvplot, fig = TRUE, include = FALSE>>=
plot(myContrasts,
     main = "Relative strength of religion-vote association, log scale",
     xlab = "Election", levelNames = 1:4)
@
\begin{figure}[!tbph]
    \begin{center}
        \includegraphics{gnmOverview-qvplot.pdf}
    \end{center}
    \caption{Relative strength of religion-vote association, log scale}
    \label{fig:qvplot}
\end{figure}
%Attempting to obtain the equivalent contrasts for the second
%(religion-vote association) multiplier produces the
%following result:
%<<Get_contrasts_2>>=
%coefs.of.interest <- grep("[.]religion", names(coef(doubleUnidiff)))
%getContrasts(doubleUnidiff, coefs.of.interest)
%@ %def
By default, \Rfunction{getContrasts} uses the first parameter of the specified
set as the reference level; alternatives may be set via the \Rfunarg{ref}
argument.
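For instance, contrasts of the religion-vote multipliers with their unweighted
mean, rather than with the first level, could be obtained as follows:
\begin{Scode}
getContrasts(doubleUnidiff,
             pickCoef(doubleUnidiff, ", religion:vote"), ref = "mean")
\end{Scode}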

In the above example, the simple contrasts are estimable without scaling. In
certain other applications, for example row-column association models (see
Section~\ref{sec:RCmodels}), the contrasts are identified only after fixing
their scale.  A more general family of \emph{scaled} contrasts for a set of
parameters $\gamma_r, r = 1, \ldots, R$ is given by
\begin{equation*}
    \gamma^*_r = \frac{\gamma_r - \overline{\gamma}_w}{
      \sqrt{\sum_r v_r  (\gamma_r - \overline{\gamma}_u)^2}}
\end{equation*}
where $\overline{\gamma}_w = \sum w_r \gamma_r$ is the reference level against
which the contrasts are taken, $\overline{\gamma}_u = \sum u_r \gamma_r$
is a possibly different
weighted mean of the parameters to be used as reference level for a set
of ``scaling contrasts'', and $v_r$ is a further
set of weights. Thus, for example, the choice
\[
w_r=
\begin{cases}
1&(r=1)\\
0&\hbox{(otherwise)}
\end{cases},
\qquad
u_r=v_r=1/R
\]
specifies contrasts with the first level, with the coefficients scaled to have
variance 1\null.
This general type of
scaling can be obtained by specifying the form of
$\overline{\gamma}_u$ and $v_r$
via the \Rfunarg{scaleRef} and \Rfunarg{scaleWeights} arguments of
\Rfunction{getContrasts}.

As an example, consider the following model, described in Section~\ref{sec:RCmodels}:
@
<<RCmodel>>=
mentalHealth$MHS <- C(mentalHealth$MHS, treatment)
mentalHealth$SES <- C(mentalHealth$SES, treatment)
RC1model <- gnm(count ~ SES + MHS + Mult(SES, MHS),
                family = poisson, data = mentalHealth)
@ %def
The effects of the constituent multipliers of the multiplicative interaction are
identified when both their scale and location are constrained. A simple way to
achieve this is to set the first parameter to zero and the last parameter to one:
@
<<RCmodel_constrained>>=
RC1model2 <- gnm(count ~ SES + MHS + Mult(1, SES, MHS),
                 constrain = "[.]SES[AF]", constrainTo = c(0, 1),
                 ofInterest = "[.]SES",
                 family = poisson, data = mentalHealth)
summary(RC1model2)
@ %def
Note that a constant multiplier must be incorporated into the interaction term,
i.e., the multiplicative
term \Rcode{Mult(SES, MHS)} becomes \Rcode{Mult(1, SES, MHS)},
in order
to maintain equivalence with the original model specification. The constraints
specified for \Robject{RC1model2} result in the estimation of
scaled contrasts with level \Rcode{A} of \Rcode{SES}, in which the scaling
fixes the magnitude of the contrast between   level \Rcode{F} and
level \Rcode{A} to be equal to 1\null.
The equivalent use of \Rfunction{getContrasts}, together
with the \emph{unconstrained} fit (\Robject{RC1model}),
in this case is as follows:
@
<<getContrasts_simple>>=
getContrasts(RC1model, pickCoef(RC1model, "[.]SES"), ref = "first",
             scaleRef = "first", scaleWeights = c(rep(0, 5), 1))
@ %def
Quasi-variances and standard errors are not returned here as they cannot
(currently) be computed for scaled contrasts. When the scaling uses the same
reference level as the contrasts, equal scale weights produce ``spherical''
contrasts, whilst unequal weights produce ``elliptical''
contrasts. Further examples
are given in Sections~\ref{sec:RCmodels} and \ref{sec:GAMMI}.

For more general linear combinations of parameters
than contrasts,
the lower-level \Rfunction{se} function (which is called internally by
\Rfunction{getContrasts} and by the \Rmethod{summary} method)
can be used directly.  See \Rcode{help(se)} for details.
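For instance, the following sketch would give the estimate and standard error
of the simple contrast between the first two religion-vote multipliers of
\Robject{doubleUnidiff} (the combination matrix is specified as for
\Rfunction{checkEstimable}, with one linear combination per column):
\begin{Scode}
coefs <- names(coef(doubleUnidiff))
comb <- matrix(0, length(coefs), 1, dimnames = list(coefs, "elec2 - elec1"))
comb[pickCoef(doubleUnidiff, ", religion:vote")[1:2], 1] <- c(-1, 1)
se(doubleUnidiff, comb)
\end{Scode}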

\subsection{\Rfunction{residSVD}}
\label{sec:residSVD}
Sometimes it is useful to operate on the residuals of a model in order
to create informative summaries of residual variation, or to obtain good
starting values for additional parameters in a more elaborate model.  The
relevant arithmetical operations are weighted means of the so-called
\emph{working residuals}.

The \Rfunction{residSVD} function facilitates one particular residual
analysis that is often useful when considering multiplicative interaction
between factors as a model elaboration: in effect, \Rfunction{residSVD}
provides a direct estimate of the parameters of such an interaction, by
performing an appropriately weighted singular value decomposition on the
working residuals.

As an illustration, consider the barley data from \citet{Wedd74}. These data
have the following two-way structure:
<<two-way>>=
xtabs(y ~ site + variety, barley)
@
In Section~\ref{sec:biplot} a biplot model is proposed for these data, which
comprises a two-component interaction between the cross-classifying factors. In
order to fit this model, we can proceed by fitting a smaller model,
then use \Rfunction{residSVD} to obtain starting values for the parameters
in the bilinear term:
@
<<residSVD>>=
emptyModel <- gnm(y ~ -1, family = wedderburn, data = barley)
biplotStart <- residSVD(emptyModel, barley$site, barley$variety, d = 2)
biplotModel <- gnm(y ~ -1 + instances(Mult(site, variety), 2),
family = wedderburn, data = barley, start =  biplotStart)
@ %def
In this instance, the
use of purposive (as opposed to the default, random) starting values
had little effect: the fairly large number of iterations needed in this
example is caused by a rather flat (quasi-)likelihood surface near the
maximum, not by
poor starting values.  In other situations, the use of \Rfunction{residSVD}
may speed the calculations dramatically (see for example Section
\ref{sec:GAMMI}), or it may be crucial to success in
locating the MLE (for example see \Rcode{help(House2001)}, where
the number of multiplicative parameters is in the hundreds).

The \Rfunction{residSVD} result in this instance
provides a crude approximation to the MLE
of the enlarged model, as can be seen in Figure \ref{fig:residSVDplot}:

@
<<residSVDplot, fig = TRUE, include = FALSE, echo = FALSE>>=
plot(coef(biplotModel), biplotStart,
     main = "Comparison of residSVD and MLE for a 2-dimensional
 biplot model", ylim = c(-2, 2), xlim = c(-4, 4))
abline(a = 0, b = 1, lty = 2)
@ %def

\begin{figure}[!tbph]
    \begin{center}
        \includegraphics{gnmOverview-residSVDplot}
    \end{center}
    \caption{Comparison of residSVD and the MLE for a 2-dimensional biplot
      model}
    \label{fig:residSVDplot}
\end{figure}


\section{\Rfunction{gnm} or \Rfunction{(g)nls}?}
\label{sec:gnmVnls}

The \Rfunction{nls} function in the \Rpackage{stats} package may be used to fit
a nonlinear model via least-squares estimation. Statistically speaking,
\Rfunction{gnm} is to \Rfunction{nls} as \Rfunction{glm} is to \Rfunction{lm},
in that a nonlinear least-squares model is equivalent to a
generalized nonlinear model with \Rcode{family = gaussian}.
A \Rfunction{nls} model assumes that the responses are
distributed either with constant variance or with fixed relative variances
(specified via the \Rfunarg{weights} argument).
The \Rfunction{gnls} function in
the \Rpackage{nlme} package extends \Rfunction{nls}
to allow correlated responses.
On the other hand, \Rfunction{gnm} allows for responses distributed with
variances that are a specified (via the \Rfunarg{family} argument) function of
the mean; as with \Rfunction{nls}, no correlation is allowed.

The \Rfunction{gnm} function also differs from \Rfunction{nls}/\Rfunction{gnls}
in terms of the interface. Models are specified to \Rfunction{nls} and
\Rfunction{gnls} in terms of a mathematical formula or a \Rclass{selfStart}
function based on such a formula, which is convenient for models that have a
small number of parameters. For models that have a large
number of parameters, or can not easily be represented by a mathematical
formula, the symbolic model specification used by \Rfunction{gnm} may be more
convenient. This would usually be the case for models involving factors, which
would need to be represented by dummy variables in a \Rfunction{nls}
formula.
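As a small illustration of the two interfaces, an exponential growth curve
could be specified to \Rfunction{nls} by its mathematical formula and to
\Rfunction{gnm} symbolically (a sketch, assuming numeric vectors \Robject{y}
and \Robject{x}):
\begin{Scode}
## nls: the model is written out as a mathematical formula
nls(y ~ exp(a + b * x), start = list(a = 0, b = 0))
## gnm: the same curve specified symbolically, via an Exp term
gnm(y ~ -1 + Exp(1 + x), family = gaussian)
\end{Scode}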

When working with artificial data, \Rfunction{gnm} has the
minor advantage that it does not fail when a model is an exact fit to the
data (see \Rcode{help(nls)})\null.
Therefore it is not necessary with \Rfunction{gnm}
to add noise to artificial data, which can
be useful when testing methods.
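For instance, data generated exactly from the curve sketched above are fitted
by \Rfunction{gnm} without complaint, whereas the corresponding
\Rfunction{nls} call would typically fail on such zero-residual data (see
\Rcode{help(nls)}):
\begin{Scode}
x <- 1:10
y <- 2 * exp(0.3 * x)   # exact, zero-residual artificial data
gnm(y ~ -1 + Exp(1 + x), family = gaussian)  # fits without error
\end{Scode}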

\section{Examples}
\label{sec:Examples}

\subsection{Row-column association models}
\label{sec:RCmodels}

There are several models that have been proposed for modelling the relationship
between the cell means of a contingency table and the cross-classifying
factors. The following examples consider the row-column association models
proposed by \citet{Good79}. The examples shown use data from two-way contingency
tables, but the \Rpackage{gnm} package can also be used to fit the equivalent
models for higher order tables.

\subsubsection{RC(1) model}

The RC(1) model is a row and column association model with the interaction
between row and column factors represented by one component of the
multiplicative
interaction. If the rows are indexed by $r$ and the columns by $c$, then the
log-multiplicative form of the RC(1) model for the cell means $\mu_{rc}$ is
given by
\[\log \mu_{rc} = \alpha_r + \beta_c + \gamma_r\delta_c. \]

We shall fit this model to the \Robject{mentalHealth} data set from
\citet[][page 381]{Agre02}, which is a two-way contingency table classified
by the child's
mental impairment (MHS) and the parents' socioeconomic status (SES). Although
both of these factors are ordered, we do not wish to use polynomial contrasts
in the model, so we begin by setting the contrasts attribute of these
factors to \Rcode{treatment}:
<<Set_contrasts_attribute>>=
set.seed(1)
mentalHealth$MHS <- C(mentalHealth$MHS, treatment)
mentalHealth$SES <- C(mentalHealth$SES, treatment)
@
The \Rclass{gnm} model is then specified as follows, using the poisson family
with a log link function:
<<RC1_model>>=
RC1model <- gnm(count ~ SES + MHS + Mult(SES, MHS), family = poisson,
                data = mentalHealth)
RC1model
@ %def
The row scores (parameters 10 to 15) and the column scores (parameters 16 to 19)
of the multiplicative interaction can be normalized as in Agresti's eqn (9.15):
<<Normalize_scores>>=
rowProbs <- with(mentalHealth, tapply(count, SES, sum) / sum(count))
colProbs <- with(mentalHealth, tapply(count, MHS, sum) / sum(count))
rowScores <- coef(RC1model)[10:15]
colScores <- coef(RC1model)[16:19]
rowScores <- rowScores - sum(rowScores * rowProbs)
colScores <- colScores - sum(colScores * colProbs)
beta1 <- sqrt(sum(rowScores^2 * rowProbs))
beta2 <- sqrt(sum(colScores^2 * colProbs))
assoc <- list(beta = beta1 * beta2,
              mu = rowScores / beta1,
              nu = colScores / beta2)
assoc
@ %def
Alternatively, the elliptical contrasts \Robject{mu} and \Robject{nu} can be
obtained using \Rfunction{getContrasts}, with the advantage that the standard
errors for the contrasts will also be computed:
@
<<Elliptical_contrasts>>=
mu <- getContrasts(RC1model, pickCoef(RC1model, "[.]SES"),
                   ref = rowProbs, scaleWeights = rowProbs)
nu <- getContrasts(RC1model, pickCoef(RC1model, "[.]MHS"),
                   ref = colProbs, scaleWeights = colProbs)
mu
nu
@ %def
Since the value of \Robject{beta} is dependent upon the particular
scaling used for the contrasts,
it is typically not of interest
to conduct inference on this parameter directly. The
standard error for \Robject{beta} could be obtained, if desired, via the delta
method.
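One possible sketch of such a computation uses a simple finite-difference
gradient of the normalization above (an illustration only; the coefficient
indices 10 to 19 are those of the multiplicative interaction, as before):
\begin{Scode}
## map the model coefficients to beta, following the normalization above
betaFun <- function(coefs) {
    rowScores <- coefs[10:15] - sum(coefs[10:15] * rowProbs)
    colScores <- coefs[16:19] - sum(coefs[16:19] * colProbs)
    sqrt(sum(rowScores^2 * rowProbs)) * sqrt(sum(colScores^2 * colProbs))
}
## finite-difference gradient of betaFun at the fitted coefficients
coefs <- coef(RC1model)
eps <- 1e-6
grad <- sapply(seq_along(coefs), function(i) {
    up <- coefs
    up[i] <- up[i] + eps
    (betaFun(up) - betaFun(coefs)) / eps
})
## delta-method standard error for beta
sqrt(drop(t(grad) %*% vcov(RC1model) %*% grad))
\end{Scode}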

\subsubsection{RC(2) model}

The RC(1) model can be extended to an RC($m$) model with $m$ components of the
multiplicative interaction. For example, the RC(2) model is given by
\[
\log \mu_{rc} = \alpha_r + \beta_c + \gamma_r\delta_c + \theta_r\phi_c.
\]
Extra instances of the multiplicative interaction can be specified via the
\Rfunction{instances} function, so the RC(2) model can be fitted to the
\Robject{mentalHealth} data as follows:
<<RC2_model>>=
RC2model <- gnm(count ~ SES + MHS + instances(Mult(SES, MHS), 2),
                family = poisson, data = mentalHealth)
RC2model
@

\subsubsection{Homogeneous effects}

If the row and column factors have the same levels, or perhaps some levels in
common, then the row-column interaction could be modelled by a multiplicative
interaction with homogeneous effects, that is
\[\log \mu_{rc} = \alpha_r + \beta_c + \gamma_r\gamma_c.\]
For example, the \Robject{occupationalStatus} data set from \citet{Good79} is a
contingency table classified by the occupational status of fathers (origin) and
their sons (destination). \citet{Good79} fits a row-column association model
with homogeneous effects to these data after deleting the cells on the main
diagonal. Equivalently we can account for the diagonal effects by a separate
\Rfunction{Diag} term:
@
<<Homogeneous_effects>>=
RChomog <- gnm(Freq ~ origin + destination + Diag(origin, destination) +
               MultHomog(origin, destination), family = poisson,
               data = occupationalStatus)
RChomog
@ %def

To determine whether it would be better to allow for heterogeneous effects on
the association of the fathers' occupational status and the sons' occupational
status, we can compare this model to the RC(1) model for these data:
<<Heterogeneous_effects>>=
RCheterog <- gnm(Freq ~ origin + destination + Diag(origin, destination) +
               Mult(origin, destination), family = poisson,
               data = occupationalStatus)
anova(RChomog, RCheterog)
@
In this case there is little gain in allowing heterogeneous effects.

\subsection{Diagonal reference models}
\label{sec:Dref}

Diagonal reference models, proposed by \citet{Sobe81, Sobe85}, are designed for
contingency tables classified by factors with the same levels. The cell
means are modelled as a function of the diagonal effects, i.e., the mean
responses of the `diagonal' cells in which the levels of the row and
column factors are the same.

\subsubsection*{\Rfunction{Dref} example 1: Political consequences of
social mobility}

To illustrate the use of diagonal reference models we shall use the
\Robject{voting} data from \citet{Clif93}. The data come from the 1987 British
general election and are the percentage voting Labour in groups cross-classified
by the class of the head of household (\Robject{destination}) and the class of
their father (\Robject{origin}). In order to weight these percentages by the
group size, we first back-transform them to the counts of those voting Labour
and those not voting Labour:
@
<<Transform_to_counts>>=
set.seed(1)
count <- with(voting, percentage/100 * total)
yvar <- cbind(count, voting$total - count)
@ %def

The grouped percentages may be modelled by a basic diagonal reference model, that
is, a weighted sum of the diagonal effects for the corresponding origin and
destination classes. This model may be expressed as
\[
\mu_{od} = \frac{e^{\delta_1}}{e^{\delta_1} + e^{\delta_2}}\gamma_o +
\frac{e^{\delta_2}}{e^{\delta_1} + e^{\delta_2}}\gamma_d .
\]
See Section \ref{sec:Dref function} for more detail on the parameterization.

The basic diagonal reference model may be fitted using \Rfunction{gnm} as
follows
@
<<Class_mobility>>=
classMobility <- gnm(yvar ~ Dref(origin, destination),
                       family = binomial, data = voting)
classMobility
@ %def
and the origin and destination weights can be evaluated as below
@
<<Class_mobility_weights>>=
DrefWeights(classMobility)
@ %def
These results are slightly different from those reported by \citet{Clif93}.
The reason for this is unclear: we are confident that the
above results are correct for the
data as given in \citet{Clif93}, but have not been able to confirm
that the data
as printed in the journal were exactly as used in Clifford and Heath's
analysis.

\citet{Clif93} suggest that movements in and out of the salariat (class 1)
should be treated differently from movements between the lower classes (classes
2 - 5), since the former has a greater effect on social status. Thus they
propose the following model
\begin{equation*}
\mu_{od} = \begin{cases}
\dfrac{e^{\delta_1}}{e^{\delta_1} + e^{\delta_2}}\gamma_o +
\dfrac{e^{\delta_2}}{e^{\delta_1} + e^{\delta_2}}\gamma_d & \text{if } o = 1\\
\\
\dfrac{e^{\delta_3}}{e^{\delta_3} + e^{\delta_4}}\gamma_o +
\dfrac{e^{\delta_4}}{e^{\delta_3} + e^{\delta_4}}\gamma_d & \text{if } d = 1\\
\\
\dfrac{e^{\delta_5}}{e^{\delta_5} + e^{\delta_6}}\gamma_o +
\dfrac{e^{\delta_6}}{e^{\delta_5} + e^{\delta_6}}\gamma_d & \text{if } o \ne 1
\text{ and } d \ne 1
\end{cases}
\end{equation*}
To fit this model we define factors indicating movement in (upward) and out
(downward) of the salariat
@
<<Salariat_factors>>=
upward <- with(voting, origin != 1 & destination == 1)
downward <- with(voting, origin == 1 & destination != 1)
@ %def
Then the diagonal reference model with separate weights for socially mobile
groups can be estimated as follows
@
<<Social_mobility>>=
socialMobility <- gnm(yvar ~ Dref(origin, destination,
                                  delta = ~ 1 + downward + upward),
                      family = binomial, data = voting)
socialMobility
@ %def
The weights for those moving into the salariat, those moving out of the
salariat, and those in any other group can be evaluated as below
@
<<social_mobility_weights>>=
DrefWeights(socialMobility)
@ %def
Again, the results differ slightly from those reported by \citet{Clif93}, but
the essence of the results is the same: the origin weight is much larger for the
downwardly mobile group than for the other groups. The weights for the upwardly
mobile group are very similar to the base level weights, so the model may be
simplified by only fitting separate weights for the downwardly mobile group:
@
<<Downward_mobility>>=
downwardMobility <- gnm(yvar ~ Dref(origin, destination,
                                    delta = ~ 1 + downward),
                        family = binomial, data = voting)
downwardMobility
DrefWeights(downwardMobility)
@ %def

\subsubsection*{\Rfunction{Dref} example 2: conformity to parental rules}

%\SweaveInput{vanDerSlikEg.Rnw}

Another application of diagonal reference models is given by
\citet{Vand02}. The data from this paper are not publicly available\footnote{
We thank Frans van der Slik for his kindness in sending us the data.}, but we
shall show how the models presented in the paper may be estimated using
\Rfunction{gnm}.

The data relate to the value parents place on their children conforming to their
rules. There are two response variables: the mother's conformity score (MCFM)
and the father's conformity score (FCFF). The data are cross-classified by two
factors describing the education level of the mother (MOPLM) and the father
(FOPLF), and there are six further covariates (AGEM, MRMM, FRMF, MWORK, MFCM and
FFCF).

In their baseline model for the mother's conformity score, \citet{Vand02}
include five of the six covariates (leaving out the father's family conflict
score, FCFF) and a diagonal reference term with constant weights based on the
two education factors. This model may be expressed as
\[
\mu_{rci} = \beta_1x_{1i} + \beta_2x_{2i} + \beta_3x_{3i} +\beta_4x_{4i} +\beta_5x_{5i} +
\frac{e^{\delta_1}}{e^{\delta_1} + e^{\delta_2}}\gamma_r +
\frac{e^{\delta_2}}{e^{\delta_1} + e^{\delta_2}}\gamma_c .
\]

The baseline model can be fitted as follows:
\begin{Sinput}
> set.seed(1)
>  A <- gnm(MCFM ~ -1 + AGEM + MRMM + FRMF + MWORK + MFCM +
+           Dref(MOPLM, FOPLF), family = gaussian, data = conformity,
+           verbose = FALSE)
> A
\end{Sinput}
\begin{Soutput}
Call:
gnm(formula = MCFM ~ -1 + AGEM + MRMM + FRMF + MWORK + MFCM +
    Dref(MOPLM, FOPLF), family = gaussian, data = conformity,
    verbose = FALSE)

Coefficients:
                    AGEM                      MRMM                      FRMF
                 0.06363                  -0.32425                  -0.25324
                   MWORK                      MFCM  Dref(MOPLM, FOPLF)delta1
                -0.06430                  -0.06043                  -0.33731
Dref(MOPLM, FOPLF)delta2   Dref(., .).MOPLM|FOPLF1   Dref(., .).MOPLM|FOPLF2
                -0.02505                   4.95121                   4.86329
 Dref(., .).MOPLM|FOPLF3   Dref(., .).MOPLM|FOPLF4   Dref(., .).MOPLM|FOPLF5
                 4.86458                   4.72343                   4.43516
 Dref(., .).MOPLM|FOPLF6   Dref(., .).MOPLM|FOPLF7
                 4.18873                   4.43378

Deviance:            425.3389
Pearson chi-squared: 425.3389
Residual df:         576
\end{Soutput}
The coefficients of the covariates are not aliased with the parameters of the
diagonal reference term and thus the basic identifiability constraints that have
been imposed are sufficient for these parameters to be identified. The diagonal
effects do not need to be constrained as they represent contrasts with the
off-diagonal cells.  Therefore the only unidentified parameters in this model
are the weight parameters. This is confirmed in the summary of the model:
\begin{Sinput}
> summary(A)
\end{Sinput}
\begin{Soutput}
Call:
gnm(formula = MCFM ~ -1 + AGEM + MRMM + FRMF + MWORK + MFCM +
    Dref(MOPLM, FOPLF), family = gaussian, data = conformity,
    verbose = FALSE)

Deviance Residuals:
     Min        1Q    Median        3Q       Max
-3.63688  -0.50383   0.01714   0.56753   2.25139

Coefficients:
                         Estimate Std. Error t value Pr(>|t|)
AGEM                      0.06363    0.07375   0.863  0.38859
MRMM                     -0.32425    0.07766  -4.175 3.44e-05
FRMF                     -0.25324    0.07681  -3.297  0.00104
MWORK                    -0.06430    0.07431  -0.865  0.38727
MFCM                     -0.06043    0.07123  -0.848  0.39663
Dref(MOPLM, FOPLF)delta1 -0.33731         NA      NA       NA
Dref(MOPLM, FOPLF)delta2 -0.02505         NA      NA       NA
Dref(., .).MOPLM|FOPLF1   4.95121    0.16639  29.757  < 2e-16
Dref(., .).MOPLM|FOPLF2   4.86329    0.10436  46.602  < 2e-16
Dref(., .).MOPLM|FOPLF3   4.86458    0.12855  37.842  < 2e-16
Dref(., .).MOPLM|FOPLF4   4.72343    0.13523  34.929  < 2e-16
Dref(., .).MOPLM|FOPLF5   4.43516    0.19314  22.963  < 2e-16
Dref(., .).MOPLM|FOPLF6   4.18873    0.17142  24.435  < 2e-16
Dref(., .).MOPLM|FOPLF7   4.43378    0.16903  26.231  < 2e-16
---
(Dispersion parameter for gaussian family taken to be 0.7384355)

Std. Error is NA where coefficient has been constrained or is unidentified

Residual deviance: 425.34 on 576 degrees of freedom
AIC: 1507.8

Number of iterations: 15
\end{Soutput}
The weights have been constrained to sum to one as described in Section
\ref{sec:Dref function}, so the weights themselves may be estimated as follows:
\begin{Sinput}
> prop.table(exp(coef(A)[6:7]))
\end{Sinput}
\begin{Soutput}
    Dref(MOPLM, FOPLF)delta1 Dref(MOPLM, FOPLF)delta2
                   0.4225638                0.5774362
\end{Soutput}
However, in order to estimate corresponding standard errors, the parameters of
one of the weights must be constrained. If no such constraints were applied when
the model was fitted, \Rfunction{DrefWeights} will refit the model constraining
the parameters of the first weight to zero:
\begin{Sinput}
> DrefWeights(A)
\end{Sinput}
\begin{Soutput}
Refitting with parameters of first Dref weight constrained to zero
$MOPLM
   weight        se
0.4225636 0.1439829

$FOPLF
   weight        se
0.5774364 0.1439829
\end{Soutput}
giving the values reported by \citet{Vand02}. All the other coefficients of
model A are the same as those reported by \citet{Vand02} except the coefficients
of the mother's gender role (MRMM) and the father's gender role
(FRMF). \citet{Vand02} reversed the signs of the coefficients of these factors
since they were coded in the direction of liberal values, unlike the other
covariates. However, simply reversing the signs of these coefficients does not
give the same model, since the estimates of the diagonal effects depend on the
estimates of these coefficients. For consistent interpretation of the covariate
coefficients, it is better to recode the gender role factors as follows:
\begin{Sinput}
> MRMM2 <- as.numeric(!conformity$MRMM)
> FRMF2 <- as.numeric(!conformity$FRMF)
> A <- gnm(MCFM ~ -1 + AGEM + MRMM2 + FRMF2 + MWORK + MFCM +
+           Dref(MOPLM, FOPLF), family = gaussian, data = conformity,
+           verbose = FALSE)
> A
\end{Sinput}
\begin{Soutput}
Call:
gnm(formula = MCFM ~ -1 + AGEM + MRMM2 + FRMF2 + MWORK + MFCM +
    Dref(MOPLM, FOPLF), family = gaussian, data = conformity,
    verbose = FALSE)

Coefficients:
                    AGEM                     MRMM2                     FRMF2
                 0.06363                   0.32425                   0.25324
                   MWORK                      MFCM  Dref(MOPLM, FOPLF)delta1
                -0.06430                  -0.06043                   0.08440
Dref(MOPLM, FOPLF)delta2   Dref(., .).MOPLM|FOPLF1   Dref(., .).MOPLM|FOPLF2
                 0.39666                   4.37371                   4.28579
 Dref(., .).MOPLM|FOPLF3   Dref(., .).MOPLM|FOPLF4   Dref(., .).MOPLM|FOPLF5
                 4.28708                   4.14593                   3.85767
 Dref(., .).MOPLM|FOPLF6   Dref(., .).MOPLM|FOPLF7
                 3.61123                   3.85629

Deviance:            425.3389
Pearson chi-squared: 425.3389
Residual df:         576
\end{Soutput}
The coefficients of the covariates are now as reported by \citet{Vand02}, but
the diagonal effects have been adjusted appropriately.

\citet{Vand02} compare the baseline model for the mother's conformity score to
several other models in which the weights in the diagonal reference term are
dependent on one of the covariates. One particular model they consider
incorporates an interaction of the weights with the mother's conflict score as
follows:
\[
\mu_{rci} = \beta_1x_{1i} + \beta_2x_{2i} + \beta_3x_{3i} +\beta_4x_{4i} +\beta_5x_{5i} +
\frac{e^{\xi_{01} + \xi_{11}x_{5i}}}{e^{\xi_{01} + \xi_{11}x_{5i}} +
e^{\xi_{02} + \xi_{12}x_{5i}}}\gamma_r + \frac{e^{\xi_{02} +
  \xi_{12}x_{5i}}}{e^{\xi_{01} + \xi_{11}x_{5i}}
+ e^{\xi_{02} + \xi_{12}x_{5i}}}\gamma_c.
\]

This model can be fitted as below, using the original coding for the gender
role factors for ease of comparison to the results reported by \citet{Vand02},
\begin{Sinput}
> F <- gnm(MCFM ~ -1 + AGEM + MRMM + FRMF + MWORK + MFCM +
+           Dref(MOPLM, FOPLF, delta = ~ 1 + MFCM), family = gaussian,
+           data = conformity, verbose = FALSE)
> F
\end{Sinput}
\begin{Soutput}
Call:
gnm(formula = MCFM ~ -1 + AGEM + MRMM + FRMF + MWORK + MFCM +
    Dref(MOPLM, FOPLF, delta = ~1 + MFCM), family = gaussian,
    data = conformity, verbose = FALSE)

Coefficients:
                                                    AGEM
                                                 0.05818
                                                    MRMM
                                                -0.32701
                                                    FRMF
                                                -0.25772
                                                   MWORK
                                                -0.07847
                                                    MFCM
                                                -0.01694
Dref(MOPLM, FOPLF, delta = ~ . + MFCM).delta1(Intercept)
                                                 1.03515
          Dref(MOPLM, FOPLF, delta = ~ 1 + .).delta1MFCM
                                                -1.77756
Dref(MOPLM, FOPLF, delta = ~ . + MFCM).delta2(Intercept)
                                                -0.03515
          Dref(MOPLM, FOPLF, delta = ~ 1 + .).delta2MFCM
                                                 2.77756
             Dref(., ., delta = ~ 1 + MFCM).MOPLM|FOPLF1
                                                 4.82476
             Dref(., ., delta = ~ 1 + MFCM).MOPLM|FOPLF2
                                                 4.88066
             Dref(., ., delta = ~ 1 + MFCM).MOPLM|FOPLF3
                                                 4.83969
             Dref(., ., delta = ~ 1 + MFCM).MOPLM|FOPLF4
                                                 4.74850
             Dref(., ., delta = ~ 1 + MFCM).MOPLM|FOPLF5
                                                 4.42020
             Dref(., ., delta = ~ 1 + MFCM).MOPLM|FOPLF6
                                                 4.17957
             Dref(., ., delta = ~ 1 + MFCM).MOPLM|FOPLF7
                                                 4.40819

Deviance:            420.9022
Pearson chi-squared: 420.9022
Residual df:         575
\end{Soutput}
In this case there are two sets of weights, one for when the mother's conflict
score is less than average (coded as zero) and one for when the score is greater
than average (coded as one). These can be evaluated as follows:
\begin{Sinput}
> DrefWeights(F)
\end{Sinput}
\begin{Soutput}
Refitting with parameters of first Dref weight constrained to zero
$MOPLM
  MFCM     weight        se
1    1 0.02974675 0.2277711
2    0 0.74465224 0.2006916

$FOPLF
  MFCM    weight        se
1    1 0.9702532 0.2277711
2    0 0.2553478 0.2006916
\end{Soutput}
giving the same weights as in Table 4 of \citet{Vand02}, though we obtain a
lower standard error in the case where MFCM is equal to one.

\subsection{Uniform difference (UNIDIFF) models}
\label{sec:Unidiff}

Uniform difference models \citep{Xie92, Erik92} use a simplified three-way
interaction to provide an interpretable model of contingency tables classified
by three or more variables. For example, the uniform difference model for a
three-way contingency table, also known as the UNIDIFF model, is given by
\[
\mu_{ijk} = \alpha_{ik} + \beta_{jk} + \exp(\delta_k)\gamma_{ij}.
\]
The $\gamma_{ij}$ represent a pattern of association that varies in strength
over the dimension indexed by $k$, and $\exp(\delta_k)$ represents the relative
strength of that association at level $k$.

This model can be applied to the \Robject{yaish} data set
\citep{Yais98,Yais04},
which is a
contingency table cross-classified by father's social class (\Robject{orig}),
son's social
class (\Robject{dest}) and son's education level (\Robject{educ}).
In this case, we can consider the
importance of the association between the social class of father and son across
the education levels.  We omit the sub-table which corresponds to level 7 of
\Robject{dest}, because its information content is negligible:
@
<<UNIDIFF_model>>=
set.seed(1)
unidiff <- gnm(Freq ~ educ*orig + educ*dest + Mult(Exp(educ), orig:dest),
               ofInterest = "[.]educ", family = poisson,
               data = yaish, subset = (dest != 7))
coef(unidiff)
@ %def
The \Robject{ofInterest} component has been set to index the multipliers of the
association between the social class of father and son. We can contrast each
multiplier to that of the lowest education level and obtain the standard errors
for these parameters as follows:
@
<<Unidiff_contrasts>>=
getContrasts(unidiff, ofInterest(unidiff))
@ %def

Four-way contingency tables may sometimes be described by a
``double UNIDIFF'' model
\[
\mu_{ijkl} = \alpha_{il} + \beta_{jkl} + \exp(\delta_l)\gamma_{ij} +
\exp(\phi_l)\theta_{ik},
\]
where the strengths of two, two-way associations with a common variable are
estimated across the levels of the fourth variable.
The \Robject{cautres} data set, from \citet{Caut98}, can be used to illustrate
the application of the
double UNIDIFF model. This data set is classified by the variables vote, class,
religion and election. Using a double UNIDIFF model, we can see how the
association between class and vote, and the association between religion and
vote, differ between the most recent election and the other elections:
@
<<double_UNIDIFF_model>>=
set.seed(1)
doubleUnidiff <- gnm(Freq ~ election*vote + election*class*religion +
                     Mult(Exp(election), religion:vote) +
                     Mult(Exp(election), class:vote),
                     family = poisson, data = cautres)
getContrasts(doubleUnidiff, rev(pickCoef(doubleUnidiff, ", class:vote")))
getContrasts(doubleUnidiff, rev(pickCoef(doubleUnidiff, ", religion:vote")))
@ %def

\subsection{Generalized additive main effects and
multiplicative interaction (GAMMI) models}
\label{sec:GAMMI}

Generalized additive main effects and multiplicative interaction models, or
GAMMI models, were motivated by two-way contingency tables and comprise the row
and column main effects plus one or more components of the multiplicative
interaction. The singular value corresponding to each multiplicative component
is often factored out, as a measure of the strength of association between the
row and column scores, indicating the importance of the component, or axis.

For cell means $\mu_{rc}$ a GAMMI-K model has the form

\begin{equation}
    \label{eq:GAMMI}
    g(\mu_{rc}) = \alpha_r + \beta_c + \sum_{k=1}^K
    \sigma_k\gamma_{kr}\delta_{kc},
\end{equation}
in which $g$ is a link function, $\alpha_r$ and $\beta_c$ are the row and column
main effects, $\gamma_{kr}$ and $\delta_{kc}$ are the row and column scores for
multiplicative component $k$ and $\sigma_k$ is the singular value for component
$k$. The number of multiplicative components, $K$, is less than or equal to the
rank of the matrix of residuals from the main effects.

The row-column association models discussed in Section \ref{sec:RCmodels} are
examples of GAMMI models, with a log link and poisson variance. Here we
illustrate the use of an AMMI model, which is a GAMMI model with an identity
link and a constant variance.

We shall use the \Robject{wheat} data set taken from \citet{Varg01}, which gives
wheat yields measured over ten years. First we scale these yields and
create a new treatment factor, so that we can reproduce the analysis of
\citet{Varg01}:
@
<<Scale_yields>>=
set.seed(1)
yield.scaled <- wheat$yield * sqrt(3/1000)
treatment <- interaction(wheat$tillage, wheat$summerCrop, wheat$manure,
                         wheat$N, sep = "")
@ %def
Now we can fit the AMMI-1 model to the scaled yields, using the combined
treatment factor and the year factor from the \Robject{wheat} dataset. We
proceed by first fitting the main effects model, then using
\Rfunction{residSVD} (see Section \ref{sec:residSVD}) to obtain starting
values for the parameters of the multiplicative term:
@
<<AMMI_model>>=
mainEffects <- gnm(yield.scaled ~ year + treatment, family = gaussian,
                   data = wheat)
svdStart <- residSVD(mainEffects, year, treatment, 3)
bilinear1 <- update(mainEffects, . ~ . + Mult(year, treatment),
                    start = c(coef(mainEffects), svdStart[,1]))
@ %def
We can compare the AMMI-1 model to the main effects model,
@
<<AOD>>=
anova(mainEffects, bilinear1, test = "F")
@ %def
giving the same results as in Table 1 of \citet{Varg01} (up to rounding
error).

Thus the significance of the multiplicative interaction can be tested without
applying constraints to this term. If the multiplicative interaction is
significant, we may wish to apply constraints to obtain estimates of the row and
column scores. We illustrate this using the \Robject{barleyHeights} data, which
records the average height for 15 genotypes of barley over 9 years.

For this small dataset the AMMI-1 model is easily estimated with the default
settings:
@
<<AMMI_model2>>=
set.seed(1)
barleyModel <- gnm(height ~ year + genotype + Mult(year, genotype),
                   data = barleyHeights)
@ %def
To obtain the parameterization of Equation \ref{eq:GAMMI} in which $\sigma_k$ is
the singular value for component $k$, the row and column scores must be
constrained so that the scores sum to zero and the squared scores sum to
one. These contrasts can be obtained using \Robject{getContrasts}:
@
<<Spherical_contrasts>>=
gamma <- getContrasts(barleyModel, pickCoef(barleyModel, "[.]y"),
                      ref = "mean", scaleWeights = "unit")
delta <- getContrasts(barleyModel, pickCoef(barleyModel, "[.]g"),
                      ref = "mean", scaleWeights = "unit")
gamma
delta
@ %def
Confidence intervals based on the assumption of asymptotic normality can be
computed as follows:
@
<<CI>>=
gamma[[2]][,1] + (gamma[[2]][,2]) %o% c(-1.96, 1.96)
delta[[2]][,1] + (delta[[2]][,2]) %o% c(-1.96, 1.96)
@ %def
which broadly agree with Table 8 of Chadoeuf and Denis (1991), allowing for the
change in sign.

On the basis of such confidence intervals we can investigate simplifications of
the model such as combining levels of the factors or fitting an additive
model to a subset of the data.

The singular value $\sigma_k$ may be obtained as follows
@
<<SVD>>=
svd(termPredictors(barleyModel)[, "Mult(year, genotype)"])$d
@ %def
This parameter is of little interest in itself, given that the significance of
the term as a whole can be tested using ANOVA.

The SVD representation can also be obtained quite easily for AMMI and GAMMI models with
interaction rank greater than 1\null.  See \Rcode{example(wheat)} for an example of
this in an AMMI model with rank 2\null.  (The calculation of \emph{standard errors} and
\emph{confidence regions} for the SVD representation with rank greater than 1 is not
yet implemented, though.)

\subsection{Biplot models}
\label{sec:biplot}

Biplots are graphical displays of two-dimensional arrays, which represent the
objects that index both dimensions of the array on the same plot. Here we
consider the case of a two-way table, where a biplot may be used to represent
both the row and column categories simultaneously.

A two-dimensional biplot is constructed from a rank-2 representation of the
data. For two-way tables, the generalized bilinear model defines one such
representation:
\begin{equation*}
    g(\mu_{ij}) = \eta_{ij} = \alpha_{1i}\beta_{1j} + \alpha_{2i}\beta_{2j}
\end{equation*}
since we can alternatively write
\begin{align*}
    \boldsymbol{\eta} &=
    \begin{pmatrix}
        \alpha_{11} & \alpha_{21} \\
        \vdots & \vdots \\
        \alpha_{1n} & \alpha_{2n} \\
    \end{pmatrix}
    \begin{pmatrix}
        \beta_{11} & \dots & \beta_{1p} \\
        \beta_{21} & \dots & \beta_{2p} \\
    \end{pmatrix} \\
    &= \boldsymbol{AB}^T
\end{align*}
where the columns of $A$ and $B$ are linearly independent by definition.

To demonstrate how the biplot is obtained from this model, we shall use the
\Robject{barley} data set which gives the percentage of leaf area affected by
leaf blotch for ten varieties of barley grown at nine sites
\citep{Wedd74,Gabr98}. As suggested by \citet{Wedd74} we model these data using
a logit link and a variance proportional to the square of that of the binomial,
implemented as the \Rfunction{wedderburn} family in \Rpackage{gnm} (see also
Section \ref{sec:glms}):
@
<<Biplot_model>>=
set.seed(83)
biplotModel <- gnm(y ~ -1 + instances(Mult(site, variety), 2),
                   family = wedderburn, data = barley)
@ %def

The effect of site $i$ can be represented by the point
\[
(\alpha_{1i}, \alpha_{2i})
\]
in the space spanned by the linearly independent basis vectors
\begin{align*}
    a_1 &= (\alpha_{11}, \alpha_{12}, \ldots, \alpha_{19})^T\\
    a_2 &= (\alpha_{21}, \alpha_{22}, \ldots, \alpha_{29})^T
\end{align*}
and the variety effects can be similarly represented.

Thus we can represent the sites and varieties separately as follows
\begin{Sinput}
sites <- pickCoef(biplotModel, "[.]site")
coefs <- coef(biplotModel)
A <- matrix(coefs[sites], ncol = 2)
B <- matrix(coefs[-sites], ncol = 2)
par(mfrow = c(1, 2))
plot(A, pch = levels(barley$site), xlim = c(-5, 5), ylim = c(-5, 5),
    main = "Site Effects", xlab = "Component 1", ylab = "Component 2")
plot(B, pch = levels(barley$variety), xlim = c(-5, 5), ylim = c(-5, 5),
    main = "Variety Effects", xlab = "Component 1", ylab = "Component 2")
\end{Sinput}

\begin{figure}[!tbph]
    \begin{center}
        \includegraphics[width = 6in]{fig-Effect_plots.pdf}
    \end{center}
    \caption{Plots of site and variety effects from the generalized bilinear
      model of the barley data.}
    \label{fig:Effect_plots}
\end{figure}

Of course the parameterization of the bilinear model is not unique and therefore
the scale and rotation of the points in these plots will depend on the random
seed. By rotation and reciprocal scaling of the matrices $A$ and $B$,
we can obtain basis vectors with desirable properties without changing the
fitted model.

In particular, if we rotate the matrices $A$ and $B$ so that their columns are
orthogonal, then the corresponding plots will display the Euclidean distances
between sites and varieties respectively. If we also scale the matrices $A$ and $B$
so that the corresponding plots have the same units, then we can combine the two
plots to give a conventional biplot display.

The required rotation and scaling can be performed via singular value
decomposition of the fitted predictors:
@
<<Row_and_column_scores>>=
barleyMatrix <- xtabs(biplotModel$predictors ~ site + variety,
                      data = barley)
barleySVD <- svd(barleyMatrix)
A <- sweep(barleySVD$u, 2, sqrt(barleySVD$d), "*")[, 1:2]
B <- sweep(barleySVD$v, 2, sqrt(barleySVD$d), "*")[, 1:2]
rownames(A) <- levels(barley$site)
rownames(B) <- levels(barley$variety)
colnames(A) <- colnames(B) <- paste("Component", 1:2)
A
B
@ %def
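As a quick check (a sketch only), this rank-2 factorization should reproduce
the matrix of fitted predictors to within numerical error:
\begin{Sinput}
max(abs(barleyMatrix - A %*% t(B)))
\end{Sinput}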
The matrices $A$ and $B$ are essentially the same as those in \citet{Gabr98}.
From these the biplot can be produced, for sites $A \ldots I$ and varieties
$1 \ldots 9, X$:
@
<<Biplot1, fig = TRUE, include = FALSE>>=
barleyCol <- c("red", "blue")
plot(rbind(A, B), pch = c(levels(barley$site), levels(barley$variety)),
     col = rep(barleyCol, c(nlevels(barley$site), nlevels(barley$variety))),
     xlim = c(-4, 4), ylim = c(-4, 4), main = "Biplot for barley data",
     xlab = "Component 1", ylab = "Component 2")
text(c(-3.5, -3.5), c(3.9, 3.6), c("sites: A-I","varieties: 1-9, X"),
     col = barleyCol, adj = 0)
@ %def
\begin{figure}[!tbph]
    \begin{center}
        \includegraphics{gnmOverview-Biplot1.pdf}
    \end{center}
    \caption{Biplot for barley data}
    \label{fig:Biplot1}
\end{figure}
The biplot gives an idea of how the sites and varieties are related to one
another. It also allows us to consider whether the data can be represented by a
simpler model than the generalized bilinear model. We see that the points in the
biplot approximately align with the rotated axes shown in Figure
\ref{fig:Biplot2}, such that the sites fall about a line parallel to
the ``h-axis'' and the varieties group about two lines roughly parallel to the
``v-axis''.
@
<<Biplot2, fig = TRUE, include = FALSE>>=
plot(rbind(A, B), pch = c(levels(barley$site), levels(barley$variety)),
     col = rep(barleyCol, c(nlevels(barley$site), nlevels(barley$variety))),
     xlim = c(-4, 4), ylim = c(-4, 4), main = "Biplot for barley data",
     xlab = "Component 1", ylab = "Component 2")
text(c(-3.5, -3.5), c(3.9, 3.6), c("sites: A-I","varieties: 1-9, X"),
     col = barleyCol, adj = 0)
abline(a = 0, b = tan(pi/3))
abline(a = 0, b = -tan(pi/6))
abline(a = 2.6, b = tan(pi/3), lty = 2)
abline(a = 4.5, b = tan(pi/3), lty = 2)
abline(a = 1.3, b = -tan(pi/6), lty = 2)
text(2.8, 3.9, "v-axis", font = 3)
text(3.8, -2.7, "h-axis", font = 3)
@ %def
\begin{figure}[!tbph]
    \begin{center}
        \includegraphics{gnmOverview-Biplot2.pdf}
    \end{center}
    \caption{Biplot for barley data, showing approximate alignment with rotated axes.}
    \label{fig:Biplot2}
\end{figure}
This suggests that the sites could be represented by points along a line, with
co-ordinates
\begin{equation*}
    (\gamma_i, \delta_0),
\end{equation*}
and the varieties by points on two lines perpendicular to the site line:
\begin{equation*}
    (\nu_0 + \nu_1I(j \in \{2, 3, 6\}), \omega_j).
\end{equation*}
This corresponds to the following simplification of the bilinear model:
\begin{equation*}
    \alpha_{1i}\beta_{1j} + \alpha_{2i}\beta_{2j}
    \approx \gamma_i(\nu_0 + \nu_1I(j \in \{2, 3, 6\})) + \delta_0\omega_j,
\end{equation*}
or equivalently
\begin{equation*}
    \gamma_i(\nu_0 + \nu_1I(j \in \{2, 3, 6\})) + \omega_j,
\end{equation*}
the double additive model proposed by \citet{Gabr98}. We can fit this model as
follows:
@
<<Double_additive>>=
variety.binary <- factor(match(barley$variety, c(2,3,6), nomatch = 0) > 0,
                        labels = c("rest", "2,3,6"))
doubleAdditive <- gnm(y ~ variety + Mult(site, variety.binary),
                      family = wedderburn, data = barley)
@ %def
Comparing the Pearson chi-squared statistics, we see that the double additive
model is an adequate model for the leaf blotch incidence:
@
<<Compare_chi-squared>>=
biplotModChiSq <- sum(residuals(biplotModel, type = "pearson")^2)
doubleAddChiSq <- sum(residuals(doubleAdditive, type = "pearson")^2)
c(doubleAddChiSq - biplotModChiSq,
  doubleAdditive$df.residual - biplotModel$df.residual)
@ %def
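For reference, the change in the Pearson chi-squared statistic may be compared
against its nominal chi-squared distribution, though this is only a rough guide
here since the \Rfunction{wedderburn} family defines a quasi-likelihood rather
than a full likelihood. A minimal sketch:
\begin{Sinput}
## nominal p-value for the change in fit (a rough guide only)
pchisq(doubleAddChiSq - biplotModChiSq,
       df = doubleAdditive$df.residual - biplotModel$df.residual,
       lower.tail = FALSE)
\end{Sinput}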

\subsection{Stereotype model for multinomial response}
\label{sec:Stereotype}

The stereotype model was proposed by \citet{Ande84} for ordered categorical
data. It is a special case of the multinomial logistic model, in which the
covariate coefficients are common to all categories but the scale of association
is allowed to vary between categories such that
\[
p_{ic} =  \frac{\exp(\beta_{0c} + \gamma_c
  \boldsymbol{\beta}^T\boldsymbol{x}_{i})}{\sum_{k = 1}^K \exp(\beta_{0k} + \gamma_k
  \boldsymbol{\beta}^T\boldsymbol{x}_{i})}
\]
where $p_{ic}$ is the probability that the response for individual $i$ is
category $c$ and $K$ is the number of categories. Like the multinomial
logistic model, the stereotype model specifies a simple form for the log
odds of one category against another, e.g.
\begin{equation*}
\log\left(\frac{p_{ic}}{p_{ik}}\right) = (\beta_{0c} - \beta_{0k}) + (\gamma_c - \gamma_k)\boldsymbol{\beta}^T\boldsymbol{x}_{i}.
\end{equation*}

In order to model a multinomial response in the generalized nonlinear model
framework, we must re-express the data as category counts $Y_i = (Y_{i1},
\ldots, Y_{iK})$ for each individual (or group). Then assuming a Poisson distribution
for the counts $Y_{ic}$, the joint distribution of $Y_i$ is Multinomial$(N_i,
p_{i1}, \ldots, p_{iK})$ conditional on the total count for each individual
$N_i$. The expected counts are then $\mu_{ic} = N_ip_{ic}$ and the parameters of
the stereotype model can be estimated through fitting the following model
\begin{align*}
\log \mu_{ic} &= \log(N_i) + \log(p_{ic}) \\
&= \alpha_i + \beta_{0c} + \gamma_c\boldsymbol{\beta}^T\boldsymbol{x}_{i},
\end{align*}
where the ``nuisance'' parameters $\alpha_i$ ensure that the multinomial
denominators are reproduced exactly, as required.

The \Rpackage{gnm} package includes the utility function
\Rfunction{expandCategorical} to re-express the categorical response as category
counts. By default, individuals with common values across all covariates are
grouped together, to avoid redundancy.

For example, the \Robject{backPain} data set from \citet{Ande84} describes the
progress of patients with back pain. The data set consists of an ordered factor
quantifying the progress of each patient, and three prognostic variables. We
re-express the data as follows:
@
<<Re-express_data>>=
set.seed(1)
subset(backPain, x1 == 1 & x2 == 1 & x3 == 1)
backPainLong <- expandCategorical(backPain, "pain")
head(backPainLong)
@ %def
We can now fit the stereotype model to these data:
@
<<Stereotype_model>>=
oneDimensional <- gnm(count ~ pain + Mult(pain, x1 + x2 + x3),
                      eliminate = id, family = "poisson", data = backPainLong)
oneDimensional
@ %def
specifying the \Robject{id} factor through \Rfunarg{eliminate} so that the 12
\Robject{id} effects are estimated more efficiently and are excluded from
printed model summaries by default. This model is one-dimensional since it
involves only one function of $\boldsymbol{x} = (x_1, x_2, x_3)$. We can
compare this model to one with category-specific coefficients of the $x$
variables, as may be used for a qualitative categorical response:
@
<<Qualitative_model>>=
threeDimensional  <- gnm(count ~ pain + pain:(x1 + x2 + x3), eliminate = id,
                         family = "poisson", data = backPainLong)
threeDimensional
@ %def
This model has the maximum dimensionality of three (as determined by the number
of covariates). The ungrouped multinomial log-likelihoods reported in
\citet{Ande84} are given by
\begin{equation*}
    \sum_{i,c} y_{ic}\log(p_{ic}) = \sum_{i,c} y_{ic}\log(\mu_{ic}/n_{ic}),
\end{equation*}
where $n_{ic}$ denotes the multinomial total $N_i$ for the individual or group
to which the count $y_{ic}$ belongs.
We write a simple function to compute this and the corresponding degrees of
freedom, then compare the log-likelihoods of the one dimensional model and the
three dimensional model:
@
<<Calculate_log-likelihood>>=
logLikMultinom <- function(model, size){
    object <- get(model)
    ## ungrouped multinomial log-likelihood: sum of y * log(p), with p = mu/N
    l <- sum(object$y * log(object$fitted/size))
    ## exclude the eliminated id parameters from the parameter count
    c(nParameters = object$rank - nlevels(object$eliminate), logLikelihood = l)
}
size <- tapply(backPainLong$count, backPainLong$id, sum)[backPainLong$id]
t(sapply(c("oneDimensional", "threeDimensional"), logLikMultinom, size))
@ %def
showing that the \Robject{oneDimensional} model is adequate.

To obtain estimates of the category-specific multipliers in the stereotype
model, we need to constrain both the location and the scale of these
parameters. The latter constraint can be imposed by fixing the slope of one of
the covariates in the second multiplier to \Robject{1}, which may be achieved by
specifying the covariate as an offset:
@
<<Constrain_slopes>>=
## before constraint
summary(oneDimensional)
oneDimensional <- gnm(count ~ pain + Mult(pain, offset(x1) + x2 + x3),
                      eliminate = id, family = "poisson", data = backPainLong)
## after constraint
summary(oneDimensional)
@ %def
The location of the category-specific multipliers can be constrained by setting
one of the parameters to zero, either through the \Rfunarg{constrain} argument
of \Rfunction{gnm} or with \Rfunction{getContrasts}:
@
<<Get_slopes>>=
getContrasts(oneDimensional, pickCoef(oneDimensional, "[.]pain"))
@ %def
giving the required estimates.
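As an aside, the fitted category probabilities $\hat{p}_{ic} =
\hat{\mu}_{ic}/N_i$ may be recovered from the Poisson fit; a minimal sketch,
re-using the \Robject{size} vector computed earlier:
\begin{Sinput}
## fitted category probabilities from the Poisson fit
head(cbind(backPainLong[c("id", "pain")],
           prob = fitted(oneDimensional)/size))
\end{Sinput}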

\subsection{Lee-Carter model for trends in age-specific mortality}


In the study and projection of population mortality rates, the model proposed
by \cite{LeeCart92} forms the basis of many, if not most, current analyses.
Here we consider the quasi-Poisson version of the model \citep{Wilm93, Alho00,
BrouDenuVerm02, RensHabe03}, in which the death count $D_{ay}$ for individuals
of age $a$ in year $y$ has mean $\mu_{ay}$ and variance $\phi\mu_{ay}$ (where
$\phi$ is 1 for Poisson-distributed counts, and is respectively greater
than or less than 1 in cases of over-dispersion or under-dispersion).  In
the Lee-Carter model, the
expected counts follow the log-bilinear form
\[
\log(\mu_{ay}/e_{ay}) = \alpha_a + \beta_a \gamma_y,
\]
where $e_{ay}$ is the `exposure' (number of lives at risk).  This is
a generalized nonlinear model with a single multiplicative term.

The use of \Rpackage{gnm} to fit this model is straightforward.  We will
illustrate by using data downloaded on 2006-11-14 from the Human Mortality
Database\footnote{Thanks to Iain Currie for helpful advice
relating to this section}
(HMD, made available by the University of California, Berkeley, and Max Planck Institute for Demographic Research, at \texttt{http://www.mortality.org})
on male deaths in Canada between 1921 and
2003.  The data are not made available as part of \Rpackage{gnm} because
of license restrictions; but they are readily available via the web simply by
registering with the HMD.  We assume that the data for Canadian males (both
deaths and exposure-to-risk) have been downloaded from the HMD and organised
into a data frame named \Robject{Canada} in \R, with columns \Robject{Year}
(a factor, with levels \Rcode{1921} to \Rcode{2003}),
\Robject{Age} (a factor, with levels \Rcode{20} to \Rcode{99}),
\Robject{mDeaths} and \Robject{mExposure} (both quantitative).  The Lee-Carter
model may then be specified as
\begin{Sinput}
LCmodel.male <- gnm(mDeaths ~ Age + Mult(Exp(Age), Year),
                    offset = log(mExposure), family = "quasipoisson",
                    data = Canada)
\end{Sinput}
Here we have acknowledged the fact that the model only really makes
sense if all of the $\beta_a$ parameters, which represent the `sensitivity'
of age group $a$ to a change in the level of general mortality
\citep[e.g.,][]{BrouDenuVerm02}, have the same sign.  Without loss of
generality we assume $\beta_a>0$ for all $a$, and we impose this constraint
by using \Rcode{Exp(Age)} instead of just \Rcode{Age} in the
multiplicative term.
Convergence is to a fitted model with residual
deviance 32419.83 on 6399 degrees of freedom --- representing
clear evidence of substantial overdispersion relative to the Poisson
distribution.  In order to explore the lack of fit a little further, we
plot the distribution of Pearson residuals in Figure \ref{fig:LCresplot}:
\begin{Sinput}
par(mfrow = c(2,2))
age <- as.numeric(as.character(Canada$Age))
with(Canada,{
    res <- residuals(LCmodel.male, type = "pearson")
    plot(Age, res, xlab="Age", ylab="Pearson residual",
         main = "(a) Residuals by age")
    plot(Year, res, xlab="Year", ylab="Pearson residual",
         main = "(b) Residuals by year")
    plot(Year[(age>24) & (age<36)], res[(age>24) & (age<36)],
         xlab = "Year", ylab = "Pearson residual",
         main = "(c) Age group 25-35")
    plot(Year[(age>49) & (age<66)], res[(age>49) & (age<66)],
         xlab = "Year", ylab = "Pearson residual",
         main = "(d) Age group 50-65")
})
\end{Sinput}
\begin{figure}[!tbph]
\begin{center}
\includegraphics[width=6in]{fig-LCall.pdf}
\end{center}
\caption{Canada, males: plots of residuals from the Lee-Carter model
of mortality}
\label{fig:LCresplot}
\end{figure}
Panel (a) of Figure \ref{fig:LCresplot} indicates that the overdispersion
is not evenly spread through the data, but is largely concentrated in
two age groups, roughly ages 25--35 and 50--65\null.  Panels (c) and (d)
focus on the residuals in each of these two age groups: there is a clear
(and roughly cancelling)
dependence on \Robject{Year}, indicating that the assumed bilinear
interaction between \Robject{Age} and \Robject{Year} does not hold
for the full range of ages and years considered here.

A somewhat more satisfactory Lee-Carter model fit
is obtained if only a subset of
the data is used, namely only those males aged 45 or over:
\begin{Sinput}
LCmodel.maleOver45 <- gnm(mDeaths ~ Age + Mult(Exp(Age), Year),
                          offset = log(mExposure), family = "quasipoisson",
                          data = Canada[age>44,])
\end{Sinput}
The residual deviance is now 12595.44 on 4375 degrees of freedom: still
substantially overdispersed, but less severely so than before.  Again we plot
the distributions of Pearson residuals (Figure \ref{fig:LCresplot2}).
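The code is analogous to that used for Figure \ref{fig:LCresplot}; for
example, the first two panels might be drawn as follows (a minimal sketch,
using \Rfunction{droplevels} to drop the now-unused age levels):
\begin{Sinput}
par(mfrow = c(1, 2))
with(droplevels(Canada[age > 44, ]), {
    res <- residuals(LCmodel.maleOver45, type = "pearson")
    plot(Age, res, xlab = "Age", ylab = "Pearson residual",
         main = "(a) Residuals by age")
    plot(Year, res, xlab = "Year", ylab = "Pearson residual",
         main = "(b) Residuals by year")
})
\end{Sinput}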
\begin{figure}[!tbph]
\begin{center}
\includegraphics[width=6in]{fig-LCover45.pdf}
\end{center}
\caption{Canada, males over 45: plots of residuals from the Lee-Carter model
of mortality}
\label{fig:LCresplot2}
\end{figure}
Clear departures from the assumed bilinear structure are still evident,
especially for ages 81--89; but they are less pronounced than in
the previous model fit.

The main purpose here is only to illustrate how straightforward it
is to work with the Lee-Carter model using \Rfunction{gnm}, but we will take
this example a little further by examining the estimated $\beta_a$
parameters from the last fitted model.  We can use \Rfunction{getContrasts}
to compute quasi standard errors for the logarithms of $\hat\beta_a$ --- the
logarithms
being the result of having used
\Rcode{Exp(Age)} in the model specification ---
and use these in a plot of the coefficients:
\begin{Sinput}
AgeContrasts <- getContrasts(LCmodel.maleOver45, 56:100) ## ages 45 to 89 only
\end{Sinput}
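Figure \ref{fig:LCqvplot} may then be drawn from these contrasts; a minimal
sketch, assuming the default \Rfunction{plot} method for the \Rclass{qv}
object returned by \Rfunction{getContrasts} (provided via the
\Rpackage{qvcalc} package):
\begin{Sinput}
plot(AgeContrasts)
\end{Sinput}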
\begin{figure}[!tbph]
\begin{center}
\includegraphics{fig-LCqvplot.pdf}
\end{center}
\caption{Canada, males over 45, Lee-Carter model: relative sensitivity
of different ages to change in total mortality.}
\label{fig:LCqvplot}
\end{figure}
The plot shows that sensitivity to the general level of mortality is highest
at younger ages, as expected.  An \emph{unexpected} feature is the clear
outlying positions occupied by the estimates for ages 51, 61, 71 and 81:
for each of those ages, the estimated $\beta_a$ coefficient is substantially
less than it is for the neighbouring age groups (and the
error bars indicate clearly that the deviations are larger than could plausibly
be due to chance variation).  This is a curious finding.  An explanation
comes from a look back at the raw death-count data.  In the years between
1921 and 1940, the death counts for ages 31, 41, 51, 61, 71 and 81 all
stand out as being very substantially lower than those of neighbouring
ages
(Figure \ref{fig:deaths2140}: the ages concerned are highlighted in solid red).
The same does \emph{not} hold for later years: after about 1940, the `1' ages
fall in with the general pattern. This apparent
`age heaping'\footnote{Age heaping is common in mortality data: see
\url{http://www.mortality.org/Public/Overview.php}} explains
our finding above regarding the
$\beta_a$ coefficients: whilst all age groups have benefited from the general
trend of reduced mortality, the `1' age groups appear to have
benefited least because
their starting point (in the 1920s and 1930s) was lower than would have
been indicated by the general pattern --- hence
$\hat\beta_a$ is smaller
for ages $a=31$, $a=41$,\ldots, $a=81$.

\begin{figure}[!tbph]
\begin{center}
\includegraphics{fig-deaths1921-1940.pdf}
\end{center}
\caption{Canada, males: Deaths 1921 to 1940 by age}
\label{fig:deaths2140}
\end{figure}

\subsection{Exponential and sum-of-exponentials models for decay curves}

A class of nonlinear functions which arises in various application contexts
--- a notable one being pharmacokinetic studies --- involves one or more
\emph{exponential decay} terms.  For example, a simple decay model with
additive error is
\begin{equation}
\label{eq:singleExp}
y = \alpha + \exp(\beta + \gamma x) + e
\end{equation}
(with $\gamma<0$), while a more complex (`sum of exponentials')
model might involve two decay
terms:
\begin{equation}
\label{eq:twoExp}
y = \alpha + \exp(\beta_1 + \gamma_1 x) + \exp(\beta_2+ \gamma_2 x) + e.
\end{equation}
Estimation and inference with such models are typically not straightforward,
partly on account of multiple local maxima in the likelihood
\citep[e.g.,][Ch.3]{Sebe89}.  We illustrate the difficulties here, with a
couple of artificial examples.  These examples will make clear the value
of making repeated calls to \Rfunction{gnm}, in order to use different,
randomly-generated parameterizations and starting values and thus improve
the chances of locating both the global maximum and all local maxima of the
likelihood.

\subsubsection{Example: single exponential decay term}

Let us first construct some data from model (\ref{eq:singleExp}).  For our
illustrative purposes here, we will use \emph{noise-free} data, i.e., we
fix the variance of $e$ to be zero; for the other parameters we will use
$\alpha=0$, $\beta = 0$, $\gamma = -0.1$.
@
<<singleExp>>=
x <- 1:100
y <- exp(- x / 10)
set.seed(1)
saved.fits <- list()
for (i in 1:100) saved.fits[[i]] <- gnm(y ~ Exp(1 + x), verbose = FALSE)
table(zapsmall(sapply(saved.fits, deviance)))
@ %def
The \Robject{saved.fits} object thus contains the results of 100 calls to
\Rfunction{gnm}, each using a different, randomly-generated starting value
for the vector of parameters $(\alpha, \beta, \gamma)$.  Out of 100 fits, 52
reproduce the data exactly, to machine accuracy.
The remaining 48 fits are all identical to one another, but they are far from
globally optimal, with residual sum of squares 3.61: they result from
divergence of $\hat\gamma$ to $+\infty$, and correspondingly of $\hat\beta$
to $-\infty$, such that the fitted `curve' is in fact just a constant, with
level equal to $\bar{y}=0.09508$.  For example, the second of the 100 fits
is of this kind:
@
<<singleExp2>>=
saved.fits[[2]]
@ %def
The use of repeated calls to \Rfunction{gnm}, as here,
allows the local and global
maxima to be easily distinguished.
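If required, the globally optimal fits can be picked out programmatically; a
minimal sketch:
\begin{Sinput}
## indices of the fits that reproduced the data exactly (zero deviance)
which(zapsmall(sapply(saved.fits, deviance)) == 0)
\end{Sinput}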

\subsubsection{Example: sum of two exponentials}

We can conduct a similar exercise based on the more complex model
(\ref{eq:twoExp}):
@
<<doubleExp>>=
x <- 1:100
y <- exp(- x / 10) + 2 * exp(- x / 50)
set.seed(1)
saved.fits <- list()
for (i in 1:100) {
    saved.fits[[i]] <- suppressWarnings(gnm(y ~ Exp(1 + x, inst = 1) +
                                            Exp(1 + x, inst = 2),
                                            verbose = FALSE))
}
table(round(unlist(sapply(saved.fits, deviance)), 4))
@ %def
In this instance, only 27 of the 100 calls to \Rfunction{gnm} have successfully
located a local maximum of the likelihood: in the remaining 73 cases the
starting values generated were such that numerical problems resulted, and the
fitting algorithm was abandoned (giving a \Robject{NULL} result).
Among the 27 `successful'
fits, it is evident that there are three distinct solutions (with respective
residual sums of squares equal to 0.1589, 41.64, and essentially zero ---
the last of these, the exact fit to the data, having been found 20 times
out of the above 27).
The two non-optimal local maxima here correspond to the best fit
with a single exponential (which has residual sum of squares 0.1589) and to the
fit with no dependence at all on $x$
(residual sum of squares 41.64), as we can see
by comparing with:
@
<<doubleExp2, fig = TRUE, include = FALSE>>=
singleExp <- gnm(y ~ Exp(1 + x), start = c(NA, NA, -0.1), verbose = FALSE)
singleExp
meanOnly <- gnm(y ~ 1, verbose = FALSE)
meanOnly
plot(x, y, main = "Two sub-optimal fits to a sum-of-exponentials curve")
lines(x, fitted(singleExp))
lines(x, fitted(meanOnly), lty = "dashed")
@ %def

\begin{figure}[!tbph]
    \centering
    \includegraphics{gnmOverview-doubleExp2.pdf}
    \caption{Two sub-optimal fits to a sum-of-exponentials curve}
    \label{fig:doubleExp}
\end{figure}

In this example, it is clear that
even a small amount of noise in the data would make it
practically impossible to distinguish between competing
models containing one and two exponential-decay terms.

In summary: the default \Rfunction{gnm} setting of randomly-chosen starting
values is useful for identifying multiple local maxima in the likelihood; and
reasonably good starting values are needed if the global maximum is to be
found.  In the present example, knowing that $\gamma_1$ and $\gamma_2$ should
both be small and negative, we might perhaps have tried
@
<<doubleExp3>>=
gnm(y ~ instances(Exp(1 + x), 2), start = c(NA, NA, -0.1, NA, -0.1),
    verbose = FALSE)
@ %def
which reliably yields the (globally optimal) perfect fit to the data; the
\Rcode{NA} elements of \Rfunarg{start} are filled in by the default,
randomly-generated starting values.



\newpage
\appendix

\section{User-level functions}

We list here, for easy reference, all of the user-level functions in the
\Rpackage{gnm} package.  For full documentation see the package help pages.

\begin{table}[!h]
\begin{tabular*}{\textwidth}{@{}p{0.2in}p{1.3in}p{4.5in}@{}}
    \toprule
    \multicolumn{3}{l}{\textbf{Model Fitting}} 	\\
    \midrule
    &	\Rfunction{gnm}	&  fit generalized nonlinear models	\\
    \midrule
    \multicolumn{3}{l}{\textbf{Model Specification}}			\\
    \midrule
    &	\Rfunction{Diag}	&  create factor differentiating
                                   diagonal elements \\
    &	\Rfunction{Symm}	&  create symmetric interaction of factors \\
    &   \Rfunction{Topo}        &  create `topological' interaction factors \\
    &   \Rfunction{Const}       &  specify a constant in a \Rclass{nonlin}
                                   function predictor \\
    &	\Rfunction{Dref}	&  specify a diagonal reference term in a \Rfunction{gnm}
                                   model formula \\
    &	\Rfunction{Mult}	&  specify a product of predictors in a
                                   \Rfunction{gnm} formula	\\
    &	\Rfunction{MultHomog}	&  specify a multiplicative interaction with
                                   homogeneous effects in a \Rfunction{gnm} formula	\\
    &	\Rfunction{Exp}	&  specify the exponential of a predictor in a
                           \Rfunction{gnm} formula	\\
    &	\Rfunction{Inv}	&  specify the reciprocal of a predictor in a
                           \Rfunction{gnm} formula	\\
    &   \Rfunction{wedderburn}      &  specify the Wedderburn
                                       quasi-likelihood family \\
    \midrule
    \multicolumn{3}{l}{\textbf{Methods and Accessor Functions}}	\\
    \midrule
    &	\Rmethod{confint.gnm}	&  compute confidence intervals of \Rclass{gnm} parameters
                                   based on the profiled deviance	\\
    &   \Rmethod{confint.profile.gnm}   & compute confidence intervals of
                                          parameters from a \Rclass{profile.gnm} object \\
    &	\Rmethod{predict.gnm}	&  predict from a \Rclass{gnm} model \\
    &	\Rmethod{profile.gnm}	&  profile deviance for parameters in a
                                   \Rclass{gnm} model \\
    &   \Rmethod{plot.profile.gnm}      & plot profile traces from a
                                          \Rclass{profile.gnm} object \\
    &	\Rmethod{summary.gnm}	&  summarize \Rclass{gnm} fits	\\
    &	\Rfunction{residSVD}	&  multiplicative approximation of
                                   model residuals	\\
    &   \Rfunction{exitInfo}    &  print numerical details of last iteration
                                   when  \Rfunction{gnm} has not converged \\
    &   \Rfunction{ofInterest}  &  extract the \Robject{ofInterest} component of
                                   a \Rclass{gnm} object \\
    &   \Rfunction{ofInterest<-}        &  replace the \Robject{ofInterest} component of
                                           a \Rclass{gnm} object \\
    &   \Rfunction{parameters}  &  get model parameters from a \Rclass{gnm}
                                   object, including parameters that were
                                   constrained \\
    &   \Rfunction{pickCoef}    &  get indices of model parameters \\
    &	\Rfunction{getContrasts}	&  estimate contrasts and their
                                           standard errors for parameters in a \Rclass{gnm}
                                           model \\
    &	\Rfunction{checkEstimable}	&  check whether one or more parameter
                                           combinations in a
                                           \Rclass{gnm} model is identified \\
    &	\Rfunction{se}	&  get standard errors of linear parameter
                           combinations in \Rclass{gnm} models	\\
    &   \Rfunction{Dref} & estimate weights and corresponding standard errors
                           for a diagonal reference term in a \Rclass{gnm} model
                           \\
    &	\Rfunction{termPredictors}	&  (\emph{generic}) extract term
                                           contributions to predictor	\\
    \midrule
    \multicolumn{3}{l}{\textbf{Auxiliary Functions}}			\\
    \midrule
    &	\Rfunction{asGnm}	&  coerce an object of class \Rclass{lm} or
                                   \Rclass{glm} to class \Rclass{gnm} 	\\
    &   \Rfunction{expandCategorical}   & expand a data frame by re-expressing
                                          categorical data as counts \\
    &	\Rfunction{getModelFrame} &  get the model frame in use by
                                           \Rfunction{gnm}	\\
    &	\Rfunction{MPinv}	&  Moore-Penrose pseudoinverse of a
                                   real-valued matrix	\\
    &	\Rfunction{qrSolve}	&  minimum-length solution of a linear system\\
    \bottomrule
\end{tabular*}
\end{table}

\newpage
\bibliography{gnm}
\bibliographystyle{jss}

\end{document}