File: haproxy-en.txt

                           -------------------
                                 HAProxy
                            Reference  Manual
                           -------------------
                              version 1.3.15
                              willy tarreau
                                2008/04/19


 !!!!  NOTE: THIS DOCUMENT IS OUTDATED  !!!!

 Please use "configuration.txt" from the same directory, or download
 an up-to-date version from the following location :

        http://haproxy.1wt.eu/download/1.4/doc/configuration.txt


============
| Abstract |
============

HAProxy is a TCP/HTTP reverse proxy which is particularly suited for high
availability environments. Indeed, it can :
  - route HTTP requests depending on statically assigned cookies ;
  - spread the load among several servers while assuring server persistence
    through the use of HTTP cookies ;
  - switch to backup servers in the event a main one fails ;
  - accept connections to special ports dedicated to service monitoring ;
  - stop accepting connections without breaking existing ones ;
  - add/modify/delete HTTP headers both ways ;
  - block requests matching a particular pattern ;
  - hold clients to the right application server depending on application
    cookies ;
  - report detailed status as HTML pages to authenticated users from a URI
    intercepted from the application.

It needs very few resources. Its event-driven architecture allows it to easily
handle thousands of simultaneous connections on hundreds of instances without
risking the system's stability.

====================
| Start parameters |
====================

There are only a few command line options :

    -f <configuration file>
    -n <high limit for the total number of simultaneous connections>
       = 'maxconn' in 'global' section
    -N <high limit for the per-listener number of simultaneous connections>
       = 'maxconn' in 'listen' or 'default' sections
    -d starts in foreground with debugging mode enabled
    -D starts in daemon mode
    -q disables messages on output
    -V displays messages on output even when -q or 'quiet' are specified.
    -c only checks config file and exits with code 0 if no error was found, or
       exits with code 1 if a syntax error was found.
    -p <pidfile> asks the process to write down each of its children's
       pids to this file in daemon mode.
    -sf specifies a list of pids to send a FINISH signal to after startup.
    -st specifies a list of pids to send a TERMINATE signal to after startup.
    -s shows statistics (only if compiled in)
    -l shows even more statistics (implies '-s')
    -dk disables use of kqueue()
    -ds disables use of speculative epoll()
    -de disables use of epoll()
    -dp disables use of poll()
    -db disables background mode (stays in foreground, useful for debugging)
    -m <megs> enforces a memory usage limit to a maximum of <megs> megabytes.

The maximal number of connections per proxy instance is used as the default
parameter for each instance for which the 'maxconn' parameter is not set in the
'listen' section.

The maximal number of total connections limits the number of connections used by
the whole process if the 'maxconn' parameter is not set in the 'global' section.

The debugging mode has the same effect as the 'debug' option in the 'global'
section. When the proxy runs in this mode, it dumps all connections,
disconnections, timestamps, and HTTP headers to stdout. This should NEVER
be used in an init script since it will prevent the system from starting up.

For debugging, the '-db' option is very useful as it temporarily disables
daemon mode and multi-process mode. The service can then be stopped by simply
pressing Ctrl-C, without having to edit the config nor run full debug.

Statistics are only available if compiled in with the 'STATTIME' option. It's
only used during code optimization phases, and will soon disappear.

The '-st' and '-sf' options are used for hot reconfiguration (see below).
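
As an illustration, a typical sequence to validate and then start a
configuration might look like this (the file paths are only examples) :

    # check the configuration file for syntax errors
    # haproxy -c -f /etc/haproxy/haproxy.cfg

    # run in the foreground with full debug output (testing only)
    # haproxy -d -f /etc/haproxy/haproxy.cfg

    # run as a daemon, writing the children's pids to a pidfile
    # haproxy -D -f /etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid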

======================
| Configuration file |
======================

Structure
=========

The configuration file parser ignores empty lines, spaces and tabs. Anything
between a sharp ('#') not following a backslash ('\') and the end of the line
constitutes a comment and is ignored too.

The configuration file is segmented into sections. A section begins whenever
one of these 3 keywords is encountered :

  - 'global'
  - 'listen'
  - 'defaults'

Every parameter refers to the section beginning at the last occurrence of one
of these 3 keywords.
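
A minimal skeleton illustrating this structure might look like the following
(names and values are only placeholders) :

    # process-wide settings
    global
        daemon
        maxconn 1024

    # settings inherited by all subsequent 'listen' sections
    defaults
        mode        http
        clitimeout  150000
        srvtimeout  30000
        contimeout  4000

    # one listening instance
    listen example_proxy :8080
        server srv1 192.168.1.1:80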


1) Global parameters
====================

Global parameters affect the whole process behaviour. They are all set in the
'global' section. There may be several 'global' sections if needed, but their
parameters will simply be merged. Allowed parameters in the 'global' section
include the following :

  - log <address> <facility> [max_level]
  - maxconn <number>
  - uid <user id>
  - gid <group id>
  - user <user name>
  - group <group name>
  - chroot <directory>
  - nbproc <number>
  - daemon
  - debug
  - nokqueue
  - nosepoll
  - noepoll
  - nopoll
  - quiet
  - pidfile <file>
  - ulimit-n <number>
  - stats
  - tune.maxpollevents <number>


1.1) Event logging
------------------
Most events are logged : start, stop, servers going up and down, connections and
errors. Each event generates a syslog message which can be sent to up to 2
servers. The syntax is :

    log <ip_address> <facility> [max_level]

Connections are logged at level "info". Services initialization and servers
going up are logged at level "notice", termination signals are logged at
"warning", and definitive service termination, as well as loss of servers are
logged at level "alert". The optional parameter <max_level> filters outgoing
messages : only messages with a severity at least as important as this level
will be sent. The level can take one of these 8 values :

    emerg, alert, crit, err, warning, notice, info, debug

For backwards compatibility with versions 1.1.16 and earlier, the default level
value is "debug" if not specified.

Permitted facilities are :
    kern, user, mail, daemon, auth, syslog, lpr, news,
    uucp, cron, auth2, ftp, ntp, audit, alert, cron2,
    local0, local1, local2, local3, local4, local5, local6, local7

According to RFC3164, messages are truncated to 1024 bytes before being emitted.

Example :
---------
    global
        log 192.168.2.200 local3
        log 127.0.0.1     local4 notice


1.2) limiting the number of connections
---------------------------------------
It is possible and recommended to limit the global number of per-process
connections using the 'maxconn' global keyword. Since one connection includes
both a client and a server, it means that the max number of TCP sessions will
be roughly double this number. It's important to understand this when trying
to find the best value for 'ulimit -n' before starting the proxy. To
anticipate the number of sockets needed, all these parameters must be counted :

  - 1 socket per incoming connection
  - 1 socket per outgoing connection
  - 1 socket per address/port/proxy tuple.
  - 1 socket per server being health-checked
  - 1 socket for all logs

In simple configurations where each proxy only listens on one address/port,
set the limit of file descriptors (ulimit -n) to
(2 * maxconn + nbproxies + nbservers + 1). Starting with versions 1.1.32/1.2.6,
it is now possible to set the limit in the configuration using the 'ulimit-n'
global keyword, provided the proxy is started as root. This puts an end to the
recurrent problem of ensuring that the system limits are adapted to the proxy
values. Note that these limits are per-process.
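
As a rough illustration, a setup with 2 proxies, 4 health-checked servers and
'maxconn 32000' would need about 2 * 32000 + 2 + 4 + 1 = 64007 file
descriptors, which is why the example below sets 'ulimit-n' to 65536.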

Example :
---------
    global
        maxconn 32000
        ulimit-n 65536


1.3) Dropping privileges
------------------------
In order to reduce the risk and consequences of attacks, in the event that a
yet unidentified vulnerability is successfully exploited, it's possible to
lower the process privileges and even isolate it in a directory where it can
do no harm.

In the 'global' section, the 'uid' parameter sets a numerical user identifier
which the process will switch to after binding its listening sockets. The value
'0', which normally represents the super-user, here indicates that the UID must
not change during startup. It's the default behaviour. The 'gid' parameter does
the same for the group identifier. If setting an uid is not possible because of
deployment constraints, it is possible to set a user name with the 'user'
keyword followed by a valid user name. The same is true for the gid. It is
possible to specify a group name after the 'group' keyword.

Use of generic accounts such as 'nobody' is particularly discouraged, because
if other services use them, the consequences are the same as using 'root'.

The 'chroot' parameter makes the process isolate itself in an empty directory
just before switching its UID. This type of isolation (chroot) can sometimes
be worked around on certain OSes (Linux, Solaris), provided that the attacker
has gained 'root' privileges and has the ability to use or create a directory.
For this reason, it's essential to use a dedicated directory and not to share
one between several services of a different nature. To make isolation more
resistant, it's recommended to use an empty directory without any permissions,
and to change the UID of the process so that it cannot do anything there.

Note: in the event that such a vulnerability is exploited, it's most likely
that the first attempts would kill the process due to 'Segmentation Fault',
'Bus Error' or 'Illegal Instruction' signals. Even though it's true that
isolating the server reduces the risk of intrusion, it's sometimes useful to
find out why a process dies, by analysing a 'core' file, although this is very
rare (the last bug of this sort was fixed in 1.1.9). For security reasons,
most systems disable the generation of core files when a process changes its
UID. The two workarounds are thus either to start the process from a
restricted user account, which will not be able to chroot itself, or to start
it as root and not change the UID. In both cases the core will be found either
in the start directory or in the chroot directory. Do not forget to allow core
dumps prior to starting the process :

# ulimit -c unlimited

Example :
---------

    # with uid/gid
    global
        uid     30000
        gid     30000
        chroot  /var/chroot/haproxy

    # with user/group
    global
        user    haproxy
        group   public
        chroot  /var/chroot/haproxy


1.4) Startup modes
------------------
The service can start in several different modes :
  - foreground / background
  - quiet / normal / debug

The default mode is normal, foreground, which means that the program doesn't
return once started. NEVER EVER use this mode in a system startup script, or
the system won't boot. It needs to be started in background, so that it
returns immediately after forking. That's accomplished by the 'daemon' option
in the 'global' section, which is the equivalent of the '-D' command line
argument.

The '-db' command line argument overrides the 'daemon' and 'nbproc' global
options to make the process run in normal, foreground mode.

Moreover, certain alert messages are still sent to the standard output even
in 'daemon' mode. To make them disappear, simply add the 'quiet' option in the
'global' section. This option has no command-line equivalent.

Last, the 'debug' mode, enabled with the 'debug' option in the 'global'
section, and which is equivalent to the '-d' option, allows deep TCP/HTTP
analysis, with timestamped display of each connection, disconnection, and HTTP
headers for both directions. This mode is incompatible with 'daemon' and
'quiet' modes for obvious reasons.
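
As a brief illustration of these modes, the two snippets below contrast a
production setup with an interactive troubleshooting one :

    # production : background, no console output
    global
        daemon
        quiet

    # troubleshooting only : foreground with verbose TCP/HTTP analysis,
    # never to be used in an init script
    global
        debug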


1.5) Increasing the overall processing power
--------------------------------------------
On multi-processor systems, it may seem a shame to use only one processor,
even though the load needed to saturate a recent processor is far above common
usage. Still, for very specific needs, the proxy can start several processes
between which the operating system will spread the incoming connections. The
number of processes is controlled by the 'nbproc' parameter in the 'global'
section. It defaults to 1, and obviously works only in 'daemon' mode. One
typical use of this parameter has been to work around the default per-process
file-descriptor limit that Solaris imposes on user processes.

Example :
---------

    global
        daemon
        quiet
        nbproc  2


1.6) Helping process management
-------------------------------
Haproxy now supports the notion of a pidfile. If the '-p' command line
argument or the 'pidfile' global option is followed by a file name, this file
will be removed, then filled with all children's pids, one per line (only in
daemon mode). This file is NOT within the chroot, which makes it possible to
work with a read-only chroot. It will be owned by the user starting the
process, and will have permissions 0644.

Example :
---------

    global
        daemon
        quiet
        nbproc  2
        pidfile /var/run/haproxy-private.pid

    # to stop only those processes among others :
    # kill $(</var/run/haproxy-private.pid)

    # to reload a new configuration with minimal service impact and without
    # breaking existing sessions :
    # haproxy -f haproxy.cfg -p /var/run/haproxy-private.pid -sf $(</var/run/haproxy-private.pid)

1.7) Polling mechanisms
-----------------------
Starting from version 1.2.5, haproxy supports the poll() and epoll() polling
mechanisms. On systems where select() is limited by FD_SETSIZE (like Solaris),
poll() can be an interesting alternative. Performance tests show that Solaris'
poll() performance does not decay as fast as the numbers of sockets increase,
making it a safe solution for high loads. However, Solaris already uses poll()
to emulate select(), so as long as the number of sockets has no reason to go
higher than FD_SETSIZE, poll() should not provide any better performance. On
Linux systems with the epoll() patch (or any 2.6 version), haproxy will use
epoll() which is extremely fast and does not depend on the number of sockets.
Tests have shown constant performance from 1 to 20000 simultaneous sessions.
Version 1.3.9 introduced kqueue() for FreeBSD/OpenBSD, and speculative epoll()
which consists in trying to perform I/O before queuing the events via syscalls.

In order to optimize latency, it is now possible to limit the number of events
returned by a single call to poll. The limit is set to 200 by default. If a
lower latency is sought, it may be useful to reduce this value using the
'tune.maxpollevents' parameter in the 'global' section. Increasing it will
save a few CPU cycles in the presence of a large number of connections.

Haproxy will use kqueue() or speculative epoll() when available, then epoll(),
and will fall back to poll(), then to select(). However, if for any reason you
need to disable epoll() or poll() (eg. because of a bug or just to compare
performance), new global options have been created for this matter : 'nosepoll',
'nokqueue', 'noepoll' and 'nopoll'.

Example :
---------

    global
        # use only select()
        noepoll
        nopoll
        tune.maxpollevents 100

Note :
------
For the sake of configuration file portability, these options are accepted but
ignored if the poll() or epoll() mechanisms have not been enabled at compile
time.

To make debugging easier, the '-de' runtime argument disables epoll support,
the '-dp' argument disables poll support, '-dk' disables kqueue and '-ds'
disables speculative epoll(). They are respectively equivalent to 'noepoll',
'nopoll', 'nokqueue' and 'nosepoll'.


2) Declaration of a listening service
=====================================

Service sections start with the 'listen' keyword :

    listen <instance_name> [ <IP_address>:<port_range>[,...] ]

- <instance_name> is the name of the instance. This name will be reported in
  logs, so it is good to have it reflect the proxied service. No uniqueness
  test is done on this name, and it's not mandatory for it to be unique, though
  this is highly recommended.

- <IP_address> is the IP address the proxy binds to. Empty address, '*' and
  '0.0.0.0' all mean that the proxy listens to all valid addresses on the
  system.

- <port_range> is either a unique port, or a port range for which the proxy will
  accept connections for the IP address specified above. This range can be :
    - a numerical port (ex: '80')
    - a dash-delimited ports range explicitly stating the lower and upper bounds
      (ex: '2000-2100') which are included in the range.

  Particular care must be taken with port ranges, because every <addr:port>
  couple consumes one socket (= a file descriptor), so it's easy to eat lots of
  descriptors with a simple range. The <addr:port> couple must be used only once
  among all instances running on the same system. Please note that binding to
  ports lower than 1024 needs particular privileges to start the program, which
  are independent of the 'uid' parameter.

- the <IP_address>:<port_range> couple may be repeated indefinitely to require
  the proxy to listen to other addresses and/or ports. To achieve this, simply
  separate them with a comma.

Examples :
---------
    listen http_proxy :80
    listen x11_proxy 127.0.0.1:6000-6009
    listen smtp_proxy 127.0.0.1:25,127.0.0.1:587
    listen ldap_proxy :389,:663

In the event that all addresses do not fit on the line, it's preferable to
detach secondary addresses on other lines with the 'bind' keyword. If this
keyword is used, it's not even necessary to specify the first address on the
'listen' line, which sometimes makes multiple configuration handling easier :

    bind [ <IP_address>:<port_range>[,...] ]

Examples :
----------
    listen http_proxy
        bind :80,:443
        bind 10.0.0.1:10080,10.0.0.1:10443


2.1) Inhibiting a service
-------------------------
A service may be disabled for maintenance reasons, without needing to comment
out the whole section, simply by specifying the 'disabled' keyword in the
section to be disabled :

    listen smtp_proxy 0.0.0.0:25
        disabled

Note: the 'enabled' keyword makes it possible to enable a service which has
      been disabled previously by a default configuration.


2.2) Modes of operation
-----------------------
A service can work in 3 distinct modes :
  - TCP
  - HTTP
  - health

TCP mode
--------
In this mode, the service relays TCP connections as soon as they're established,
towards one or several servers. No processing is done on the stream. It's only
an association of source(addr:port) -> destination(addr:port). To use this mode,
you must specify 'mode tcp' in the 'listen' section. This is the default mode.

Example :
---------
    listen smtp_proxy 0.0.0.0:25
        mode tcp

HTTP mode
---------
In this mode, the service relays TCP connections towards one or several servers,
when it has enough information to decide, which normally means that all HTTP
headers have been read. Some of them may be scanned for a cookie or a pattern
matching a regex. To use this mode, specify 'mode http' in the 'listen' section.

Example :
---------
    listen http_proxy 0.0.0.0:80
        mode http

Health-checking mode
--------------------
This mode provides a way for external components to check the proxy's health.
It is meant to be used with intelligent load-balancers which can use send/expect
scripts to check for all of their servers' availability. This one simply accepts
the connection, returns the word 'OK' and closes it. If the 'option httpchk' is
set, then the reply will be 'HTTP/1.0 200 OK' with no data, so that it can be
tested from a tool which supports HTTP health-checks. To enable it, simply
specify 'health' as the working mode :

Example :
---------
    # simple response : 'OK'
    listen health_check 0.0.0.0:60000
        mode health

    # HTTP response : 'HTTP/1.0 200 OK'
    listen http_health_check 0.0.0.0:60001
        mode health
        option httpchk

2.2.1 Monitoring
----------------
Versions 1.1.32 and 1.2.6 provide a new solution to check the proxy's
availability without disturbing the service. The 'monitor-net' keyword was
created to specify a network of equipment which CANNOT use the service for
anything but health-checks. This is particularly suited to TCP proxies, because
it prevents the proxy from relaying the monitor's connection to the remote
server.

When used with TCP, the connection is accepted then closed and nothing is
logged. This is enough for a front-end load-balancer to detect the service as
available.

When used with HTTP, the connection is accepted, nothing is logged, the
following response is sent, then the session is closed : "HTTP/1.0 200 OK".
This is normally enough for any front-end HTTP load-balancer to detect the
service as available too, both with TCP and HTTP checks.

Proxies using the "monitor-net" keyword can remove "option dontlognull", as
removing it will make them log empty connections coming from hosts outside
the monitoring network.

Example :
---------

    listen tse-proxy
       bind :3389,:1494,:5900  # TSE, ICA and VNC at once.
       mode tcp
       balance roundrobin
       server tse-farm 192.168.1.10
       monitor-net 192.168.1.252/31   # L4 load-balancers on .252 and .253


When the system executing the checks is located behind a proxy, the monitor-net
keyword cannot be used because haproxy will always see the proxy's address. To
overcome this limitation, version 1.2.15 brought the 'monitor-uri' keyword. It
defines a URI which will be neither forwarded nor logged, but for which haproxy
will immediately send an "HTTP/1.0 200 OK" response. This makes it possible to
check the validity of the reverse-proxy->haproxy chain with one request. It can
be used in HTTPS checks in front of a stunnel -> haproxy combination, for
instance. Obviously, this keyword is only valid in HTTP mode, otherwise there
is no notion of URI. Note that the method and HTTP version are simply ignored.

Example :
---------

    listen stunnel_backend :8080
       mode http
       balance roundrobin
       server web1 192.168.1.10:80 check
       server web2 192.168.1.11:80 check
       monitor-uri /haproxy_test


2.3) Limiting the number of simultaneous connections
----------------------------------------------------
The 'maxconn' parameter allows a proxy to refuse connections above a certain
number of simultaneous ones. When the limit is reached, it simply stops
listening, but the system may still be accepting connections because of the
backlog queue. These connections will be processed later when other ones have
freed some slots. This provides a serialization effect which helps very fragile
servers resist high loads. See further for system limitations.

Example :
---------
    listen tiny_server 0.0.0.0:80
        maxconn 10


2.4) Soft stop
--------------
It is possible to stop services without breaking existing connections by
sending the SIGUSR1 signal to the process. All services are then put into
soft-stop state, which means that they will refuse to accept new connections,
except for those which have a non-zero value in the 'grace' parameter, in which
case they will still accept connections for the specified amount of time, in
milliseconds. This makes it possible to tell a load-balancer that the service
is failing, while still doing the job during the time it needs to detect it.

Note: active connections are never killed. In the worst case, the user will have
to wait for all of them to close or to time-out, or simply kill the process
normally (SIGTERM). The default 'grace' value is '0'.

Example :
---------
    # enter soft stop after 'killall -USR1 haproxy'
    # the service will still run 10 seconds after the signal
    listen http_proxy 0.0.0.0:80
        mode http
        grace 10000

    # this port is dedicated to a load-balancer, and must fail immediately
    listen health_check 0.0.0.0:60000
        mode health
        grace 0


As of version 1.2.8, a new soft-reconfiguration mechanism has been introduced.
It is now possible to "pause" all the proxies by sending a SIGTTOU signal to
the processes. This will disable the listening socket without breaking existing
connections. After that, sending a SIGTTIN signal to those processes enables
the listening sockets again. This is very useful to try to load a new
configuration or even a new version of haproxy without breaking existing
connections. If the load succeeds, then simply send a SIGUSR1 which will make
the previous proxies exit immediately once their sessions are closed ; and if
the load fails, then simply send a SIGTTIN to restore the service immediately.
Please note that the 'grace' parameter is ignored for SIGTTOU, as well as for
SIGUSR1 when the process was in the pause mode. Please also note that it would
be useful to save the pidfile before starting a new instance.
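
As an illustration, a manual pause/resume sequence could look like this,
assuming the pidfile from the example in section 1.6 :

    # pause all proxies (listening sockets released, sessions preserved)
    # kill -TTOU $(cat /var/run/haproxy-private.pid)

    # ... start and test the new instance here ...

    # if the new instance misbehaves, resume the old one :
    # kill -TTIN $(cat /var/run/haproxy-private.pid)

    # if everything works, let the old processes finish and exit :
    # kill -USR1 $(cat /var/run/haproxy-private.pid)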

This mechanism has been fully exploited since 1.2.11 with the '-st' and '-sf' options
(see below).

2.4.1) Hot reconfiguration
--------------------------
The '-st' and '-sf' command line options are used to inform previously running
processes that a configuration is being reloaded. They will receive the SIGTTOU
signal to ask them to temporarily stop listening to the ports so that the new
process can grab them. If anything wrong happens, the new process will send
them a SIGTTIN to tell them to re-listen to the ports and continue their normal
work. Otherwise, it will either ask them to finish (-sf) their work then softly
exit, or immediately terminate (-st), breaking existing sessions. A typical use
of this allows a configuration reload without service interruption :

 # haproxy -p /var/run/haproxy.pid -sf $(cat /var/run/haproxy.pid)


2.5) Connections expiration time
--------------------------------
It is possible (and recommended) to configure several time-outs on TCP
connections. Three independent timers are adjustable with values specified
in milliseconds. A session will be terminated if either one of these timers
expire.

  - the time we accept to wait for data from the client, or for the client to
    accept data : 'clitimeout' :

        # client time-out set to 2 minutes 30 seconds.
        clitimeout  150000

  - the time we accept to wait for data from the server, or for the server to
    accept data : 'srvtimeout' :

        # server time-out set to 30s.
        srvtimeout  30000

  - the time we accept to wait for a connection to establish on a server :
    'contimeout' :

        # we give up if the connection does not complete within 4 seconds
        contimeout  4000

Notes :
-------
  - 'contimeout' and 'srvtimeout' are meaningless in 'health' mode ;
  - under high loads, or with a saturated or defective network, it's possible
    that some packets get lost. Since the first TCP retransmit only happens
    after 3 seconds, a time-out equal to, or lower than 3 seconds cannot
    compensate for a packet loss. A 4-second time-out seems a reasonable
    minimum which will considerably reduce connection failures.
  - starting with version 1.3.14, it is possible to specify timeouts in
    arbitrary time units among { us, ms, s, m, h, d }. For this, the integer
    value just has to be suffixed with the unit.
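
    For example, with version 1.3.14 or later, the following two lines should
    be equivalent :

        clitimeout  150000
        clitimeout  150s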

2.6) Attempts to reconnect
--------------------------
After a connection failure to a server, it is possible to retry, potentially
on another server. This is useful if health-checks are too rare and you don't
want the clients to see the failures. The number of attempts to reconnect is
set by the 'retries' parameter.

Example :
---------
        # we can retry 3 times max after a failure
        retries 3

Please note that the reconnection attempt may lead to getting the connection
sent to a new server if the original one died between connection attempts.


2.7) Address of the dispatch server (deprecated)
------------------------------------------------
The server which will be sent all new connections is defined by the 'dispatch'
parameter, in the form <address>:<port>. It generally is dedicated to unknown
connections and will assign them a cookie, in case of HTTP persistence mode,
or simply is a single server in the case of a generic TCP proxy. This old mode
is only provided for backwards compatibility, but doesn't allow checking remote
server state, and has rather limited usage. All new setups should switch to 'balance'
mode. The principle of the dispatcher is to be able to perform the load
balancing itself, but work only on new clients so that the server doesn't need
to be a big machine.

Example :
---------
           # all new connections go there
        dispatch 192.168.1.2:80

Note :
------
This parameter makes no sense in 'health' mode, and is incompatible with
'balance' mode.


2.8) Outgoing source address
----------------------------
It is often necessary to bind to a particular address when connecting to some
remote hosts. This is done via the 'source' parameter, which is a per-proxy
parameter. A newer version may allow setting different sources to reach
different servers. The syntax is 'source <address>[:<port>]', where <address>
is a valid
local address (or '0.0.0.0' or '*' or empty to let the system choose), and
<port> is an optional parameter allowing the user to force the source port for
very specific needs. If the port is not specified or is '0', the system will
choose a free port. Note that as of version 1.1.18, the servers health checks
are also performed from the same source.

Examples :
----------
    listen http_proxy *:80
           # all connections take 192.168.1.200 as source address
        source 192.168.1.200:0

    listen rlogin_proxy *:513
           # use address 192.168.1.200 and the reserved port 900 (needs to be root)
        source 192.168.1.200:900


2.9) Setting the cookie name
----------------------------
In HTTP mode, it is possible to look for a particular cookie which will contain
a server identifier which should handle the connection. The cookie name is set
via the 'cookie' parameter.

Example :
---------
    listen http_proxy :80
        mode http
        cookie SERVERID

It is possible to change the cookie behaviour to get a smarter persistence,
depending on applications. It is notably possible to delete or modify a cookie
emitted by a server, insert a cookie identifying the server in an HTTP response
and even add a header to tell upstream caches not to cache this response.

Examples :
----------

To remove the cookie for direct accesses (i.e. when the server matches the one
which was specified in the client cookie) :

        cookie SERVERID indirect

To replace the cookie value with the one assigned to the server if any (no
cookie will be created if the server does not provide one, nor if the
configuration does not provide one). This lets the application put the cookie
exactly on certain pages (eg: successful authentication) :

        cookie SERVERID rewrite

To create a new cookie and assign the server identifier to it (in this case,
all servers should be associated with a valid cookie value, since an absent
value will simply delete the cookie from the client's browser) :

        cookie SERVERID insert

To reuse an existing application cookie and prefix it with the server's
identifier, and remove it in the request, use the 'prefix' option. This makes
it possible to insert haproxy in front of an application without risking
breaking clients which do not support more than one cookie :

        cookie JSESSIONID prefix

To insert a cookie and ensure that no upstream cache will store it, add the
'nocache' option :

        cookie SERVERID insert nocache

To insert a cookie only after a POST request, add 'postonly' after 'insert'.
This has the advantage that there's no risk of caching, and that all pages
seen before the POST one can still be cached :

        cookie SERVERID insert postonly

Notes :
-----------
- it is possible to combine 'insert' with 'indirect' or 'rewrite' to adapt to
  applications which already generate the cookie with an invalid content.

- in the case where 'insert' and 'indirect' are both specified, the cookie is
  never transmitted to the server, since it wouldn't understand it. This is the
  most application-transparent mode.

- it is particularly recommended to use 'nocache' in 'insert' mode if any
  upstream HTTP/1.0 cache might cache the result, because this may
  lead to many clients going to the same server, or even worse, some clients
  having their server changed while retrieving a page from the cache.

- the 'prefix' mode normally does not need 'indirect', 'nocache', nor
  'postonly', because just as in the 'rewrite' mode, it relies on the
  application to know when a cookie can be emitted. However, since it has to
  fix the cookie name in every subsequent request, you must ensure that the
  proxy will be used without any "HTTP keep-alive". Use option "httpclose" if
  unsure.

- when the application is well known and controlled, the best method is to
  only add the persistence cookie on a POST form because it's up to the
  application to select which page it wants the upstream servers to cache. In
  this case, you would use 'insert postonly indirect'.


2.10) Associating a cookie value with a server
----------------------------------------------
In HTTP mode, it's possible to associate a cookie value to each server. This
was initially used in combination with 'dispatch' mode to handle direct accesses
but it is now the standard way of doing the load balancing. The syntax is :

    server <identifier> <address>:<port> cookie <value>

- <identifier> is any name which can be used to identify the server in the logs.
- <address>:<port> specifies where the server is bound.
- <value> is the value to put in or to read from the cookie.

Example : the 'SERVERID' cookie can be either 'server01' or 'server02'
---------
    listen http_proxy :80
        mode http
        cookie SERVERID
        dispatch 192.168.1.100:80
        server web1 192.168.1.1:80 cookie server01
        server web2 192.168.1.2:80 cookie server02

Warning : the syntax has changed since version 1.0 !
---------


2.11) Application Cookies
-------------------------
Since 1.2.4 it is possible to catch the cookie that comes from an
application server in order to apply "application session stickiness".
The server's response is searched for the 'appsession' cookie, the first
'len' bytes are used for matching, and it is stored for a period of
'timeout'.
The syntax is:

    appsession <session_cookie> len <match_length> timeout <holdtime>

- <session_cookie> is the cookie the server uses for its session handling
- <match_length> is how many bytes/characters should be used for matching equal
                sessions
- <holdtime> is the inactivity time, in ms, after which the cookie will be
             deleted from the session store
- starting with version 1.3.14, it is possible to specify timeouts in
  arbitrary time units among { us, ms, s, m, h, d }. For this, the integer
  value just has to be suffixed with the unit.

The appsession parameter can only be set per 'listen' section.

Example :
---------
    listen http_lb1 192.168.3.4:80
       mode    http
       capture request  header Cookie len 200
       # Having a ServerID cookie on the client allows it to reach
       # the right server even after expiration of the appsession.
       cookie ServerID insert nocache indirect
       # Will memorize 52 bytes of the cookie 'JSESSIONID' and keep them
       # for 3 hours. It will match it in the cookie and the URL field.
       appsession JSESSIONID len 52 timeout 3h
       server first1 10.3.9.2:10805 check inter 3000 cookie first
       server secon1 10.3.9.3:10805 check inter 3000 cookie secon
       server first2 10.3.9.4:10805 check inter 3000 cookie first
       server secon2 10.3.9.5:10805 check inter 3000 cookie secon
       option httpchk GET /test.jsp


3) Autonomous load balancer
===========================

The proxy can perform the load-balancing itself, both in TCP and in HTTP modes.
This is the most interesting mode which obsoletes the old 'dispatch' mode
described above. It has advantages such as server health monitoring, multiple
port binding and port mapping. To use this mode, the 'balance' keyword is used,
followed by the selected algorithm. Up to version 1.2.11, only 'roundrobin' was
available, which is also the default value if unspecified. Starting with
version 1.2.12, a new 'source' keyword appeared. A new 'uri' keyword was added
in version 1.3.10. In this mode, there will be no dispatch address, but the
proxy needs at least one server.

Example : same as the last one, with internal load balancer
---------

    listen http_proxy :80
        mode http
        cookie SERVERID
        balance roundrobin
        server web1 192.168.1.1:80 cookie server01
        server web2 192.168.1.2:80 cookie server02


Since version 1.1.22, it is possible to automatically determine on which port
the server will get the connection, depending on the port the client connected
to. Indeed, there are now 4 possible combinations for the server's <port> field:

  - unspecified or '0' :
    the connection will be sent to the same port as the one on which the proxy
    received the client connection itself.

  - numerical value (the only one supported in versions earlier than 1.1.22) :
    the connection will always be sent to the specified port.

  - '+' followed by a numerical value :
    the connection will be sent to the same port as the one on which the proxy
    received the connection, plus this value.

  - '-' followed by a numerical value :
    the connection will be sent to the same port as the one on which the proxy
    received the connection, minus this value.
    
Examples :
----------

# same as previous example

    listen http_proxy :80
        mode http
        cookie SERVERID
        balance roundrobin
        server web1 192.168.1.1 cookie server01
        server web2 192.168.1.2 cookie server02

# simultaneous relaying of ports 80, 81 and 8080-8089

    listen http_proxy :80,:81,:8080-8089
        mode http
        cookie SERVERID
        balance roundrobin
        server web1 192.168.1.1 cookie server01
        server web2 192.168.1.2 cookie server02

# relaying of TCP ports 25, 389 and 663 to ports 1025, 1389 and 1663

    listen http_proxy :25,:389,:663
        mode tcp
        balance roundrobin
        server srv1 192.168.1.1:+1000
        server srv2 192.168.1.2:+1000

As previously stated, version 1.2.12 brought the 'source' keyword. When this
keyword is used, the client's IP address is hashed and evenly distributed among
the available servers so that the same source IP will always go to the same
server as long as there is no change in the number of available servers. This
can be used for instance to bind HTTP and HTTPS to the same server. It can also
be used to improve stickiness when one part of the client population does not
accept cookies. In this case, only those clients will be disturbed should a
server fail.

NOTE: It is important to consider the fact that many clients surf the net
      through proxy farms which assign different IP addresses for each
      request. Others use dialup connections with a different IP at each
      connection. Thus, the 'source' parameter should be used with extreme
      care.

Examples :
----------

# make the same IP go to the same server whatever the service

    listen http_proxy
        bind :80,:443
        mode http
        balance source
        server web1 192.168.1.1
        server web2 192.168.1.2

# try to improve client-server binding by using both source IP and cookie :

    listen http_proxy :80
        mode http
        cookie SERVERID
        balance source
        server web1 192.168.1.1 cookie server01
        server web2 192.168.1.2 cookie server02

As indicated above, the 'uri' keyword was introduced in version 1.3.10. It is
useful when load-balancing between reverse proxy-caches, because it will hash
the URI and use the hash result to select a server, thus optimizing the hit
rate on the caches, because the same URI will always reach the same cache. This
keyword is only allowed in HTTP mode.

Example :
---------

# Always send a given URI to the same server

    listen http_proxy
        bind :3128
        mode http
        balance uri
        server squid1 192.168.1.1
        server squid2 192.168.1.2

Version 1.3.14 introduced the "balance url_param" method. It consists in
relying on a parameter passed in the URL to perform a hash. This is mostly
useful for applications which do not have strict persistence requirements,
but for which it still provides a performance boost due to local caching.
Some of these applications may not be able to use a cookie for whatever reason,
but may be able to look for a parameter passed in the URL. If the parameter is
missing from the URL, then the 'round robin' method applies.

A modifier may be added to specify that the parameters in POST requests may be
found in the message body if the URL lacks a '?' separator character.
A wait limit may also be applied ; if no limit is requested, the default value
is 48 octets and the minimum is 3. HAProxy may wait until that many octets are
received. If Content-Length is missing or zero, it need not wait for more data
than the client promised to send. When Content-Length is present and larger
than <max_wait>, then waiting is limited to <max_wait> and it is assumed this
will be enough data to search for the presence of the parameter. If
Transfer-Encoding: chunked is used (unlikely), then the length of the first
chunk is the maximum number of bytes to wait for.

balance url_param <param> [check_post [<max_wait>]]

Caveats for using the check_post extension:

  - all POST requests are eligible for consideration, because there is
    no way to determine if the parameters will be found in the body or
    entity which may contain binary data. Therefore another method may be
    required to restrict consideration of POST requests that have no URL
    parameters in the body. (see acl reqideny http_end)

Limitations on inspecting the entity body of a POST:

  - Content-Encoding is not supported, the parameter search will probably fail;
    and load balancing will fall back to Round Robin.

  - Expect: 100-continue is not supported, load balancing will fall back to
    Round Robin.

  - Transfer-Encoding(RFC2616 3.6.1) is only supported in the first chunk. If
    the entire parameter value is not present in the first chunk, the selection
    of server is undefined (actually, defined by how little actually appeared in
    the first chunk).

  - This feature does not support generation of a 100, 411 or 501 response.

  - In some cases, requesting check_post MAY attempt to scan the entire contents
    of a message body.  Scanning normally terminates when linear white space or
    control characters are found, indicating the end of what might be a URL parameter
    list.  This is probably not a concern with SGML type message bodies.


Example :
---------

# Hash the "basket_id" argument from the URL to determine the server

    listen http_proxy
        bind :3128
        mode http
        balance url_param basket_id
        server ebiz1 192.168.1.1
        server ebiz2 192.168.1.2
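
As a sketch of the 'check_post' variant described above, the same parameter
could also be searched for in the body of POST requests, waiting for at most
64 octets of data (names and addresses are only placeholders) :

    listen http_post_proxy
        bind :3129
        mode http
        balance url_param basket_id check_post 64
        server ebiz1 192.168.1.1
        server ebiz2 192.168.1.2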


3.1) Server monitoring
----------------------
It is possible to check the servers status by trying to establish TCP
connections or even sending HTTP requests to them. A server which fails to
reply to health checks as expected will not be used by the load balancing
algorithms. To enable monitoring, add the 'check' keyword on a server line.
It is possible to specify the interval between tests (in milliseconds) with
the 'inter' parameter, the number of failures supported before declaring that
the server has fallen down with the 'fall' parameter, and the number of valid
checks needed for the server to fully get up with the 'rise' parameter. Since
version 1.1.22, it is also possible to send checks to a different port with
the 'port' parameter (this is mandatory when the server line specifies no
port). The default values are the following ones :

  - inter : 2000
  - rise  : 2
  - fall  : 3
  - port  : default server port
  - addr  : specific address for the test (default = server's address)
  
The default mode consists in establishing TCP connections only. But with
certain types of application failures, it often happens that the server keeps
accepting connections because the system does it itself while the application
is running an endless loop or is completely stuck. So version 1.1.16 introduced
HTTP health checks, which only perform simple lightweight requests and analyse
the response. As of version 1.1.23, it is also possible to change the HTTP
method, the URI, and the HTTP version string (which even allows headers to be
sent with a dirty trick). To enable HTTP health-checks, use 'option httpchk'.

By default, checks use the 'OPTIONS' method on '/', because it is very light
and easy to filter from logs. Only 2xx and 3xx HTTP responses are considered
valid, and only if they arrive before it is time to send a new request (the
'inter' parameter). If some servers block this type of request, 3 other forms
help to forge a request :

  - option httpchk               -> OPTIONS / HTTP/1.0
  - option httpchk URI           -> OPTIONS <URI> HTTP/1.0
  - option httpchk METH URI      -> <METH> <URI> HTTP/1.0
  - option httpchk METH URI VER  -> <METH> <URI> <VER>

Some people are using HAProxy to relay various TCP-based protocols such as
HTTPS, SMTP or LDAP, with the most common one being HTTPS. One problem commonly
encountered in data centers is the need to forward the traffic to far remote
servers while providing server fail-over. Often, TCP-only checks are not enough
because intermediate firewalls, load balancers or proxies might acknowledge the
connection before it reaches the real server. The only solution to this problem
is to send application-level health checks. Since the demand for HTTPS checks
is high, it has been implemented in 1.2.15 based on SSLv3 Client Hello packets.
To enable it, use 'option ssl-hello-chk'. It will send SSL CLIENT HELLO packets
to the servers, announcing support for most common cipher suites. If the server
responds with what looks like a SERVER HELLO or an ALERT (refusing the ciphers),
then the response is considered valid. Note that Apache does not log anything
when it receives only a HELLO message, which makes this type of message
perfectly suited to this need.

Version 1.3.10 introduced the SMTP health check. By default, it sends
"HELO localhost" to the servers, and waits for a 250 reply. Note that it
can also send a specific request :

  - option smtpchk                         -> sends "HELO localhost"
  - option smtpchk EHLO mail.mydomain.com  -> sends this ESMTP greeting

See examples below.
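
For instance, a minimal sketch (addresses and domain are only illustrative) of
an SMTP relay checked with the ESMTP greeting :

    listen smtp_relay 0.0.0.0:25
        mode tcp
        balance roundrobin
        option smtpchk EHLO mail.mydomain.com
        server smtp1 192.168.1.1:25 check
        server smtp2 192.168.1.2:25 check backup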

Since version 1.1.17, it is possible to specify backup servers. These servers
are only solicited when no other server is available. This may only be useful
to serve a maintenance page, or to define one active and one backup server
(seldom used in TCP mode). To make a server a backup one, simply add the
'backup' option on its line. These servers also support cookies, so if a
cookie is specified for a backup server, clients assigned to this server will
stick to it even when the other ones come back. Conversely, if no cookie is
assigned to such a server, the clients will get their cookies removed (empty
cookie = removal), and will be balanced among the other servers once they come
back. Please note that there is no load-balancing among backup servers by
default. If there are several backup servers, the second one will only be used
when the first one dies, and so on. To force load-balancing between backup
servers, specify the 'allbackups' option.

Since version 1.1.22, it is possible to send health checks to a different port
than the service one. This is mainly needed in setups where the server does not
have any predefined port, for instance when the port is deduced from the
listening port. For this, use the 'port' parameter followed by the port number
which must respond to health checks. It is also possible to send health checks
to a different address than the service one. This makes it easier to use a
dedicated check daemon on the servers, for instance to check returned contents
and stop several farms at once in the event of an error anywhere.
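
For example, a minimal sketch (addresses and check port are only illustrative)
where a dedicated check daemon listening on a separate address validates the
servers :

    listen web_farm 0.0.0.0:80
        mode http
        balance roundrobin
        option httpchk
        server app1 192.168.1.1:80 check addr 192.168.200.1 port 9000
        server app2 192.168.1.2:80 check addr 192.168.200.2 port 9000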

Since version 1.1.17, it is also possible to visually check the status of all
servers at once. For this, you just have to send a SIGHUP signal to the proxy.
The servers' status will be dumped into the logs at the 'notice' level, as well
as on <stderr> if not closed. For this reason, it's always a good idea to have
one local log server at the 'notice' level.

Since version 1.1.28 and 1.2.1, if an instance loses all its servers, an
emergency message will be sent in the logs to inform the administrator that an
immediate action must be taken.

Since version 1.1.30 and 1.2.3, several servers can share the same cookie
value. This is particularly useful in backup mode, to select alternate paths
for a given server for example, to provide soft-stop, or to direct the clients
to a temporary page during an application restart. The principle is that when
a server is dead, the proxy will first look for another server which shares the
same cookie value, for every client which presents the cookie. If there is no
standard server for this cookie, it will then look for a backup server which
shares the same cookie value. Please consult the architecture guide for more
information.

Examples :
----------
# same setup as in  paragraph 3) with TCP monitoring
    listen http_proxy 0.0.0.0:80
        mode http
        cookie SERVERID
        balance roundrobin
        server web1 192.168.1.1:80 cookie server01 check
        server web2 192.168.1.2:80 cookie server02 check inter 500 rise 1 fall 2

# same with HTTP monitoring via 'OPTIONS / HTTP/1.0'
    listen http_proxy 0.0.0.0:80
        mode http
        cookie SERVERID
        balance roundrobin
        option httpchk
        server web1 192.168.1.1:80 cookie server01 check
        server web2 192.168.1.2:80 cookie server02 check inter 500 rise 1 fall 2

# same with HTTP monitoring via 'OPTIONS /index.html HTTP/1.0'
    listen http_proxy 0.0.0.0:80
        mode http
        cookie SERVERID
        balance roundrobin
        option httpchk /index.html
        server web1 192.168.1.1:80 cookie server01 check
        server web2 192.168.1.2:80 cookie server02 check inter 500 rise 1 fall 2

# same with HTTP monitoring via 'HEAD /index.jsp? HTTP/1.1\r\nHost: www'
    listen http_proxy 0.0.0.0:80
        mode http
        cookie SERVERID
        balance roundrobin
        option httpchk HEAD /index.jsp? HTTP/1.1\r\nHost:\ www
        server web1 192.168.1.1:80 cookie server01 check
        server web2 192.168.1.2:80 cookie server02 check inter 500 rise 1 fall 2

# Load-balancing with 'prefixed cookie' persistence, and soft-stop using an
# alternate port 81 on the server for health-checks.
    listen http_proxy 0.0.0.0:80
        mode http
        cookie JSESSIONID prefix
        balance roundrobin
        option httpchk HEAD /index.jsp? HTTP/1.1\r\nHost:\ www
        server web1-norm 192.168.1.1:80 cookie s1 check port 81
        server web2-norm 192.168.1.2:80 cookie s2 check port 81
        server web1-stop 192.168.1.1:80 cookie s1 check port 80 backup
        server web2-stop 192.168.1.2:80 cookie s2 check port 80 backup

# automatic insertion of a cookie in the server's response, and automatic
# deletion of the cookie in the client request, while asking upstream caches
# not to cache replies.
    listen web_appl 0.0.0.0:80
        mode http
        cookie SERVERID insert nocache indirect
        balance roundrobin
        server web1 192.168.1.1:80 cookie server01 check
        server web2 192.168.1.2:80 cookie server02 check

# same with off-site application backup and local error pages server
    listen web_appl 0.0.0.0:80
        mode http
        cookie SERVERID insert nocache indirect
        balance roundrobin
        server web1 192.168.1.1:80 cookie server01 check
        server web2 192.168.1.2:80 cookie server02 check
        server web-backup 192.168.2.1:80 cookie server03 check backup
        server web-excuse 192.168.3.1:80 check backup

# SMTP+TLS relaying with health-checks and backup servers

    listen http_proxy :25,:587
        mode tcp
        balance roundrobin
        server srv1 192.168.1.1 check port 25 inter 30000 rise 1 fall 2
        server srv2 192.168.1.2 backup

# HTTPS relaying with health-checks and backup servers

    listen http_proxy :443
        mode tcp
        option ssl-hello-chk
        balance roundrobin
        server srv1 192.168.1.1 check inter 30000 rise 1 fall 2
        server srv2 192.168.1.2 backup

# Load-balancing using a backup pool (requires haproxy 1.2.9)
    listen http_proxy 0.0.0.0:80
        mode http
        balance roundrobin
        option httpchk
        server inst1 192.168.1.1:80 cookie s1 check
        server inst2 192.168.1.2:80 cookie s2 check
        server inst3 192.168.1.3:80 cookie s3 check
        server back1 192.168.1.10:80 check backup
        server back2 192.168.1.11:80 check backup
        option allbackups  # all backups will be used


3.2) Redistribute connections in case of failure
------------------------------------------------
In HTTP mode, if a server designated by a cookie does not respond, the clients
would remain stuck to it because they cannot flush the cookie, and so would no
longer be able to access the service. Specifying 'redispatch' allows the proxy
to break their persistence and redistribute them to working servers.

Example :
---------
    listen http_proxy 0.0.0.0:80
        mode http
        cookie SERVERID
        dispatch 192.168.1.100:80
        server web1 192.168.1.1:80 cookie server01
        server web2 192.168.1.2:80 cookie server02
        redispatch # send back to dispatch in case of connection failure

Up to and including version 1.1.16, this parameter only applied to connection
failures. Since version 1.1.17, it also applies to servers which have been
detected as failed by the health check mechanism. Indeed, a server may be
broken but still accept connections, so redispatching on connection failures
alone would not cover every case. It is however possible to preserve the old
behaviour, that is, to make a client insist on trying to connect to a server
even if it is reported as down, by setting the 'persist' option :

    listen http_proxy 0.0.0.0:80
        mode http
        option persist
        cookie SERVERID
        dispatch 192.168.1.100:80
        server web1 192.168.1.1:80 cookie server01
        server web2 192.168.1.2:80 cookie server02
        redispatch # send back to dispatch in case of connection failure


3.3) Assigning different weights to servers
-------------------------------------------
Sometimes you will need to bring in new servers to increase your server farm's
capacity, but the new server will be either smaller (emergency use of anything
that fits) or bigger (when investing in new hardware). For this reason, it
might be wise to be able to send more clients to the biggest servers. Up to
version 1.2.11, it was necessary to replicate the same server multiple times in
the configuration. Starting with 1.2.12, the 'weight' option is available.
HAProxy then computes the most homogeneous possible map of servers based on
their weights, so that the load gets distributed as smoothly as possible among
them. The weight, between 1 and 256, should reflect one server's capacity
relative to the others. Weight 1 represents the lowest frequency and 256 the
highest. This way, if a server fails, the remaining capacities are still
respected.

Example :
---------
# fair distribution among two opterons and one old pentium3

    listen web_appl 0.0.0.0:80
        mode http
        cookie SERVERID insert nocache indirect
        balance roundrobin
        server pentium3-800 192.168.1.1:80 cookie server01 weight  8 check
        server opteron-2.0G 192.168.1.2:80 cookie server02 weight 20 check
        server opteron-2.4G 192.168.1.3:80 cookie server03 weight 24 check
        server web-backup1 192.168.2.1:80 cookie server04 check backup
        server web-excuse 192.168.3.1:80 check backup

Notes :
-------
  - if unspecified, the default weight is 1

  - the weight does not impact health checks, so it is cleaner to use weights
    than replicating the same server several times

  - weights also work on backup servers if the 'allbackups' option is used

  - the weights also apply to the source address load balancing
    ('balance source').

  - whatever the weights, the first server will always be assigned first. This
    is helpful for troubleshooting.

  - for the purists, the map calculation algorithm gives precedence to the
    first server, so the map is the most uniform when servers are declared in
    ascending order relative to their weights.

The load distribution will follow exactly this sequence :

        Request|                   1 1 1 1
        number | 1 2 3 4 5 6 7 8 9 0 1 2 3
       --------+---------------------------
        p3-800 | X . . . . . . X . . . . .
        opt-20 | . X . X . X . . . X . X .
        opt-24 | . . X . X . X . X . X . X


3.4) Limiting the number of concurrent sessions on each server
--------------------------------------------------------------
Some pre-forked servers such as Apache suffer from too many concurrent
sessions, because it's very expensive to run hundreds or thousands of
processes on one system. One solution is to increase the number of servers
and load-balance between them, but this becomes a problem when the only goal
is to resist short surges.

To solve this problem, a new feature was implemented in HAProxy 1.2.13.
It's a per-server 'maxconn', associated with a per-server and a per-proxy
queue. This transforms haproxy into a request buffer between the thousands of
clients and the few servers. In many circumstances, lowering the maxconn value
will increase the servers' performance and decrease the overall response times
because the servers will be less congested.

When a request tries to reach any server, the first non-saturated server is
used, according to the load balancing algorithm. If all servers are saturated,
then the request gets queued into the instance's global queue. It will be
dequeued once a server has released a session and all previously queued
requests have been processed.

If a request references a particular server (eg: source hashing, or persistence
cookie), and if this server is full, then the request will be queued into the
server's dedicated queue. This queue has higher priority than the global queue,
so it's easier for already registered users to enter the site than for new
users.

For this, the logs have been enhanced to show the number of sessions per
server, the request's position in the queue and the time spent in the queue.
This helps with capacity planning. See the 'logs' section below for more info.

Example :
---------
    # be nice with P3 which only has 256 MB of RAM.
    listen web_appl 0.0.0.0:80
        maxconn 10000
        mode http
        cookie SERVERID insert nocache indirect
        balance roundrobin
        server pentium3-800 192.168.1.1:80 cookie s1 weight  8 maxconn 100 check
        server opteron-2.0G 192.168.1.2:80 cookie s2 weight 20 maxconn 300 check
        server opteron-2.4G 192.168.1.3:80 cookie s3 weight 24 maxconn 300 check
        server web-backup1 192.168.2.1:80 cookie s4 check maxconn 200 backup
        server web-excuse 192.168.3.1:80 check backup


This was so efficient at reducing the servers' response times that some users
wanted to use low values to improve their servers' performance. However, they
were then unable to handle very large loads because it was no longer possible
to saturate the servers. For this reason, version 1.2.14 brought dynamic
limitation with the addition of the 'minconn' parameter. When this parameter is
set along with maxconn, it enables dynamic limitation based on the instance's
load. The maximum number of concurrent sessions on a server will be
proportional to the number of sessions on the instance relative to its maxconn.
A minimum of <minconn> will be allowed whatever the load. This ensures that
servers will perform at their best level under normal loads, while still
handling surges when needed. The dynamic limit is computed like this :

    srv.dyn_limit = max(srv.minconn, srv.maxconn * inst.sess / inst.maxconn)

Example :
---------
    # be nice with P3 which only has 256 MB of RAM.
    listen web_appl 0.0.0.0:80
        maxconn 10000
        mode http
        cookie SERVERID insert nocache indirect
        balance roundrobin
        server pentium3-800 192.168.1.1:80 cookie s1 weight  8 minconn 10 maxconn 100 check
        server opteron-2.0G 192.168.1.2:80 cookie s2 weight 20 minconn 30 maxconn 300 check
        server opteron-2.4G 192.168.1.3:80 cookie s3 weight 24 minconn 30 maxconn 300 check
        server web-backup1 192.168.2.1:80 cookie s4 check maxconn 200 backup
        server web-excuse 192.168.3.1:80 check backup

In the example above, the server 'pentium3-800' will receive at most 100
simultaneous sessions when the proxy instance reaches 10000 sessions, and
will receive only 10 simultaneous sessions when the proxy is below 1000
sessions.
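
As an illustration of the formula above with the same numbers, if the instance
is currently handling 5000 sessions, the dynamic limit for 'pentium3-800' would
be max(10, 100 * 5000 / 10000) = 50 concurrent sessions.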

It is possible to limit the server queue length in order to rebalance excess
sessions between less busy application servers, if session affinity is not a
hard functional requirement (for example, it only provides a huge performance
boost by keeping server-local caches hot and compact). The 'maxqueue' option
sets a queue limit on a server, as in the example below :

... (just the same as in the example above)
        server pentium3-800 192.168.1.1:80 cookie s1 weight  8 minconn 10 maxconn 100 check maxqueue 50
        server opteron-2.0G 192.168.1.2:80 cookie s2 weight 20 minconn 30 maxconn 300 check maxqueue 200
        server opteron-2.4G 192.168.1.3:80 cookie s3 weight 24 minconn 30 maxconn 300 check

If the 'maxqueue' option is absent, the queue is unlimited. When a server's
queue is filled up to 'maxqueue', new client sessions are moved from the
server-local queue to the global one.

Notes :
-------
  - The requests will not stay indefinitely in the queue, they follow the
    'contimeout' parameter, and if a request cannot be dequeued within this
    timeout because the server is saturated or because the queue is filled,
    the session will expire with a 503 error.

  - if only <minconn> is specified, it has the same effect as <maxconn>

  - setting too low a value for maxconn might improve performance but might
    also allow slow users to block access to the server for other users.


3.5) Dropping aborted requests
------------------------------
In the presence of very high loads, the servers will take some time to respond.
The per-proxy connection queue will inflate, and the response time will increase
proportionally to the size of the queue multiplied by the average per-session
response time. When clients wait for more than a few seconds, they often hit
the 'STOP' button on their browser, leaving a useless request in the queue and
slowing down other users.

As there is no way to distinguish between a full STOP and a simple
shutdown(SHUT_WR) on the client side, HTTP agents should be conservative and
consider that the client might only have closed its output channel while
waiting for the response. However, this introduces risks of congestion when
lots of users do the same, and is completely useless nowadays because probably
no client at all will close the session while waiting for the response. Some
HTTP agents support this (Squid, Apache, HAProxy), and others do not (TUX, most
hardware-based load balancers). So the probability for a closed input channel
to represent a user hitting the 'STOP' button is close to 100%, and it is very
tempting to be able to abort the session early without polluting the servers.

For this reason, a new option "abortonclose" was introduced in version 1.2.14.
By default (without the option) the behaviour is HTTP-compliant. But when the
option is specified, a session whose incoming channel is closed will be aborted
while it is still possible, which means either while it is waiting for the
connect() to establish, or while it is queued waiting for a connection slot.
This considerably
reduces the queue size and the load on saturated servers when users are tempted
to click on STOP, which in turn reduces the response time for other users.

Example :
---------
    listen web_appl 0.0.0.0:80
        maxconn 10000
        mode http
        cookie SERVERID insert nocache indirect
        balance roundrobin
        server web1 192.168.1.1:80 cookie s1 weight 10 maxconn 100 check
        server web2 192.168.1.2:80 cookie s2 weight 10 maxconn 100 check
        server web3 192.168.1.3:80 cookie s3 weight 10 maxconn 100 check
        server bck1 192.168.2.1:80 cookie s4 check maxconn 200 backup
        option abortonclose


4) Additional features
======================

Other features are available. They are transparent mode, event logging, header
rewriting/filtering, and the status report as an HTML page.


4.1) Network features
---------------------
4.1.1) Transparent mode
-----------------------
In HTTP mode, the 'transparent' keyword makes it possible to intercept sessions
which are routed through the system hosting the proxy. This mode was implemented
as a replacement for the 'dispatch' mode, since connections without a cookie
will be sent to the original address while known cookies will be sent to the
servers. This mode implies that the system can redirect sessions to a local
port.

Example :
---------
    listen http_proxy 0.0.0.0:65000
        mode http
        transparent
        cookie SERVERID
        server server01 192.168.1.1:80
        server server02 192.168.1.2:80

    # iptables -t nat -A PREROUTING -i eth0 -p tcp -d 192.168.1.100 \
      --dport 80 -j REDIRECT --to-ports 65000

Note :
------
If the port is left unspecified on the server, the port the client connected to
will be used. This makes it possible to relay a full port range without using
transparent mode nor thousands of file descriptors, provided that the system
can redirect sessions to local ports.

Example :
---------
    # redirect all ports to local port 65000, then forward to the server on the
    # original port.
    listen http_proxy 0.0.0.0:65000
        mode tcp
        server server01 192.168.1.1 check port 60000
        server server02 192.168.1.2 check port 60000

    # iptables -t nat -A PREROUTING -i eth0 -p tcp -d 192.168.1.100 \
      -j REDIRECT --to-ports 65000

4.1.2) Per-server source address binding
----------------------------------------
As of versions 1.1.30 and 1.2.3, it is possible to specify a particular source
to reach each server. This is useful when reaching backup servers from a
different LAN, or to use an alternate path to reach the same server. It is also
usable to provide source load-balancing for outgoing connections. Obviously,
the same source address is used to send health-checks.

Example :
---------
    # use a particular source to reach both servers
    listen http_proxy 0.0.0.0:65000
        mode http
        balance roundrobin
        server server01 192.168.1.1:80 source 192.168.2.13
        server server02 192.168.1.2:80 source 192.168.2.13

Example :
---------
    # use a particular source to reach each server
    listen http_proxy 0.0.0.0:65000
        mode http
        balance roundrobin
        server server01 192.168.1.1:80 source 192.168.1.1
        server server02 192.168.2.1:80 source 192.168.2.1

Example :
---------
    # provide source load-balancing to reach the same proxy through 2 WAN links
    listen http_proxy 0.0.0.0:65000
        mode http
        balance roundrobin
        server remote-proxy-way1 192.168.1.1:3128 source 192.168.2.1
        server remote-proxy-way2 192.168.1.1:3128 source 192.168.3.1

Example :
---------
    # force a TCP connection to bind to a specific port
    listen http_proxy 0.0.0.0:2000
        mode tcp
        balance roundrobin
        server srv1 192.168.1.1:80 source 192.168.2.1:20
        server srv2 192.168.1.2:80 source 192.168.2.1:20

4.1.3) TCP keep-alive
---------------------
With version 1.2.7, it becomes possible to enable TCP keep-alives on both the
client and server sides. This makes it possible to prevent long sessions from
expiring on external layer 4 components such as firewalls and load-balancers.
It also allows the system to terminate dead sessions when no timeout has been
set (not recommended). The proxy cannot set the keep-alive probe intervals nor
their maximal count; consult your operating system's manual for this. There are
3 options to enable TCP keep-alive :

	option tcpka	# enables keep-alive both on client and server side
	option clitcpka	# enables keep-alive only on client side
	option srvtcpka	# enables keep-alive only on server side
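
For example, a minimal sketch (addresses are only illustrative) enabling
keep-alive towards the servers only, for long-lived TCP sessions :

    listen tcp_relay 0.0.0.0:8000
        mode tcp
        balance roundrobin
        option srvtcpka
        server srv1 192.168.1.1:8000 check
        server srv2 192.168.1.2:8000 check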

4.1.4) TCP lingering
--------------------
It is possible to disable the system's lingering of data unacked by the client
at the end of a session. This is sometimes required when haproxy is used as a
front-end with lots of unreliable clients, and you observe thousands of sockets
in the FIN_WAIT state on the machine. This may be used in a frontend to affect
the client-side connection, as well as in a backend for the server-side
connection :

	option nolinger	# disables data lingering
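
For example, a minimal sketch (address is only illustrative) of a front-end
facing many unreliable clients :

    listen public_web 0.0.0.0:80
        mode http
        option nolinger
        balance roundrobin
        server web1 192.168.1.1:80 check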


4.2) Event logging
------------------

HAProxy's strength certainly lies in its precise logs. It probably provides the
finest level of information available for such a product, which is very
important for troubleshooting complex environments. Standard log information
includes client ports, TCP/HTTP state timers, precise session state at
termination and precise termination cause, information about decisions to
direct traffic to a server, and of course the ability to capture arbitrary
headers.

In order to improve administrators' reactivity, it offers great transparency
about encountered problems, both internal and external, and it is possible to
send logs to several destinations at the same time with different level
filters :

  - global process-level logs (system errors, start/stop, etc..)
  - per-listener system and internal errors (lack of resource, bugs, ...)
  - per-listener external troubles (servers up/down, max connections)
  - per-listener activity (client connections), either at the establishment or
    at the termination.

The ability to distribute different levels of logs to different log servers
allows several production teams to interact and to fix their problems as soon
as possible. For example, the system team might monitor system-wide errors,
while the application team might be monitoring the up/down for their servers in
real time, and the security team might analyze the activity logs with one hour
delay.

4.2.1) Log levels
-----------------
TCP and HTTP connections can be logged with information such as date, time,
source IP address, destination address, connection duration, response times,
HTTP request, the HTTP return code, number of bytes transmitted, the conditions
in which the session ended, and even exchanged cookie values, for example to
track a particular user's problems. All messages are sent to up to two
syslog servers. Consult section 1.1 for more info about log facilities. The
syntax follows :

    log <address_1> <facility_1> [max_level_1]
    log <address_2> <facility_2> [max_level_2]
or
    log global

Note :
------
The particular syntax 'log global' means that the same log configuration as the
'global' section will be used.

Example :
---------
    listen http_proxy 0.0.0.0:80
        mode http
        log 192.168.2.200 local3
        log 192.168.2.201 local4
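
A sketch (addresses are only illustrative) using the optional level filter,
sending only warnings and more severe events to the first server, and
everything up to 'info' to the second one :

    listen http_proxy 0.0.0.0:80
        mode http
        log 192.168.2.200 local3 warning
        log 192.168.2.201 local4 info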

4.2.2) Log format
-----------------
By default, connections are logged at the TCP level, as soon as the session is
established between the client and the proxy. By enabling the 'tcplog' option,
the proxy will wait until the session ends to generate an enhanced log
containing more information such as the session duration and its state at
disconnection. The number of remaining sessions after disconnection is also
indicated (for the server, the listener, and the process).

Example of TCP logging :
------------------------
    listen relais-tcp 0.0.0.0:8000
        mode tcp
        option tcplog
        log 192.168.2.200 local3

>>> haproxy[18989]: 127.0.0.1:34550 [15/Oct/2003:15:24:28] relais-tcp Srv1 0/0/5007 0 -- 1/1/1 0/0
  
    Field  Format                                        Example

        1  process_name '[' pid ']:'                     haproxy[18989]:
        2  client_ip ':' client_port                     127.0.0.1:34550
        3  '[' date ']'                                  [15/Oct/2003:15:24:28]
        4  listener_name                                 relais-tcp
        5  server_name                                   Srv1
        6  queue_time '/' connect_time '/' total_time    0/0/5007
        7  bytes_read                                    0
        8  termination_state                             --
        9  srv_conn '/' listener_conn '/' process_conn   1/1/1
       10  position in srv_queue / listener_queue        0/0


Another option, 'httplog', provides more detailed information about HTTP
contents, such as the request and some cookies. In the event that an external
component establishes frequent connections to check the service, the logs may
be full of useless lines. So it is possible not to log any session which did
not transfer any data, by setting the 'dontlognull' option. This only has an
effect on sessions which are established and then closed.

Example of HTTP logging :
-------------------------
    listen http_proxy 0.0.0.0:80
        mode http
        option httplog
        option dontlognull
        log 192.168.2.200 local3

>>> haproxy[674]: 127.0.0.1:33319 [15/Oct/2003:08:31:57] relais-http Srv1 9/0/7/147/723 200 243 - - ---- 34/34/15/8/3 0/0 "HEAD / HTTP/1.0"

A more complete example :
    haproxy[18989]: 10.0.0.1:34552 [15/Oct/2003:15:26:31] relais-http Srv1 3183/-1/-1/-1/11215 503 0 - - SC-- 205/202/150/137/+4 0/0 {w.ods.org|Mozilla} {} "HEAD / HTTP/1.0" 

    Field  Format							Example
  
        1  process_name  '[' pid ']:'					haproxy[18989]:
        2  client_ip ':' client_port					10.0.0.1:34552
        3  '[' date ']'							[15/Oct/2003:15:26:31]
        4  listener_name						relais-http
        5  server_name							Srv1
        6  Tq '/' Tw '/' Tc '/' Tr '/' Tt				3183/-1/-1/-1/11215
        7  HTTP_return_code						503
        8  bytes_read							0
        9  captured_request_cookie					-
       10  captured_response_cookie					-
       11  termination_state						SC--
       12  actconn '/' feconn '/' beconn '/' srv_conn '/' retries 	205/202/150/137/+4
       13  position in srv_queue / listener_queue			0/0
       14  '{' captured_request_headers '}'				{w.ods.org|Mozilla}
       15  '{' captured_response_headers '}'				{}
       16  '"' HTTP_request '"'						"HEAD / HTTP/1.0"
  
Note for log parsers: the URI is ALWAYS the end of the line starting with the
                      first double quote '"'.

The retries count may carry an additional '+' sign, which means that the
connection was redispatched from one server to another shortly before the
retries limit (retries 4 in the example above) was depleted.

The problem when logging at the end of the connection is that you have no clue
about what is happening during very long sessions. To work around this problem,
a new option 'logasap' was introduced in 1.1.28/1.2.1. When specified, the
proxy will log as soon as possible, just before data transfer begins. This means
that in case of TCP, it will still log the connection status to the server, and
in case of HTTP, it will log just after processing the server headers. In this
case, the number of bytes reported is the number of header bytes sent to the
client.

In order to avoid confusion with normal logs, the total time field and the
number of bytes are prefixed with a '+' sign which means that real numbers are
certainly bigger.

Example :
---------

    listen http_proxy 0.0.0.0:80
        mode http
        option httplog
        option dontlognull
        option logasap
        log 192.168.2.200 local3

>>> haproxy[674]: 127.0.0.1:33320 [15/Oct/2003:08:32:17] relais-http Srv1 9/10/7/14/+30 200 +243 - - ---- 3/1/1/1/0 1/0 "GET /image.iso HTTP/1.0"

4.2.3) Timing events
--------------------
Timers provide a great help in troubleshooting network problems. All values
are reported in milliseconds (ms). In HTTP mode, four control points plus the
total session time are reported under the form 'Tq/Tw/Tc/Tr/Tt' :

  - Tq: total time to get the client request.
    It's the time elapsed between the moment the client connection was accepted
    and the moment the proxy received the last HTTP header. The value '-1'
    indicates that the end of headers (empty line) has never been seen.

  - Tw: total time spent in the queues waiting for a connection slot. It
    accounts for listener's queue as well as the server's queue, and depends
    on the queue size, and the time needed for the server to complete previous
    sessions. The value '-1' means that the request was killed before reaching
    the queue.

  - Tc: total time to establish the TCP connection to the server.
    It's the time elapsed between the moment the proxy sent the connection
    request, and the moment it was acknowledged, or between the TCP SYN packet
    and the matching SYN/ACK in return. The value '-1' means that the
    connection never established.

  - Tr: server response time. It's the time elapsed between the moment the
    TCP connection was established to the server and the moment it sent its
    complete response headers. It purely shows the server's request processing
    time, without the network overhead due to the data transmission. The value
    '-1' means that the end of the response headers (empty line) was never
    seen.

  - Tt: total session duration time, between the moment the proxy accepted it
    and the moment both ends were closed. The exception is when the 'logasap'
    option is specified. In this case, it only equals (Tq+Tw+Tc+Tr), and is
    prefixed with a '+' sign. From this field, we can deduce Td, the data
    transmission time, by subtracting other timers when valid :

        Td = Tt - (Tq + Tw + Tc + Tr)

    Timers with '-1' values have to be excluded from this equation.
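
    For instance, with timers reported as 9/0/7/147/723, the data transmission
    time would be Td = 723 - (9 + 0 + 7 + 147) = 560 ms.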

In TCP mode ('option tcplog'), only Tw, Tc and Tt are reported.

These timers provide precious indications on trouble causes. Since the TCP
protocol defines retransmit delays of 3, 6, 12... seconds, we know for sure
that timers close to multiples of 3s are nearly always related to packets lost
due to network problems (wires or negotiation). Moreover, if <Tt> is close to
a timeout value specified in the configuration, it often means that a session
has been aborted on time-out.

Most common cases :

  - If Tq is close to 3000, a packet has probably been lost between the client
    and the proxy.
  - If Tc is close to 3000, a packet has probably been lost between the server
    and the proxy during the server connection phase. This one should always be
    very low (less than a few tens).
  - If Tr is nearly always lower than 3000 except some rare values which seem
    to be the average increased by 3000, there are probably some packets lost
    between the proxy and the server.
  - If Tt is often slightly higher than a time-out, it's often because the
    client and the server use HTTP keep-alive and the session is maintained
    after the response ends. See further below for how to disable HTTP
    keep-alive.

Other cases ('xx' means any value to be ignored) :
  -1/xx/xx/xx/Tt: the client was not able to send its complete request in time,
                  or it aborted it too early.
  Tq/-1/xx/xx/Tt: it was not possible to process the request, maybe because
                  servers were out of order.
  Tq/Tw/-1/xx/Tt: the connection could not be established on the server. Either
                  it refused it or it timed out after Tt-(Tq+Tw) ms.
  Tq/Tw/Tc/-1/Tt: the server accepted the connection but did not return a
                  complete response in time, or it closed its connection
                  unexpectedly, after Tt-(Tq+Tw+Tc) ms.

4.2.4) Session state at disconnection
-------------------------------------
TCP and HTTP logs provide a session completion indicator in the
<termination_state> field, just before the number of active
connections. It is 2 characters long in TCP mode, and 4 characters long in
HTTP mode, each character having a special meaning :

  - On the first character, a code reporting the first event which caused the
    session to terminate :

        C : the TCP session was unexpectedly aborted by the client.

        S : the TCP session was unexpectedly aborted by the server, or the
            server explicitly refused it.

        P : the session was prematurely aborted by the proxy, because of a
            connection limit enforcement, because a DENY filter was matched,
            or because of a security check which detected and blocked a
            dangerous error in the server's response which might have caused
            an information leak (eg: cacheable cookie).

        R : a resource on the proxy has been exhausted (memory, sockets, source
            ports, ...). Usually, this appears during the connection phase, and
            system logs should contain a copy of the precise error.

        I : an internal error was identified by the proxy during a self-check.
            This should NEVER happen, and you are encouraged to report any log
            containing this, because this is a bug.

        c : the client-side time-out expired first.

        s : the server-side time-out expired first.

        - : normal session completion.

  - on the second character, the TCP/HTTP session state when it was closed :

        R : waiting for complete REQUEST from the client (HTTP only). Nothing
            was sent to any server.

        Q : waiting in the QUEUE for a connection slot. This can only happen on
            servers which have a 'maxconn' parameter set. No connection attempt
            was made to any server.

        C : waiting for CONNECTION to establish on the server. The server might
            at most have noticed a connection attempt.

        H : waiting for, receiving and processing server HEADERS (HTTP only).

        D : the session was in the DATA phase.

        L : the proxy was still transmitting LAST data to the client while the
            server had already finished.

        T : the request was tarpitted. It has been held open with the client
            during the whole contimeout duration or until the client closed.

        - : normal session completion after end of data transfer.

  - the third character tells whether the persistence cookie was provided by
    the client (only in HTTP mode) :

        N : the client provided NO cookie. This is usually the case on new
            connections.

        I : the client provided an INVALID cookie matching no known
            server. This might be caused by a recent configuration change,
            mixed cookies between HTTP/HTTPS sites, or an attack.

        D : the client provided a cookie designating a server which was DOWN,
            so either the 'persist' option was used and the client was sent to
            this server, or it was not set and the client was redispatched to
            another server.

        V : the client provided a valid cookie, and was sent to the associated
            server.

        - : does not apply (no cookie set in configuration).

  - the last character reports what operations were performed on the persistence
    cookie returned by the server (only in HTTP mode) :

        N : NO cookie was provided by the server, and none was inserted either.

        I : no cookie was provided by the server, and the proxy INSERTED one.

        P : a cookie was PROVIDED by the server and transmitted as-is.

        R : the cookie provided by the server was REWRITTEN by the proxy.

        D : the cookie provided by the server was DELETED by the proxy.

        - : does not apply (no cookie set in configuration).

The combination of the first two flags gives a lot of information about what was
happening when the session terminated. It can be helpful to detect server
saturation, network troubles, local system resource starvation, attacks, etc...

The most common termination flags combinations are indicated here.

   Flags  Reason
      CR  The client aborted before sending a full request. Most probably the
          request was done by hand using a telnet client, and aborted early.

      cR  The client timed out before sending a full request. This is sometimes
          caused by too large TCP MSS values on the client side for PPPoE
          networks which cannot transport full-sized packets, or by clients
          sending requests by hand and not typing fast enough.

      SC  The server explicitly refused the connection (the proxy received a
          TCP RST or an ICMP in return). Under some circumstances, it can
          also be the network stack telling the proxy that the server is
          unreachable (eg: no route, or no ARP response on local network).

      sC  The connection to the server did not complete during contimeout.

      PC  The proxy refused to establish a connection to the server because the
          maxconn limit has been reached. The listener's maxconn parameter may
          be increased in the proxy configuration, as well as the global
          maxconn parameter.

      RC  A local resource has been exhausted (memory, sockets, source ports)
          preventing the connection to the server from establishing. The error
          logs will tell precisely what was missing. Anyway, this can only be
          solved by system tuning.

      cH  The client timed out during a POST request. This is sometimes caused
          by too large TCP MSS values for PPPoE networks which cannot transport
          full-sized packets.

      CH  The client aborted while waiting for the server to start responding.
          It might be the server taking too long to respond or the client
          clicking the 'Stop' button too fast.

      CQ  The client aborted while its session was queued, waiting for a server
          with enough empty slots to accept it. It might be that either all the
          servers were saturated or that the assigned server was taking too
          long to respond.

      CT  The client aborted while its session was tarpitted.

      sQ  The session spent too much time in the queue and was expired.

      SH  The server aborted before sending its full headers, or it crashed.

      sH  The server failed to reply during the srvtimeout delay, which
          indicates too long transactions, probably caused by back-end
          saturation. The only solutions are to fix the problem on the
          application or to increase the 'srvtimeout' parameter to support
          longer delays (at the risk of the client giving up anyway).
      
      PR  The proxy blocked the client's request, either because of an invalid
          HTTP syntax, in which case it returned an HTTP 400 error to the
          client, or because a deny filter matched, in which case it returned
          an HTTP 403 error.

      PH  The proxy blocked the server's response, because it was invalid,
          incomplete, dangerous (cache control), or matched a security filter.
          In any case, an HTTP 502 error is sent to the client.

      PT  The proxy blocked the client's request and has tarpitted its
          connection before returning it a 500 server error. Nothing was sent
          to the server.

      cD  The client did not read any data for as long as the clitimeout delay.
          This is often caused by network failures on the client side.

      CD  The client unexpectedly aborted during data transfer. This is either
          caused by a browser crash, or by a keep-alive session between the
          server and the client terminated first by the client.

      sD  The server did nothing during the srvtimeout delay. This is often
          caused by too short timeouts on L4 equipment before the server
          (firewalls, load-balancers, ...).

4.2.5) Non-printable characters
-------------------------------
As of version 1.1.29, non-printable characters are not sent as-is into log
files, but are converted to their two-digit hexadecimal representation,
prefixed by the character '#'. The only characters that can now be logged
without being escaped are between 32 and 126 (inclusive). Obviously, the
escape character '#' is also encoded to avoid any ambiguity. It is the same for
the character '"', as well as '{', '|' and '}' when logging headers.

4.2.6) Capturing HTTP headers and cookies
-----------------------------------------
Version 1.1.23 brought cookie capture, and 1.1.29 the header capture. All this
is performed using the 'capture' keyword.

Cookie capture makes it easy to track a complete user session. The syntax is :

    capture cookie <cookie_prefix> len <capture_length>

This will enable cookie capture from both requests and responses. This way,
it's easy to detect when a user switches to a new session for example, because
the server will reassign it a new cookie.

The FIRST cookie whose name starts with <cookie_prefix> will be captured, and
logged as 'NAME=value', without exceeding <capture_length> characters (64 max).
When the cookie name is fixed and known, it's preferable to suffix '=' to it to
ensure that no other cookie will be logged.

Examples :
----------
    # capture the first cookie whose name starts with "ASPSESSION"
    capture cookie ASPSESSION len 32

    # capture the first cookie whose name is exactly "vgnvisitor"
    capture cookie vgnvisitor= len 32

In the logs, the field preceding the completion indicator contains the cookie
value as sent by the server, preceded by the cookie value as sent by the
client. Each of these fields is replaced with '-' when no cookie was seen or
when the option is disabled.

Header captures have a different goal. They are useful to track unique request
identifiers set by a previous proxy, virtual host names, user-agents, POST
content-length, referrers, etc. In the response, one can search for information
about the response length, how the server asked the cache to behave, or an
object location during a redirection. As with cookie captures, it is possible
to capture request headers and response headers at the same time. The
syntax is :

    capture request  header <name> len <max length>
    capture response header <name> len <max length>

Note: Header names are not case-sensitive.

Examples:
---------
    # keep the name of the virtual server
    capture request  header Host len 20
    # keep the amount of data uploaded during a POST
    capture request  header Content-Length len 10

    # note the expected cache behaviour on the response
    capture response header Cache-Control len 8
    # note the URL location during a redirection
    capture response header Location len 20

Non-existent headers are logged as empty strings, and if one header appears more
than once, only its last occurrence will be kept. Request headers are grouped
within braces '{' and '}' in the same order as they were declared, and delimited
with a vertical bar '|' without any space. Response headers follow the same
representation, but are displayed after a space following the request headers
block. These blocks are displayed just before the HTTP request in the logs.

Example :

  Config:

    capture request  header Host len 20
    capture request  header Content-Length len 10
    capture request  header Referer len 20
    capture response header Server len 20
    capture response header Content-Length len 10
    capture response header Cache-Control len 8
    capture response header Via len 20
    capture response header Location len 20

  Log :

    Aug  9 20:26:09 localhost haproxy[2022]: 127.0.0.1:34014 [09/Aug/2004:20:26:09] relais-http netcache 0/0/0/162/+162 200 +350 - - ---- 0/0/0 0/0 {fr.adserver.yahoo.co||http://fr.f416.mail.} {|864|private||} "GET http://fr.adserver.yahoo.com/"
    Aug  9 20:30:46 localhost haproxy[2022]: 127.0.0.1:34020 [09/Aug/2004:20:30:46] relais-http netcache 0/0/0/182/+182 200 +279 - - ---- 0/0/0 0/0 {w.ods.org||} {Formilux/0.1.8|3495|||} "GET http://w.ods.org/sytadin.html HTTP/1.1" 
    Aug  9 20:30:46 localhost haproxy[2022]: 127.0.0.1:34028 [09/Aug/2004:20:30:46] relais-http netcache 0/0/2/126/+128 200 +223 - - ---- 0/0/0 0/0 {www.infotrafic.com||http://w.ods.org/syt} {Apache/2.0.40 (Red H|9068|||} "GET http://www.infotrafic.com/images/live/cartesidf/grandes/idf_ne.png HTTP/1.1" 


4.2.7) Examples of logs
-----------------------
- haproxy[674]: 127.0.0.1:33319 [15/Oct/2003:08:31:57] relais-http Srv1 6559/0/7/147/6723 200 243 - - ---- 1/3/5 0/0 "HEAD / HTTP/1.0"
  => long request (6.5s) entered by hand through 'telnet'. The server replied
     in 147 ms, and the session ended normally ('----')

- haproxy[674]: 127.0.0.1:33319 [15/Oct/2003:08:31:57] relais-http Srv1 6559/1230/7/147/6870 200 243 - - ---- 99/239/324 0/9 "HEAD / HTTP/1.0"
  => Idem, but the request was queued in the global queue behind 9 other
     requests, and waited there for 1230 ms.

- haproxy[674]: 127.0.0.1:33320 [15/Oct/2003:08:32:17] relais-http Srv1 9/0/7/14/+30 200 +243 - - ---- 1/3/3 0/0 "GET /image.iso HTTP/1.0"
  => request for a long data transfer. The 'logasap' option was specified, so
     the log was produced just before transferring data. The server replied in
     14 ms, 243 bytes of headers were sent to the client, and total time from
     accept to first data byte is 30 ms.

- haproxy[674]: 127.0.0.1:33320 [15/Oct/2003:08:32:17] relais-http Srv1 9/0/7/14/30 502 243 - - PH-- 0/2/3 0/0 "GET /cgi-bin/bug.cgi? HTTP/1.0"
  => the proxy blocked a server response either because of an 'rspdeny' or
     'rspideny' filter, or because it blocked sensitive information which risked
     being cached. In this case, the response is replaced with a '502 bad
     gateway'.

- haproxy[18113]: 127.0.0.1:34548 [15/Oct/2003:15:18:55] relais-http <NOSRV> -1/-1/-1/-1/8490 -1 0 - - CR-- 0/2/2 0/0 "" 
  => the client never completed its request and aborted itself ('C---') after
     8.5s, while the proxy was waiting for the request headers ('-R--').
     Nothing was sent to the server.

- haproxy[18113]: 127.0.0.1:34549 [15/Oct/2003:15:19:06] relais-http <NOSRV> -1/-1/-1/-1/50001 408 0 - - cR-- 2/2 0/0 "" 
  => The client never completed its request, which was aborted by the time-out
     ('c---') after 50s, while the proxy was waiting for the request headers ('-R--').
     Nothing was sent to the server, but the proxy could send a 408 return code
     to the client.

- haproxy[18989]: 127.0.0.1:34550 [15/Oct/2003:15:24:28] relais-tcp Srv1 0/0/5007 0 cD 0/0/0 0/0
  => This is a 'tcplog' entry. Client-side time-out ('c----') occurred after 5s.

- haproxy[18989]: 10.0.0.1:34552 [15/Oct/2003:15:26:31] relais-http Srv1 3183/-1/-1/-1/11215 503 0 - - SC-- 115/202/205 0/0 "HEAD / HTTP/1.0" 
  => The request took 3s to complete (probably a network problem), and the
     connection to the server failed ('SC--') after 4 attempts of 2 seconds
     (config says 'retries 3'), then a 503 error code was sent to the client.
     There were 115 connections on this server, 202 connections on this proxy,
     and 205 on the global process. It is possible that the server refused the
     connection because of too many connections already established.


4.3) HTTP header manipulation
-----------------------------
In HTTP mode, it is possible to rewrite, add or delete some of the request and
response headers based on regular expressions. It is also possible to block a
request or a response if a particular header matches a regular expression,
which is enough to stop most elementary protocol attacks, and to protect
against information leaks from the internal network. But there is a limitation
to this : since haproxy's HTTP engine knows nothing about keep-alive, only
headers passed during the first request of a TCP session will be seen. All
subsequent headers will be considered data only and not analyzed. Furthermore,
haproxy doesn't touch data contents, it stops at the end of headers.

The syntax is :
   reqadd    <string>             to add a header to the request
   reqrep    <search> <replace>   to modify the request
   reqirep   <search> <replace>   same, but ignoring the case
   reqdel    <search>             to delete a header in the request
   reqidel   <search>             same, but ignoring the case
   reqallow  <search>             definitely allow a request if a header matches <search>
   reqiallow <search>             same, but ignoring the case
   reqdeny   <search>             denies a request if a header matches <search>
   reqideny  <search>             same, but ignoring the case
   reqpass   <search>             ignore a header matching <search>
   reqipass  <search>             same, but ignoring the case
   reqtarpit <search>             tarpit a request matching <search>
   reqitarpit <search>            same, but ignoring the case

   rspadd   <string>              to add a header to the response
   rsprep   <search> <replace>    to modify the response
   rspirep  <search> <replace>    same, but ignoring the case
   rspdel   <search>              to delete a header in the response
   rspidel  <search>              same, but ignoring the case
   rspdeny  <search>              replaces a response with a HTTP 502 if a header matches <search>
   rspideny <search>              same, but ignoring the case


<search> is a POSIX regular expression (regex) which supports grouping through
parentheses (without the backslash). Spaces and other delimiters must be
prefixed with a backslash ('\') to avoid confusion with a field delimiter.
Other characters may be prefixed with a backslash to change their meaning :

  \t   for a tab
  \r   for a carriage return (CR)
  \n   for a new line (LF)
  \    to mark a space and differentiate it from a delimiter
  \#   to mark a sharp and differentiate it from a comment
  \\   to use a backslash in a regex
  \\\\ to use a backslash in the text (*2 for regex, *2 for haproxy)
  \xXX to write the ASCII hex code XX as in the C language


<replace> contains the string to be used to replace the largest portion of text
matching the regex. It can make use of the special characters above, and can
reference a substring delimited by parentheses in the regex, by the group
numerical order from 0 to 9 (0 being the entire line). In this case, you would
write a backslash ('\') immediately followed by one digit indicating the group
position.

<string> represents the string which will systematically be added after the last
header line. It can also use special characters above.

Notes :
-------
  - the first line is considered as a header, which makes it possible to rewrite
    or filter HTTP request URIs or response codes.
  - 'reqrep' is the equivalent of 'cliexp' in version 1.0, and 'rsprep' is the
    equivalent of 'srvexp' in 1.0. Those names are still supported but
    deprecated.
  - for performance reasons, the number of characters added to a request or to
    a response is limited to 4096 since version 1.1.5 (it was 256 before). This
    value is easy to modify in the code if needed (#define). If it is too short
    on occasional uses, it is possible to gain some space by removing some
    useless headers before adding new ones.
  - a denied request will generate an "HTTP 403 forbidden" response, while a
    denied response will generate an "HTTP 502 Bad gateway" response.
  - a tarpitted request will be held open on the client side for a duration
    defined in the contimeout parameter, or until the client aborts. Nothing
    will be sent to any server. When the timeout is reached, the proxy will
    reply with a 500 server error response so that the attacker does not
    suspect it has been tarpitted. The logs may report the 500, but the
    termination flags will indicate 'PT' in this case.


Examples :
----------
        ###### a few examples ######

        # rewrite 'online.fr' instead of 'free.fr' for GET and POST requests
        reqrep        ^(GET\ .*)(.free.fr)(.*) \1.online.fr\3
        reqrep        ^(POST\ .*)(.free.fr)(.*) \1.online.fr\3

        # force proxy connections to close
        reqirep       ^Proxy-Connection:.*   Proxy-Connection:\ close
        # rewrite locations
        rspirep       ^(Location:\ )([^:]*://[^/]*)(.*) \1\3

        ###### A full configuration being used on production ######

        # Every header should end with a colon followed by one space.
        reqideny      ^[^:\ ]*[\ ]*$

        # block Apache chunk exploit
        reqideny      ^Transfer-Encoding:[\ ]*chunked
        reqideny      ^Host:\ apache-

        # block annoying worms that fill the logs...
        reqideny      ^[^:\ ]*\ .*(\.|%2e)(\.|%2e)(%2f|%5c|/|\\\\)
        reqideny      ^[^:\ ]*\ ([^\ ]*\ [^\ ]*\ |.*%00)
        reqideny      ^[^:\ ]*\ .*<script
        reqideny      ^[^:\ ]*\ .*/(root\.exe\?|cmd\.exe\?|default\.ida\?)

        # tarpit attacks on the login page.
        reqtarpit     ^[^:\ ]*\ .*\.php\?login=[^0-9]

        # allow other syntactically valid requests, and block any other method
        reqipass      ^(GET|POST|HEAD|OPTIONS)\ /.*\ HTTP/1\.[01]$
        reqipass      ^OPTIONS\ \\*\ HTTP/1\.[01]$
        reqideny      ^[^:\ ]*\ 

        # force connection:close, thus disabling HTTP keep-alive
        option        httpclose

        # change the server name
        rspidel       ^Server:\ 
        rspadd        Server:\ Formilux/0.1.8


Also, the 'forwardfor' option creates an HTTP 'X-Forwarded-For' header which
contains the client's IP address. This is useful to let the final web server
know what the client address was (eg for statistics on domains). Starting with
version 1.3.8, it is possible to specify the "except" keyword followed by a
source IP address or network for which no header will be added. This is very
useful when another reverse-proxy which already adds the header runs on the
same machine or in a known DMZ, the most common case being the local use of
stunnel on the same system.

Last, the 'httpclose' option removes any 'Connection' header both ways, and
adds a 'Connection: close' header in each direction. This makes it easier to
disable HTTP keep-alive than the previous 4-rules block.

Example :
---------
    listen http_proxy 0.0.0.0:80
        mode http
        log  global
        option httplog
        option dontlognull
        option forwardfor except 127.0.0.1/8
        option httpclose

Note that some HTTP servers do not necessarily close the connections when they
receive the 'Connection: close', and if the client does not close either, then
the connection will be maintained up to the time-out. This translates into a
high number of simultaneous sessions and high global session times in the logs.
To work around this, a new option 'forceclose' appeared in version 1.2.9 to
enforce the closing of the outgoing server channel as soon as the server begins
to reply, and only if the request buffer is empty. Note that this should NOT be
used if CONNECT requests are expected between the client and the server. The
'forceclose' option implies the 'httpclose' option.

Example :
---------
    listen http_proxy 0.0.0.0:80
        mode http
        log  global
        option httplog
        option dontlognull
        option forwardfor
        option forceclose


4.4) Load balancing with persistence
------------------------------------
Combining cookie insertion with internal load balancing makes it possible to
transparently bring persistence to applications. The principle is quite simple :
  - assign a cookie value to each server
  - enable the load balancing between servers
  - insert a cookie into responses resulting from the balancing algorithm
    (indirect accesses), and ensure that no upstream proxy will cache it.
  - remove the cookie in the request headers so that the application never sees
    it.

Example :
---------
    listen application 0.0.0.0:80
        mode http
        cookie SERVERID insert nocache indirect
        balance roundrobin
        server srv1 192.168.1.1:80 cookie server01 check
        server srv2 192.168.1.2:80 cookie server02 check

The other solution brought by versions 1.1.30 and 1.2.3 is to reuse a cookie
from the server, and prefix the server's name to it. In this case, don't forget
to force "httpclose" mode so that you can be assured that every subsequent
request will have its cookie fixed.

    listen application 0.0.0.0:80
        mode http
        cookie JSESSIONID prefix
        balance roundrobin
        server srv1 192.168.1.1:80 cookie srv1 check
        server srv2 192.168.1.2:80 cookie srv2 check
        option httpclose


4.5) Protection against information leak from the servers
---------------------------------------------------------
In versions 1.1.28/1.2.1, a new option 'checkcache' was created. It carefully
examines the 'Cache-control', 'Pragma' and 'Set-cookie' headers in the server
response to check whether there's a risk of caching a cookie on a client-side
proxy. When this option is enabled, the only responses which can be delivered
to the client are :
  - all those without 'Set-Cookie' header ;
  - all those with a return code other than 200, 203, 206, 300, 301, 410,
    provided that the server has not set a 'Cache-control: public' header ;
  - all those that come from a POST request, provided that the server has not
    set a 'Cache-Control: public' header ;
  - those with a 'Pragma: no-cache' header
  - those with a 'Cache-control: private' header
  - those with a 'Cache-control: no-store' header
  - those with a 'Cache-control: max-age=0' header
  - those with a 'Cache-control: s-maxage=0' header
  - those with a 'Cache-control: no-cache' header
  - those with a 'Cache-control: no-cache="set-cookie"' header
  - those with a 'Cache-control: no-cache="set-cookie,' header
    (allowing other fields after set-cookie)

If a response doesn't respect these requirements, then it will be blocked just
as if it was from an 'rspdeny' filter, with an "HTTP 502 bad gateway". The
session state shows "PH--" meaning that the proxy blocked the response during
headers processing. Additionally, an alert will be sent in the logs so that
admins are told that there's something to be done.
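
A minimal sketch enabling this protection (addresses and cookie names are
purely illustrative) :

    listen application 0.0.0.0:80
        mode http
        option checkcache
        balance roundrobin
        cookie SERVERID insert nocache indirect
        server srv1 192.168.1.1:80 cookie server01 check
        server srv2 192.168.1.2:80 cookie server02 check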


4.6) Customizing errors
-----------------------
Some situations can make haproxy return an HTTP error code to the client :
  - invalid or too long request => HTTP 400
  - request not completely sent in time => HTTP 408
  - forbidden request (matches a deny filter) => HTTP 403
  - internal error in haproxy => HTTP 500
  - the server returned an invalid or incomplete response => HTTP 502
  - no server was available to handle the request => HTTP 503
  - the server failed to reply in time => HTTP 504

A succinct error message taken from the RFC accompanies these return codes.
But depending on the clients' level of knowledge, it may be better to return
custom, user-friendly error pages. This is made possible in two ways : one
involves a redirection to a known server, and the other consists in returning
a local file.

4.6.1) Relocation
-----------------
An error relocation is achieved using the 'errorloc' command :

    errorloc <HTTP_code> <location>

Instead of generating an HTTP error <HTTP_code> among those above, the proxy
will return a temporary redirection code (HTTP 302) towards the address
specified in <location>. This address may be either relative to the site or
absolute. Since this request will be handled by the client's browser, it's
mandatory that the returned address be reachable from the outside.

Example :
---------
    listen application 0.0.0.0:80
        errorloc 400 /badrequest.html
        errorloc 403 /forbidden.html
        errorloc 408 /toolong.html
        errorloc 500 http://haproxy.domain.net/bugreport.html
        errorloc 502 http://192.168.114.58/error50x.html
        errorloc 503 http://192.168.114.58/error50x.html
        errorloc 504 http://192.168.114.58/error50x.html

Note: RFC2616 says that a client must reuse the same method to fetch the
Location returned by a 302, which causes problems with the POST method.
The return code 303 was designed explicitly to force the client to fetch the
Location URL with the GET method, but there are some browsers pre-dating
HTTP/1.1 which don't support it. Anyway, most browsers still behave with 302 as
if it was a 303. In order to allow the user to choose, versions 1.1.31 and 1.2.5
bring two new keywords to replace 'errorloc' : 'errorloc302' and 'errorloc303'.

They are preferred over 'errorloc' (which still performs a 302). Consider using
'errorloc303' every time you know that your clients support HTTP 303 responses.
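
For instance, assuming the clients are known to handle 303 responses (the URLs
simply reuse those from the example above) :

        errorloc303 502 http://192.168.114.58/error50x.html
        errorloc303 503 http://192.168.114.58/error50x.html
        errorloc303 504 http://192.168.114.58/error50x.html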

4.6.2) Local files
------------------
Sometimes, it is desirable to change the returned error without resorting to
redirections. The second method consists in loading local files during startup
and sending them as pure HTTP content upon error. This is what the 'errorfile'
keyword does.

Warning, there are traps to consider :
 - The files are loaded while parsing configuration, before doing a chroot().
   Thus, they are relative to the real filesystem. For this reason, it is
   recommended to pass an absolute path to those files.

 - The contents of those files are not HTML, but real HTTP protocol with a
   possible HTML body. So the first line and headers are mandatory. Ideally,
   every line in the HTTP part should end with CR-LF for maximum compatibility.

 - The response is limited to the buffer size (BUFSIZE), generally 8 or 16 kB.

 - The response should not include references to the local server, in order to
   avoid infinite loops on the browser in case of local failure.

Example :
---------
        errorfile 400 /etc/haproxy/errorfiles/400badreq.http
        errorfile 403 /etc/haproxy/errorfiles/403forbid.http
        errorfile 503 /etc/haproxy/errorfiles/503sorry.http
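
For reference, here is a minimal sketch of what such a file (e.g. the
'503sorry.http' above) might contain ; the HTML body is purely illustrative,
and every line of the HTTP part should ideally end with CR-LF :

    HTTP/1.0 503 Service Unavailable
    Cache-Control: no-cache
    Connection: close
    Content-Type: text/html

    <html><body><h1>Sorry, the service is temporarily unavailable.</h1>
    Please retry in a few minutes.</body></html>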


4.7) Modifying default values
-----------------------------
Version 1.1.22 introduced the notion of default values, which eliminates the
pain of often repeating common parameters between many instances, such as
logs, timeouts, modes, etc...

Default values are set in a 'defaults' section. Each of these sections clears
all previously set default parameters, so there may be as many default
parameters as needed. Only the last one before a 'listen' section will be
used for this section. The 'defaults' section uses the same syntax as the
'listen' section, for the supported parameters. The 'defaults' keyword ignores
everything on its command line, so that fake instance names can be specified
there for better clarity.

In version 1.1.28/1.2.1, only the following parameters can be preset in the
'defaults' section :
  - log (the first and second one)
  - mode { tcp, http, health }
  - balance { roundrobin }
  - disabled (to disable every further instance)
  - enabled (to enable every further instance, this is the default)
  - contimeout, clitimeout, srvtimeout, grace, retries, maxconn
  - option { redispatch, transparent, keepalive, forwardfor, logasap, httpclose,
             checkcache, httplog, tcplog, dontlognull, persist, httpchk }
  - redispatch, redisp, transparent, source { addr:port }
  - cookie, capture
  - errorloc

As of 1.1.24, it is not possible to put certain parameters in a 'defaults'
section, mainly regular expressions and server configurations :
  - dispatch, server,
  - req*, rsp*

Last, there's no way yet to change a boolean option from its assigned default
value. So if an 'option' statement is set in a 'defaults' section, the only
way to flush it is to redefine a new 'defaults' section without this 'option'.

Examples :
----------
    defaults applications TCP
        log global
        mode tcp
        balance roundrobin
        clitimeout 180000
        srvtimeout 180000
        contimeout 4000
        retries 3
        redispatch

    listen app_tcp1 10.0.0.1:6000-6063
        server srv1 192.168.1.1 check port 6000 inter 10000
        server srv2 192.168.1.2 backup

    listen app_tcp2 10.0.0.2:6000-6063
        server srv1 192.168.2.1 check port 6000 inter 10000
        server srv2 192.168.2.2 backup
    
    defaults applications HTTP
        log global
        mode http
        option httplog
        option forwardfor
        option dontlognull
        balance roundrobin
        clitimeout 20000
        srvtimeout 20000
        contimeout 4000
        retries 3

    listen app_http1 10.0.0.1:80-81
        cookie SERVERID postonly insert indirect
        capture cookie userid= len 10
        server srv1 192.168.1.1:+8000 cookie srv1 check port 8080 inter 1000
        server srv2 192.168.1.2:+8000 cookie srv2 check port 8080 inter 1000

    defaults
        # this empty section voids all default parameters


4.8) Status report in HTML page
-------------------------------
Starting with 1.2.14, it is possible for HAProxy to intercept requests for a
particular URI and return a full report of the proxy's activity and server
statistics. This is available through the 'stats' keyword, associated with any
of the following options :

   - stats enable
   - stats uri <uri prefix>
   - stats realm <authentication realm>
   - stats auth <user:password>
   - stats scope <proxy_id> | '.'

By default, the status report is disabled. Specifying any combination of the
options above enables it for the proxy instance referencing them. The easiest
solution is to use "stats enable", which enables the report with the following
default parameters :

   - default URI   : "/haproxy?stats"        (CONFIG_STATS_DEFAULT_URI)
   - default auth  : unspecified (no authentication)
   - default realm : "HAProxy Statistics"    (CONFIG_STATS_DEFAULT_REALM)
   - default scope : unspecified (access to all instances)
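
For instance, a minimal sketch relying only on these defaults (addresses are
purely illustrative) ; the report is then served on "/haproxy?stats" without
authentication :

    listen app 192.168.1.100:80
        mode http
        option httpclose
        balance roundrobin
        stats enable
        server srv1 192.168.1.1:8080 check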

The "stats uri <uri_prefix>" option allows one to intercept another URI prefix.
Note that any URI that BEGINS with this string will match. For instance, one
proxy instance might be dedicated to status page only and would reply to any
URI.

Example :
---------
    # catches any URI and returns the status page.
    listen stats :8080
        mode http
        stats uri /

The "stats auth <user:password>" option enables Basic authentication and adds a
valid user:password combination to the list of authorized accounts. The user
and password are passed in the configuration file as clear text, and since this
is HTTP Basic authentication, you should be aware that it transits as clear
text on the network, so you must not use any sensitive account. The list is
unlimited in order to provide easy access to developers or customers.

The "stats realm <realm>" option defines the "realm" name which is displayed
in the popup box when the browser asks for a password. It's important to ensure
that this one is not used by the application, otherwise the browser will try to
use a cached one from the application. Note that any space in the realm name
should be escaped with a backslash ('\').

The "stats scope <proxy_id>" option limits the scope of the status report. By
default, all proxy instances are listed. But under some circumstances, it would
be better to limit the listing to some proxies or only to the current one. This
is what this option does. The special proxy name "." (a single dot) references
the current proxy. The proxy name can be repeated multiple times, even for
proxies defined later in the configuration or some which do not exist. The name
is the one which appears after the 'listen' keyword.

Example :
---------
    # simple application with authenticated embedded status report
    listen app1 192.168.1.100:80
        mode http
        option httpclose
        balance roundrobin
        cookie SERVERID postonly insert indirect
        server srv1 192.168.1.1:8080 cookie srv1 check inter 1000
        server srv2 192.168.1.2:8080 cookie srv2 check inter 1000
        stats uri /my_stats
        stats realm Statistics\ for\ MyApp1-2
        stats auth guest:guest
        stats auth admin:AdMiN123
        stats scope .
        stats scope app2

    # simple application with anonymous embedded status report
    listen app2 192.168.2.100:80
        mode http
        option httpclose
        balance roundrobin
        cookie SERVERID postonly insert indirect
        server srv1 192.168.2.1:8080 cookie srv1 check inter 1000
        server srv2 192.168.2.2:8080 cookie srv2 check inter 1000
        stats uri /my_stats
        stats realm Statistics\ for\ MyApp2
        stats scope .

    listen admin_page :8080
        mode http
        stats uri /my_stats
        stats realm Global\ statistics
        stats auth admin:AdMiN123

Notes :
-------
  - The 'stats' options can also be specified in the 'defaults' section, in
    which case they will provide the exact same configuration to all further
    instances (hence the usefulness of the scope "."), as shown in the sketch
    after these notes. However, if an instance redefines any 'stats' parameter,
    defaults will not be used for this instance.

  - HTTP Basic authentication is very basic and offers no protection against
    snooping. No sensitive password should be used, and be aware that there is
    no way to remove it from the browser, so it will be sent to the whole
    application upon further accesses.

  - It is very important that the 'option httpclose' is specified, otherwise
    the proxy will not be able to detect the URI within keep-alive sessions
    maintained between the browser and the servers, so the stats URI will be
    forwarded unmodified to the server as if the option was not set.
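
As mentioned in the first note, here is a minimal sketch of 'stats' parameters
shared through a 'defaults' section (names, addresses and credentials are
purely illustrative) :

    defaults
        mode http
        option httplog
        option httpclose
        stats uri /my_stats
        stats realm Shared\ statistics
        stats auth admin:AdMiN123
        stats scope .

    # both instances below inherit the same status page configuration
    listen app1 192.168.1.100:80
        balance roundrobin
        server srv1 192.168.1.1:8080 check

    listen app2 192.168.2.100:80
        balance roundrobin
        server srv1 192.168.2.1:8080 check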


5) Access lists
===============

With version 1.3.10, a new concept of access lists (acl) was born. As it was
not necessary to reinvent the wheel, and because even long thoughts led to
unsatisfying proposals, it was finally decided that something close to what
Squid provides would be a good compromise between features and ease of use.

The principle is very simple : acls are declared with a name, a test and a list
of valid values to check against during the test. Conditions are applied on
various actions, and those conditions apply a logical AND between acls. The
condition is then only met if all acls are true.

It is possible to use the reserved keyword "OR" in conditions, and it is
possible for an acl to be specified multiple times, even with various tests, in
which case the first one which returns true validates the ACL.

As of 1.3.12, only the following tests have been implemented :

   Layer 3/4 :
     src       <ipv4_address>[/mask] ... : match IPv4 source address
     dst       <ipv4_address>[/mask] ... : match IPv4 destination address
     src_port  <range> ...               : match source port range
     dst_port  <range> ...               : match destination port range
     dst_conn  <range> ...               : match #connections on frontend

   Layer 7 :
     method    <HTTP method> ...  : match HTTP method
     req_ver   <1.0|1.1> ...      : match HTTP request version
     resp_ver  <1.0|1.1> ...      : match HTTP response version
     status    <range> ...        : match HTTP response status code in range
     url       <string> ... : exact string match on URI
     url_reg   <regex>  ... : regex string match on URI
     url_beg   <string> ... : true if URI begins with <string>
     url_end   <string> ... : true if URI ends with <string>
     url_sub   <string> ... : true if URI contains <string>
     url_dir   <string> ... : true if URI contains <string> between slashes
     url_dom   <string> ... : true if URI contains <string> between slashes or dots

A 'range' is one or two integers which may be prefixed by an operator.
The syntax is :

  [<op>] <low>[:<high>]

Where <op> can be :
  'eq' : the tested value must be equal to <low> or within <low>..<high>
  'le' : the tested value must be lower than or equal to <low>
  'lt' : the tested value must be lower than <low>
  'ge' : the tested value must be greater than or equal to <low>
  'gt' : the tested value must be greater than <low>

When no operator is defined, 'eq' is assumed. Note that when the operator is
specified, it applies to all subsequent ranges of values until the end of the
line is reached or another operator is specified. Example :

  acl status_error  status   400:599
  acl saturated_frt dst_conn ge 1000
  acl invalid_ports src_port lt 512 ge 65535
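
To complement the earlier paragraph about the "OR" keyword and about acls
declared multiple times, here is a minimal sketch (names, addresses and URIs
are purely illustrative) :

  # "priv_net" is true if either of its two definitions matches
  acl priv_net   src 192.168.0.0/16
  acl priv_net   src 10.0.0.0/8
  acl stats_uri  url_beg /my_stats
  acl proxy_url  url_beg http://

  # block stats requests coming from outside the private networks,
  # as well as any request using a proxy-style absolute URI
  block if stats_uri !priv_net OR proxy_url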

Other tests are coming (headers, cookies, time, auth) ; it's just a matter of
time. It is also planned to support reading the patterns from a file, as well
as ignoring the case for some of them.

The only command supporting a condition right now is the "block" command, which
blocks a request and returns a 403 if its condition is true (with the "if"
keyword), or if it is false (with the "unless" keyword).

Example :
---------

    acl options_uris  url *
    acl meth_option   method OPTIONS
    acl http_1.1      req_ver 1.1
    acl allowed_meth  method GET HEAD POST OPTIONS CONNECT
    acl connect_meth  method CONNECT
    acl proxy_url     url_beg http://

    # block if reserved URI "*" used with a method other than "OPTIONS"
    block if options_uris !meth_option

    # block if the OPTIONS method is used with HTTP 1.0
    block if meth_option !http_1.1

    # allow non-proxy url with anything but the CONNECT method
    block if !connect_meth !proxy_url

    # block all unknown methods
    block unless allowed_meth

Note: this documentation is very light but should allow one to get started, and
above all it should make it possible to work on the project without being
slowed down too much by the documentation.


=========================
| System-specific setup |
=========================

Linux 2.4
=========

-- cut here --
#!/bin/sh
# set this to about 256 per 4 MB of RAM (16384 for a 256 MB machine)
MAXFILES=16384
echo $MAXFILES > /proc/sys/fs/file-max
ulimit -n $MAXFILES

if [ -e /proc/sys/net/ipv4/ip_conntrack_max ]; then
        echo 65536 > /proc/sys/net/ipv4/ip_conntrack_max
fi

if [ -e /proc/sys/net/ipv4/netfilter/ip_ct_tcp_timeout_fin_wait ]; then
        # 30 seconds for fin, 15 for time wait
        echo 3000 > /proc/sys/net/ipv4/netfilter/ip_ct_tcp_timeout_fin_wait
        echo 1500 > /proc/sys/net/ipv4/netfilter/ip_ct_tcp_timeout_time_wait
        echo 0 > /proc/sys/net/ipv4/netfilter/ip_ct_tcp_log_invalid_scale
        echo 0 > /proc/sys/net/ipv4/netfilter/ip_ct_tcp_log_out_of_window
fi

echo 1024 60999 > /proc/sys/net/ipv4/ip_local_port_range
echo 30 > /proc/sys/net/ipv4/tcp_fin_timeout
echo 4096 > /proc/sys/net/ipv4/tcp_max_syn_backlog
echo 262144 > /proc/sys/net/ipv4/tcp_max_tw_buckets
echo 262144 > /proc/sys/net/ipv4/tcp_max_orphans
echo 300 > /proc/sys/net/ipv4/tcp_keepalive_time
echo 1 > /proc/sys/net/ipv4/tcp_tw_recycle
echo 0 > /proc/sys/net/ipv4/tcp_timestamps
echo 0 > /proc/sys/net/ipv4/tcp_ecn
echo 1 > /proc/sys/net/ipv4/tcp_sack
echo 0 > /proc/sys/net/ipv4/tcp_dsack

# auto-tuned on 2.4
#echo 262143 > /proc/sys/net/core/rmem_max
#echo 262143 > /proc/sys/net/core/rmem_default

echo 16384 65536 524288 > /proc/sys/net/ipv4/tcp_rmem
echo 16384 349520 699040 > /proc/sys/net/ipv4/tcp_wmem

-- cut here --


FreeBSD
=======

A FreeBSD port of HA-Proxy is now available and maintained, thanks to
Clement Laforet <sheepkiller@cultdeadsheep.org>.

For more information :
http://www.freebsd.org/cgi/url.cgi?ports/net/haproxy/pkg-descr
http://www.freebsd.org/cgi/cvsweb.cgi/ports/net/haproxy/
http://www.freshports.org/net/haproxy


-- end --