File: CONFIG-KEYS

SUPPORTED CONFIGURATION KEYS
Both configuration directives and command-line switches are listed below.
A configuration consists of key/value pairs, separated by the ':' char.
Starting a line with the '!' symbol makes the interpreter ignore the whole
line, turning it into a comment. Please also refer to the QUICKSTART
document and the 'examples/' sub-tree for some examples.

Directives are sometimes grouped, like sql_table and print_output_file:
this is to stress that, if multiple plugins are running as part of the same
daemon instance, such directives must be bound to the plugin they refer
to - in order to prevent undesired inheritance effects. In other words,
grouped directives share the same field in the configuration structure.


LEGEND of flags:

GLOBAL		Can't be configured on individual plugins
NO_GLOBAL	Can't be configured globally
NO_PMACCTD	Does not apply to pmacctd
NO_UACCTD	Does not apply to uacctd 
NO_NFACCTD	Does not apply to nfacctd
NO_SFACCTD	Does not apply to sfacctd
NO_PMBGPD	Does not apply to pmbgpd
NO_PMBMPD	Does not apply to pmbmpd
ONLY_PMACCTD	Applies only to pmacctd
ONLY_UACCTD	Applies only to uacctd
ONLY_NFACCTD	Applies only to nfacctd
ONLY_SFACCTD	Applies only to sfacctd
ONLY_PMBGPD	Applies only to pmbgpd
ONLY_PMBMPD	Applies only to pmbmpd
MAP		Indicates the input file is a map


LIST OF DIRECTIVES:

KEY: 		debug (-d)
VALUES:		[ true | false ]
DESC:		Enables debug (default: false).

KEY:            debug_internal_msg
VALUES:         [ true | false ]
DESC:           Extra flag to enable debug of internal messaging between Core process
		and plugins. It has to be enabled on top of 'debug' (default: false).

KEY:		daemonize (-D) [GLOBAL]
VALUES:		[ true | false ]
DESC:		Daemonizes the process (default: false).

KEY:		aggregate (-c)
VALUES:		[ src_mac, dst_mac, vlan, cos, etype, src_host, dst_host, src_net, dst_net,
		 src_mask, dst_mask, src_as, dst_as, src_port, dst_port, tos, proto, none,
		 sum_mac, sum_host, sum_net, sum_as, sum_port, flows, tag, tag2, label,
		 class, tcpflags, in_iface, out_iface, std_comm, ext_comm, lrg_comm,
		 as_path, peer_src_ip, peer_dst_ip, peer_src_as, peer_dst_as, local_pref,
		 med, dst_roa, src_std_comm, src_ext_comm, src_lrg_comm, src_as_path,
		 src_local_pref, src_med, src_roa, mpls_vpn_rd, mpls_pw_id, mpls_label_top,
		 mpls_label_bottom, mpls_stack_depth, sampling_rate, sampling_direction,
		 src_host_country, dst_host_country, src_host_pocode, dst_host_pocode,
		 src_host_coords, dst_host_coords, nat_event, fw_event, post_nat_src_host,
		 post_nat_dst_host, post_nat_src_port, post_nat_dst_port, tunnel_src_mac,
		 tunnel_dst_mac, tunnel_src_host, tunnel_dst_host, tunnel_proto, tunnel_tos,
		 tunnel_src_port, tunnel_dst_port, vxlan, timestamp_start, timestamp_end,
		 timestamp_arrival, timestamp_export, export_proto_seqno,
		 export_proto_version, export_proto_sysid ]
FOREWORDS:	Individual IP packets are uniquely identified by their header field values (a
		rather large set of primitives!). Same applies to uni-directional IP flows, as
		they have at least enough information to discriminate where packets are coming
		from and going to. Aggregates are instead used for the sole purpose of IP
		accounting and hence can be identified by an arbitrary set of primitives.
		The process to create an aggregate starting from IP packets or flows is: (a)
		select only the primitives of interest (generic aggregation), (b) optionally
		cast certain primitive values into broader logical entities, ie. IP addresses
		into network prefixes or Autonomous System Numbers (spatial aggregation) and
		(c) sum aggregate bytes/flows/packets counters when a new tributary IP packet
		or flow is captured (temporal aggregation).
DESC:		Aggregates captured traffic data by the specified set of primitives.
		sum_<primitive> are compound primitives which sum ingress/egress traffic in a
		single aggregate; current limitation of sum primitives: each sum primitive is
		mutually exclusive with any other primitive, sum and non-sum alike. The 'none'
		primitive produces a single grand-total aggregate for all traffic flowing through.
		'tag', 'tag2' and 'label' generate tags when tagging engines (pre_tag_map,
		post_tag) are in use. 'class' enables L7 traffic classification.
NOTES:		* The list of aggregation primitives available to each specific pmacct daemon,
		  along with their descriptions, can be obtained via the -a command-line option,
		  ie. "pmacctd -a".
		* Some primitives (ie. tag2, timestamp_start, timestamp_end) are not part of
		  any default SQL table schema shipped. Always check out documentation related
		  to the RDBMS in use (ie. 'sql/README.mysql') which will point you to extra
		  primitive-related documentation, if required.
		* peer_src_ip, peer_dst_ip: two primitives with an obscure name conceived to
		  be as generic as possible due to the many different use-cases around them:
		  peer_src_ip is the IP address of the node exporting NetFlow/IPFIX or sFlow;
		  peer_dst_ip is the BGP next-hop or IP next-hop (if use_ip_next_hop is set
		  to true).
		* sampling_rate: if counters renormalization (ie. sfacctd_renormalize) is
		  enabled this field will report a value of one (1); otherwise it will report
		  the rate that is passed by the protocol or sampling_map. A value of zero (0)
		  means 'unknown' and hence no rate is applied to original counter values.
		* src_std_comm, src_ext_comm, src_lrg_comm, src_as_path are based on reverse
		  BGP lookups; peer_src_as, src_local_pref and src_med are by default based on
		  reverse BGP lookups but can be alternatively based on other methods, for
		  example maps (ie. bgp_peer_src_as_type). Internet traffic is by nature
		  asymmetric hence reverse BGP lookups must be used with caution (ie. against
		  own prefixes).
		* mpls_label_top, mpls_label_bottom primitives only include the MPLS label
		  value, stripped of EXP code-points (and the BoS flag). Visibility into EXP values
		  can be achieved by defining a custom primitive to extract the full 3 bytes,
		  ie. 'name=mplsFullTopLabel field_type=70 len=3 semantics=raw' for NetFlow/
		  IPFIX.
		* timestamp_start, timestamp_end and timestamp_arrival let pmacct act as a
		  traffic logger up to the msec level (if reported by the capturing method).
		  timestamp_start records NetFlow/IPFIX flow start time or observation;
		  timestamp_end records NetFlow/IPFIX flow end time; timestamp_arrival
		  records libpcap packet timestamp and sFlow/NetFlow/IPFIX packet arrival
		  time at the collector. The finest time-bin granularity of historical
		  accounting (enabled by the *_history config directives, ie. kafka_history)
		  is 1 minute: timestamp_start can be used for finer granularities, ie.
		  second (timestamps_secs set to true) or sub-second.
		* tcpflags: in pmacctd, uacctd and sfacctd daemons TCP flags are ORed until
		  the aggregate is flushed - hence emulating the behaviour of NetFlow/IPFIX.
		  If a flag analysis is needed, packets with different flags (combinations)
		  should be isolated using a pre_tag_map/pre_tag_filter or aggregate_filter
		  features (see examples in QUICKSTART and review libpcap filtering syntax
		  via pcap-filter man page). 
		* export_proto_seqno reports about export protocol (NetFlow, sFlow, IPFIX)
		  sequence number and can be very relevant to detect packet loss. nfacctd and
		  sfacctd do perform simple non-contextual sequencing checks but these are
		  mainly limited to checking out-of-order situations; proper contextual checking
		  can be performed as part of post-processing. A specific plugin instance,
		  separate from the main / accounting one, can be configured with 'aggregate:
		  export_proto_seqno' for the task. An example of a simple check would be to
		  find min/max sequence numbers, compute their difference and make sure it
		  matches the amount of entries in the interval; the check can then be
		  windowed over time by using timestamps (ie. 'timestamp_export' primitive
		  and/or *_history config directives).
		* timestamp_export is the observation time at the exporter. This is only
		  relevant in export protocols involving caching, ie. NetFlow/IPFIX. In all
		  other cases this would not be populated or be equal to timestamp_start.
		* In nfacctd, the undocumented aggregation primitive class_frame allows applying
		  nDPI classification to NFv9/IPFIX packets carrying IE 315 (dataLinkFrameSection).
		  The class primitive instead leverages traditional classification using NetFlow
		  v9/IPFIX IEs 94, 95 and 96 (applicationDescription, applicationId and
		  applicationName).
DEFAULT:	src_host
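
		For example, a minimal per-plugin aggregation setup (the plugin name 'foo' and the
		chosen primitives are purely illustrative; any combination from the VALUES list
		above is valid):

		...
		plugins: print[foo]
		aggregate[foo]: src_host, dst_host, src_port, dst_port, proto, tos
		...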

KEY:		aggregate_primitives [GLOBAL, MAP]
DESC:		Expects full pathname to a file containing custom-defined primitives. Once
		defined in this file, primitives can be used in 'aggregate' statements. The
		feature is currently available only in nfacctd, for NetFlow v9/IPFIX, pmacctd
		and uacctd. Examples are available in 'examples/primitives.lst.example'. This
		map does not support reloading at runtime. 
DEFAULT:	none
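
		For example (pathname purely illustrative; field_type refers to the NetFlow v9/IPFIX
		Information Element number, see examples/primitives.lst.example for the full map
		syntax):

		...
		aggregate_primitives: /path/to/primitives.lst
		aggregate: src_host, dst_host, mplsFullTopLabel
		...

		with /path/to/primitives.lst containing a definition such as the one quoted in the
		'aggregate' notes above:

		name=mplsFullTopLabel field_type=70 len=3 semantics=raw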

KEY:		aggregate_filter [NO_GLOBAL, NO_UACCTD]
DESC:		Per-plugin filtering applied against the original packet or flow. Aggregation
		is performed slightly afterwards, upon successful match of this filter.
		By binding a filter, in tcpdump syntax, to an active plugin, this directive
		allows selecting which data is delivered to the plugin and aggregated
		as specified by the plugin 'aggregate' directive. See the following example:

		...
		aggregate[inbound]: dst_host
		aggregate[outbound]: src_host
		aggregate_filter[inbound]: dst net 192.168.0.0/16
		aggregate_filter[outbound]: src net 192.168.0.0/16
		plugins: memory[inbound], memory[outbound]
		...

		This directive can be used in conjunction with 'pre_tag_filter' (which, in
		turn, allows filtering on tags). You will also need to force fragmentation handling
		in the specific case in which a) none of the 'aggregate' directives is including
		L4 primitives (ie. src_port, dst_port) but b) an 'aggregate_filter' runs a filter
		which requires dealing with L4 primitives. For further information, refer to the
		'pmacctd_force_frag_handling' directive.
DEFAULT:	none

KEY:		dtls_path [GLOBAL]
DESC:		Full path to a directory containing files needed to establish a successful DTLS
		session (key, certificate and CA file); a key.pem file can be generated with the
		"certtool --generate-privkey --outfile key.pem" command-line; a self-signed
		cert.pem certificate, having previously created the key, can be generated with
		the "certtool --generate-self-signed --load-privkey key.pem --outfile cert.pem"
		command-line; the ca-certificates.crt CA file can be copied from (ie. on Debian
		or Ubuntu) "/etc/ssl/certs/ca-certificates.crt".
DEFAULT:	none

KEY:		pcap_filter [GLOBAL, ONLY_PMACCTD, ONLY_PMBMPD]
DESC:		This filter is global and applied to all incoming packets. It's passed to libpcap
		and expects libpcap/tcpdump filter syntax. Being global, it doesn't offer great
		flexibility, but it is the fastest way to drop unwanted traffic.
DEFAULT:	none
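
		For example, to account only for traffic to/from a given prefix (the filter
		expression is purely illustrative, standard libpcap/tcpdump syntax applies):

		...
		pcap_filter: net 192.168.0.0/16
		...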

KEY:		pcap_protocol [GLOBAL, ONLY_PMACCTD]
DESC:		If set, specifies a specific packet socket protocol value to limit packet capture
		to (for example, 0x0800 = IPv4). This option is only supported if pmacct was built
		against a version of libpcap that supports pcap_set_protocol().
DEFAULT:	none

KEY:		snaplen (-L) [GLOBAL, NO_NFACCTD, NO_SFACCTD]
DESC:		Specifies the maximum number of bytes to capture for each packet. This directive has
		key importance to both classification and connection tracking engines. In fact, some
		protocols (mostly text-based, eg. RTSP, SIP) benefit from extra bytes because these
		give more chances to successfully track data streams spawned by the control channel.
		It must also be noted that capturing a larger portion of each packet requires more
		resources, so the right value needs to be traded off. When classification is enabled,
		values under 200 bytes are often meaningless, while 500-750 bytes are enough even for
		text-based protocols. Default snaplen values are fine if classification is disabled.
DEFAULT:	128 bytes; 64 bytes if compiled with --disable-ipv6

KEY:		plugins (-P) [GLOBAL]
VALUES:		[ memory | print | mysql | pgsql | sqlite3 | nfprobe | sfprobe | tee | amqp | kafka ]
DESC:		Plugins to be enabled. memory, print, nfprobe, sfprobe and tee plugins are always
		included in pmacct executables as they do not contain dependencies on external
		libraries. Database (ie. RDBMS, noSQL) and messaging ones (ie. amqp, kafka) do have
		external dependencies and hence are available only if explicitly configured and
		compiled.
		memory plugin uses a memory table as backend; then, a client tool, 'pmacct', can fetch
		the memory table content; the memory plugin is good for prototype solutions and/or
		small environments. mysql, pgsql and sqlite3 plugins output respectively to MySQL (or
		MariaDB with the MySQL-compatible C API), PostgreSQL and SQLite 3.x (or BerkeleyDB 5.x
		with the SQLite API compiled-in) tables to store data. print plugin prints output data
		to flat-files or stdout in JSON, CSV or tab-spaced formats, or encodes it using the
		Apache Avro serialization system. amqp and kafka plugins allow outputting data to
		RabbitMQ and Kafka brokers respectively. All these plugins, SQL, no-SQL and messaging
		are good for production solutions and/or larger scenarios.
		nfprobe acts as a NetFlow/IPFIX agent and exports collected data via NetFlow v5/
		v9 and IPFIX datagrams to a remote collector. sfprobe acts as a sFlow agent and 
		exports collected data via sFlow v5 datagrams to a remote collector. Both nfprobe
		and sfprobe plugins apply only to pmacctd and uacctd daemons. tee acts as a replicator
		for NetFlow/IPFIX/sFlow data (also transparent); it applies to nfacctd and sfacctd
		daemons only. Plugins can be either anonymous or named; configuration directives can
		be either global or bound to a specific plugin, if named. An anonymous plugin is
		declared as 'plugins: mysql' in the config whereas a named plugin is declared as
		'plugins: mysql[name]'. Then, directives can be bound specifically to such named
		plugin as: 'directive[name]: value'.
DEFAULT:	memory
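
		For example, an anonymous plugin next to a named one, with a directive bound to the
		named instance only (plugin name and pathname are purely illustrative):

		...
		plugins: memory, print[foo]
		print_output_file[foo]: /path/to/foo.txt
		...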

KEY:		[ nfacctd_pipe_size | sfacctd_pipe_size | pmacctd_pipe_size ] [GLOBAL, NO_UACCTD]
DESC:		Defines the size of the kernel socket used to read traffic data. The socket is highlighted
		below with "XXXX": 

                                         XXXX 
                [network] ----> [kernel] ----> [core process] ----> [plugin] ----> [backend]
					       [__________pmacct___________]

		On Linux systems, if this configuration directive is not specified, the default socket
		size granted is defined in /proc/sys/net/core/[rw]mem_default; the maximum configurable
		socket size is defined in /proc/sys/net/core/[rw]mem_max instead. Still on Linux, the
		"drops" field of /proc/net/udp or /proc/net/udp6 can be checked to ensure its value
		is not increasing.
DEFAULT:	Operating System default 
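
		For example, to request a 2MB receive socket for a busy nfacctd instance (value
		purely illustrative):

		...
		nfacctd_pipe_size: 2097152
		...

		On Linux the kernel must be allowed to grant such a size, ie. by raising
		net.core.rmem_max ("sysctl -w net.core.rmem_max=2097152") if needed.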

KEY:            [ bgp_daemon_pipe_size | bmp_daemon_pipe_size ] [GLOBAL]
DESC:           Defines the size of the kernel socket used for BGP and BMP messaging. The socket is
		highlighted below with "XXXX":

                                         XXXX
                [network] ----> [kernel] ----> [core process] ----> [plugin] ----> [backend]
                                               [__________pmacct___________]

		On Linux systems, if this configuration directive is not specified, the default socket
		size granted is defined in /proc/sys/net/core/rmem_default; the maximum configurable
		socket size (which can be changed via sysctl) is defined in /proc/sys/net/core/rmem_max
		instead.
DEFAULT:	Operating System default

KEY:		plugin_pipe_size
DESC:		The Core Process and each of the plugin instances run as separate processes. To
		exchange data, they set up a circular queue (home-grown implementation, referred to
		as 'pipe'), highlighted below with "XXXX":

							      XXXX
		[network] ----> [kernel] ----> [core process] ----> [plugin] ----> [backend]
					       [__________pmacct___________]

		This directive sets the total size, in bytes, of such queue. Its default size is set
		to 4MB. Whenever facing heavy traffic loads, this size can be adjusted to hold more
		data. In the following example, the queue between the Core process and the plugin
		'test' is set to 10MB:

		...
		plugins: memory[test]
		plugin_pipe_size[test]: 10240000 
		...

		When enabling debug, log messages about obtained and target pipe sizes are printed.
		If obtained is less than target, it could mean the maximum socket size granted by
		the Operating System has to be increased. On Linux systems, the default socket size granted
		is defined in /proc/sys/net/core/[rw]mem_default ; the maximum configurable socket
		size (which can be changed via sysctl) is defined in /proc/sys/net/core/[rw]mem_max
		instead.

		In case of data loss, messages containing the "missing data detected" string will be
		logged, indicating the affected plugin and the current settings.

		Alternatively, see plugin_pipe_zmq and plugin_pipe_zmq_profile.
DEFAULT:	4MB

KEY:		plugin_buffer_size 
DESC:		By defining the transfer buffer size, in bytes, this directive enables buffering of
		data transfers between core process and active plugins. Once a buffer is filled, it
		is delivered to the plugin. Setting a larger value may improve throughput (ie. amount
		of CPU cycles required to transfer data); setting a smaller value may improve latency,
		especially in scenarios with little data influx. It is disabled by default. If used
		with the home-grown circular queue implementation, the value has to be less than or
		equal to the size defined by 'plugin_pipe_size'; keeping a ratio between 1:100 and
		1:1000 between the two is considered good practice. The circular queue of
		plugin_pipe_size size is partitioned into chunks of plugin_buffer_size.

		Alternatively, see plugin_pipe_zmq and plugin_pipe_zmq_profile.
DEFAULT:	Set to the size of the smallest element to buffer 
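
		For example, keeping the suggested 1:1000 ratio between buffer and pipe size (plugin
		name and values purely illustrative):

		...
		plugins: print[foo]
		plugin_buffer_size[foo]: 10240
		plugin_pipe_size[foo]: 10240000
		...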

KEY:		plugin_pipe_zmq
VALUES:		[ true | false ]
DESC:		By defining this directive to 'true', a ZeroMQ queue is used for queueing and data
		exchange between the Core Process and the plugins. This is in alternative to the
		home-grown circular queue implementation (see plugin_pipe_size description). This
		directive, along with all other plugin_pipe_zmq_* directives, can be set globally
		or be applied on a per-plugin basis (ie. it is a valid scenario, if multiple
		plugins are instantiated, that some make use of home-grown queueing while others
		use ZeroMQ-based queueing). For a quick comparison: while relying on a ZeroMQ queue
		introduces an external dependency, ie. libzmq, it reduces to a bare minimum the
		tuning required by the home-grown circular queue implementation. See QUICKSTART for
		some examples.
DEFAULT:        false
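
		For example, two plugins where only one uses ZeroMQ-based queueing (plugin names and
		values purely illustrative):

		...
		plugins: print[zmq], print[legacy]
		plugin_pipe_zmq[zmq]: true
		plugin_pipe_size[legacy]: 10240000
		...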

KEY:		plugin_pipe_zmq_retry
DESC:		Defines the interval of time, in seconds, after which a connection to the ZeroMQ
                server (Core Process) should be retried by the client (Plugin) after a failure is
		detected.
DEFAULT:        60

KEY:		plugin_pipe_zmq_profile
VALUES:		[ micro | small | medium | large | xlarge ]
DESC:		Allows selecting one of a few standard buffering profiles. Following are the
		recommended buckets in flows/samples/packets per second (the configured buffer
		value is reported in brackets and is meant only to facilitate transitioning
		existing deployments from plugin_buffer_size):

			micro   : up to 1K (0KB)
			small   : from 1K to 10-15K (10KB) 
			medium  : from 10-15K to 100-125K (100KB)
			large   : from 100-125K to 250K (1MB)
			xlarge  : from 250K (10MB)

		A symptom that the selected profile is undersized is missing-data warnings appearing
		in the logs; a symptom that it is oversized is latency in data being purged out.
		The amount of flows/samples per second can be estimated as described in Q21 in
		the FAQS document. Should no profile fit the sizing, the buffering value can be
		customised using the plugin_buffer_size directive. 
DEFAULT:	micro 
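
		For example, for a plugin receiving roughly 50K flows per second (plugin name purely
		illustrative; 50K falls in the 'medium' bucket above):

		...
		plugin_pipe_zmq[foo]: true
		plugin_pipe_zmq_profile[foo]: medium
		...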

KEY:		plugin_pipe_zmq_hwm
DESC:           Defines the messages high watermark, that is, "The high water mark is a hard
		limit on the maximum number of outstanding messages ZeroMQ shall queue in
		memory for any single peer that the specified socket is communicating with. A
		value of zero means no limit.". If configured, upon reaching the set watermark
		value, data in excess will be discarded and an error log message will be output.
DEFAULT:        0

KEY:		plugin_exit_any
VALUES:		[ true | false ]
DESC:		By default, the daemon shuts down gracefully (core process and all plugins) if
		either the core process or all of the registered plugins bail out. Setting this to
		true makes the daemon shut down gracefully as soon as any single plugin bails out,
		regardless of whether other plugins are still active.
DEFAULT:	false


KEY:		files_umask 
DESC:		Defines the mask for newly created files (log, pid, etc.) and their related directory
		structure. A mask less than "002" is not accepted due to security reasons.
DEFAULT:	077

KEY:            files_uid
DESC:           Defines the system user id (UID) for files opened for writing (log, pid, etc.); this
		is indeed possible only when running the daemon as super-user. This is also applied
		to any intermediary directory structure which might be created. Both a user name
		string and a numeric id are valid input.
DEFAULT:	Operating System default (current user UID)

KEY:            files_gid
DESC:           Defines the system group id (GID) for files opened for writing (log, pid, etc.); this
		is indeed possible only when running the daemon as super-user; this is also applied
		to any intermediary directory structure which might be created. Both a group name
		string and a numeric id are valid input.
DEFAULT:	Operating System default (current user GID)
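
		For example, when starting the daemon as super-user but wanting log and pid files
		owned by an unprivileged account (user and group names purely illustrative):

		...
		files_uid: pmacct
		files_gid: pmacct
		files_umask: 027
		...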

KEY:		pcap_interface (-i) [GLOBAL, ONLY_PMACCTD]
DESC:		Interface on which 'pmacctd' listens. If this directive isn't supplied, a libpcap
		function is used to select a valid device. [ns]facctd can achieve similar behaviour
		by employing the [ns]facctd_ip directives; also, note that this directive is mutually
		exclusive with 'pcap_savefile' (-I).
DEFAULT:	Interface is selected by the Operating System

KEY:		pcap_interface_wait (-w) [GLOBAL, ONLY_PMACCTD]
VALUES:		[ true | false ]
DESC:		If set to true, this option causes 'pmacctd' to wait for the listening device to become
		available; it will retry opening the device every few seconds. When set to false,
		'pmacctd' will exit as soon as any error (related to the listening interface) is
		detected.
DEFAULT:	false

KEY:		pcap_savefile (-I) [GLOBAL, NO_UACCTD, NO_PMBGPD]
DESC:		File in libpcap savefile format to read data from (as an alternative to live data
		collection). As soon as the daemon has finished processing the file, it exits (unless in
		pmacctd the 'pcap_savefile_wait' config directive is specified). The directive is
		mutually exclusive with pcap_interface (-i) for pmacctd, with [ns]facctd_ip (-L)
		and [ns]facctd_port (-l) for nfacctd and sfacctd respectively and bmp_daemon_ip
		for pmbmpd.
DEFAULT:	none

KEY:            pcap_savefile_wait (-W) [GLOBAL, NO_UACCTD, NO_PMBGPD]
VALUES:         [ true | false ]
DESC:           If set to true, this option will cause the daemon to wait indefinitely for a signal
		(ie. CTRL-C when not daemonized or 'killall -9 pmacctd' if it is) after it has finished
		processing the supplied libpcap savefile (pcap_savefile). This is particularly useful
		when inserting fixed amounts of data into memory tables.
DEFAULT:        false

KEY:		pcap_savefile_delay (-Z) [GLOBAL, NO_UACCTD, NO_PMBGPD]
DESC:		When reading from a pcap_savefile, sleep for the supplied amount of seconds before
		(re)playing the file. For example, this is useful to let a BGP session be established
		and a RIB be finalised before playing a given file, or to buy time between replays so
		that a dump event can trigger.
DEFAULT:	0

KEY:            pcap_savefile_replay (-Y) [GLOBAL, NO_UACCTD, NO_PMBGPD]
DESC:           When reading from a pcap_savefile, replay content for the specified amount of times.
		Other than for testing in general, this may be useful when playing template-based
		protocols, ie. NetFlow v9/IPFIX, to replay data packets that could not be parsed
		the first time due to the template not having been sent yet.
DEFAULT:        1

KEY:		[ pcap_direction | uacctd_direction ] [GLOBAL, ONLY_PMACCTD]
VALUES:		[ "in", "out" ]
DESC:		Defines the traffic capturing direction with two possible values, "in" and "out". In
		pmacctd this is used to determine which primitive, in_iface or out_iface, to populate
		with the pcap_ifindex value. In addition, this allows tagging data based on direction
		in pre_tag_map. In uacctd only the latter functionality applies.
DEFAULT:	none

KEY:		pcap_ifindex [GLOBAL, ONLY_PMACCTD]
VALUES:		[ "sys", "hash", "map", "none" ]
DESC:		Defines how to source the ifindex of the capturing interface. If "sys" then an
		if_nametoindex() call is made to the underlying OS and the result is used; if
		"hash" a hashing algorithm is applied to the interface name to generate a unique
		number per interface; if "map" then ifindex definitions are expected as part of a
		pcap_interfaces_map (see below). 
DEFAULT:	none

KEY:		pcap_interfaces_map [GLOBAL, ONLY_PMACCTD, MAP]
DESC:		Allows listening for traffic data on multiple interfaces (compared to pcap_interface,
		where only a single interface can be defined). The map also allows defining ifindex
		and capturing direction on a per-interface basis. The map can be reloaded at runtime
		by sending the daemon a SIGUSR2 signal (ie. "killall -USR2 pmacctd"). A sample map is
		in examples/pcap_interfaces.map.example.
DEFAULT:	none
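
		For example (pathname purely illustrative; the exact per-interface keys, ie. ifindex,
		ifname and direction, are documented in examples/pcap_interfaces.map.example, which
		should be taken as the authoritative reference for the map syntax):

		...
		pcap_interfaces_map: /path/to/pcap_interfaces.map
		...

		with the map containing entries in the spirit of:

		ifindex=100	ifname=eth0	direction=in
		ifindex=200	ifname=eth1	direction=out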

KEY:		promisc (-N) [GLOBAL, ONLY_PMACCTD]
VALUES:		[ true | false ]
DESC:		If set to true, puts the listening interface in promiscuous mode. It's mostly useful when
		running 'pmacctd' in a box which is not a router, for example, when listening for traffic
		on a mirroring port.
DEFAULT:        true

KEY:		imt_path (-p)
DESC:		Specifies the full pathname where the memory plugin has to listen for client queries.
		When multiple memory plugins are active, each one has to use its own file to communicate
		with the client tool. Note that placing these files into a carefully protected directory
		(rather than /tmp) is the proper way to control who can access the memory backend.
DEFAULT:	/tmp/collect.pipe
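
		For example, two memory plugins, each with its own pipe file (pathnames purely
		illustrative):

		...
		plugins: memory[in], memory[out]
		imt_path[in]: /var/spool/pmacct/in.pipe
		imt_path[out]: /var/spool/pmacct/out.pipe
		...

		Each table can then be queried separately with the client tool, ie.
		"pmacct -s -p /var/spool/pmacct/in.pipe".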

KEY:		imt_buckets (-b)
DESC:		Defines the number of buckets of the memory table which is organized as a chained hash
		table. A prime number is highly recommended. Read INTERNALS 'Memory table plugin' chapter
		for further details. 
DEFAULT:	32771

KEY:		imt_mem_pools_number (-m)
DESC:		Defines the number of memory pools the memory table is able to allocate; the size of each
		pool is defined by the 'imt_mem_pools_size' directive. Here, a value of 0 instructs the
		memory plugin to allocate new memory chunks as they are needed, potentially allowing the
		memory structure to grow indefinitely. A value > 0 instructs the plugin not to allocate
		more than the specified number of memory pools, thus placing an upper boundary
		to the table size.
DEFAULT:	16

KEY:		imt_mem_pools_size (-s)
DESC:		Defines the size of each memory pool. For further details read INTERNALS 'Memory table
		plugin'. The number of memory pools is defined by the 'imt_mem_pools_number' directive.
DEFAULT:	8192

KEY:		syslog (-S) [GLOBAL]
VALUES:		[ auth | mail | daemon | kern | user | local[0-7] ]
DESC:		Enables syslog logging, using the specified facility.
DEFAULT:	none (logging to stderr)

KEY:		logfile [GLOBAL] 
DESC:           Enables logging to a file (bypassing syslog); expected value is a pathname. The target
		file can be re-opened by sending a SIGHUP to the daemon so that, for example, logs can
		be rotated. 
DEFAULT:	none (logging to stderr)
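
		For example (pathname purely illustrative):

		...
		logfile: /var/log/pmacct/nfacctd.log
		...

		After an external log rotation, the file can be re-opened by sending the daemon a
		SIGHUP signal, ie. "killall -HUP nfacctd".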

KEY:		amqp_host
DESC:           Defines the AMQP/RabbitMQ broker IP. All amqp_* directives are used by the AMQP plugin
		of flow daemons only. Check the various *_amqp_host directives (ie.
		bgp_daemon_msglog_amqp_host) for the equivalents relevant to other RabbitMQ exports.
DEFAULT:        localhost

KEY:		[ bgp_daemon_msglog_amqp_host | bgp_table_dump_amqp_host | bmp_dump_amqp_host |
		  bmp_daemon_msglog_amqp_host | sfacctd_counter_amqp_host |
		  telemetry_daemon_msglog_amqp_host | telemetry_dump_amqp_host ] [GLOBAL]
DESC:		See amqp_host. bgp_daemon_msglog_amqp_* directives are used by the BGP thread/daemon
		to stream data out; bgp_table_dump_amqp_* directives are used by the BGP thread/daemon
		to dump data out at regular time intervals; bmp_daemon_msglog_amqp_* directives are
		used by the BMP thread/daemon to stream data out; bmp_dump_amqp_* directives are
		used by the BMP thread/daemon to dump data out at regular time intervals;
		sfacctd_counter_amqp_* directives are used by sfacctd to stream sFlow counter data out;
		telemetry_daemon_msglog_amqp_* are used by the Streaming Telemetry thread/daemon to
		stream data out; telemetry_dump_amqp_* directives are used by the Streaming Telemetry
		thread/daemon to dump data out at regular time intervals.
DEFAULT:	See amqp_host

KEY:            amqp_vhost
DESC:           Defines the AMQP/RabbitMQ server virtual host; see also amqp_host.
DEFAULT:        "/"

KEY:		[ bgp_daemon_msglog_amqp_vhost | bgp_table_dump_amqp_vhost | bmp_dump_amqp_vhost |
		  bmp_daemon_msglog_amqp_vhost | sfacctd_counter_amqp_vhost |
		  telemetry_daemon_msglog_amqp_vhost | telemetry_dump_amqp_vhost ] [GLOBAL]
DESC:           See amqp_vhost; see also bgp_daemon_msglog_amqp_host.
DEFAULT:	See amqp_vhost 

KEY:		amqp_user
DESC:           Defines the username to use when connecting to the AMQP/RabbitMQ server; see also
		amqp_host.
DEFAULT:        guest

KEY:		[ bgp_daemon_msglog_amqp_user | bgp_table_dump_amqp_user | bmp_dump_amqp_user |
		  bmp_daemon_msglog_amqp_user | sfacctd_counter_amqp_user |
		  telemetry_daemon_msglog_amqp_user | telemetry_dump_amqp_user ] [GLOBAL]
DESC:		See amqp_user; see also bgp_daemon_msglog_amqp_host.
DEFAULT:	See amqp_user

KEY:		amqp_passwd
DESC:           Defines the password to use when connecting to the server; see also amqp_host.
DEFAULT:        guest

KEY:		[ bgp_daemon_msglog_amqp_passwd | bgp_table_dump_amqp_passwd |
		  bmp_dump_amqp_passwd | bmp_daemon_msglog_amqp_passwd |
		  sfacctd_counter_amqp_passwd | telemetry_daemon_msglog_amqp_passwd |
		  telemetry_dump_amqp_passwd ]
		[GLOBAL]
DESC:		See amqp_passwd; see also bgp_daemon_msglog_amqp_host.
DEFAULT: 	See amqp_passwd

KEY:		amqp_routing_key
DESC:           Name of the AMQP routing key to attach to published data. Dynamic names are supported
		through the use of variables, which are computed at the moment when data is purged to
		the backend. The list of variables supported is:

                $peer_src_ip    Value of the peer_src_ip primitive of the record being processed.

		$tag		Value of the tag primitive of the record being processed.

		$tag2		Value of the tag2 primitive of the record being processed.

                $post_tag       Configured value of post_tag.

                $post_tag2      Configured value of post_tag2.

		See also amqp_host.

DEFAULT:	'acct'
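
		For example, to publish each exporter's records to its own routing key (base name
		purely illustrative; assumes peer_src_ip is part of the 'aggregate' set so that the
		variable is populated):

		...
		amqp_routing_key: acct_$peer_src_ip
		...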

KEY:		[ bgp_daemon_msglog_amqp_routing_key | bgp_table_dump_amqp_routing_key |
		  bmp_daemon_msglog_amqp_routing_key | bmp_dump_amqp_routing_key |
		  sfacctd_counter_amqp_routing_key | telemetry_daemon_msglog_amqp_routing_key |
		  telemetry_dump_amqp_routing_key ] [GLOBAL]
DESC:		See amqp_routing_key; see also bgp_daemon_msglog_amqp_host. Variables supported by
		the configuration directives described in this section:

                $peer_src_ip    	BGP peer IP address (bgp_*) or sFlow agent IP address (sfacctd_*).

                $bmp_router		BMP peer IP address.

                $telemetry_node		Streaming Telemetry exporter IP address.

		$peer_tcp_port  	BGP peer TCP port.

		$bmp_router_port	BMP peer TCP port.

		$telemetry_node_port	Streaming Telemetry exporter port.

DEFAULT:	none

KEY:            [ amqp_routing_key_rr | kafka_topic_rr ]
DESC:           Performs round-robin load-balancing over a set of AMQP routing keys or Kafka topics.
		The base name for the string is defined by amqp_routing_key or kafka_topic. This key
		accepts a positive int value. If, for example, amqp_routing_key is set to 'blabla'
		and amqp_routing_key_rr to 3 then the AMQP plugin will round robin as follows:
		message #1 -> blabla_0, message #2 -> blabla_1, message #3 -> blabla_2, message #4
		-> blabla_0 and so forth. This works in the same fashion for kafka_topic. By default
		the feature is disabled, meaning all messages are sent to the base AMQP routing key
		or Kafka topic (or the default one, if no amqp_routing_key or kafka_topic is being
		specified).
		For Kafka it is advised to create topics in advance with a tool like kafka-topics.sh
		(ie. "kafka-topics.sh --zookeeper <zookeeper URL> --topic <topic> --create") even
		if auto.create.topics.enable is set to true (default) on the broker. This is because
		topic creation, especially on distributed systems, may take time and lead to data
		loss.  
DEFAULT:        0

KEY:		[ bgp_daemon_msglog_amqp_routing_key_rr | bgp_table_dump_amqp_routing_key_rr |
		  bmp_daemon_msglog_amqp_routing_key_rr | bmp_dump_amqp_routing_key_rr |
		  telemetry_daemon_msglog_amqp_routing_key_rr | telemetry_dump_amqp_routing_key_rr ]
		[GLOBAL]
DESC:		See amqp_routing_key_rr; see also bgp_daemon_msglog_amqp_host.
DEFAULT:	See amqp_routing_key_rr

KEY:            amqp_exchange
DESC:           Name of the AMQP exchange to publish data; see also amqp_host.
DEFAULT:	pmacct

KEY:		[ bgp_daemon_msglog_amqp_exchange | bgp_table_dump_amqp_exchange |
		  bmp_daemon_msglog_amqp_exchange | bmp_dump_amqp_exchange |
		  sfacctd_counter_amqp_exchange | telemetry_daemon_msglog_amqp_exchange |
		  telemetry_dump_amqp_exchange ] [GLOBAL]
DESC:           See amqp_exchange; see also bgp_daemon_msglog_amqp_host.
DEFAULT:	See amqp_exchange

KEY:            amqp_exchange_type
DESC:           Type of the AMQP exchange to publish data to. 'direct', 'fanout' and 'topic'
		types are supported; "rabbitmqctl list_exchanges" can be used to check the
		exchange type. Upon mismatch of exchange type, ie. exchange type is 'direct'
		but amqp_exchange_type is set to 'topic', an error will be returned. 
DEFAULT:	direct

KEY:		[ bgp_daemon_msglog_amqp_exchange_type | bgp_table_dump_amqp_exchange_type |
		  bmp_daemon_msglog_amqp_exchange_type | bmp_dump_amqp_exchange_type |
		  sfacctd_counter_amqp_exchange_type | telemetry_daemon_msglog_amqp_exchange_type |
		  telemetry_dump_amqp_exchange_type ] [GLOBAL]
DESC:           See amqp_exchange_type; see also bgp_daemon_msglog_amqp_host.
DEFAULT:	See amqp_exchange_type

KEY:            amqp_persistent_msg
VALUES:         [ true | false ]
DESC:           Marks messages as persistent and sets the Exchange as durable so as to prevent data loss
		if a RabbitMQ server restarts (it will still be the consumer's responsibility to declare
		the queue durable). Note from RabbitMQ docs: "Marking messages as persistent does
		not fully guarantee that a message won't be lost. Although it tells RabbitMQ to
		save message to the disk, there is still a short time window when RabbitMQ has
		accepted a message and hasn't saved it yet. Also, RabbitMQ doesn't do fsync(2) for
		every message -- it may be just saved to cache and not really written to the disk.
		The persistence guarantees aren't strong, but it is more than enough for our simple
		task queue."; see also amqp_host.
DEFAULT:        false

KEY:		[ bgp_daemon_msglog_amqp_persistent_msg  | bgp_table_dump_amqp_persistent_msg |
		  bmp_daemon_msglog_amqp_persistent_msg | bmp_dump_amqp_persistent_msg |
		  sfacctd_counter_persistent_msg | telemetry_daemon_msglog_amqp_persistent_msg |
		  telemetry_dump_amqp_persistent_msg ] [GLOBAL]
VALUES:         See amqp_persistent_msg
DESC:		See amqp_persistent_msg; see also bgp_daemon_msglog_amqp_host.
DEFAULT:	See amqp_persistent_msg

KEY:		amqp_frame_max
DESC:		Defines the maximum size, in bytes, of an AMQP frame on the wire to request of the broker
		for the connection. 4096 is the minimum size, 2^31-1 is the maximum; see also amqp_host.
DEFAULT:	131072

KEY:		[ bgp_daemon_msglog_amqp_frame_max | bgp_table_dump_amqp_frame_max |
		  bmp_daemon_msglog_amqp_frame_max | bmp_dump_amqp_frame_max |
		  sfacctd_counter_amqp_frame_max | telemetry_daemon_msglog_amqp_frame_max |
		  telemetry_dump_amqp_frame_max ] [GLOBAL]
DESC:		See amqp_frame_max; see also bgp_daemon_msglog_amqp_host.
DEFAULT:	See amqp_frame_max

KEY:            amqp_heartbeat_interval
DESC:           Defines the heartbeat interval in order to detect general failures of the RabbitMQ server.
                The value is expected in seconds. By default the heartbeat mechanism is disabled with a
		value of zero. According to RabbitMQ C API, detection takes place only upon publishing a
		JSON message, ie. not at login or if idle. The maximum value supported is INT_MAX (or
		2147483647); see also amqp_host.
DEFAULT:	0

KEY:		[ bgp_daemon_msglog_amqp_heartbeat_interval | bgp_table_dump_amqp_heartbeat_interval |
		  bmp_daemon_msglog_amqp_heartbeat_interval | bmp_dump_amqp_heartbeat_interval |
		  sfacctd_counter_amqp_heartbeat_interval | telemetry_daemon_msglog_amqp_heartbeat_interval |
		  telemetry_dump_amqp_heartbeat_interval ] [GLOBAL]
DESC:           See amqp_heartbeat_interval; see also bgp_daemon_msglog_amqp_host.
DEFAULT:	See amqp_heartbeat_interval

KEY:		[ bgp_daemon_msglog_amqp_retry | bmp_daemon_msglog_amqp_retry |
		  sfacctd_counter_amqp_retry | telemetry_daemon_msglog_amqp_retry ] [GLOBAL]
DESC:		Defines the interval of time, in seconds, after which a connection to the RabbitMQ
		server should be retried after a failure is detected; see also amqp_host and
		bgp_daemon_msglog_amqp_host.
DEFAULT: 	60

KEY:		kafka_topic
DESC:           Name of the Kafka topic to attach to published data. Dynamic names are supported
                through the use of variables, which are computed at the moment when data is purged
                to the backend. The list of supported variables is the same as for amqp_routing_key:

                $peer_src_ip    Value of the peer_src_ip primitive of the record being processed.

		$tag		Value of the tag primitive of the record being processed.

		$tag2		Value of the tag2 primitive of the record being processed.

                $post_tag       Configured value of post_tag.

                $post_tag2      Configured value of post_tag2.

		It is advised to create topics in advance with a tool like kafka-topics.sh (ie.
		"kafka-topics.sh --zookeeper <zookeeper URL> --topic <topic> --create") even if
		auto.create.topics.enable is set to true (default) on the broker. This is because
		topic creation, especially on distributed systems, may take time and lead to data
		loss.

DEFAULT:        'pmacct.acct'
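
		For example, a per-exporter topic (topic base name purely illustrative; assumes
		peer_src_ip is part of the 'aggregate' set so that the variable is populated):

		...
		kafka_topic: pmacct.acct.$peer_src_ip
		...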

KEY:		kafka_config_file
DESC:		Full pathname to a file containing directives to configure librdkafka. All knobs
		whose values are string, integer, boolean, CSV are supported. Pointer values, ie.
		for setting callbacks, are currently not supported through this infrastructure.
		The syntax of the file is CSV and expected in the format: <type, key, value> where
		'type' is one of 'global' or 'topic' and 'key' and 'value' are set according to
		librdkafka doc https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
		Both 'key' and 'value' are passed onto librdkafka without any validation being
		performed; the 'value' field can also contain commas, as it is not itself parsed.
		Examples are:

		topic, compression.codec, snappy
		global, socket.keepalive.enable, true

DEFAULT:	none

KEY:            kafka_broker_host
DESC:           Defines one or multiple, comma-separated, Kafka brokers for the bootstrap process.
		If only a single broker IP address is defined then the broker port is read via the
		kafka_broker_port config directive (legacy syntax); if multiple brokers are defined
		then each broker port, if not left to default 9092, is expected as part of this
		directive, for example: "broker1:10000,broker2". When defining multiple brokers,
		if the host is IPv4, the value is expected as 'address:port'. If IPv6, it is
		expected as '[address]:port' (although when defining a single broker, this is not
		required as the IPv6 address is detected and wrapped-around '[' ']' symbols).
		Resolvable hostnames are also accepted; if a host resolves to multiple addresses, the
		addresses will be round-robined for each connection attempt. SSL connections can be
		configured as "ssl://broker3:9000,ssl://broker2". All kafka_* directives are used
		by the Kafka plugin of flow daemons only. Check the various *_kafka_broker_host
		directives (ie. bgp_daemon_msglog_kafka_broker_host) for the equivalents relevant to
		other Kafka exports.
DEFAULT:        127.0.0.1

KEY:            kafka_broker_port
DESC:           Defines the Kafka broker port. See also kafka_broker_host.
DEFAULT:        9092

KEY:		kafka_partition
DESC:		Defines the Kafka broker topic partition ID. RD_KAFKA_PARTITION_UA, or ((int32_t)-1),
		selects the configured or default partitioner (slower than sending to a fixed
		partition). See also kafka_broker_host.
DEFAULT:	-1

KEY:		kafka_partition_dynamic
VALUES:		[ true | false ]
DESC:		Enables dynamic Kafka partitioning, ie. data is partitioned according to the value
                of the Kafka broker topic partition key. See also kafka_partition_key.
DEFAULT:	false

KEY:            kafka_partition_key
DESC:           Defines the Kafka broker topic partition key. A string of printable characters is
		expected as value. Dynamic names are supported through the use of variables, which
		are computed at the moment data is purged to the backend. The list of supported
		variables follows:

		$peer_src_ip	Record value for peer_src_ip primitive (if primitive is not part
				of the aggregation method then this will be set to a null value).

		$tag		Record value for tag primitive (if primitive is not part of the
				aggregation method then this will be set to a null value).

		$tag2		Record value for tag2 primitive (if primitive is not part of the
				aggregation method then this will be set to a null value).

		$src_host	Record value for src_host primitive (if primitive is not part of
                                the aggregation method then this will be set to a null value).

		$dst_host	Record value for dst_host primitive (if primitive is not part of
                                the aggregation method then this will be set to a null value).

		$src_port	Record value for src_port primitive (if primitive is not part of
                                the aggregation method then this will be set to a null value).

		$dst_port	Record value for dst_port primitive (if primitive is not part of
                                the aggregation method then this will be set to a null value).

		$proto		Record value for proto primitive (if primitive is not part of
				the aggregation method then this will be set to a null value).

		$in_iface	Record value for in_iface primitive (if primitive is not part of
				the aggregation method then this will be set to a null value).
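
		As an illustrative sketch, partitioning data by exporting device could be
		configured as:

		kafka_partition_dynamic: true
		kafka_partition_key: $peer_src_ip
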
DEFAULT:	none

KEY:            [ bgp_daemon_msglog_kafka_broker_host | bgp_table_dump_kafka_broker_host |
                  bmp_daemon_msglog_kafka_broker_host | bmp_dump_kafka_broker_host |
		  sfacctd_counter_kafka_broker_host | telemetry_daemon_msglog_kafka_broker_host |
		  telemetry_dump_kafka_broker_host ] [GLOBAL]
DESC:           See kafka_broker_host. bgp_daemon_msglog_kafka_* directives are used by the BGP thread/
		daemon to stream data out; bgp_table_dump_kafka_* directives are used by the BGP thread/
		daemon to dump data out at regular time intervals; bmp_daemon_msglog_kafka_* directives
		are used by the BMP thread/daemon to stream data out; bmp_dump_kafka_* directives are
                used by the BMP thread/daemon to dump data out at regular time intervals;
                sfacctd_counter_kafka_* directives are used by sfacctd to stream sFlow counter data
		out; telemetry_daemon_msglog_kafka_* are used by the Streaming Telemetry thread/daemon
		to stream data out; telemetry_dump_kafka_* directives are used by the Streaming Telemetry
		thread/daemon to dump data out at regular time intervals.
DEFAULT:        See kafka_broker_host

KEY:            [ bgp_daemon_msglog_kafka_broker_port | bgp_table_dump_kafka_broker_port |
                  bmp_daemon_msglog_kafka_broker_port | bmp_dump_kafka_broker_port |
		  sfacctd_counter_kafka_broker_port | telemetry_daemon_msglog_kafka_broker_port |
		  telemetry_dump_kafka_broker_port ] [GLOBAL]
DESC:           See kafka_broker_port; see also bgp_daemon_msglog_kafka_broker_host.
DEFAULT:        See kafka_broker_port

KEY:            [ bgp_daemon_msglog_kafka_topic | bgp_table_dump_kafka_topic |
                  bmp_daemon_msglog_kafka_topic | bmp_dump_kafka_topic |
		  sfacctd_counter_kafka_topic | telemetry_daemon_msglog_kafka_topic |
		  telemetry_dump_kafka_topic ] [GLOBAL]
DESC:		See kafka_topic; see also bgp_daemon_msglog_kafka_broker_host. Variables supported by
		the configuration directives described in this section:

                $peer_src_ip    	BGP peer IP address (bgp_*) or sFlow agent IP address (sfacctd_*).

                $bmp_router		BMP peer IP address.

                $telemetry_node		Streaming Telemetry exporter IP address.

		$peer_tcp_port  	BGP peer TCP port.

		$bmp_router_port	BMP peer TCP port.

		$telemetry_node_port	Streaming Telemetry exporter port.
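
		As an illustrative sketch (the topic name is hypothetical), a per-router dynamic
		topic for BMP msglog data could be configured as:

		bmp_daemon_msglog_kafka_topic: pmacct.bmp.$bmp_router
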
DEFAULT:        none

KEY:            [ bgp_daemon_msglog_kafka_topic_rr | bgp_table_dump_kafka_topic_rr |
                  bmp_daemon_msglog_kafka_topic_rr | bmp_dump_kafka_topic_rr |
		  telemetry_daemon_msglog_kafka_topic_rr | telemetry_dump_kafka_topic_rr ]
		[GLOBAL]
DESC:           See kafka_topic_rr; see also bgp_daemon_msglog_kafka_broker_host.
DEFAULT:        See kafka_topic_rr

KEY:            [ bgp_daemon_msglog_kafka_partition | bgp_table_dump_kafka_partition |
                  bmp_daemon_msglog_kafka_partition | bmp_dump_kafka_partition |
		  sfacctd_counter_kafka_partition | telemetry_daemon_msglog_kafka_partition |
		  telemetry_dump_kafka_partition ] [GLOBAL]
DESC:           See kafka_partition; see also bgp_daemon_msglog_kafka_broker_host.
DEFAULT:        See kafka_partition

KEY:            [ bgp_daemon_msglog_kafka_partition_key |
		  bgp_table_dump_kafka_partition_key |
                  bmp_daemon_msglog_kafka_partition_key | bmp_dump_kafka_partition_key |
                  sfacctd_counter_kafka_partition_key |
		  telemetry_daemon_msglog_kafka_partition_key |
                  telemetry_dump_kafka_partition_key ] [GLOBAL]
DESC:           See kafka_partition_key; see also bgp_daemon_msglog_kafka_broker_host.
DEFAULT:        See kafka_partition_key

KEY:            [ bgp_daemon_msglog_kafka_retry | bmp_daemon_msglog_kafka_retry |
		  sfacctd_counter_kafka_retry | telemetry_daemon_msglog_kafka_retry ] [GLOBAL]
DESC:           Defines the interval of time, in seconds, after which a connection to the Kafka
		broker should be retried after a failure is detected.
DEFAULT:        60

KEY:            [ bgp_daemon_msglog_kafka_config_file | bgp_table_dump_kafka_config_file |
                  bmp_daemon_msglog_kafka_config_file | bmp_dump_kafka_config_file |
                  sfacctd_counter_kafka_config_file | telemetry_daemon_msglog_kafka_config_file |
                  telemetry_dump_kafka_config_file ] [GLOBAL]
DESC:           See kafka_config_file; see also bgp_daemon_msglog_kafka_broker_host.
DEFAULT:        See kafka_config_file

KEY:		pidfile (-F) [GLOBAL]
DESC:		Writes the PID of the Core process to the specified file. PIDs of the active plugins are
		written as well, employing the following syntax: 'path/to/pidfile-<plugin_type>-<plugin_name>'.
		This is particularly useful to recognize which process is which on architectures where
		pmacct does not support the setproctitle() function.
DEFAULT:	none

KEY:		networks_file (-n)
DESC:		Full pathname to a file containing a list of networks - and optionally ASN information
		and BGP next-hop (peer_dst_ip). The purpose of the feature is to act as a resolver when
		network, next-hop and/or peer/origin ASN information is not available through other
		means (ie. BGP, IGP, telemetry protocol) or to override such information with
		custom/self-defined one.
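
		A minimal sketch of possible file content, assuming the comma-separated
		'asn,prefix' line format documented in examples/networks.lst.example (values
		are illustrative):

		65501,192.168.0.0/16
		65502,10.0.0.0/8
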
DEFAULT:	none

KEY:		networks_file_filter
VALUES          [ true | false ]
DESC:           Makes networks_file work as a filter in addition to its basic resolver functionality:
		networks and hosts not belonging to defined networks are zeroed out. This feature can
		interfere with the intended behaviour of networks_no_mask_if_zero, if they are both
		set to true.
DEFAULT:        false

KEY:		networks_file_no_lpm
VALUES          [ true | false ]
DESC:		Makes a matching IP prefix defined in a networks_file always win, even if it is not
		the longest. It applies when the aggregation method includes src_net and/or dst_net
		and nfacctd_net (or equivalents) and/or nfacctd_as (or equivalents) configuration
		directives are set to 'longest' (or 'fallback'). For example we receive the following
		PDU via NetFlow: 

		SrcAddr: 10.0.8.29 (10.0.8.29)
		DstAddr: 192.168.5.47 (192.168.5.47)
		[ .. ]
		SrcMask: 24 (prefix: 10.0.8.0/24)
		DstMask: 27 (prefix: 192.168.5.32/27)

		a BGP peering is available and BGP contains the following prefixes: 192.168.0.0/16 and
		10.0.0.0/8. Such a scenario is typical when more specifics are not re-distributed in
		BGP but are only available in the IGP. A networks_file contains the prefixes 10.0.8.0/24
		and 192.168.5.0/24. 10.0.8.0/24 is the same as in NetFlow; but 192.168.5.0/24 (say,
		representative of a range dedicated to a specific customer across several locations and
		hence composed of several sub-prefixes) would not be the longest match and hence the
		prefix from NetFlow, 192.168.5.32/27, would be the outcome of the network aggregation
		process; setting networks_file_no_lpm to true makes 192.168.5.0/24, coming from the
		networks_file, win instead. 
DEFAULT:        false

KEY:		networks_no_mask_if_zero
VALUES		[ true | false ]
DESC:		If set to true, IP prefixes with zero mask - that is, unknown ones or those hitting a
		default route - are not masked (ie. a full mask is applied instead, that is, 32 bits
		for IPv4 addresses and 128 bits for IPv6 ones). The feature applies to *_net fields
		and makes sure individual IP addresses belonging to unknown IP prefixes are not zeroed
		out. This feature can interfere with the intended behaviour of networks_file_filter,
		if they are both set to true.
DEFAULT:	false

KEY:            networks_mask
DESC:           Specifies the network mask - in bits - to apply to IP address values in L3 header. The
		mask is applied systematically and before evaluating the 'networks_file' content (if
		any is specified). The mask must be part of the aggregation method in order to be
		applied, ie. 'aggregate: dst_net, dst_mask', 'aggregate: src_net, src_mask', etc.
DEFAULT:	none

KEY:		networks_cache_entries
DESC:		The Networks Lookup Table (which is the memory structure where the 'networks_file' data
		is loaded) is preceded by a Network Lookup Cache where lookup results are saved to speed
		up later searches. The NLC is structured as a hash table, hence this directive sets the
		number of buckets for the hash table. The default value should be suitable for most
		common scenarios; however, when dealing with large-scale network definitions, it is
		advisable to tune this parameter to improve performance. A prime number is highly
		recommended.
DEFAULT:	IPv4: 99991; IPv6: 32771	

KEY:		ports_file
DESC:		Full pathname to a file containing a list of (known/interesting/meaningful) ports (one
		for each line; read more about the file syntax in the examples/ tree). The directive
		causes port numbers not matching any port defined in the list to be rewritten as zero.
		Intuitively, this makes sense only if aggregating on either 'src_port' or 'dst_port'
		primitives.
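
		A minimal sketch of possible file content, one port number per line (values are
		illustrative):

		53
		80
		443
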
DEFAULT:	none

KEY:		sql_db
DESC:		Defines the SQL database to use. When using the SQLite3 plugin, this directive refers
		to the full path to the database file; if multiple sqlite3 plugins are in use, it is
		recommended to point them to different files to prevent locking issues.
DEFAULT:	'pmacct'; sqlite3: '/tmp/pmacct.db' 

KEY:            [ sql_table | print_output_file ]
DESC:           In SQL this defines the table to use; in print plugin it defines the file to write output
		to. Dynamic names are supported through the use of variables, which are computed at the
		moment when data is purged to the backend. The list of supported variables follows: 

		%d		The day of the month as a decimal number (range 01 to 31).

		%H		The hour as a decimal number using a 24 hour clock (range 00 to 23).

		%m		The month as a decimal number (range 01 to 12).

		%M		The minute as a decimal number (range 00 to 59).

		%s      	The number of seconds since the Epoch, ie. since 1970-01-01 00:00:00 UTC.

		%S		The second as a decimal number (range 00 to 60).

		%w		The day of the week as a decimal, range 0 to 6, Sunday being 0.

		%W		The week number of the current year as a decimal number, range
				00 to 53,  starting  with the first Monday as the first day of
				week 01.

		%Y		The year as a decimal number including the century.

		%z		The +hhmm numeric time zone in ISO8601:1988 format (ie. -0400)

		$tzone		The time zone in rfc3339 format (ie. -04:00 or 'Z' for +00:00)

		$ref		Configured refresh time value for the plugin.

		$hst		Configured sql_history value, in seconds, for the plugin.

		$peer_src_ip	Record value for peer_src_ip primitive (if primitive is not part
				of the aggregation method then this will be set to a null value).

		$tag		Record value for tag primitive (if primitive is not part of the
				aggregation method then this will be set to a null value).

		$tag2		Record value for tag2 primitive (if primitive is not part of the
				aggregation method then this will be set to a null value).

		$post_tag	Configured value of post_tag.

		$post_tag2	Configured value of post_tag2.

		SQL plugins notes:
		Time-related variables require 'sql_history' to be specified in order to work correctly
		(see the 'sql_history' entry in this document for further information) and the
		'sql_refresh_time' setting to be aligned with 'sql_history', ie.:

			sql_history: 5m
			sql_refresh_time: 300

		Furthermore, if the 'sql_table_schema' directive is not specified, tables are expected
		to be already in place. This is an example on how to split accounted data among multiple
		tables basing on the day of the week:

			sql_history: 1h
			sql_history_roundoff: h
			sql_table: acct_v4_%w

		The above directives will account data on an hourly basis (1h). The above sql_table
		definition will make Sunday data be inserted into the 'acct_v4_0' table, Monday data
		into the 'acct_v4_1' table, and so on. The switch between the tables will happen each
		day at midnight: this behaviour is ensured by the use of the 'sql_history_roundoff'
		directive.

		Ideally sql_refresh_time and sql_history values should be aligned for dynamic tables
		to work; a sql_refresh_time value smaller than sql_history is also supported; values
		of sql_refresh_time greater than sql_history are not supported. The maximum table name
		length is 64 characters.

		Print plugin notes:
		If a non-dynamic filename is selected, content is overwritten to the existing file in
		case print_output_file_append is set to false (default). When creating a target file,
		the needed levels of directories are created too (equivalent to 'mkdir -p'), for example
		"/path/to/%Y/%Y-%m/%Y-%m-%d/blabla-%Y%m%d-%H%M.txt". However shell replacements are
		not supported, ie. the '~' symbol to denote the user home directory. Time-related
		variables require 'print_history' to be specified in order to work correctly. The
		output file can be a named pipe (ie. created with mkfifo), however the pipe has to be
		manually created in advance. 

		Common notes:
		The maximum number of variables it may contain is 32.
DEFAULT:	see notes

KEY:		print_output_file_append
VALUES:         [ true | false ]
DESC:		If set to true, print plugin will append to existing files instead of overwriting. If
		appending, and in case of an output format requiring a title, ie. csv, formatted, etc.,
		intuitively the title is not re-printed.
DEFAULT:	false

KEY:		print_output_lock_file
DESC:		If no print_output_file is defined (ie. print plugin output goes to stdout), this
		directive defines a global lock to serialize output to stdout, ie. in cases where
		multiple print plugins are defined or purging events of the same plugin queue up.
		By default output is not serialized and a warning message is printed to flag the
		condition. 

KEY:            print_latest_file
DESC:		Defines the full pathname to pointer(s) to latest file(s). Dynamic names are supported
		through the use of variables, which are computed at the moment when data is purged to the
		backend: refer to print_output_file for a full listing of supported variables; time-based
		variables are not allowed. Three examples follow:

		#1:
                print_output_file: /path/to/spool/foo-%Y%m%d-%H%M.txt
                print_latest_file: /path/to/spool/foo-latest

		#2:
		print_output_file: /path/to/spool/%Y/%Y-%m/%Y-%m-%d/foo-%Y%m%d-%H%M.txt
		print_latest_file: /path/to/spool/latest/foo

		#3:
		print_output_file: /path/to/$peer_src_ip/foo-%Y%m%d-%H%M.txt
		print_latest_file: /path/to/spool/latest/blabla-$peer_src_ip

NOTES:		Update of the latest pointer is done by evaluating file names; for correct working of
		the feature, responsibility is put on the user. A file is reckoned as latest if it is
		lexicographically greater than an existing one: this is generally fine but requires
		dates to be in %Y%m%d format rather than %d%m%Y. Also, upon restart of the daemon, if
		print_output_file is modified to a different location, good practice would be to 1)
		manually delete latest pointer(s) or 2) move existing print_output_file files to the
		new target location. Finally, if upgrading from pmacct releases before 1.5.0rc1, it is
		recommended to delete existing symlinks.
DEFAULT:	none

KEY:		print_write_empty_file
VALUES:         [ true | false ]
DESC:		If set to true, print plugin will write an empty file (zero length) if there was no
		data to output; this also aligns to the pre 1.5.0 behaviour, as documented in the
		UPGRADE document. The default behaviour is instead to only produce a log message
		with "ET: X" as Estimated Time value. 
DEFAULT:	false

KEY:		sql_table_schema
DESC:		Full pathname to a file containing a SQL table schema. It allows the SQL table to be
		created if it does not exist; a config example where this directive could be useful follows:

			sql_history: 5m
			sql_history_roundoff: h
			sql_table: acct_v4_%Y%m%d_%H%M 
			sql_table_schema: /usr/local/pmacct/acct_v4.schema

		In this configuration, the content of the file pointed to by 'sql_table_schema' should be:

			CREATE TABLE acct_v4_%Y%m%d_%H%M (
				[ ... PostgreSQL/MySQL specific schema ... ]
			);

		It is recommended that the content of the file is stripped of any unnecessary comments,
		strings and characters besides the SQL statement. This setup, along with this directive,
		is mostly useful when the dynamic tables are not closed in a 'ring' fashion (e.g., the
		days of the week) but 'open' (e.g., current date).
DEFAULT:	none

KEY:		sql_table_version
VALUES		[ 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 ]
DESC:		Defines the version of the SQL table. SQL table versioning was introduced to achieve two
		goals: a) make tables work out-of-the-box for the SQL beginners, smaller installations
		and quick try-outs; and in this context b) to allow introduction of new features over
		time without breaking backward compatibility. For the SQL experts, the alternative to
		versioning is 'sql_optimize_clauses' which allows custom mix-and-match of primitives:
		in such a case you have to build yourself custom SQL schemas and indexes. Check in the
		'sql/' sub-tree the SQL table profiles which are supported by the pmacct version you are
		currently using. It is always advised to explicitly define a sql_table_version in
		order to predict which primitive will be written to which column. All versioning rules
		are captured in sql/README.[mysql|sqlite3|pgsql] documents.
DEFAULT:	1	

KEY:		sql_table_type 
VALUES		[ original | bgp ]
DESC:		BGP-related primitives are divided in legacy and non-legacy. Legacy are src_as, dst_as;
		non-legacy are all the rest. Up to "original" tables v5 src_as and dst_as were written
		in the same field as src_host and dst_host. From "original" table v6 and if sql_table_type
		"bgp" is selected, src_as and dst_as are written in their own field (as_src and as_dst
		respectively). sql_table_type is by default set to "original" and is switched to "bgp"
		automatically if any non-legacy primitive is in use, ie. peer_dst_ip, as_path, etc. This
		directive allows the selection to be made explicit and/or the default behaviour to be
		circumvented. Apart from src_as and dst_as, regular table versioning applies to all
		non-BGP related fields, for example: a) if "sql_table_type: bgp" and "sql_table_version: 1"
		then the "tag" field will be written in the "agent_id" column; whereas b) if
		"sql_table_type: bgp" and "sql_table_version: 9" then the "tag" field will be written
		in the "tag" column.
		All versioning rules are captured in sql/README.[mysql|sqlite3|pgsql] documents.
DEFAULT:	original

KEY:		sql_data
VALUES:		[ typed | unified ]
DESC:		This switch applies to the PostgreSQL plugin and when using default tables up to v5:
		pgsql scripts in the sql/ tree, up to v5, will in fact create a 'unified' table along
		with multiple 'typed' tables. The 'unified' table has IP and MAC addresses specified
		as standard CHAR strings, slower and not space-savvy but flexible; 'typed' tables
		feature PostgreSQL's own types (inet, mac, etc.), resulting in a faster but more rigid
		structure. From v6 onwards the unified mode is discontinued, leading to simplification.
DEFAULT:	typed

KEY:		sql_conn_ca_file
DESC:		In MySQL and PostgreSQL plugins, this is the path name of the Certificate Authority
		(CA) certificate file. If used, it must specify the same certificate used by the
		server. 
DEFAULT:	none

KEY:	 	sql_host
DESC:		Defines the backend server IP/hostname. In case of the MySQL plugin, prepending the
		'unix:' string, ie. 'unix:/path/to/unix.socket', will cause the rest to be treated
		as a UNIX socket (rather than an IP address/hostname).
DEFAULT:	localhost

KEY:            sql_port
DESC:           Defines the backend server TCP/UDP port
DEFAULT:        [ MySQL: 3306; PostgreSQL: 5432 ]

KEY:		sql_user
DESC:		Defines the username to use when connecting to the server.
DEFAULT:	pmacct

KEY:		sql_passwd
DESC:		Defines the password to use when connecting to the server. Note that since this directive
		has a default value (see below), it is not possible to connect to a server that performs
		password authentication with an empty password.
DEFAULT:	'arealsmartpwd'

KEY:		[ sql_refresh_time | print_refresh_time | amqp_refresh_time | kafka_refresh_time ] (-r)
DESC:		Time interval, in seconds, between consecutive executions of the plugin cache scanner. The
		scanner purges data into the plugin backend. Note: internally all these config directives
		write to the same variable; when using multiple plugins it is recommended to bind refresh
		time definitions to specific plugins, ie.:

		plugins: mysql[x]
		sql_refresh_time[x]: 900

		Not doing so can lead to unexpected behaviours.
DEFAULT:	60

KEY:		[ sql_startup_delay | print_startup_delay | amqp_startup_delay | kafka_startup_delay ]
DESC:		Defines the time, in seconds, by which the first cache scan event has to be delayed. This
		delay is, in turn, propagated to the subsequent scans. It is useful in two scenarios: a) so
		that multiple plugins can use the same refresh time (ie. sql_refresh_time) value, allowing
		them to spread the writes over the length of the time-bin; b) with NetFlow, when using
		a RDBMS, to keep the original flow start time (nfacctd_time_new: false) while enabling the
		sql_dont_try_update feature (for RDBMS efficiency purposes); in such a context, the
		sql_startup_delay value should be greater than (ideally >= 2x) the NetFlow active
		flow timeout.
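
		An illustrative sketch of scenario a), spreading two named plugins with the same
		refresh time across the time-bin (plugin names and values are hypothetical):

		plugins: mysql[a], mysql[b]
		sql_refresh_time[a]: 300
		sql_refresh_time[b]: 300
		sql_startup_delay[b]: 150
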
DEFAULT:	0

KEY:		sql_optimize_clauses
VALUES:		[ true | false ]
DESC:		Enables the optimization of the statements sent to the RDBMS, essentially allowing a)
		stripped-down variants of the default SQL tables to be run or b) totally customized SQL
		tables to be built by a free mix-and-match of the available primitives. In either case,
		you will need to build the custom SQL table schema and indexes yourself. As a rule of
		thumb, when NOT using this directive, always remember to specify which default SQL table
		version you intend to stick to by using the 'sql_table_version' directive.
DEFAULT:	false

KEY:		[ sql_history | print_history | amqp_history | kafka_history ]
VALUES:		#[s|m|h|d|w|M]
DESC:		Enables historical accounting by placing accounted data into configurable time-bins. It
		does use the 'stamp_inserted' (base time of the time-bin) and 'stamp_updated' (last time
		the time-bin was touched) fields. The supplied value defines the time slot length during
		which counters are accumulated. See also *_history_roundoff. In nfacctd, where a flow
		can span across multiple time-bins, flow counters can be pro-rated (seconds timestamp
		resolution) over the involved time-bins by setting nfacctd_pro_rating to true.
		The net effect of this directive is close to time slots in a RRD file. Examples of
		valid values are: '300s' or '5m' - five minutes, '3600s' or '1h' - one hour, '14400s' or
		'4h' - four hours, '86400s' or '1d' - one day, '1w' - one week, '1M' - one month.
DEFAULT:	none

KEY:            [ sql_history_offset | print_history_offset | amqp_history_offset | kafka_history_offset ]
DESC:		Sets an offset to timeslots basetime. If history is set to 30 mins (by default creating
		10:00, 10:30, 11:00, etc. time-bins), with an offset of 900 seconds (so 15 mins) it will
		create 10:15, 10:45, 11:15, etc. time-bins. It expects a positive value, in seconds.
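
		The example above, expressed as an illustrative configuration sketch:

		sql_history: 30m
		sql_history_offset: 900
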
DEFAULT:	0

KEY:		[ sql_history_roundoff | print_history_roundoff | amqp_history_roundoff |
		  kafka_history_roundoff ]
VALUES		[m,h,d,w,M]
DESC:		Enables alignment of minutes (m), hours (h), days of month (d), weeks (w) and months (M)
		in print (to print_refresh_time) and SQL plugins (to sql_history and sql_refresh_time).
		Suppose you go with 'sql_history: 1h', 'sql_history_roundoff: m' and it's 6:34pm. Rounding
		off minutes gives you an hourly timeslot (1h) starting at 6:00pm; so, subsequent ones will
		start at 7:00pm, 8:00pm, etc. Now, you go with 'sql_history: 5m', 'sql_history_roundoff: m'
		and it's 6:37pm. Rounding off minutes will result in a first slot starting at 6:35pm; next
		slot will start at 6:40pm, and then every 5 minutes (6:45pm ... 7:00pm, etc.). 'w' and 'd'
		are mutually exclusive, that is: you can either reset the date to last Monday or reset the
		date to the first day of the month. 
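
		The first scenario above, expressed as an illustrative configuration sketch:

		sql_history: 1h
		sql_history_roundoff: m
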
DEFAULT:	none

KEY:            sql_recovery_backup_host
DESC:           Enables recovery mode; recovery mechanism kicks in if DB fails. It works by checking for
		the successful result of each SQL query. By default it is disabled. By using this key
		aggregates are recovered to a secondary DB. See INTERNALS 'Recovery modes' section for 
		details about this topic. SQLite 3.x note: the plugin uses this directive to specify
		the full path to an alternate database file (e.g., because you have multiple file
		systems on a box) to use in case the primary backend fails.
DEFAULT:	none

KEY:            [ sql_max_writers | print_max_writers | amqp_max_writers | kafka_max_writers ]
DESC:           Sets the maximum number of concurrent writer processes the plugin is allowed to start.
		This setting allows pmacct to degrade gracefully during major backend lock/outages/
		unavailability. The value is split as follows: up to N-1 concurrent processes will
		queue up; the Nth process will go for the recovery mechanism, if configured (ie.
		sql_recovery_backup_host for SQL plugins); writers beyond the Nth will stop managing
		data (so, data will be lost at this stage) and an error message is printed out.
DEFAULT:	10

KEY:		[ sql_cache_entries | print_cache_entries | amqp_cache_entries | kafka_cache_entries ]
DESC:		All plugins have a memory cache in order to store data until the next purging event (see
		refresh time directives, ie. sql_refresh_time). In case of network traffic data, the
		cache allows bytes and packets counters to be accumulated. This directive sets the number
		of cache buckets, the cache being structured in memory as a hash with conflict chains.
		The default value is suitable for mid-sized scenarios; however, when facing large-scale
		networks, it is recommended to tune this parameter to improve performance (ie. keep
		conflict chains shorter). The cache entries value should also be reviewed if the amount
		of entries is not sufficient for a full refresh time interval - in which case a
		"Finished cache entries" informational message will appear in the logs. Use a prime
		number of buckets.
NOTES:		* non SQL plugins: the cache structure has two dimensions, a base and a depth. This
		  setting defines the base (the amount of cache buckets) whereas the depth can't be
		  influenced by configuration and is set to an average depth of 10. This means that
		  the default value (16411) allows for approx 150K entries to fit the cache structure.
		  To properly size a plugin cache, it is recommended to determine the maximum amount
		  of entries purged by such plugin and make calculations based on that; if, for
		  example, the plugin purges a peak of 2M entries then a cache entries value of 259991
		  is sufficient to cover the worst-case scenario. In case memory is constrained, the
		  alternative option is to purge more often (ie. lower print_refresh_time) while
		  retaining the same time-binning (ie. equal print_history) at the expense of having
		  to consolidate/aggregate entries later in the collection pipeline; if opting for
		  this, be careful to have print_output_file_append set to true if using the print
		  plugin.
		* SQL plugins: the cache structure is similar to the one described for the non SQL
		  plugins but slightly different and more complex. Soon this cache structure will
		  be removed and SQL plugins will be migrated to the same structure as the non SQL
		  plugins, as described in the previous paragraph.
		* It is important to estimate how much space the base cache structure will take for
		  a configured amount of cache entries - especially because configuring too many
		  entries for the available memory can result in a crash of the plugin process right
		  at startup. For this purpose, before trying to allocate the cache structure, the
		  plugin will log an informational message saying "base cache memory=<size>". Why
		  the wording "base cache memory": because cache entries, depending on the configured
		  aggregation method, can have extra structures allocated ad-hoc, ie. BGP-, NAT-,
		  MPLS-related primitives; all these can make the total cache memory size increase
		  slightly at runtime. 
		
DEFAULT:	print_cache_entries, amqp_cache_entries, kafka_cache_entries: 16411;
		sql_cache_entries: 32771

KEY:		sql_dont_try_update
VALUES:         [ true | false ]
DESC:		By default pmacct uses an UPDATE-then-INSERT mechanism to write data to the RDBMS; this
		directive instructs pmacct to use a more efficient INSERT-only mechanism. This directive
		is useful for gaining performance by avoiding UPDATE queries. Using this directive puts
		some timing constraints, specifically sql_history == sql_refresh_time, otherwise it may
		lead to duplicate entries and, potentially, loss of data. When used in nfacctd it also
		requires nfacctd_time_new to be enabled.
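
		An illustrative sketch honouring the timing constraints above when collecting
		NetFlow:

		nfacctd_time_new: true
		sql_history: 5m
		sql_refresh_time: 300
		sql_dont_try_update: true
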
DEFAULT:	false

KEY:            sql_use_copy
VALUES:         [ true | false ]
DESC:		Instructs the plugin to build non-UPDATE SQL queries using COPY (in place of INSERT). While
		providing the same functionality as INSERT, COPY is also more efficient. To have effect, this
		directive requires 'sql_dont_try_update' to be set to true. It applies to PostgreSQL plugin
		only.
NOTES:		Error handling of the underlying PostgreSQL API is somewhat limited. During a COPY only
		transmission errors are detected but not syntax/semantic ones, ie. related to the query
		and/or the table schema. 
DEFAULT:        false

KEY:		sql_delimiter
DESC:		If sql_use_copy is true, uses the supplied character as delimiter. This is meant for cases
		where the default delimiter is part of any of the supplied strings to be inserted into the
		database, for example certain BGP AS PATHs like "AS1_AS2_AS3_{ASX,ASY,ASZ}".
DEFAULT:	','

KEY:		[ amqp_multi_values | sql_multi_values | kafka_multi_values ]
DESC:		In SQL plugin, sql_multi_values enables the use of multi-values INSERT statements. The value
		of the directive is intended to be the size (in bytes) of the multi-values buffer. The directive
		applies only to MySQL and SQLite 3.x plugins. Inserting many rows at the same time is much
		faster (many times faster in some cases) than using separate single-row INSERT statements.
		It is advisable to check the size of this pmacct buffer against the size of the corresponding
		MySQL buffer (max_allowed_packet). In AMQP and Kafka plugins, [amqp|kafka]_multi_values allow
		the same with JSON serialization (for Apache Avro see avro_buffer_size); in this case data is
		encoded as newline-separated JSON objects (preferred to JSON arrays for performance reasons).
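
		An illustrative sketch, sizing the multi-values buffer to 1 MiB (the value should
		be checked against the MySQL max_allowed_packet setting as noted above):

		sql_multi_values: 1048576
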
DEFAULT:        0

KEY:		[ sql_trigger_exec | print_trigger_exec | amqp_trigger_exec | kafka_trigger_exec ]
DESC:		Defines the executable to be launched at fixed time intervals to post-process aggregates;
		in SQL plugins, intervals are specified by the 'sql_trigger_time' directive; if no interval
		is supplied, the 'sql_refresh_time' value is used instead: this will result in a trigger being
		fired at each purging event (recommended since all environment variables will be set, see next).
		A number of environment variables are set in order to allow the trigger to take actions and 
		are listed in docs/TRIGGER_VARS. Non-SQL plugins feature a simpler implementation: triggers
		can only be fired each time data is written to the backend (ie. print_refresh_time) and no
		environment variables are passed over to the executable.
DEFAULT:	none

KEY:		sql_trigger_time
VALUES:		#[s|m|h|d|w|M]
DESC:		Specifies time interval at which the executable specified by 'sql_trigger_exec' has to
		be launched; if no executables are specified, this key is simply ignored. Values need to be 
		in the 'sql_history' directive syntax (for example, valid values are '300' or '5m', '3600'
		or '1h', '14400' or '4h', '86400' or '1d', '1w', '1M'; eg. if '3600' or '1h' is selected,
		the executable will be fired each hour).
DEFAULT:	none

KEY:		[ sql_preprocess | print_preprocess | amqp_preprocess | kafka_preprocess ]
DESC:		Allows aggregates to be processed (via a comma-separated list of conditionals and checks, ie.
		"qnum=1000000, minb=10000") while purging data to the backend, thus resulting in a powerful
		selection tier; aggregates filtered out may be just discarded or saved through the recovery
		mechanism (if enabled, if supported by the backend). The set of available preprocessing
		directives follows:
		
		KEY: qnum
		DESC: conditional. Subsequent checks will be evaluated only if the number of queries to be
		      created during the current cache-to-DB purging event is '>=' qnum value. SQL plugins
		      only. 

		KEY: minp
		DESC: check. Aggregates on the queue are evaluated one-by-one; each object is marked valid
		      only if the number of packets is '>=' minp value. All plugins. 

		KEY: minf
		DESC: check. Aggregates on the queue are evaluated one-by-one; each object is marked valid
		      only if the number of flows is '>=' minf value. All plugins.

		KEY: minb
		DESC: check. Aggregates on the queue are evaluated one-by-one; each object is marked valid
		      only if the bytes counter is '>=' minb value. An interesting idea is to set its value 
		      to a fraction of the link capacity. Remember that you have also a timeframe reference:
		      the 'sql_refresh_time' seconds. All plugins. 

		      For example, given the following parameters:
		      Link Capacity = 8Mbit/s, THreshold = 0.1%, TImeframe = 60s 
		      minb = ((LC / 8) * TI) * TH -> ((8Mbit/s / 8) * 60s) * 0.1% = 60000 bytes.

		      Given an 8Mbit link, all aggregates which have accounted for at least 60KB of traffic
		      in the last 60 seconds will be written to the DB.

		KEY: maxp
		DESC: check. Aggregates on the queue are evaluated one-by-one; each object is marked valid
		      only if the number of packets is '<' maxp value. SQL plugins only.

		KEY: maxf
		DESC: check. Aggregates on the queue are evaluated one-by-one; each object is marked valid
		      only if the number of flows is '<' maxf value. SQL plugins only.

		KEY: maxb
		DESC: check. Aggregates on the queue are evaluated one-by-one; each object is marked valid
		      only if the bytes counter is '<' maxb value. SQL plugins only. 

		KEY: maxbpp
		DESC: check. Aggregates on the queue are evaluated one-by-one; each object is marked valid
		      only if the number of bytes per packet is '<' maxbpp value. SQL plugins only.

		KEY: maxppf
		DESC: check. Aggregates on the queue are evaluated one-by-one; each object is marked valid
		      only if the number of packets per flow is '<' maxppf value. SQL plugins only.

		KEY: minbpp
		DESC: check. Aggregates on the queue are evaluated one-by-one; each object is marked valid
		      only if the number of bytes per packet is '>=' minbpp value. All plugins. 

		KEY: minppf
		DESC: check. Aggregates on the queue are evaluated one-by-one; each object is marked valid
		      only if the number of packets per flow is '>=' minppf value. All plugins. 

		KEY: fss 
		DESC: check. Enforces flow (aggregate) size dependent sampling, computed against the bytes
		      counter and returns renormalized results. Aggregates which have collected more than the
		      supplied 'fss' threshold in the last time window (specified by the 'sql_refresh_time'
		      configuration key) are sampled. Those under the threshold are sampled with probability
		      p(bytes). The method allows to get much more accurate samples compared to classic 1/N
		      sampling approaches, providing an unbiased estimate of the real bytes counter. It would
		      also be advisable to hold the equality 'sql_refresh_time' = 'sql_history'.
		      For further references: http://www.research.att.com/projects/flowsamp/ and specifically
		      to the papers: N.G. Duffield, C. Lund, M. Thorup, "Charging from sampled network usage",
		      http://www.research.att.com/~duffield/pubs/DLT01-usage.pdf and N.G. Duffield and C. Lund,
		      "Predicting Resource Usage and Estimation Accuracy in an IP Flow Measurement Collection
		      Infrastructure", http://www.research.att.com/~duffield/pubs/p313-duffield-lund.pdf
		      SQL plugins only.

		KEY: fsrc
		DESC: check. Enforces flow (aggregate) sampling under hard resource constraints, computed
		      against the bytes counter and returns renormalized results. The method selects only 'fsrc'
		      flows from the set of the flows collected during the last time window ('sql_refresh_time'),
		      providing an unbiased estimate of the real bytes counter. It would also be advisable
		      to hold the equality 'sql_refresh_time' = 'sql_history'.
		      For further references: http://www.research.att.com/projects/flowsamp/ and specifically
		      to the paper: N.G. Duffield, C. Lund, M. Thorup, "Flow Sampling Under Hard Resource
		      Constraints", http://www.research.att.com/~duffield/pubs/DLT03-constrained.pdf 
		      SQL plugins only.

		KEY: usrf 
		DESC: action. Applies the renormalization factor 'usrf' to counters of each aggregate. Its use 
		      is suitable for use in conjunction with uniform sampling methods (for example simple random
		      - e.g. sFlow, 'sampling_rate' directive or simple systematic - e.g. sampled NetFlow by
		      Cisco and Juniper). The factor is applied to recovered aggregates also. It would also be
		      advisable to hold the equality 'sql_refresh_time' = 'sql_history'. Before using this action
		      to renormalize counters generated by sFlow, take also a read of the 'sfacctd_renormalize'
		      key. SQL plugins only.

		KEY: adjb 
		DESC: action. Adds (or subtracts) 'adjb' bytes to the bytes counter multiplied by the number of
		      packets in each aggregate. This is a particularly useful action when - for example - fixed
		      lower (link, llc, etc.) layer sizes need to be included into the bytes counter (as explained
		      by Q7 in FAQS document). SQL plugins only.

		KEY: recover
		DESC: action. If previously evaluated checks have marked the aggregate as invalid, a positive
		      'recover' value makes the aggregate be handled through the recovery mechanism (if enabled).
		      SQL plugins only.

		For example, during a data purge, in order to filter in only aggregates counting 100KB or more
		the following line can be used to instrument the print plugin: 'print_preprocess: minb=100000'. 
DEFAULT:	none

KEY:		[ sql_preprocess_type | print_preprocess_type | amqp_preprocess_type | kafka_preprocess_type ]
VALUES:		[ any | all ]
DESC:		When more checks are to be evaluated, this directive tells whether aggregates on the queue
		are valid if they just match one of the checks (any) or all of them (all).
DEFAULT:	any

KEY:		timestamps_secs
VALUES:		[ true | false ]
DESC:		Sets the resolution of timestamps (ie. timestamp_start, timestamp_end, timestamp_arrival
		primitives) to seconds, ie. prevents residual time fields like timestamp_start_residual from
		being populated. In the nfprobe plugin, when exporting via NetFlow v9 (nfprobe_version: 9),
		it allows falling back to first and last switched times in seconds.
DEFAULT:        false

KEY:		timestamps_rfc3339
VALUES:		[ true | false ]
DESC:		Formats timestamps (ie. timestamp_start, timestamp_end, timestamp_arrival primitives) in an
		rfc3339-compliant way, ie. if UTC timezone yyyy-MM-ddTHH:mm:ss(.ss)Z. This is set to false
		for backward compatibility.
DEFAULT:	false

KEY:		timestamps_utc
VALUES:		[ true | false ]
DESC:		When presenting timestamps, decode them to UTC even if the underlying operating system is
		set to a different timezone. On the goodness of having a system set to UTC, please read
		Q18 of the FAQS document.
DEFAULT:	false


KEY:            timestamps_since_epoch
VALUES          [ true | false ]
DESC:           Expresses all timestamps (ie. timestamp_start, timestamp_end, timestamp_arrival primitives;
		sql_history-related fields stamp_inserted, stamp_updated; etc.) in the standard seconds since
		the Epoch format. This not only makes output more compact but also prevents computationally
		expensive time-formatting functions from being invoked, resulting in speed gains at purge time.
		In case the output is to a RDBMS, setting this directive to true will require changes to the
		default types for timestamp fields in the SQL schema.

		MySQL:          DATETIME ==> INT(8) UNSIGNED
		PostgreSQL:     timestamp without time zone ==> bigint
		SQLite3:        DATETIME ==> INT(8)
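
		A hypothetical MySQL sketch of such a change (table and column names follow the
		default 'acct' schema; adapt them to the schema version actually in use):

		ALTER TABLE acct MODIFY stamp_inserted INT(8) UNSIGNED;
		ALTER TABLE acct MODIFY stamp_updated INT(8) UNSIGNED;
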
DEFAULT:        false

KEY:		[ print_markers | amqp_markers | kafka_markers ]
VALUES:		[ true | false ]
DESC:		Enables the use of start/end markers each time data is purged to the backend. Both start
		and end markers return additional information, ie. writer PID, number of entries purged,
		elapsed time, etc. In the print plugin markers are available for CSV and JSON outputs; in
		the AMQP and Kafka plugins markers are available for JSON and Avro outputs. In the case
		of Kafka topics with multiple partitions, the purge_close message can arrive out of order
		so other mechanisms should be used to correlate messages as being part of the same batch
		(ie. writer_id).  
DEFAULT:	false

KEY:		print_output
VALUES:		[ formatted | csv | json | avro | avro_json | event_formatted | event_csv | custom ]
DESC:		Defines the print plugin output format. 'formatted' outputs in tab-separated format;
		'csv' outputs comma-separated values format, suitable for injection into 3rd party tools.
		The 'event' variants of both formatted and csv strip the bytes and packets counters fields.
		'json' outputs in JavaScript Object Notation format, suitable for injection into Big Data
		systems; being a self-descriptive format, JSON does not require an event counterpart; on
		the downside, JSON serialization may introduce some lag due to the string manipulations (as
		an example: 10M lines may be written to disk in 30 secs as CSV and 150 secs as JSON).
		JSON format requires compiling the package against Jansson library (downloadable at the
		following URL: http://www.digip.org/jansson/). 'avro' outputs data using the Apache Avro
		data serialization format: being a binary format, it's more compact than JSON; 'avro_json'
		outputs as JSON-encoded Avro objects, suitable to troubleshoot and/or familiarize with
		the binary format itself. Both 'avro' and 'avro_json' formats require compiling against
		the Apache Avro library (downloadable at the following URL: http://avro.apache.org/).
		'custom' allows custom formatting, encoding and backend management (open file,
		close file, markers, etc.) to be specified; see print_output_custom_lib and
		print_output_custom_cfg_file.
NOTES:		* The Jansson and Avro libraries don't have the concept of unsigned integers. Integers up to
		  32 bits are packed as 64 bits signed integers, working around the issue. No workaround
		  is possible for unsigned 64 bits integers except encoding them as strings.
		* If the output format is 'avro' and no print_output_file was specified, the Avro-based
		  representation of the data will be converted to JSON-encoded Avro.
		* If the output format is 'formatted', variable length primitives (like AS path) cannot
		  be printed and a warning message will be output instead. This is because, intuitively,
		  it is not possible to properly format the title line upfront with variable length
		  fields. Please use one of the other output formats instead. 
DEFAULT:	formatted

KEY:            print_output_separator
DESC:           Defines the print plugin output separator when print_output is set to csv or event_csv.
		The value is expected to be a single character, with the exception of the tab (\t) and
		space (\s) strings which are also allowed. Being able to choose a separator is useful in
		cases in which the default separator value is part of any of the supplied strings, for
		example certain BGP AS PATHs like "AS1_AS2_AS3_{ASX,ASY,ASZ}".
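
		An illustrative sketch switching the CSV output to tab-separated values:

		print_output: csv
		print_output_separator: \t
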
DEFAULT:	','

KEY:		print_output_custom_lib
DESC:		Full path to a non-static library file that can be dynamically linked into pmacct to
		provide custom formatting of output data. The two main use-cases for this feature are
		1) use available encodings (ie. CSV, JSON, etc.) but fix the format of the messages in a
		custom way and 2) use a different encoding than the available ones. The pm_custom_output
		structure in plugin_cmn_custom.h can be looked at for guidance on which functions are
		expected to exist in the library and with which arguments they would be called. See an
		example library file in the examples/custom directory.
DEFAULT:	none

KEY:		print_output_custom_cfg_file
DESC:		Full path to a file that is passed to the shared object (.so) library both at init time
		and at runtime, that is, when processing elements. The config file content is opaque to
		pmacct.
DEFAULT:	none

KEY:		[ amqp_output | kafka_output ]
VALUES: 	[ json | avro | avro_json ]
DESC:		Defines the output format for messages sent to a message broker (amqp and kafka plugins).
		'json' is to encode messages as JavaScript Object Notation format. 'json' format requires
		compiling against the Jansson library (downloadable at: http://www.digip.org/jansson/).
		'avro' is to binary-encode messages with the Apache Avro serialization format; 'avro_json'
		is to JSON-encode messages with Avro. 'avro' and 'avro_json' formats require compiling
		against the Apache Avro library (downloadable at: http://avro.apache.org/). Read more
		on the print_output directive.
DEFAULT:	json

KEY:		avro_buffer_size
DESC:		When the Apache Avro format is used to encode the messages sent to a message broker (amqp and
		kafka plugins), this option defines the size in bytes of the buffer used by the Avro data
		serialization system. The buffer needs to be large enough to store at least a single Avro
		record. If the buffer does not have enough capacity to store the number of records defined
		by the [amqp, kafka]_multi_values configuration directive, the current records stored in the
		buffer will be sent to the message broker and the buffer will be cleared to accommodate
		subsequent records.
DEFAULT:	8192

KEY:		avro_schema_file
DESC:		When the Apache Avro format is used to encode the messages sent to a message broker (amqp
		and kafka plugins), export the schema generated to the given file path. The schema can then
		be used by the receiving end to decode the messages. Note that the schema will be dynamically
		built based on the aggregation primitives chosen (this also has effect in the print plugin,
		but in that case the schema is also always included in the print_output_file, as mandated
		by the Avro specification). inotify-tools can be used to take event-driven actions, like
		notifying a consumer whenever the file is modified.
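
		An illustrative sketch (the file path is hypothetical): set
		"avro_schema_file: /path/to/pmacct_avro.schema" and then, for example, watch the
		file for changes with "inotifywait -e close_write /path/to/pmacct_avro.schema".
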
DEFAULT:	none

KEY:		kafka_avro_schema_registry
DESC:		The URL to a Confluent Avro Schema Registry. The value is passed to libserdes as argument
		for "schema.registry.url". A sample of the expected value being https://localhost. This is
		a pointer to the REST API https://docs.confluent.io/current/schema-registry/docs/api.html 
		The schema name is auto generated: if the topic is static, the schema name is createad as
		"<kafka_topic>-value" (ie. if kafka_topic is set to 'foobar' then the schema name will be
		"foobar-value"); if the topic is dynamic instead, the schema name is created as "pmacct_
		<plugin type>_<plugin name>" (ie. if plugin name is 'foobar' then the schema name will be
		"pmacct_kafka_foobar"). To confirm that the schema is registered, the following CL can be
		used: "curl -X GET https://<Schema Registry host>/subjects | jq . | grep <schema name>".
		Until reaching a stable 'aggregate' aggregation method, it is recommended to set Schema
		Registry compatibility type to 'none' as the schema may change.
DEFAULT:	none

KEY:            [ print_num_protos | sql_num_protos | amqp_num_protos | kafka_num_protos ]
VALUES:         [ true | false ]
DESC:		Defines whether IP protocols (ie. tcp, udp) should be looked up and presented in string format
		or left numerical. The default is to look protocol names up. If this feature is not available
		for the intended plugin - and NetFlow/IPFIX or libpcap/ULOG daemons are used - a custom
		primitive can be defined (see aggregate_primitives config directive), for example in the case
		of NetFlow/IPFIX:

		name=proto_int field_type=4 len=1 semantics=u_int

DEFAULT:	false

KEY:            sql_num_hosts
VALUES:         [ true | false ]
DESC:           Defines whether IP addresses should be left numerical (in network byte ordering) or converted
		to a human-readable string. Applies to MySQL and SQLite plugins only and assumes the INET_ATON()
		and INET6_ATON() functions are defined in the RDBMS. INET_ATON() is always defined in MySQL
		whereas INET6_ATON() requires MySQL >= 5.6.3. Neither function is defined by default in
		SQLite and both have to be user-defined. The default setting, false, is to convert IP addresses
		and prefixes into strings. If this feature is not available for the intended plugin - and
		NetFlow/IPFIX or libpcap/ULOG daemons are in use - a custom primitive can be defined (see
		aggregate_primitives configuration directive), for example in the case of NetFlow/IPFIX:

		name=src_host_int field_type=8  len=4 semantics=u_int
		name=dst_host_int field_type=12 len=4 semantics=u_int

DEFAULT:        false

KEY:		[ nfacctd_port | sfacctd_port ] (-l) [GLOBAL, NO_PMACCTD, NO_UACCTD]
DESC:		Defines the UDP port where to bind nfacctd (nfacctd_port) and sfacctd (sfacctd_port) daemons.
                SO_REUSEPORT feature can be leveraged on Linux: it allows multiple daemons to bind the same
                local address and port in order to load-balance processing of incoming packets; if doing so
		with incoming NetFlow v9/IPFIX template-based protocols, nfacctd_templates_receiver and
		nfacctd_templates_port should be used. At the end of this document, reference (1) points
		to a presentation of the SO_REUSEPORT feature. To enable SO_REUSEPORT on a Linux system
		supporting it, use 'sysctl net.core.allow_reuseport=1'.
DEFAULT:	nfacctd_port: 2100; sfacctd_port: 6343 

KEY:		[ nfacctd_ip | sfacctd_ip ] (-L) [GLOBAL, NO_PMACCTD, NO_UACCTD]
DESC:		Defines the IPv4/IPv6 address where to bind the nfacctd (nfacctd_ip) and sfacctd (sfacctd_ip)
		daemons.
DEFAULT:	all interfaces

KEY:		[ nfacctd_kafka_broker_host | sfacctd_kafka_broker_host ] [GLOBAL, NO_PMACCTD, NO_UACCTD]
DESC:		Defines one or multiple, comma-separated, Kafka brokers to receive NetFlow/IPFIX and sFlow
		from. See kafka_broker_host for more info.
DEFAULT:	none

KEY:		[ nfacctd_kafka_broker_port | sfacctd_kafka_broker_port ] [GLOBAL, NO_PMACCTD, NO_UACCTD]
DESC:		Defines the Kafka broker port to receive NetFlow/IPFIX and sFlow from. See kafka_broker_host
		for more info.
DEFAULT:	9092

KEY:            [ nfacctd_kafka_config_file | sfacctd_kafka_config_file ] [GLOBAL, NO_PMACCTD, NO_UACCTD]
DESC:           Full pathname to a file containing directives to configure librdkafka to receive NetFlow/IPFIX
                and sFlow from. See kafka_config_file for more info.
DEFAULT:        none

KEY:            [ nfacctd_kafka_topic | sfacctd_kafka_topic ] [GLOBAL, NO_PMACCTD, NO_UACCTD]
DESC:           Name of the Kafka topic to receive NetFlow/IPFIX and sFlow from. No variables are supported
		for dynamic naming of the topic. See kafka_topic for more info.
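
		An illustrative sketch of consuming NetFlow/IPFIX from a Kafka broker (the broker
		address and topic name are hypothetical):

		nfacctd_kafka_broker_host: 127.0.0.1
		nfacctd_kafka_broker_port: 9092
		nfacctd_kafka_topic: netflow-raw
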
DEFAULT:        none

KEY:            [ nfacctd_zmq_address | sfacctd_zmq_address ] [GLOBAL, NO_PMACCTD, NO_UACCTD]
DESC:           Defines the ZeroMQ queue address (host and port) to connect to for consuming NetFlow/IPFIX
		and sFlow from. An example of the expected value is "127.0.0.1:50000".
DEFAULT:        none

KEY:		core_proc_name [GLOBAL]
DESC:		Defines the name of the core process. This is the equivalent of instantiating named plugins,
		but for the core process.
DEFAULT:	'default'

KEY:		proc_priority
DESC:		Redefines the process scheduling priority, equivalent to using the 'nice' tool. Each daemon
		process, ie. core, plugins, etc., can define a different priority.
DEFAULT:	0

KEY:		[ nfacctd_allow_file | sfacctd_allow_file ] [GLOBAL, MAP, NO_PMACCTD, NO_UACCTD]
DESC:		Full pathname to a file containing the list of IPv4/IPv6 addresses/prefixes (one for each
		line) allowed to send packets to the daemon. The allow file is intended to be small; for
		longer ACLs, firewall rules should be preferred instead. If no allow file is specified,
		intuitively, that means 'allow all'; if an allow file is specified but its content is
		empty, that means 'deny all'. Content can be reloaded at runtime by sending the daemon a
		SIGUSR2 signal (ie. "killall -USR2 nfacctd"). Sample map in examples/allow.lst.example .
DEFAULT:	none (ie. allow all)

KEY:		nfacctd_time_secs [GLOBAL, NFACCTD_ONLY]
VALUES:		[ true | false ]
DESC:		Makes 'nfacctd' expect times included in NetFlow header to be in seconds rather than msecs.
		This knob makes sense for NetFlow v5 since in NetFlow v9 and IPFIX different fields are
		reserved for secs and msecs timestamps, increasing collector awareness.
DEFAULT:        false

KEY:		[ nfacctd_time_new | pmacctd_time_new | sfacctd_time_new ] [GLOBAL, NO_UACCTD]
VALUES:		[ true | false ]
DESC:		Makes the daemon ignore external timestamps associated with data, ie. included in the NetFlow
		header or pcap header, and generate new ones (reflecting the data arrival time at the collector).
		This is particularly useful to assign flows to time-bins based on the flow arrival time at
		the collector rather than the flow original (start) time.
DEFAULT:        false

KEY:		nfacctd_pro_rating [NFACCTD_ONLY]
VALUES:		[ true | false ]
DESC:		If nfacctd_time_new is set to false (default) and historical accounting (ie. sql_history) is
		enabled, this directive enables pro rating of NetFlow/IPFIX flows over time-bins, if needed.
		For example, if sql_history is set to '5m' (so 300 secs), the considered flow duration is 1000
		secs, its bytes counter is 1000 bytes and, for simplicity, its start time is at the base time
		of t0, time-bin 0, then the flow is inserted in time-bins t0, t1, t2 and t3 and its bytes
		counter is proportionally split among these time-bins: 300 bytes during t0, t1 and t2 and
		100 bytes during t3.
NOTES:          If NetFlow sampling is enabled, it is recommended to have counters renormalization enabled
                (nfacctd_renormalize set to true).
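
		An illustrative sketch matching the example above, with pro rating and counters
		renormalization enabled:

		sql_history: 5m
		nfacctd_time_new: false
		nfacctd_pro_rating: true
		nfacctd_renormalize: true
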
DEFAULT:        false

KEY:		nfacctd_templates_file [GLOBAL, NFACCTD_ONLY]
DESC:		Full pathname to a file to store JSON-serialized templates for NetFlow v9/IPFIX data.
		At startup, nfacctd reads templates stored in this file (if any and if the file exists)
		in order to reduce the initial amount of dropped packets due to unknown templates. In
		steady state, templates received from the network are stored in this file. Warning: if,
		at startup time, data records are encoded with a template structure different than the
		one that was stored in the file, effectiveness of this feature is (intuitively) greatly
		reduced. This file will be created if it does not exist. This feature requires compiling
		against the Jansson library (--enable-jansson when configuring pmacct for compiling).
DEFAULT:        none

KEY:		nfacctd_templates_receiver [GLOBAL, NFACCTD_ONLY]
DESC:		Defines a receiver to which NetFlow v9/IPFIX templates are exported, ideally a replicator.
		This helps in clustered scenarios, especially when leveraging SO_REUSEPORT (multiple nfacctd
		processes listening on the same IP address and port). If IPv4, the value is expected as
		'address:port'. If IPv6, it is expected as '[address]:port'.
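		An example of the expected value (replicator address is hypothetical):

		nfacctd_templates_receiver: 192.0.2.100:20001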
DEFAULT:	none

KEY:		nfacctd_templates_port [GLOBAL, NFACCTD_ONLY]
DESC:		Defines the UDP port nfacctd binds to for receiving (replicated) templates. If a
		template is received on this port and nfacctd_templates_receiver is specified, it is
		not replicated (in order to avoid infinite loops).
DEFAULT:	none

KEY:		nfacctd_dtls_port [GLOBAL, NFACCTD_ONLY]
DESC:		Defines the UDP port nfacctd binds to for receiving NetFlow/IPFIX over DTLS. Needs
		pmacct to be configured for compiling with the --enable-gnutls knob. The files (key,
		certificate, etc.) required by DTLS are to be supplied via the dtls_path config directive.
DEFAULT:	none

KEY:            [ nfacctd_stitching | sfacctd_stitching | pmacctd_stitching | uacctd_stitching ]
VALUES:         [ true | false ]
DESC:		If set to true adds two new fields, timestamp_min and timestamp_max: given an aggregation
		method ('aggregate' config directive), timestamp_min is the timestamp of the first element
		contributing to a certain aggregate, timestamp_max is the timestamp of the last element. In
		case the export protocol provides time references, ie. NetFlow/IPFIX, these are used; if not,
		or if using NetFlow/IPFIX as export protocol and nfacctd_time_new is set to true, the current
		time (hence time of arrival at the collector) is used instead. The feature is not compatible
		with pro-rating, ie. nfacctd_pro_rating. Also, the feature is supported on all plugins except
		the 'memory' one (please get in touch if you have a use-case for it).
DEFAULT:        false

KEY:            nfacctd_account_options [GLOBAL, NFACCTD_ONLY]
VALUES:         [ true | false ]
DESC:		If set to true, accounts for NetFlow/IPFIX option records. This requires defining custom
		primitives via aggregate_primitives. pre_tag_map offers a sample_type value of 'option' in
		order to split option data records from flow or event data ones.
DEFAULT:        false

KEY:		[ nfacctd_as | sfacctd_as | pmacctd_as | uacctd_as ] [GLOBAL]
VALUES:		[ netflow | sflow | file | bgp | bmp | longest ]
DESC:		When set to 'netflow' or 'sflow' it instructs nfacctd and sfacctd to populate 'src_as',
		'dst_as', 'peer_src_as' and 'peer_dst_as' primitives from information in NetFlow and sFlow
		datagrams; when set to 'file', it instructs nfacctd and sfacctd to populate 'src_as',
		'dst_as' and 'peer_dst_as' by looking up source and destination IP addresses against a
		supplied networks_file. When 'bgp' or 'bmp' is specified, source and destination IP
		addresses are looked up against the BGP RIB of the peer from which the NetFlow (or sFlow)
		datagram was received (see also bgp_agent_map directive for more complex mappings).
		'longest' behaves in a longest-prefix match wins fashion: in nfacctd and sfacctd lookup
		is done against a networks_file (if specified), sFlow/NetFlow protocol and BGP/BMP (if
		the BMP/BGP thread is started) with the following logic: networks_file < sFlow/NetFlow
		<= BGP/BMP; in pmacctd and uacctd the 'longest' logic is networks_file <= BGP/BMP.

		Read the nfacctd_net description for an example of operation of the 'longest' method. Unless
		there is a specific goal to achieve, it is highly recommended that this definition, ie.
		nfacctd_as, is kept in sync with its net equivalent, ie. nfacctd_net.
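		As a minimal sketch (the networks_file path is a placeholder), keeping the ASN and network
		definitions in sync with the 'longest' method could look like:

		nfacctd_as: longest
		nfacctd_net: longest
		bgp_daemon: true
		networks_file: /path/to/networks.lst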
DEFAULT:        none

KEY:            [ nfacctd_net | sfacctd_net | pmacctd_net | uacctd_net ] [GLOBAL]
VALUES:         [ netflow | sflow | mask | file | igp | bgp | bmp | longest ]
DESC:		Determines the method for performing IP prefix aggregation - hence directly influencing
		'src_net', 'dst_net', 'src_mask', 'dst_mask' and 'peer_dst_ip' primitives. 'netflow' and
		'sflow' get values from NetFlow and sFlow protocols respectively; these keywords are only
		valid in nfacctd, sfacctd. 'mask' applies a defined networks_mask; 'file' selects a defined
		networks_file; 'igp', 'bgp' and 'bmp' source values from IGP/IS-IS, BGP and BMP daemon
		respectively. For backward compatibility, the default behaviour in pmacctd and uacctd is:
		'mask' and 'file' are turned on if a networks_mask and a networks_file are respectively
		specified by configuration. If they are both defined, the outcome will be the intersection
		of their definitions. 'longest' behaves in a longest-prefix match wins fashion: in nfacctd
		and sfacctd lookup is done against a networks list (if networks_file is defined), sFlow/
		NetFlow protocol, IGP (if the IGP thread is started) and BGP/BMP (if the BGP/BMP thread is
		started) with the following logic: networks_file < sFlow/NetFlow < IGP <= BGP/BMP; in
		pmacctd and uacctd the 'longest' logic is: networks_file < IGP <= BGP/BMP. For example, we
		receive the following PDU via NetFlow:

                SrcAddr: 10.0.8.29 (10.0.8.29)
                DstAddr: 192.168.5.47 (192.168.5.47)
                [ .. ]
                SrcMask: 24 (prefix: 10.0.8.0/24)
                DstMask: 27 (prefix: 192.168.5.32/27)

                a BGP peering is available and BGP contains the following prefixes: 192.168.0.0/16 and 10.0.0.0/8.
                A networks_file contains the prefixes 10.0.8.0/24 and 192.168.5.0/24. 'longest' would select
		as outcome of the network aggregation process 10.0.8.0/24 for the src_net and src_mask
		respectively and 192.168.5.32/27 for dst_net and dst_mask.

		Unless there is a specific goal to achieve, it is highly recommended that the definition of
		this configuration directive is kept in sync with its ASN equivalent, ie. nfacctd_as.
DEFAULT:	nfacctd: 'netflow'; sfacctd: 'sflow'; pmacctd and uacctd: 'mask', 'file'

KEY:		use_ip_next_hop [GLOBAL]
VALUES:		[ true | false ] 
DESC:		When IP prefix aggregation (ie. nfacctd_net) is set to 'netflow', 'sflow' or 'longest' (in
		which case the longest winning match is via 'netflow' or 'sflow'), the 'peer_dst_ip' field is
		populated from the NetFlow/sFlow IP next-hop field if the BGP next-hop is not available.
DEFAULT:        false

KEY:            decode_arista_trailer [GLOBAL, PMACCTD_ONLY]
VALUES:         [ true | false ]
DESC:           Decodes output interface data present in the trailer of libpcap packets passed by Arista.
DEFAULT:        false

KEY:		[ nfacctd_mcast_groups | sfacctd_mcast_groups ] [GLOBAL, NO_PMACCTD, NO_UACCTD]
DESC:		Defines one or more IPv4/IPv6 multicast groups to be joined by the daemon. If multiple groups
		are supplied, they are expected comma separated. A maximum of 20 multicast groups may be joined
		by a single daemon instance. Some operating systems (notably Solaris, it seems) may also require
		an interface to bind to, which - in turn - can be supplied by declaring an IP address
		('nfacctd_ip' key).
DEFAULT:	none

KEY:            [ nfacctd_disable_checks | sfacctd_disable_checks ] [GLOBAL, NO_PMACCTD, NO_UACCTD]
VALUES:         [ true | false ]
DESC:           Both nfacctd and sfacctd can log warning messages for failing basic checks against incoming
		NetFlow/sFlow datagrams, ie. sequence number checks, protocol version. Such checks are disabled
		by default to cope with buggy or non-standard implementations. Also, for sequencing checks, the
		'export_proto_seqno' primitive is recommended instead (see 'aggregate' description and notes).
DEFAULT:        true

KEY:		nfacctd_disable_opt_scope_check [GLOBAL, ONLY_NFACCTD]
VALUES:         [ true | false ]
DESC:		Mainly a workaround to implementations not encoding NetFlow v9/IPFIX option scope correctly,
		this knob allows disabling option scope checking. By doing so, options are considered scoped
		to the system level (ie. to the IP address of the exporter).
DEFAULT:	false

KEY:		pre_tag_map [MAP]
DESC:		Full pathname to a file containing tag mappings. Tags can be internal-only (ie. for filtering
		purposes, see pre_tag_filter configuration directive) or exposed to users (ie. if 'tag', 'tag2'
		and/or 'label' primitives are part of the aggregation method). Take a look at the examples/
		sub-tree for all supported keys and detailed examples (pretag.map.example). Pre-Tagging is
		evaluated in the Core Process and a local pre_tag_map can be defined per plugin. The result of
		evaluating pre_tag_map overrides any tags passed via NetFlow/sFlow by a pmacct nfprobe/
		sfprobe plugin. Number of map entries (by default 384) can be modified via maps_entries.
		Content can be reloaded at runtime by sending the daemon a SIGUSR2 signal (ie. "killall -USR2
		nfacctd").  
DEFAULT:	none

KEY:		maps_entries
DESC:		Defines the maximum number of entries a map (ie. pre_tag_map and all directives with the
		'MAP' flag in this document) can contain. The default value is suitable for most scenarios,
		though tuning it could be required either to save on memory or to allow for more entries.
		Refer to the specific map directives documentation in this file to see which are affected by
		this setting.
DEFAULT:	384

KEY:            maps_row_len
DESC:		Defines the maximum length of map (ie. pre_tag_map and all directives with the 'MAP' flag in
		this document) rows. The default value is suitable for most scenarios, though tuning it could
		be required either to save on memory or to allow for more entries.
DEFAULT:	256

KEY:		maps_refresh [GLOBAL]
VALUES:		[ true | false ]
DESC:		When enabled, this directive allows reloading map files (ie. pre_tag_map and all directives
		with the 'MAP' flag in this document) without restarting the daemon instance. For example,
		it may prove particularly useful to reload pre_tag_map or networks_file entries in order
		to reflect some change in the network. After having modified the map files, a SIGUSR2 has
		to be sent (e.g.: in the simplest case "killall -USR2 pmacctd") to the daemon to notify the
		change. If such signal is sent to the daemon and this directive is not enabled, the signal
		is silently discarded. The Core Process is in charge of processing the Pre-Tagging map;
		plugins are devoted to Networks and Ports maps instead. Because signals can be sent either
		to the whole daemon (killall) or to just a specific process (kill), this mechanism also
		offers the advantage of eliciting local reloads.
DEFAULT:        true

KEY:		maps_index [GLOBAL]
VALUES:		[ true | false ]
DESC:		Enables indexing of maps (ie. pre_tag_map and all directives with the 'MAP' flag in this
		document) to increase lookup speeds on large maps and/or sustained lookup rates. Indexes
		are automatically defined based on the structure and content of the map, up to a maximum of
		8. Indexing of pre_tag_map, bgp_peer_src_as_map, flow_to_rd_map is supported. Only a sub-
		set of pre_tag_map fields are supported, including: ip, bgp_nexthop, vlan, cvlan, src_mac,
		mpls_vpn_rd, mpls_pw_id, src_as, dst_as, peer_src_as, peer_dst_as, input, output. Only IP
		addresses, ie. no IP prefixes, are supported as part of the 'ip' field. Also, negations
		are not supported (ie. 'in=-216' to match all but input interface 216). bgp_agent_map and
		sampling_map implement a separate caching mechanism and hence do not leverage this
		feature. Duplicates in the key part of the map entry, the key being defined as all fields
		except set_* ones, are not supported and may result in an "out of index space" message.
DEFAULT:        false

KEY:            pre_tag_filter, pre_tag2_filter [NO_GLOBAL]
VALUES:         [ 0-2^64-1 ]
DESC:		Expects one or more tags (when multiple tags are supplied, they need to be comma separated
		and a logical OR is used in the evaluation phase) as value and allows filtering aggregates
		based upon their tag (or tag2) value: in case of a match, the aggregate is filtered in, ie.
		it is delivered to the plugin it is attached to. This directive has to be attached to a
		plugin (that is, it cannot be global) and is suitable, for example, to split tagged data
		among the active plugins. This directive also allows specifying a value '0' to match untagged
		data, thus allowing to split tagged traffic from untagged traffic. It also allows negations by
		pre-pending a minus sign to the tag value (ie. '-6' would send everything but traffic tagged
		as '6' to the plugin it is attached to, hence achieving a filter out behaviour) and ranges
		(ie. '10-20' would send over traffic tagged in the range 10..20) and any combination of these.
		This directive makes sense if coupled with 'pre_tag_map'.
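		As a minimal sketch (plugin names and tag values are hypothetical), splitting tagged data
		among two print plugins could look like:

		plugins: print[in], print[out]
		pre_tag_filter[in]: 100
		pre_tag_filter[out]: 200, 300-400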
DEFAULT:	none

KEY:            pre_tag_label_filter [NO_GLOBAL]
DESC:		Expects one or more labels (when multiple labels are supplied, they need to be comma
		separated and a logical OR is used in the evaluation phase) as value and allows filtering in
		aggregates based upon their label value(s): only in case of a match is data delivered to the
		plugin. This directive has to be attached to a plugin (that is, it cannot be global). Null
		label values (ie. unlabelled data) can be matched using the 'null' keyword. Negations are
		allowed by pre-pending a minus sign to the label value. The use of this directive makes
		sense if coupled with 'pre_tag_map'.
DEFAULT:	none

KEY:            [ post_tag | post_tag2 ] 
VALUES:         [ 1-2^64-1 ]
DESC:           Expects a tag as value. Post-Tagging is evaluated in the plugins. The tag is used as 'tag'
		(post_tag) or 'tag2' (post_tag2) primitive value. Use of these directives hence makes sense
		if tag and/or tag2 primitives are part of the plugin aggregation method.
DEFAULT:	none

KEY:		sampling_rate
VALUES:		[ >= 1 ]
DESC:		Enables packet sampling. It expects a number which is the mean ratio of packets to be sampled
		(1 out of N). The currently implemented sampling algorithm is a simple random one. If using
		any SQL plugin, look also at the powerful 'sql_preprocess' layer and the more advanced sampling
		choices it offers: they allow dealing with advanced sampling scenarios (e.g. probabilistic
		methods). Finally, note that this 'sampling_rate' directive can be renormalized by using the 
		'usrf' action of the 'sql_preprocess' layer.
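		For example (value is arbitrary), to sample on average 1 packet out of every 10:

		sampling_rate: 10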
DEFAULT:	none

KEY:            sampling_map [GLOBAL, NO_PMACCTD, NO_UACCTD, MAP]
DESC:           Full pathname to a file containing traffic sampling mappings. It is mainly meant to be used
		in conjunction with nfacctd and sfacctd for the purpose of fine-grained reporting of sampling
		rates ('sampling_rate' primitive) circumventing bugs and issues in router operating systems.
		If counters renormalization is wanted, nfacctd_renormalize or sfacctd_renormalize must be
		also set to true. If a specific router is not defined in the map, the sampling rate advertised
		by the router itself is applied. Take a look at the examples/ sub-tree 'sampling.map.example'
		for all supported keys and detailed examples. Number of map entries (by default 384) can be
		modified via maps_entries. Content can be reloaded at runtime by sending the daemon a SIGUSR2
		signal (ie. "killall -USR2 nfacctd").
DEFAULT:	none

KEY:		[ pmacctd_force_frag_handling | uacctd_force_frag_handling ] [GLOBAL, NO_NFACCTD, NO_SFACCTD]
VALUES:		[ true | false ]
DESC:		Forces 'pmacctd' to join together IPv4/IPv6 fragments: 'pmacctd' does this only if any of
		the port primitives are selected (src_port, dst_port, sum_port); in fact, when not dealing with
		any upper layer primitive, fragments are just handled as normal packets. However, available
		filtering rules ('aggregate_filter', Pre-Tag filter rules) need such functionality enabled
		if they have to match TCP/UDP ports. So, this directive aims to support such scenarios.
DEFAULT:        false

KEY:		[ pmacctd_frag_buffer_size | uacctd_frag_buffer_size ] [GLOBAL, NO_NFACCTD, NO_SFACCTD]
DESC:		Defines the maximum size of the fragment buffer. In case IPv6 is enabled two buffers of equal
		size will be allocated. The value is expected in bytes.
DEFAULT:	4MB 

KEY:            [ pmacctd_flow_buffer_size | uacctd_flow_buffer_size ] [GLOBAL, NO_NFACCTD, NO_SFACCTD]
DESC:           Defines the maximum size of the flow buffer. This is an upper limit to avoid unlimited growth
		of the memory structure. This value has to scale according to the link traffic rate. In case
		IPv6 is enabled two buffers of equal size will be allocated. The value is expected in bytes.
DEFAULT:	16MB

KEY:            [ pmacctd_flow_buffer_buckets | uacctd_flow_buffer_buckets ] [GLOBAL, NO_NFACCTD, NO_SFACCTD] 
DESC:           Defines the number of buckets of the flow buffer - which is organized as a chained hash table.
		For better performance, the table should be reasonably flat. This value has to scale to a
		higher power of 2 according to the link traffic rate. For example, it has been reported that
		a value of 65536 works just fine under full 100Mbit load.
DEFAULT:	256

KEY:            [ pmacctd_conntrack_buffer_size | uacctd_conntrack_buffer_size ] [GLOBAL, NO_NFACCTD, NO_SFACCTD]
DESC:           Defines the maximum size of the connection tracking buffer. In case IPv6 is enabled two buffers
		of equal size will be allocated. The value is expected in bytes.
DEFAULT:	8MB

KEY:            [ pmacctd_flow_lifetime | uacctd_flow_lifetime ] [GLOBAL, NO_NFACCTD, NO_SFACCTD]
DESC:           Defines how long a non-TCP flow could remain inactive (ie. no packets belonging to such flow
		are received) before considering it expired. The value is expected in seconds.
DEFAULT:	60

KEY:            [ pmacctd_flow_tcp_lifetime | uacctd_flow_tcp_lifetime ] [GLOBAL, NO_NFACCTD, NO_SFACCTD]
DESC:           Defines how long a TCP flow could remain inactive (ie. no packets belonging to such flow are
		received) before considering it expired. The value is expected in seconds.
DEFAULT:	60 secs if classification is disabled; 432000 secs (120 hrs) if classification is enabled

KEY:		[ pmacctd_ext_sampling_rate | uacctd_ext_sampling_rate | nfacctd_ext_sampling_rate |
		  sfacctd_ext_sampling_rate ] [GLOBAL]
DESC:		Flags that captured traffic is being sampled at the specified rate. Such rate can then be
		reported ('sampling_rate' primitive), renormalized by using ie. 'pmacctd_renormalize' or
		exported by the nfprobe/sfprobe plugins. External sampling might be performed by capturing
		frameworks the daemon is linked against (ie. PF_RING, NFLOG) or appliances (ie. sampled
		packet mirroring). In nfacctd and sfacctd daemons this directive can be used to tackle
		corner cases, ie. sampling rate reported by the NetFlow/sFlow agent is missing or not
		correct; in such cases sampling_map can be alternatively used to define sampling rate per
		exporter, ie. in case the rate is not homogeneous across all exporters.
DEFAULT:	none

KEY:		[ sfacctd_renormalize | nfacctd_renormalize | pmacctd_renormalize | uacctd_renormalize ] (-R)
		[GLOBAL]
VALUES:		[ true | false ]
DESC:		Automatically renormalizes bytes/packet counter values based on available sampling info.
		The feature also calculates an effective sampling rate (sFlow only) which could differ
		from the configured one - especially at high rates - because of various losses; such
		estimated rate is then used for renormalization purposes.
DEFAULT:        false

KEY:		pmacctd_nonroot [GLOBAL]
VALUES:		[ true | false ]
DESC:		Allows running pmacctd as a user without root privileges. This can be desirable on systems
		supporting a tool like setcap, ie. 'setcap "cap_net_raw,cap_net_admin=ep" /path/to/pmacctd',
		to assign specific system capabilities to unprivileged users.
DEFAULT:	false

KEY:            sfacctd_counter_file [GLOBAL, SFACCTD_ONLY]
DESC:           Enables streamed logging of sFlow counters. Each log entry features a time reference, sFlow
		agent IP address, event type and a sequence number (to order events when time reference is not
		granular enough). Currently it is not possible to filter in/out specific counter types (ie.
		generic, ethernet, vlan, etc.). The list of supported filename variables follows:

                $peer_src_ip    sFlow agent IP address.

		Files can be re-opened by sending a SIGHUP to the daemon core process. The output file can be
		a named pipe (ie. created with mkfifo), however it has to be manually created in advance.
DEFAULT:	none

KEY:            sfacctd_counter_output [GLOBAL, SFACCTD_ONLY]
VALUES:         [ json ]
DESC:           Defines output format for the streamed logging of sFlow counters. Only JSON format is currently
		supported and requires compiling against Jansson library (--enable-jansson when configuring for
		compiling).
DEFAULT:	json

KEY:		sql_locking_style
VALUES:		[ table | row | none ]
DESC:		Defines the locking style for the SQL table. MySQL supports "table" and "none" values whereas
		PostgreSQL supports "table", "row" and "none" values. With "table" value, the plugin will lock
		the entire table when writing data to the DB with the effect of serializing access to the
		table whenever multiple plugins need to access it simultaneously. Slower but light and safe,
		ie. no risk for deadlocks and transaction-friendly; "row", the plugin will lock only the rows
		it needs to UPDATE/DELETE. It results in better overall performance but has some noticeable
		drawbacks in dealing with transactions and making the UPDATE-then-INSERT mechanism work
		smoothly; "none" disables locking: while this method can help in some cases, ie. when grants
		over the whole database (a requirement for "table" locking in MySQL) are not available, it is
		not recommended since serialization helps contain database load.
DEFAULT:        table

KEY:		nfprobe_timeouts
DESC:		Allows tuning a set of timeouts to be applied over collected packets. The value is expected in
		the following form: 'name=value:name=value:...'. The set of supported timeouts, in seconds, and
		their default values are listed below:

		tcp	(generic tcp flow life)	3600
		tcp.rst (TCP RST flow life)	120
		tcp.fin	(TCP FIN flow life)	300
		udp	(UDP flow life)		300
		icmp	(ICMP flow life)	300
		general	(generic flow life)	3600
		maxlife	(maximum flow life)	604800
		expint	(expiry interval)	60

		expint is the interval between expiry checks, ie. every 60 secs it is checked which flows are
		ready for timeout-based eviction; unscheduled evictions are possible if it's not possible to
		allocate more memory to cache flows. tcp, tcp.rst, tcp.fin, udp, icmp and general are passive
		timeouts, ie. a tcp flow is evicted after 3600 secs of inactivity ('general' applies to any
		IP protocol not being specified by other timeouts). maxlife is an active timeout and evicts
		flows even if still active and making traffic. 
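		For example (values are arbitrary), to shorten the generic TCP timeout and cap flow lifetime
		at one day:

		nfprobe_timeouts: tcp=1800:maxlife=86400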
DEFAULT:	see above

KEY:		nfprobe_hoplimit
VALUES:		[ 1-255 ]
DESC:		Value of TTL for the newly generated NetFlow datagrams.
DEFAULT:	Operating System default

KEY:		nfprobe_maxflows
DESC:		Maximum number of flows that can be tracked simultaneously.
DEFAULT:	8192

KEY:		nfprobe_receiver
DESC:		Defines the remote IP address/hostname and port to which NetFlow datagrams are to be exported.
		If IPv4, the value is expected as 'address:port'. If IPv6, it is expected as '[address]:port'.
DEFAULT:	127.0.0.1:2100

KEY:		nfprobe_dtls
VALUES:         [ true | false ]
DESC:		Enables sending out NetFlow/IPFIX packets over DTLS. Needs pmacct to be configured for
		compiling with the --enable-gnutls knob. The files (key, certificate, etc.) required by
		DTLS are to be supplied via the dtls_path config directive.
DEFAULT:	false

KEY:		nfprobe_dtls_verify_cert
DESC:		If nfprobe_dtls is set to true, this validates that the certificate received from the
		server corresponds to the specified hostname. Sample of an expected value would be
		"www.example.com".
DEFAULT:	none

KEY:            nfprobe_source_ip
DESC:           Defines the local IP address from which NetFlow datagrams are exported. Only a numerical IPv4/
		IPv6 address is expected. The supplied IP address is required to be already configured on
		one of the interfaces. This parameter is also required for graceful encoding of NetFlow v9
		and IPFIX option scoping.
DEFAULT:	IP address is selected by the Operating System

KEY:		nfprobe_version
VALUES:		[ 5, 9, 10 ]
DESC:		Version of outgoing NetFlow datagrams. NetFlow v5/v9 and IPFIX (v10) are supported. NetFlow v5
		features a fixed record structure and if not specifying an 'aggregate' directive it gets
		populated as much as possible; NetFlow v9 and IPFIX feature a dynamic template-based structure
		instead and by default it is populated as: 'src_host, dst_host, src_port, dst_port, proto, tos'.
DEFAULT:	10

KEY:            nfprobe_engine
DESC:           Allows defining Engine ID and Engine Type fields for NetFlow v5 and Source ID/Obs Domain ID
		for NetFlow v9 and IPFIX respectively. In case of NetFlow v5 export it expects two non-negative
		numbers, each up to maximum 8-bits length and separated by the ":" symbol; in case of NetFlow
		v9/IPFIX it expects a single non-negative number up to maximum 32-bits length. This is useful
		to allow a collector to distinguish between distinct probe instances running on the same host
		(otherwise the collector would report missing flows due to sequencing jumps); this is also
		important for letting NetFlow v9/IPFIX templates work correctly: in fact, template IDs get
		automatically selected only inside single daemon instances.
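		For example (values are arbitrary): "nfprobe_engine: 1:1" for a NetFlow v5 export, or
		"nfprobe_engine: 123456" for a NetFlow v9/IPFIX export.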
DEFAULT:	[ 0:0, 0 ]

KEY:		[ nfacctd_peer_as | sfacctd_peer_as | nfprobe_peer_as | sfprobe_peer_as ]
VALUES:         [ true | false ]
DESC:		When applied to [ns]fprobe, the src_as and dst_as fields are valued with peer-AS rather than
		origin-AS as part of the NetFlow/sFlow export. Requirements to enable this feature on the probes
		are: a) one of the nfacctd_as/sfacctd_as/pmacctd_as/uacctd_as set to 'bgp' and b) a fully
		functional BGP daemon (bgp_daemon). When applied to [ns]facctd instead, it uses the src_as and
		dst_as values of the NetFlow/sFlow export to populate the peer_src_as and peer_dst_as primitives.
DEFAULT:        false

KEY:            [ nfprobe_ipprec | sfprobe_ipprec | tee_ipprec ]
DESC:           Marks self-originated NetFlow (nfprobe) and sFlow (sfprobe) messages with the supplied IP
		precedence value.
DEFAULT:	0

KEY:            [ nfprobe_direction | sfprobe_direction ]
VALUES:		[ in, out, tag, tag2 ]
DESC:           Defines traffic direction. Can be statically defined via 'in' and 'out' keywords. It can also
		be dynamically determined via lookup to either 'tag' or 'tag2' values. Tag value of 1 will be
		mapped to 'in' direction, whereas tag value of 2 will be mapped to 'out'. The idea underlying
		tag lookups is that pre_tag_map supports, among other features, 'filter' matching against
		a supplied tcpdump-like filter expression; doing so against L2 primitives (ie. source or
		destination MAC addresses) allows dynamically determining traffic direction (see example at
		'examples/pretag.map.example').
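		As a minimal sketch (plugin name, MAC address and the tag-to-direction mapping are
		hypothetical), direction could be derived from a pre_tag_map matching L2 filters:

		! pretag.map: tag 1 maps to 'in', tag 2 maps to 'out'
		set_tag=1	filter='ether dst 00:11:22:33:44:55'
		set_tag=2	filter='ether src 00:11:22:33:44:55'

		! daemon configuration
		plugins: nfprobe[nf]
		pre_tag_map: /path/to/pretag.map
		nfprobe_direction[nf]: tag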
DEFAULT:	none

KEY:            [ nfprobe_ifindex | sfprobe_ifindex ]
VALUES:		[ tag, tag2, <1-4294967295> ]
DESC:           Associates an interface index (ifIndex) to a given nfprobe or sfprobe plugin. This is meant as
		an add-on to the [ns]fprobe_direction directive, ie. when multiplexing mirrored traffic from different
		sources on the same interface (ie. split by VLAN). Can be statically defined via a 32-bit integer
		or semi-dynamically determined via lookup to either 'tag' or 'tag2' values (read full elaboration
		on the [ns]fprobe_direction directive). Unless [ns]fprobe_ifindex_override is set to true, by default
		this definition will be overridden whenever the ifIndex can be determined dynamically (ie. via
		NFLOG framework).
DEFAULT:	none

KEY:		[ nfprobe_ifindex_override | sfprobe_ifindex_override ] 
VALUES:		[ true | false ]
DESC:		If an ifIndex can be determined dynamically (ie. via NFLOG framework), setting this to true
		allows for a non-zero value computed by [ns]fprobe_ifindex to override the original value;
		if the value is zero, the override does not take place.
DEFAULT:	false

KEY:		nfprobe_dont_cache
VALUES:		[ true | false ]
DESC:		Disables caching and summarisation of flows. By default a NetFlow/IPFIX agent would attempt
		to build uni-directional flows by caching individual packets and waiting for an expiration
		condition (see nfprobe_timeouts). This knob prevents that from happening and, if paired with an
		(external) packet sampling strategy, it makes a NetFlow/IPFIX agent match sFlow export
		responsiveness.
DEFAULT:	false

KEY:		nfprobe_tstamp_usec
VALUES:		[ true | false ]
DESC:		Exports timestamps to the usec resolution (instead of default msec) using NetFlow v9 / IPFIX
		IEs 154 and 155. This knob is not compatible with timestamps_secs configuration directive. 
DEFAULT:	false

KEY:		sfprobe_receiver
DESC:		Defines the remote IP address/hostname and port to which sFlow datagrams are to be exported.
		If IPv4, the value is expected as 'address:port'. If IPv6, it is expected as '[address]:port'.
DEFAULT:	127.0.0.1:6343
		
KEY:		sfprobe_agentip
DESC:		Sets the value of agentIp field inside the sFlow datagram header. Only a numerical IPv4/
		IPv6 address is expected. This value must be an IPv4 address if transport, that is
		sfprobe_source_ip and/or sfprobe_receiver, is set to IPv4; or an IPv6 address if transport
		is set to IPv6. 
DEFAULT:	localhost

KEY:		sfprobe_agentsubid
DESC:		Sets the value of agentSubId field inside the sFlow datagram header.
DEFAULT:	1402

KEY:            sfprobe_ifspeed
DESC:           Statically associates an interface speed to a given sfprobe plugin. Value is expected in bps.
DEFAULT:	100000000

KEY:		sfprobe_source_ip
DESC:		Defines the local IP address from which sFlow datagrams are exported. Only a numerical IPv4/
		IPv6 address is expected. The supplied IP address is required to be already configured on
                one of the interfaces. An IPv6 address must be configured in order to successfully export
		to an IPv6 sfprobe_receiver.
DEFAULT:        IP address is selected by the Operating System

KEY:		bgp_daemon [GLOBAL]
VALUES:		[ true | false ]
DESC:		Enables the BGP daemon thread. Neighbors are not defined explicitly but a maximum amount
		of peers is specified (bgp_daemon_max_peers); also, for security purposes, the daemon does
		not implement outbound BGP UPDATE messages and acts passively (ie. it never establishes
		a connection to a remote peer but waits for incoming connections); upon receipt of a BGP
		OPEN message the local daemon presents itself as belonging to the same AS number as the
		remote peer, unless bgp_daemon_as is set, and supporting the same (or a subset of the) BGP
		capabilities; capabilities currently supported are MP-BGP, 4-bytes ASNs, ADD-PATH.
		Per-peer RIBs are maintained. In case of ADD-PATH capability, the correct BGP info is
		linked to traffic data using BGP next-hop (or IP next-hop if use_ip_next_hop is set to
		true) as selector among the paths available.
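		As a minimal sketch (addresses and values are hypothetical), enabling the BGP thread in
		nfacctd and using it to resolve ASNs and prefixes could look like:

		bgp_daemon: true
		bgp_daemon_ip: 192.0.2.1
		bgp_daemon_max_peers: 100
		nfacctd_as: bgp
		nfacctd_net: bgp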
DEFAULT:	false

KEY:		bmp_daemon [GLOBAL]
VALUES:         [ true | false ]
DESC:           Enables the BMP daemon thread. BMP, BGP Monitoring Protocol, can be used to monitor BGP
		sessions. The BMP daemon supports BMP data, events and stats, ie. initiation, termination,
		peer up, peer down, stats and route monitoring messages. The daemon can write BMP
		messages to files, AMQP and Kafka brokers, in real-time (msglog) or at regular time intervals
		(dump). Also, route monitoring messages are saved in a RIB structure for IP prefix lookup.
		For further reference see examples in the QUICKSTART document and/or the description of the
		bmp_* config keys in this document. The BMP daemon is a separate thread in the NetFlow
		(nfacctd) and sFlow (sfacctd) collectors.
DEFAULT:        false

KEY:		[ bgp_daemon_ip | bmp_daemon_ip ] [GLOBAL]
DESC:		Binds the BGP/BMP daemon to a specific interface. Expects an IP address as value. For the
		BGP daemon the same value is presented as the BGP Router-ID (read more about the BGP Router-ID
		selection process at the bgp_daemon_id config directive description). Setting this directive
		is highly advised.
DEFAULT:	0.0.0.0

KEY:            bgp_daemon_id [GLOBAL]
DESC:		Defines the BGP Router-ID to the supplied value. Expected value is an IPv4 address. If this
		feature is not used or an invalid IP address is supplied, ie. IPv6, the bgp_daemon_ip value
		is used instead. If also bgp_daemon_ip is not defined or invalid, the BGP Router-ID defaults
		to "1.2.3.4".
DEFAULT:	1.2.3.4

KEY:            bgp_daemon_as [GLOBAL]
DESC:           Defines the BGP Local AS to the supplied value. By default (no value supplied), the session
		will be set up as iBGP, with the Local AS received from the remote peer being copied back in
		the BGP OPEN reply. This directive allows explicitly setting a Local AS which could be different
		from the remote peer one, hence establishing an eBGP session.
DEFAULT:        none

KEY:		[ bgp_daemon_port | bmp_daemon_port ] [GLOBAL] 
DESC:		Makes the BGP/BMP daemon listen to a port different from the standard port. Default port for
		BGP is 179/tcp; default port for BMP is 1790. If supported and if enabled on the system, the
		SO_REUSEPORT feature can be leveraged on Linux: it allows multiple daemons to bind the same
		local address and port in order to load-balance processing of incoming packets. This is best
		combined with a list of allowed IP addresses, ie. bgp_daemon_allow_file, to explicitly wire
		peers to collectors. At the end of this document, reference (1) points to a presentation of
		the SO_REUSEPORT feature. To enable SO_REUSEPORT on a Linux system supporting it, run 'sysctl
		net.core.allow_reuseport=1'.
DEFAULT:	bgp_daemon_port: 179; bmp_daemon_port: 1790

KEY:		[ bgp_daemon_ipprec | bmp_daemon_ipprec ] [GLOBAL]
DESC:		Marks self-originated BGP/BMP messages with the supplied IP precedence value.
DEFAULT:	0

KEY:		[ bgp_daemon_max_peers | bmp_daemon_max_peers ] [GLOBAL]
DESC:		Sets the maximum number of neighbors the BGP/BMP daemon can peer with. Upon reaching the
		limit, no more BGP/BMP sessions can be established. BGP/BMP neighbors don't need to be
		defined explicitly one-by-one; rather, an upper boundary to the number of neighbors applies.
		pmacctd, uacctd daemons are limited to only two BGP peers (in a primary/backup fashion, see
		bgp_agent_map); such hardcoded limit is imposed as the only scenarios supported in conjunction
		with the BGP daemon are as NetFlow/sFlow probes on-board software routers and firewalls.
DEFAULT:	10

KEY:		[ bgp_daemon_batch_interval | bmp_daemon_batch_interval ] [GLOBAL]
DESC:		To prevent all BGP/BMP peers from contending for resources, this defines the time interval,
		in seconds, between any two BGP/BMP peer batches. The first peer in a batch sets the base
		time, that is
		the time from which the interval is calculated, for that batch.
DEFAULT:	0

KEY:		[ bgp_daemon_batch | bmp_daemon_batch ] [GLOBAL]
DESC:		To prevent all BGP/BMP peers from contending for resources, this defines the number of BGP peers in
		each batch. If a BGP/BMP peer is not allowed by an ACL (ie. bgp_daemon_allow_file), room is
		recovered in the current batch; if a BGP/BMP peer in a batch is replenished (ie. connection
		drops, is reset, etc.) no new room is made in the current batch (rationale being: be a bit
		conservative, batch might have been set too big, let's try to limit flapping).
DEFAULT: 	0

KEY:            [ bgp_daemon_msglog_file | bmp_daemon_msglog_file | telemetry_daemon_msglog_file ] [GLOBAL]
DESC:		Enables streamed logging of BGP tables/BMP events/Streaming Telemetry data. Each log entry
		features a time reference, peer/exporter IP address, event type and a sequence number (to
		order events when time reference is not granular enough). BGP UPDATE messages also contain
		full prefix and BGP attributes information. The list of supported filename variables follows:

                $peer_src_ip    	BGP peer IP address.

                $bmp_router		BMP peer IP address.

                $telemetry_node		Streaming Telemetry exporter IP address.

		$peer_tcp_port  	BGP peer TCP port.

		$bmp_router_port	BMP peer TCP port.

		$telemetry_node_port	Streaming Telemetry exporter port.

		Files can be re-opened by sending a SIGHUP to the daemon core process. The output file can be
		a named pipe (ie. created with mkfifo), however it has to be manually created in advance.
DEFAULT:	none

KEY:            [ bgp_daemon_msglog_avro_schema_file | bmp_daemon_msglog_avro_schema_file |
		  bgp_table_dump_avro_schema_file | bmp_dump_avro_schema_file ] [GLOBAL]
DESC:           Export the schema(s) generated to encode BGP/BMP messages to the given file path. The
		schema can then be used by the receiving end to decode the messages. inotify-tools can
		be used to take event-driven actions, like notifying a consumer whenever the file is
		modified.
DEFAULT:        none

KEY:            [ bgp_daemon_msglog_kafka_avro_schema_registry | bmp_daemon_msglog_kafka_avro_schema_registry |
		  bgp_table_dump_kafka_avro_schema_registry | bmp_dump_kafka_avro_schema_registry ] [GLOBAL] 
DESC:           The URL to a Confluent Avro Schema Registry. The value is passed to libserdes as argument
                for "schema.registry.url". A sample of the expected value is https://localhost. This is
                a pointer to the REST API https://docs.confluent.io/current/schema-registry/docs/api.html
                The schema name is auto-generated: if the topic is static, the schema name is created as
                "<kafka_topic>-<bgp/bmp suffix>-value" (ie. if the Kafka topic is set to 'foobar' then the
		schema name will be "foobar-bgp-msglog-value", "foobar-bmp-msglog-rm-value", etc.). Dynamic
		topics are not supported. To confirm that the schema is registered, the following command can
		be used: "curl -X GET https://<Schema Registry host>/subjects | jq . | grep <schema name>".
DEFAULT:        none

KEY:		[ bgp_daemon_msglog_output | bmp_daemon_msglog_output ] [GLOBAL]
VALUES:         [ json | avro | avro_json ]
DESC:		Defines output format for the streamed logging of BGP messages and BMP messages and
		events. JSON, binary-encoded Avro and JSON-encoded Avro formats are supported.
DEFAULT:	json

KEY:		bgp_aspath_radius [GLOBAL]
DESC:		Cuts down AS-PATHs to the specified number of ASN hops. If the same ASN is repeated multiple
		times (ie. as effect of prepending), each of them is regarded as one hop. By default AS-PATHs
		are left intact unless reaching the maximum length of the buffer (128 chars). 
DEFAULT:	none

KEY:		[ bgp_stdcomm_pattern | bgp_extcomm_pattern | bgp_lrgcomm_pattern ] [GLOBAL]
DESC:		Filters BGP standard, extended and large communities against the supplied pattern. The
		underlying idea is that many communities can be attached to a prefix; some of these can
		be of little or no interest for the accounting task; this feature allows selecting only
		the relevant ones. The memory plugin has a buffer limit of 96 chars; all the others
		do not as buffers are handled dynamically. The filter does substring matching, ie.
		12345:64 will match communities in the ranges 64-64, 640-649, 6400-6499 and 64000-64999.
		The '.' symbol can be used to wildcard a defined number of characters, ie. 12345:64...
		will match community values in the range 64000-64999 only. Multiple patterns can be 
		supplied comma-separated.
DEFAULT:	none

KEY:            [ bgp_stdcomm_pattern_to_asn | bgp_lrgcomm_pattern_to_asn ] [GLOBAL]
DESC:           Filters BGP standard communities against the supplied pattern. The algorithm employed is
		the same as for the bgp_*comm_pattern directives: read implementation details there. The
		first matching community is taken and split using the ':' symbol as delimiter. The first
		part is mapped onto the peer AS field while the second is mapped onto the origin AS field;
		in case of Large Communities, the third part is unused. The aim of this directive is to
		deal with IP prefixes of one's own address space, ie. statics or connected redistributed
		in BGP. As an example: BGP standard community XXXXX:YYYYY is mapped as: Peer-AS=XXXXX,
		Origin-AS=YYYYY. Multiple patterns can be supplied comma-separated.
DEFAULT:	none

KEY:		bgp_peer_as_skip_subas [GLOBAL]
VALUES:		[ true | false ]
DESC:		When determining the peer AS (source and destination), skip potential confederated sub-AS
		and report the first ASN external to the routing domain. When enabled, if no external ASNs
		are found on the AS-PATH except the confederated sub-ASes, the first sub-AS is reported.
DEFAULT:        false

KEY:		bgp_peer_src_as_type [GLOBAL]
VALUES:		[ netflow | sflow | map | bgp ]
DESC:		Defines the method to use to map incoming traffic to a source peer ASN. "map" selects a
		map, reloadable at runtime, specified by the bgp_peer_src_as_map directive (refer to it for
		further information); "bgp" implements native BGP RIB lookups. BGP lookups assume traffic is
		symmetric, which is often not the case, affecting their accuracy.
DEFAULT:	netflow, sflow

KEY:		bgp_peer_src_as_map [GLOBAL, MAP]
DESC:		Full pathname to a file containing source peer AS mappings. The AS can be mapped to one or
		a combination of: ifIndex, source MAC address and BGP next-hop (query against the BGP RIB
		to look up the source IP prefix). This is sufficient to model popular techniques for both
		public and private BGP peerings. Sample map in 'examples/peers.map.example'. Content can
		be reloaded at runtime by sending the daemon a SIGUSR2 signal (ie. "killall -USR2 nfacctd").
		To automate mapping of MAC addresses to ASNs please see https://github.com/pierky/mactopeer
		Written by Pier Carlo Chiodi, it leverages the popular NAPALM framework and, for the case
		of route-servers at IXPs, PeeringDB info.
DEFAULT:	none

KEY:            bgp_src_std_comm_type [GLOBAL]
VALUES:         [ bgp ]
DESC:		Defines the method to use to map incoming traffic to a set of standard communities. Only
		native BGP RIB lookups are currently supported. BGP lookups assume traffic is symmetric,
		which is often not the case, affecting their accuracy.
DEFAULT:	none

KEY:            bgp_src_ext_comm_type [GLOBAL]
VALUES:         [ bgp ]
DESC:           Defines the method to use to map incoming traffic to a set of extended communities. Only
                native BGP RIB lookups are currently supported. BGP lookups assume traffic is symmetric,
                which is often not the case, affecting their accuracy.
DEFAULT:	none

KEY:            bgp_src_lrg_comm_type [GLOBAL]
VALUES:         [ bgp ]
DESC:           Defines the method to use to map incoming traffic to a set of large communities. Only
                native BGP RIB lookups are currently supported. BGP lookups assume traffic is symmetric,
                which is often not the case, affecting their accuracy.
DEFAULT:        none

KEY:            bgp_src_as_path_type [GLOBAL]
VALUES:         [ bgp ]
DESC:           Defines the method to use to map incoming traffic to an AS-PATH. Only native BGP RIB lookups
		are currently supported. BGP lookups assume traffic is symmetric, which is often not the
		case, affecting their accuracy.
DEFAULT:	none

KEY:            bgp_src_local_pref_type [GLOBAL]
VALUES:         [ map | bgp ]
DESC:           Defines the method to use to map incoming traffic to a local preference. Only native BGP
		RIB lookups are currently supported. BGP lookups assume traffic is symmetric, which is
		often not the case, affecting their accuracy.
DEFAULT:	none

KEY:            bgp_src_local_pref_map [GLOBAL, MAP]
DESC:           Full pathname to a file containing source local preference mappings. The LP value can be
		mapped to one or a combination of: ifIndex, source MAC address and BGP next-hop (query
		against the BGP RIB to look up the source IP prefix). Sample map in 'examples/
		lpref.map.example'. Content can be reloaded at runtime by sending the daemon a SIGUSR2
		signal (ie. "killall -USR2 nfacctd").
DEFAULT:	none

KEY:            bgp_src_med_type [GLOBAL]
VALUES:         [ map | bgp ]
DESC:           Defines the method to use to map incoming traffic to a MED value. Only native BGP RIB
                lookups are currently supported. BGP lookups assume traffic is symmetric, which is often
		not the case, affecting their accuracy.
DEFAULT:	none

KEY:		bgp_src_roa_type [GLOBAL]
VALUES:		[ bgp ]
DESC:           Defines the method to use to map incoming traffic to a ROA status. Only native BGP RIB
		lookups are currently supported. BGP lookups assume traffic is symmetric, which is often
		not the case, affecting their accuracy.
DEFAULT:	none

KEY:            bgp_src_med_map [GLOBAL, MAP]
DESC:           Full pathname to a file containing source MED (Multi Exit Discriminator) mappings. The
		MED value can be mapped to one or a combination of: ifIndex, source MAC address and BGP
		next-hop (query against the BGP RIB to look up the source IP prefix). Sample map in
		'examples/med.map.example'. Content can be reloaded at runtime by sending the daemon a
		SIGUSR2 signal (ie. "killall -USR2 nfacctd").
DEFAULT:	none

KEY:		[ bgp_agent_map | bmp_agent_map ] [GLOBAL, MAP]
DESC:		Full pathname to a file to map source 1a) IP address of NetFlow/IPFIX agents or 1b)
		AgentID of sFlow agents to 2a) session IP address or Router ID of BGP peers or 2b)
		session IP address of BMP peers.
		This feature is meant to provide flexibility in a number of scenarios, for example
		BGP peering with RRs, hub-and-spoke topologies, single-homed networks - but also BGP
		sessions traversing NAT.
		pmacctd and uacctd daemons are required to use this map with at most two "catch-all"
		entries working in a primary/backup fashion (see for more info bgp_agent.map.example
		in the examples section): this is because these daemons do not have a NetFlow/sFlow
		source address to match to.
		Number of map entries (by default 384) can be modified via maps_entries. Content can be
		reloaded at runtime by sending the daemon a SIGUSR2 signal (ie. "killall -USR2 nfacctd").
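		As a minimal sketch (addresses are hypothetical), a bgp_agent_map entry maps a NetFlow/sFlow
		agent to the BGP peer whose RIB should be used for lookups:

		bgp_ip=192.0.2.1	ip=203.0.113.10

		See bgp_agent.map.example for the complete syntax.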
DEFAULT:	none

KEY:		flow_to_rd_map [GLOBAL, MAP]
DESC:		Full pathname to a file to map flows (typically, a) router or b) ingress router, input
		interfaces or c) MPLS bottom label, BGP next-hop couples) to BGP/MPLS Virtual Private
		Network (VPN) Route Distinguisher (RD), based upon rfc4364. See flow_to_rd.map file in
		the examples section for further info. Definitions in this map do override same data
		received from the export protocol if any (ie. NetFlow v9/IPFIX IE #90). Number of map
		entries (by default 384) can be modified via maps_entries. Content can be reloaded at
		runtime by sending the daemon a SIGUSR2 signal (ie. "killall -USR2 nfacctd").
DEFAULT:	none

KEY:		bgp_follow_default [GLOBAL]
DESC:		Expects positive number value which instructs how many times a default route, if any, can
		be followed in order to successfully resolve source and destination IP prefixes. This is
		aimed at scenarios where neighbors peering with pmacct have a default-only or partial BGP
		view. At each recursion (default route follow-up) the value gets decremented; the process
		stops when one of these conditions is met:

		* both source and destination IP prefixes are resolved
		* there is no available default route
		* the default gateway is not BGP peering with pmacct
		* the recursion value reaches zero

		As soon as an IP prefix is matched, it is not looked up anymore in case more recursions
		are required (ie. the closer the router is, the more specific the route is assumed to be).
		pmacctd, uacctd daemons are internally limited to only two BGP peers hence this feature
		can't properly work.
DEFAULT:	0

KEY:            bgp_follow_nexthop [GLOBAL]
DESC:		Expects one or more IP prefix(es), ie. 192.168.0.0/16, comma separated. A maximum of 32
		IP prefixes is supported. It follows the BGP next-hop up (using each next-hop as BGP
		source-address for the next BGP RIB lookup), returning the last next-hop part of the
		supplied IP prefix(es) as value for the 'peer_ip_dst' primitive. bgp_agent_map is supported
		at each recursion. This feature is aimed at networks, for example, involving BGP
		confederations; the underlying goal being to see the routing-domain "exit-point". The
		feature is internally protected against routing loops with a hardcoded limit of 20
		lookups; pmacctd, uacctd daemons are internally limited to only two BGP peers hence this
		feature can't properly work.
DEFAULT:	none

KEY:            bgp_follow_nexthop_external [GLOBAL]
VALUES:         [ true | false ]
DESC:           If set to true makes bgp_follow_nexthop return the next-hop from the routing table of
		the last node part of the supplied IP prefix(es) as value for the 'peer_ip_dst' primitive.  
		This may help to pin-point the (set of) exit interface(s).
DEFAULT:	false

KEY:		bgp_disable_router_id_check [GLOBAL]
VALUES:         [ true | false ]
DESC:           If set to true disables the BGP Router-ID check both at BGP OPEN time and BGP lookup.
		This knob is useful, for example, in scenarios where v4 AFs are over a v4 transport and
		v6 AFs are over v6 transport but both share the same BGP Router-ID.  
DEFAULT:	false

KEY:            bgp_neighbors_file [GLOBAL]
DESC:		Writes a list of the BGP neighbors in the established state to the specified file, one
		per line. This is particularly useful for automation purposes (ie. auto-discovery of
		devices to poll via SNMP).
DEFAULT:	none

KEY:            [ bgp_daemon_allow_file | bmp_daemon_allow_file ] [GLOBAL, MAP]
DESC:           Full pathname to a file containing the list of IP addresses/prefixes (one for each
		line) allowed to establish a BGP/BMP session. Content can be reloaded at runtime by
		sending the daemon a SIGUSR2 signal (ie. "killall -USR2 nfacctd"); changes are
		applied to new sessions only. Sample map in examples/allow.lst.example .
DEFAULT:	none (ie. allow all)

KEY:            bgp_daemon_md5_file [GLOBAL]
DESC:           Full pathname to a file containing the BGP peers (IP address only, one for each line)
		and their corresponding MD5 passwords in CSV format (ie. 10.15.0.1, arealsmartpwd).
		BGP peers not making use of an MD5 password should not be listed. The maximum number
		of peers supported is 8192. For a sample map look in: 'examples/bgp_md5.lst.example'.
		/proc/sys/net/core/optmem_max default value allows for some 150 keys to be registered
		as https://patchwork.ozlabs.org/patch/138861/ documents: its value should be reviewed
		upwards in order to register more keys.
DEFAULT:	none

KEY:		[ bgp_table_peer_buckets | bmp_table_peer_buckets ] [GLOBAL]
VALUES:		[ 1-1000 ]
DESC:		Routing information related to BGP prefixes is kept per-peer in order to simulate a
		multi-RIB environment and is internally structured as a hash with conflict chains.
		This parameter sets the number of buckets of such hash structure; the value is directly
		related to the number of expected BGP peers, should never exceed such amount and: a) if
		only best-path is received this is best set to 1/10 of the expected peers; b) if BGP
		ADD-PATHs is received this is best set to 1/1 of the expected peers. The default value
		proved to work fine up to approx 100 BGP peers sending best-path only, in lab. More
		buckets means better CPU usage but also increased memory footprint - and vice-versa.
DEFAULT:	13

KEY:		[ bgp_table_per_peer_buckets | bmp_table_per_peer_buckets ] [GLOBAL]
VALUE:		[ 1-128 ]
DESC:		With same background information as bgp_table_peer_buckets, this parameter sets the
		number of buckets over which per-peer information is distributed (hence effectively
		creating a second dimension on top of bgp_table_peer_buckets, useful when much BGP
		information per peer is received, ie. in case of BGP ADD-PATHs). Default proved to
		work fine if BGP sessions are passing best-path only. In case of BGP ADD-PATHs it is
		instead recommended to set this value to 1/3 of the configured maximum number of
		paths per prefix to be exported.
DEFAULT:	1

KEY:            [ bgp_table_attr_hash_buckets | bmp_table_attr_hash_buckets ] [GLOBAL]
VALUE:          [ 1-1000000 ]
DESC:		Sets the number of buckets of BGP attributes hashes (ie. AS-PATH, communities, etc.).
		Default proved to work fine with BGP sessions passing best-path only and with up to
		25 BGP sessions passing ADD-PATH.
DEFAULT:	65535

KEY:            [ bgp_table_per_peer_hash | bmp_table_per_peer_hash ] [GLOBAL]
VALUE:          [ path_id ]
DESC:		If bgp_table_per_peer_buckets is greater than 1, this parameter allows setting the
		hashing to be used. By default hashing happens against the BGP ADD-PATH path_id field.
		Hashing over other fields or field combinations (hashing over BGP next-hop is on the
		radar) is planned to be supported in the future.
DEFAULT:	path_id

KEY:            [ bgp_table_dump_file | bmp_dump_file | telemetry_dump_file ] [GLOBAL] 
DESC:           Enables dump of BGP tables/BMP events/Streaming Telemetry data at regular time
		intervals (as defined by, for example, bgp_table_dump_refresh_time) into files.
		The specified file can be a named pipe (ie. created with mkfifo), however it has
		to be manually created in advance. Each dump event features a time reference and
		peer/exporter IP address along with the rest of BGP/BMP/Streaming Telemetry data.
		The list of supported filename variables follows:

                %d              	The day of the month as a decimal number (range 01 to 31).

                %H              	The hour as a decimal number using a 24 hour clock (range 00 to 23).

                %m              	The month as a decimal number (range 01 to 12).

                %M              	The minute as a decimal number (range 00 to 59).

                %s              	The number of seconds since Epoch, ie., since 1970-01-01 00:00:00 UTC.

		%S			The seconds as a decimal number (range 00 to 60).

                %w              	The day of the week as a decimal, range 0 to 6, Sunday being 0.

                %W              	The week number of the current year as a decimal number, range
                                	00 to 53,  starting  with the first Monday as the first day of
                                	week 01.

                %Y              	The year as a decimal number including the century.

		%z			The +hhmm numeric time zone in ISO8601:1988 format (ie. -0400).

		$tzone			The time zone in rfc3339 format (ie. -04:00 or 'Z' for +00:00).

		$peer_src_ip		BGP peer IP address.

		$bmp_router		BMP peer IP address.

		$telemetry_node		Streaming Telemetry exporter IP address.

		$peer_tcp_port		BGP peer TCP port.

		$bmp_router_port	BMP peer TCP port.

		$telemetry_node_port	Streaming Telemetry exporter port.
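
		Putting it together, for example (path is a placeholder):

		bgp_table_dump_file: /path/to/bgp-$peer_src_ip-%Y%m%d-%H%M.txt
		bgp_table_dump_refresh_time: 300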

DEFAULT:	none

KEY:            [ bgp_table_dump_output | bmp_dump_output ] [GLOBAL]
VALUES:         [ json | avro | avro_json ]
DESC:           Defines output format for the dump of BGP tables and BMP events. JSON, binary-encoded
		Avro and JSON-encoded Avro formats are supported.
DEFAULT:	json

KEY:		[ bgp_table_dump_refresh_time | bmp_dump_refresh_time | telemetry_dump_refresh_time ]
		[GLOBAL]
VALUES:		[ 60 .. 86400 ]
DESC:		Time interval, in seconds, between two consecutive executions of the dump of BGP
		tables/BMP events/Streaming Telemetry data to files.
DEFAULT:	0

KEY:            [ bgp_table_dump_latest_file | bmp_dump_latest_file | telemetry_dump_latest_file ]
		[GLOBAL]
DESC:           Defines the full pathname to pointer(s) to latest file(s). Dynamic names are supported
                through the use of variables, which are computed at the moment when data is purged to the
                backend: refer to bgp_table_dump_file (and companion directives) for a full listing of
		supported variables; time-based variables are not allowed. Update of the latest pointer
		is done evaluating files modification time. See also print_latest_file for examples.
DEFAULT:        none

KEY:		bgp_daemon_lg [GLOBAL]
VALUES:		[ true | false ]
DESC:		Enables the BGP Looking Glass server, allowing queries, ie. looking up IP
		addresses/prefixes or getting the list of BGP peers, against available BGP RIBs. The
		server is asynchronous and uses ZeroMQ as transport layer to serve incoming queries.
		Sample C/Python LG clients are available in 'examples/lg'. Sample LG server config
		is available in QUICKSTART. Request/Reply Looking Glass formats are documented in
		'docs/LOOKING_GLASS_FORMAT'.
DEFAULT:	false

KEY:            bgp_daemon_lg_ip [GLOBAL]
DESC:           Binds the BGP Looking Glass server to a specific interface. Expects as value an IP
		address.
DEFAULT:        0.0.0.0

KEY:            bgp_daemon_lg_port [GLOBAL]
DESC:           Makes the BGP Looking Glass server listen to a specific port.
DEFAULT:        17900

KEY:		bgp_daemon_lg_user [GLOBAL]
DESC:		Enables plain username/password authentication in the BGP Looking Glass server. This
		directive sets the expected username. By default authentication is disabled.
DEFAULT:	none

KEY:		bgp_daemon_lg_passwd [GLOBAL]
DESC:		Enables plain username/password authentication in the BGP Looking Glass server. This
		directive sets the expected password. By default authentication is disabled.
DEFAULT:	none

KEY:		bgp_daemon_lg_threads [GLOBAL]
DESC:		Defines the number of threads used by the BGP Looking Glass to serve incoming queries.
DEFAULT:	8

KEY:		bgp_daemon_xconnect_map [MAP, GLOBAL]
DESC:		Enables BGP proxying. Full pathname to a file to cross-connect BGP peers (ie. edge
		routers part of an observed network topology) to BGP collectors (ie. nfacctd daemons
		correlating flow and BGP data). The mapping works only against the IP address layer
		and not the BGP Router ID, only 1:1 relationships are formed (ie. this is about
		cross-connecting, not replication) and only one session per BGP peer is supported
		(ie. scenarios with multiple BGP agents running on the same IP address, or NAT
		traversal, are not supported [yet]). TCP-MD5 is supported on inbound sessions to the
		proxy (via bgp_daemon_md5_file) but not on outbound ones. 
		A sample map is provided in 'examples/bgp_xconnects.map.example'. Number of map
		entries (by default 384) can be modified via maps_entries. Content can be reloaded
		at runtime by sending the daemon a SIGUSR2 signal (ie. "killall -USR2 nfacctd").
DEFAULT:	none

KEY:            [ bmp_daemon_parse_proxy_header ]
VALUES:         [ true | false ]
DESC:           Defines whether to parse the first packet of a connection looking for a
                Proxy Protocol header containing client information (IP addresses and TCP ports).
                The source IP address and TCP port in the header replace the peer IP address and
                peer TCP port obtained from the socket.
                The following is a simple HAProxy configuration example where HAProxy listens on
                TCP port 5001 for BMP packets and forwards them to a PMBMPD daemon listening on TCP
                port 5000.  A binary version 2 Proxy Protocol header is prepended to the first packet
                of the TCP connection.
                    frontend bmp_ha_proxy 
                        bind <HAProxy IP Address>:5001
                        mode tcp
                        default_backend bmpnodes
                    backend bmpnodes
                        mode tcp
                        server bmp-dev <PMBMPD IP Address>:5000 send-proxy-v2
DEFAULT:        false

KEY:		rpki_roas_file [MAP, GLOBAL]
DESC:		Full pathname to a file containing RPKI ROA data. Data encoding is JSON and format
		is according to RIPE Validator format. ROA data can be obtained for example from
		https://rpki.gin.ntt.net/api/export.json . An example of the format:

		{
		  "roas" : [ {
		    "asn" : "AS2914",
		    "prefix" : "128.121.0.0/16",
		    "maxLength" : 16,
		    "ta" : "ARIN"
		  }, {
		    "asn" : "AS2914",
		    "prefix" : "128.121.0.0/19",
		    "maxLength" : 24,
		    "ta" : "ARIN"
		  } ]
		}

		Content can be reloaded at runtime by sending the daemon a SIGUSR2 signal (ie.
		"killall -USR2 nfacctd").
DEFAULT:	none

KEY:		rpki_rtr_cache [GLOBAL]
DESC:		Defines the remote IP address and port of a RPKI RTR cache to connect to. If IPv4,
		the value is expected as 'address:port'. If IPv6, it is expected as '[address]:port'.
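
		An illustrative example (the cache address is an example only; 323 is the rpki-rtr port
		assigned by IANA; rpki_rtr_cache_version is described below):

		    rpki_rtr_cache: 203.0.113.10:323
		    rpki_rtr_cache_version: 1
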
DEFAULT:	none

KEY:		rpki_rtr_cache_version [GLOBAL]
VALUES:		[ 0 | 1 ]
DESC:		Defines the RPKI RTR protocol version to use. Version 0 is documented in rfc6810;
		Version 1 is documented in rfc8210.
DEFAULT:	0

KEY:		rpki_rtr_cache_pipe_size [GLOBAL]
DESC:           Defines the size of the kernel socket used for RPKI RTR datagrams (see also
		bgp_daemon_pipe_size for more info).
DEFAULT:        Operating System default

KEY:		rpki_rtr_cache_ipprec [GLOBAL]
DESC:           Marks self-originated RPKI RTR messages with the supplied IP precedence value.
DEFAULT:	0

KEY:		isis_daemon [GLOBAL]
VALUES:		[ true | false ]
DESC:		Enables the skinny IS-IS daemon thread. It implements P2P Hellos, CSNP and PSNP -
		and does not send any LSP information out. It currently supports a single L2 P2P
		neighborship. Testing has been done over a GRE tunnel.
DEFAULT:        false

KEY:		isis_daemon_ip [GLOBAL]
DESC:		Sets the sub-TLV of the Extended IS Reachability TLV that contains an IPv4 address for the
		local end of a link. No default value is set and a non-zero value is mandatory. It should
		be set to the IPv4 address configured on the interface pointed by isis_daemon_iface. 
DEFAULT:	none

KEY:		isis_daemon_net [GLOBAL]
DESC:		Defines the Network entity title (NET) of the IS-IS daemon. In turn a NET defines the area
		addresses for the IS-IS area and the system ID of the router. No default value is set and
		a non-zero value is mandatory. Extensive IS-IS and ISO literature covers the topic; an
		example of the NET value format can be found as part of the "Quickstart guide to setup
		the IS-IS daemon" in the QUICKSTART document.
DEFAULT:	none

KEY:		isis_daemon_iface [GLOBAL]
DESC:		Defines the network interface (ie. gre1) to which the IS-IS daemon is bound. No default value
		is set and a non-zero value is mandatory.
DEFAULT:	none

KEY:		isis_daemon_mtu [GLOBAL]
DESC:		Defines the available MTU for the IS-IS daemon. P2P HELLOs will be padded to such length.
		When the daemon is configured to set a neighborship with a Cisco router running IOS, this
		value should match the value of the "clns mtu" IOS directive.
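
		An illustrative end-to-end IS-IS setup (all values are examples only; see QUICKSTART for
		a discussion of the NET format):

		    isis_daemon: true
		    isis_daemon_ip: 10.0.1.2
		    isis_daemon_net: 49.0001.0100.0000.0001.00
		    isis_daemon_iface: gre1
		    isis_daemon_mtu: 1476
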
DEFAULT:	1476

KEY:		isis_daemon_msglog [GLOBAL]
VALUES:		[ true | false ]
DESC:	 	Enables IS-IS message logging: as this can easily get verbose, it is intended for debug
		and troubleshooting purposes only.
DEFAULT:        false

KEY:		[ geoip_ipv4_file | geoip_ipv6_file ] [GLOBAL]
DESC:		If pmacct is compiled with --enable-geoip, this defines full pathname to the Maxmind GeoIP
		Country v1 ( http://dev.maxmind.com/geoip/legacy/install/country/ ) IPv4/IPv6 databases
		to use. pmacct, leveraging the Maxmind API, will detect if the file is updated and reload
		it. The use of --enable-geoip is mutually exclusive with --enable-geoipv2. 
DEFAULT:	none

KEY:            geoipv2_file [GLOBAL]
DESC:           If pmacct is compiled with --enable-geoipv2, this defines the full pathname to a Maxmind
		GeoIP database v2 (libmaxminddb, ie. https://dev.maxmind.com/geoip/geoip2/geolite2/ ). It
		allows resolving GeoIP-related primitives like countries, pocodes and coordinates. Only
		the binary database format is supported (ie. it is not possible to load distinct CSVs for
		IPv4 and IPv6 addresses). --enable-geoip is mutually exclusive with --enable-geoipv2.
		Files can be reloaded at runtime by sending the daemon a SIGUSR2 signal (ie. "killall -USR2
		nfacctd").
DEFAULT:        none

KEY:		uacctd_group [GLOBAL, UACCTD_ONLY]
DESC:		Sets the Linux Netlink NFLOG multicast group to be joined.
DEFAULT:	0

KEY:		uacctd_nl_size [GLOBAL, UACCTD_ONLY]
DESC:		Sets NFLOG Netlink internal buffer size (specified in bytes). It is 128KB by default, but to
		safely record bursts of high-speed traffic, it could be further increased. For high loads,
		values as large as 2MB are recommended. When modifying this value, it is also recommended
		to reflect the change to the 'snaplen' option.
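
		For instance, a hypothetical high-load setup following the recommendation above could
		look like (the NFLOG group number is an example only and must match the one used by the
		iptables/nftables NFLOG target):

		    uacctd_group: 5
		    uacctd_nl_size: 2097152
		    snaplen: 4096
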
DEFAULT:	131072

KEY:		uacctd_threshold [GLOBAL, UACCTD_ONLY]
DESC:		Sets the number of packets to queue inside the kernel before sending them to userspace. Higher
		values result in less overhead per packet but increase delay until the packets reach userspace.
DEFAULT:	1

KEY:		tunnel_0 [GLOBAL, NO_NFACCTD, NO_SFACCTD]
DESC:		Defines tunnel inspection in pmacctd and uacctd, disabled by default (note: this feature
		is currently unrelated to tunnel_* primitives). The daemon will then account on tunnelled
		data rather than on the envelope. The implementation approach is stateless, ie. control
		messages are not handled. Up to 4 tunnel layers are supported (ie. <tun proto>, <options>;
		<tun proto>, <options>; ...). Up to 8 tunnel stacks will be supported (ie. configuration
		directives tunnel_0 .. tunnel_8), to be used in a strictly sequential order. The first
		stack that matches at the first tunnel layer wins. Supported tunnel protocols and related
		options are listed below:

		GTP, GPRS tunnelling protocol. Expects as option the UDP port identifying the protocol. 
		tunnel_0: gtp, <UDP port> 
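
		For instance, to account on traffic carried inside GTP user-plane tunnels on the standard
		GTP-U UDP port (2152), a hypothetical configuration would be:

		    tunnel_0: gtp, 2152
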
DEFAULT:	none

KEY:            tee_receivers [MAP]
DESC:           Defines full pathname to a list of remote IP addresses and ports to which NetFlow/sFlow
		datagrams are to be replicated to. Examples are available in "examples/tee_receivers.lst.
		example" file. Number of map entries (by default 384) can be modified via maps_entries.
		Content can be reloaded at runtime by sending the daemon a SIGUSR2 signal (ie. "killall
		-USR2 nfacctd").
DEFAULT:	none

KEY:		tee_pipe_size
DESC:           Defines the size of the kernel socket to write replicated traffic data. The socket is
		highlighted below with "XXXX":

								 XXXX
		[kernel] ----> [core process] ----> [tee plugin] ----> [kernel] ----> [network]
			       [_____________pmacct____________]

		On Linux systems, if this configuration directive is not specified default socket size
		awarded is defined in /proc/sys/net/core/[rw]mem_default ; the maximum configurable
		socket size is defined in /proc/sys/net/core/[rw]mem_max instead. Still on Linux, the
		"drops" field of /proc/net/udp or /proc/net/udp6 can be checked to ensure its value
		is not increasing.
DEFAULT:	Operating System default

KEY:		tee_source_ip
DESC:           Defines the local IP address from which NetFlow/sFlow datagrams are to be replicated.
		Only a numerical IPv4/IPv6 address is expected. The supplied IP address is required to be
		already configured on one of the interfaces. Value is ignored when transparent replication
		is enabled.
DEFAULT:	IP address is selected by the Operating System

KEY:		tee_transparent
VALUES:         [ true | false ]
DESC:		Enables transparent replication mode: the source IP address of replicated datagrams is
		spoofed to that of the original sender. It requires super-user permissions.
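
		A minimal illustrative replication setup (the plugin instance name and receivers file
		pathname are examples only):

		    plugins: tee[a]
		    tee_receivers[a]: /path/to/tee_receivers.lst
		    tee_transparent[a]: true
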
DEFAULT:        false

KEY:            tee_max_receiver_pools
DESC:           The tee receivers list is organized in pools of receivers (for present and future
		features that require grouping). This directive defines the number of pools to be
		allocated and cannot be changed at runtime.
DEFAULT:	128

KEY:            tee_max_receivers
DESC:           The tee receivers list is organized in pools of receivers (for present and future
                features that require grouping). This directive defines the number of receivers per
		pool to be allocated and cannot be changed at runtime.
DEFAULT:	32

KEY:            tee_kafka_config_file
DESC:           Full pathname to a file containing directives to configure librdkafka when emitting
		replicated datagrams to a Kafka broker. See kafka_config_file for more info.
DEFAULT:        none

KEY:		thread_stack
DESC:		Defines the stack size for threads created by the daemon. The value is expected in
		bytes. A value of 0, default, leaves the stack size to the system default or pmacct
		minimum (8192000) if system default is too low. Some systems may throw an error if
		the defined size is not a multiple of the system page size.
DEFAULT:	0

KEY:		telemetry_daemon [GLOBAL]
VALUES:         [ true | false ]
DESC:		Enables the Streaming Telemetry thread in all daemons except pmtelemetryd (which
		collects telemetry as part of its core functionality). Quoting Cisco IOS-XR Telemetry
		Configuration Guide at the time of this writing: "Streaming telemetry lets users direct
		data to a configured receiver. This data can be used for analysis and troubleshooting
		purposes to maintain the health of the network. This is achieved by leveraging the
		capabilities of machine-to-machine communication. The data is used by development and
		operations (DevOps) personnel who plan to optimize networks by collecting analytics of
		the network in real-time, locate where problems occur, and investigate issues in a
		collaborative manner.".
DEFAULT:        false

KEY:		telemetry_daemon_port_tcp [GLOBAL]
DESC:		Makes the Streaming Telemetry daemon, pmtelemetryd, or the Streaming Telemetry thread
		listen on the specified TCP port (see telemetry/README.telemetry for gRPC support).
DEFAULT:	none

KEY:            telemetry_daemon_port_udp [GLOBAL]
DESC:           Makes the Streaming Telemetry daemon, pmtelemetryd, or the Streaming Telemetry thread
		listen on the specified UDP port. 
DEFAULT:        none

KEY:		telemetry_daemon_ip [GLOBAL]
DESC:		Binds the Streaming Telemetry daemon to a specific interface. Expects as value an IPv4/
		IPv6 address.
DEFAULT:        0.0.0.0

KEY:		telemetry_daemon_decoder [GLOBAL]
VALUES:		[ json | gpb | cisco_v0 | cisco_v1 ]
DESC:		Sets the Streaming Telemetry data decoder to the specified type (over TCP or UDP
		transports). Cisco 32-bit OS versions tend to prepend a 12-byte proprietary header
		to GPB compact / GPB KV data and this can be read with the 'cisco_v1' decoder; the
		'cisco_v0' decoder is mostly obsolete at this point. GPB de-marshalling is not supported
		and will produce an output JSON object with a base64'd encoding of the original GPB
		(see telemetry/README.telemetry for gRPC support and GPB de-marshalling).
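
		An illustrative setup for receiving JSON-encoded telemetry over UDP (the port value is an
		example only):

		    telemetry_daemon_port_udp: 1620
		    telemetry_daemon_decoder: json
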
DEFAULT:	none

KEY:            telemetry_daemon_max_peers [GLOBAL]
DESC:           Sets the maximum number of exporters the Streaming Telemetry daemon can receive data from.
		Upon reaching such limit, no more exporters can send data to the daemon.
DEFAULT:        100

KEY:		telemetry_daemon_peer_timeout [GLOBAL]
DESC:		Sets the timeout, in seconds, after which a Streaming Telemetry session is considered
		expired. Applies to UDP and ZeroMQ sessions.
DEFAULT:	300

KEY:		telemetry_daemon_allow_file [GLOBAL, MAP]
DESC:           Full pathname to a file containing the list of IPv4/IPv6 addresses/prefixes (one for
		each line) allowed to send packets to the daemon. The allow file is intended to be
		small for connectionless sessions; for longer ACLs, firewall rules should be preferred
		instead. Content can be reloaded at runtime by sending the daemon a SIGUSR2 signal (ie.
		"killall -USR2 nfacctd"). Sample map in examples/allow.lst.example .
DEFAULT:        none (ie. allow all)

KEY:		telemetry_daemon_pipe_size [GLOBAL]
DESC:           Defines the size of the kernel socket used for Streaming Telemetry datagrams (see also
		bgp_daemon_pipe_size for more info).
DEFAULT:        Operating System default

KEY:		telemetry_daemon_ipprec [GLOBAL]
DESC:           Marks self-originated Streaming Telemetry messages with the supplied IP precedence value.
		Applies to TCP sessions only.
DEFAULT:        0

KEY:		telemetry_daemon_zmq_address [GLOBAL]
DESC:           Defines the ZeroMQ queue address (host and port) to connect to for consuming
		JSON-encoded Streaming Telemetry data from. An example of the expected value is
		"127.0.0.1:50000".
DEFAULT:	none

KEY:		telemetry_daemon_kafka_broker_host [GLOBAL]
DESC:		Defines one or multiple, comma-separated, Kafka brokers to consume JSON-encoded
		Streaming Telemetry data from. See kafka_broker_host for more info.
DEFAULT:	none

KEY:		telemetry_daemon_kafka_broker_port [GLOBAL]
DESC:		Defines the Kafka broker port to consume JSON-encoded Streaming Telemetry data
		from. See kafka_broker_host for more info.
DEFAULT:	9092

KEY:            telemetry_daemon_kafka_config_file [GLOBAL]
DESC:           Full pathname to a file containing directives to configure librdkafka to consume
		JSON-encoded Streaming Telemetry data from. See kafka_config_file for more info.
DEFAULT:        none

KEY:            telemetry_daemon_kafka_topic [GLOBAL]
DESC:           Name of the Kafka topic to consume JSON-encoded Streaming Telemetry data from. No
		variables are supported for dynamic naming of the topic. See kafka_topic for more
		info.
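
		An illustrative setup for consuming telemetry from Kafka (broker and topic names are
		hypothetical):

		    telemetry_daemon_kafka_broker_host: broker.example.com
		    telemetry_daemon_kafka_broker_port: 9092
		    telemetry_daemon_kafka_topic: telemetry.raw
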
DEFAULT:        none

KEY:            telemetry_daemon_msglog_output [GLOBAL]
VALUES:         [ json ]
DESC:           Defines output format for Streaming Telemetry data (pmtelemetryd). Only JSON format
		is currently supported and requires compiling against Jansson library (--enable-jansson
		when configuring for compiling).
DEFAULT:        json

KEY:            telemetry_dump_output [GLOBAL]
VALUES:         [ json ]
DESC:           Defines output format for the dump of Streaming Telemetry data (pmtelemetryd). Only
		JSON format is currently supported and requires compiling against Jansson library
		(--enable-jansson when configuring for compiling).
DEFAULT:        json

KEY:		classifier_num_roots [GLOBAL]
DESC:		Defines the number of buckets of the nDPI memory structure on which flows are hashed.
		The more buckets, the more memory will be allocated at startup and the smaller - and
		hence better performing - each memory structure will be.
DEFAULT:	512

KEY:		classifier_max_flows [GLOBAL]
DESC:		Maximum number of concurrent flows allowed in the nDPI memory structure.
DEFAULT:	200000000

KEY:		classifier_proto_guess [GLOBAL]
VALUES:         [ true | false ]
DESC:		If DPI classification is unsuccessful, and before giving up, try guessing the protocol 
		given collected flow characteristics, ie. IP protocol, port numbers, etc.
DEFAULT:	false

KEY:		classifier_idle_scan_period [GLOBAL]
DESC:		Defines the time interval, in seconds, at which the memory structure is scanned to
		find idle flows to expire.
DEFAULT:	10

KEY:		classifier_idle_scan_budget [GLOBAL]
DESC:		Defines the number of idle flows to expire per each classifier_idle_scan_period. This
		feature prevents the expiration of too many flows at once from disrupting regular
		classification activity.
DEFAULT:	1024

KEY:		classifier_giveup_proto_tcp [GLOBAL]
DESC:		Defines the maximum number of packets used to try to classify a TCP flow. After this
		many attempts, the flow is marked as given up and no further classification attempts
		are made until it expires.
DEFAULT:	10

KEY:		classifier_giveup_proto_udp [GLOBAL]
DESC:		Same as classifier_giveup_proto_tcp but for UDP flows.
DEFAULT:	8

KEY:		classifier_giveup_proto_other [GLOBAL]
DESC:		Same as classifier_giveup_proto_tcp but for flows whose IP protocol is neither TCP
		nor UDP.
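
		The classifier directives above can be combined; a hypothetical tuning example:

		    classifier_num_roots: 1024
		    classifier_proto_guess: true
		    classifier_idle_scan_period: 10
		    classifier_idle_scan_budget: 2048
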
DEFAULT:	8

KEY:		redis_host
DESC:		Defines the Redis server IP address and port to connect to, ie. "127.0.0.1:6379". 
		The port needs to be specified. This directive, in conjunction with the cluster_*
		ones, enables forming a cluster with the other daemons pointed to the same
		<redis_host, cluster_name>. It needs pmacct to be compiled with --enable-redis. 
DEFAULT:	none

KEY:		redis_db
DESC:		Defines the Redis database to select. The database is identified by an integer and,
		at the time of this writing, allowed values are in the range 0 to 15.
DEFAULT:	0

KEY:		cluster_name
DESC:		Defines the name of the cluster; it will become the prefix of every key stored in
		the Redis database. It enables forming a cluster with the other daemons pointed to
		the same <redis_host, cluster_name>. Cluster name is expected to be a string, ie.
		"test", "pmacct", etc. 
DEFAULT:	none

KEY:		cluster_id
DESC:		Defines the ID of the node inside the <redis_host, cluster_name> cluster. Each daemon
		must be assigned a unique ID and responsibility for respecting this property is left
		to the user. Cluster ID is expected to be a positive integer.
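
		A hypothetical example of one node's configuration within a cluster:

		    redis_host: 127.0.0.1:6379
		    redis_db: 0
		    cluster_name: pmacct
		    cluster_id: 1
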
DEFAULT:	0

KEY:		tmp_asa_bi_flow
VALUES:		[ true | false ]
DESC:		Bi-flows use two sets of counters, ie. bytes and packets, to report forward and
		reverse directions. This hack (ab)uses the packets field in order to store the extra
		bytes counter. The patch specifically targets NetFlow v9/IPFIX field types #231 and
		#232 and has been tested against a Cisco ASA export.
DEFAULT:	false

KEY:		tmp_bgp_lookup_compare_ports
VALUES:		[ true | false ]
DESC:		When looking up BGP RIBs in traffic accounting daemons (ie. nfacctd, sfacctd, etc.), 
		if set to true, try to compare both the socket IP address and the TCP port of a BGP
		session (that is, not only the socket IP address as when this knob is set to false).
		This is always the case when a bgp_agent_map is defined and the 'bgp_port' keyword
		is specified; when 'bgp_port' is not specified (or a bgp_agent_map is not defined),
		this knob essentially forces the comparison against only the BGP Router-ID. This may
		be wanted in NAT traversal scenarios and/or BGP xconnects (bgp_daemon_xconnect_map).
DEFAULT:	false

KEY:		tmp_bgp_daemon_route_refresh
VALUES:		[ true | false ]
DESC:		If set to true, a Route Refresh capability is presented in the BGP OPEN message to the
		peers (if, indeed, it was originally set by the peer). When a Route Refresh message is
		received, it is simply ignored. This is not intended to be a feature but a hack to
		counter certain vendor bugs.
DEFAULT:	false
