File: rfc9332.html

package info (click to toggle)
doc-rfc 20230121-1
  • links: PTS, VCS
  • area: non-free
  • in suites: bookworm, forky, sid, trixie
  • size: 1,609,944 kB
file content (4587 lines) | stat: -rw-r--r-- 286,844 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380
3381
3382
3383
3384
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
3446
3447
3448
3449
3450
3451
3452
3453
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
3532
3533
3534
3535
3536
3537
3538
3539
3540
3541
3542
3543
3544
3545
3546
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
3651
3652
3653
3654
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729
3730
3731
3732
3733
3734
3735
3736
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
3756
3757
3758
3759
3760
3761
3762
3763
3764
3765
3766
3767
3768
3769
3770
3771
3772
3773
3774
3775
3776
3777
3778
3779
3780
3781
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797
3798
3799
3800
3801
3802
3803
3804
3805
3806
3807
3808
3809
3810
3811
3812
3813
3814
3815
3816
3817
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832
3833
3834
3835
3836
3837
3838
3839
3840
3841
3842
3843
3844
3845
3846
3847
3848
3849
3850
3851
3852
3853
3854
3855
3856
3857
3858
3859
3860
3861
3862
3863
3864
3865
3866
3867
3868
3869
3870
3871
3872
3873
3874
3875
3876
3877
3878
3879
3880
3881
3882
3883
3884
3885
3886
3887
3888
3889
3890
3891
3892
3893
3894
3895
3896
3897
3898
3899
3900
3901
3902
3903
3904
3905
3906
3907
3908
3909
3910
3911
3912
3913
3914
3915
3916
3917
3918
3919
3920
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941
3942
3943
3944
3945
3946
3947
3948
3949
3950
3951
3952
3953
3954
3955
3956
3957
3958
3959
3960
3961
3962
3963
3964
3965
3966
3967
3968
3969
3970
3971
3972
3973
3974
3975
3976
3977
3978
3979
3980
3981
3982
3983
3984
3985
3986
3987
3988
3989
3990
3991
3992
3993
3994
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004
4005
4006
4007
4008
4009
4010
4011
4012
4013
4014
4015
4016
4017
4018
4019
4020
4021
4022
4023
4024
4025
4026
4027
4028
4029
4030
4031
4032
4033
4034
4035
4036
4037
4038
4039
4040
4041
4042
4043
4044
4045
4046
4047
4048
4049
4050
4051
4052
4053
4054
4055
4056
4057
4058
4059
4060
4061
4062
4063
4064
4065
4066
4067
4068
4069
4070
4071
4072
4073
4074
4075
4076
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086
4087
4088
4089
4090
4091
4092
4093
4094
4095
4096
4097
4098
4099
4100
4101
4102
4103
4104
4105
4106
4107
4108
4109
4110
4111
4112
4113
4114
4115
4116
4117
4118
4119
4120
4121
4122
4123
4124
4125
4126
4127
4128
4129
4130
4131
4132
4133
4134
4135
4136
4137
4138
4139
4140
4141
4142
4143
4144
4145
4146
4147
4148
4149
4150
4151
4152
4153
4154
4155
4156
4157
4158
4159
4160
4161
4162
4163
4164
4165
4166
4167
4168
4169
4170
4171
4172
4173
4174
4175
4176
4177
4178
4179
4180
4181
4182
4183
4184
4185
4186
4187
4188
4189
4190
4191
4192
4193
4194
4195
4196
4197
4198
4199
4200
4201
4202
4203
4204
4205
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
4219
4220
4221
4222
4223
4224
4225
4226
4227
4228
4229
4230
4231
4232
4233
4234
4235
4236
4237
4238
4239
4240
4241
4242
4243
4244
4245
4246
4247
4248
4249
4250
4251
4252
4253
4254
4255
4256
4257
4258
4259
4260
4261
4262
4263
4264
4265
4266
4267
4268
4269
4270
4271
4272
4273
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297
4298
4299
4300
4301
4302
4303
4304
4305
4306
4307
4308
4309
4310
4311
4312
4313
4314
4315
4316
4317
4318
4319
4320
4321
4322
4323
4324
4325
4326
4327
4328
4329
4330
4331
4332
4333
4334
4335
4336
4337
4338
4339
4340
4341
4342
4343
4344
4345
4346
4347
4348
4349
4350
4351
4352
4353
4354
4355
4356
4357
4358
4359
4360
4361
4362
4363
4364
4365
4366
4367
4368
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383
4384
4385
4386
4387
4388
4389
4390
4391
4392
4393
4394
4395
4396
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415
4416
4417
4418
4419
4420
4421
4422
4423
4424
4425
4426
4427
4428
4429
4430
4431
4432
4433
4434
4435
4436
4437
4438
4439
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450
4451
4452
4453
4454
4455
4456
4457
4458
4459
4460
4461
4462
4463
4464
4465
4466
4467
4468
4469
4470
4471
4472
4473
4474
4475
4476
4477
4478
4479
4480
4481
4482
4483
4484
4485
4486
4487
4488
4489
4490
4491
4492
4493
4494
4495
4496
4497
4498
4499
4500
4501
4502
4503
4504
4505
4506
4507
4508
4509
4510
4511
4512
4513
4514
4515
4516
4517
4518
4519
4520
4521
4522
4523
4524
4525
4526
4527
4528
4529
4530
4531
4532
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545
4546
4547
4548
4549
4550
4551
4552
4553
4554
4555
4556
4557
4558
4559
4560
4561
4562
4563
4564
4565
4566
4567
4568
4569
4570
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
4581
4582
4583
4584
4585
4586
4587
<!DOCTYPE html>
<html lang="en" class="RFC">
<head>
<meta charset="utf-8">
<meta content="Common,Latin" name="scripts">
<meta content="initial-scale=1.0" name="viewport">
<title>RFC 9332: Dual-Queue Coupled Active Queue Management (AQM) for Low Latency, Low Loss, and Scalable Throughput (L4S)</title>
<meta content="Koen De Schepper" name="author">
<meta content="Bob Briscoe" name="author">
<meta content="Greg White" name="author">
<meta content="
       This specification defines a framework for coupling the Active Queue
      Management (AQM) algorithms in two queues intended for flows with
      different responses to congestion. This provides a way for the Internet
      to transition from the scaling problems of standard TCP-Reno-friendly
      ('Classic') congestion controls to the family of 'Scalable' congestion
      controls. These are designed for consistently very low queuing latency,
      very low congestion loss, and scaling of per-flow throughput by
      using Explicit Congestion Notification (ECN) in a modified way. Until
      the Coupled Dual Queue (DualQ), these Scalable L4S congestion controls could only be
      deployed where a clean-slate environment could be arranged, such as in
      private data centres. 
       This specification first explains how a Coupled DualQ works. It then
      gives the normative requirements that are necessary for it to work well.
      All this is independent of which two AQMs are used, but pseudocode
      examples of specific AQMs are given in appendices. 
    " name="description">
<meta content="xml2rfc 3.15.3" name="generator">
<meta content="Performance" name="keyword">
<meta content="Queuing Delay" name="keyword">
<meta content="One Way Delay" name="keyword">
<meta content="Round-Trip Time" name="keyword">
<meta content="RTT" name="keyword">
<meta content="Jitter" name="keyword">
<meta content="Congestion Control" name="keyword">
<meta content="Congestion Avoidance" name="keyword">
<meta content="Quality of Service" name="keyword">
<meta content="QoS" name="keyword">
<meta content="Quality of Experience" name="keyword">
<meta content="QoE" name="keyword">
<meta content="Active Queue Management" name="keyword">
<meta content="AQM" name="keyword">
<meta content="Explicit Congestion Notification" name="keyword">
<meta content="ECN" name="keyword">
<meta content="Pacing" name="keyword">
<meta content="Burstiness" name="keyword">
<meta content="9332" name="rfc.number">
<!-- Generator version information:
  xml2rfc 3.15.3
    Python 3.9.15
    appdirs 1.4.4
    ConfigArgParse 1.5.3
    google-i18n-address 2.5.1
    html5lib 1.1
    intervaltree 3.1.0
    Jinja2 3.1.2
    lxml 4.9.0
    MarkupSafe 2.1.1
    pycountry 22.3.5
    PyYAML 6.0
    requests 2.28.0
    setuptools 44.1.1
    six 1.16.0
    wcwidth 0.2.5
    weasyprint 56.1
-->
<link href="rfc9332.xml" rel="alternate" type="application/rfc+xml">
<link href="#copyright" rel="license">
<style type="text/css">/*

  NOTE: Changes at the bottom of this file override some earlier settings.

  Once the style has stabilized and has been adopted as an official RFC style,
  this can be consolidated so that style settings occur only in one place, but
  for now the contents of this file consists first of the initial CSS work as
  provided to the RFC Formatter (xml2rfc) work, followed by itemized and
  commented changes found necessary during the development of the v3
  formatters.

*/

/* fonts */
@import url('https://fonts.googleapis.com/css?family=Noto+Sans'); /* Sans-serif */
@import url('https://fonts.googleapis.com/css?family=Noto+Serif'); /* Serif (print) */
@import url('https://fonts.googleapis.com/css?family=Roboto+Mono'); /* Monospace */

:root {
  --font-sans: 'Noto Sans', Arial, Helvetica, sans-serif;
  --font-serif: 'Noto Serif', 'Times', 'Times New Roman', serif;
  --font-mono: 'Roboto Mono', Courier, 'Courier New', monospace;
}

@viewport {
  zoom: 1.0;
  width: extend-to-zoom;
}
@-ms-viewport {
  width: extend-to-zoom;
  zoom: 1.0;
}
/* general and mobile first */
html {
}
body {
  max-width: 90%;
  margin: 1.5em auto;
  color: #222;
  background-color: #fff;
  font-size: 14px;
  font-family: var(--font-sans);
  line-height: 1.6;
  scroll-behavior: smooth;
}
.ears {
  display: none;
}

/* headings */
#title, h1, h2, h3, h4, h5, h6 {
  margin: 1em 0 0.5em;
  font-weight: bold;
  line-height: 1.3;
}
#title {
  clear: both;
  border-bottom: 1px solid #ddd;
  margin: 0 0 0.5em 0;
  padding: 1em 0 0.5em;
}
.author {
  padding-bottom: 4px;
}
h1 {
  font-size: 26px;
  margin: 1em 0;
}
h2 {
  font-size: 22px;
  margin-top: -20px;  /* provide offset for in-page anchors */
  padding-top: 33px;
}
h3 {
  font-size: 18px;
  margin-top: -36px;  /* provide offset for in-page anchors */
  padding-top: 42px;
}
h4 {
  font-size: 16px;
  margin-top: -36px;  /* provide offset for in-page anchors */
  padding-top: 42px;
}
h5, h6 {
  font-size: 14px;
}
#n-copyright-notice {
  border-bottom: 1px solid #ddd;
  padding-bottom: 1em;
  margin-bottom: 1em;
}
/* general structure */
p {
  padding: 0;
  margin: 0 0 1em 0;
  text-align: left;
}
div, span {
  position: relative;
}
div {
  margin: 0;
}
.alignRight.art-text {
  background-color: #f9f9f9;
  border: 1px solid #eee;
  border-radius: 3px;
  padding: 1em 1em 0;
  margin-bottom: 1.5em;
}
.alignRight.art-text pre {
  padding: 0;
}
.alignRight {
  margin: 1em 0;
}
.alignRight > *:first-child {
  border: none;
  margin: 0;
  float: right;
  clear: both;
}
.alignRight > *:nth-child(2) {
  clear: both;
  display: block;
  border: none;
}
svg {
  display: block;
}
svg[font-family~="serif" i], svg [font-family~="serif" i] {
  font-family: var(--font-serif);
}
svg[font-family~="sans-serif" i], svg [font-family~="sans-serif" i] {
  font-family: var(--font-sans);
}
svg[font-family~="monospace" i], svg [font-family~="monospace" i] {
  font-family: var(--font-mono);
}
.alignCenter.art-text {
  background-color: #f9f9f9;
  border: 1px solid #eee;
  border-radius: 3px;
  padding: 1em 1em 0;
  margin-bottom: 1.5em;
}
.alignCenter.art-text pre {
  padding: 0;
}
.alignCenter {
  margin: 1em 0;
}
.alignCenter > *:first-child {
  display: table;
  border: none;
  margin: 0 auto;
}

/* lists */
ol, ul {
  padding: 0;
  margin: 0 0 1em 2em;
}
ol ol, ul ul, ol ul, ul ol {
  margin-left: 1em;
}
li {
  margin: 0 0 0.25em 0;
}
.ulCompact li {
  margin: 0;
}
ul.empty, .ulEmpty {
  list-style-type: none;
}
ul.empty li, .ulEmpty li {
  margin-top: 0.5em;
}
ul.ulBare, li.ulBare {
  margin-left: 0em !important;
}
ul.compact, .ulCompact,
ol.compact, .olCompact {
  line-height: 100%;
  margin: 0 0 0 2em;
}

/* definition lists */
dl {
}
dl > dt {
  float: left;
  margin-right: 1em;
}
/* 
dl.nohang > dt {
  float: none;
}
*/
dl > dd {
  margin-bottom: .8em;
  min-height: 1.3em;
}
dl.compact > dd, .dlCompact > dd {
  margin-bottom: 0em;
}
dl > dd > dl {
  margin-top: 0.5em;
  margin-bottom: 0em;
}

/* links */
a {
  text-decoration: none;
}
a[href] {
  color: #22e; /* Arlen: WCAG 2019 */
}
a[href]:hover {
  background-color: #f2f2f2;
}
figcaption a[href],
a[href].selfRef {
  color: #222;
}
/* XXX probably not this:
a.selfRef:hover {
  background-color: transparent;
  cursor: default;
} */

/* Figures */
tt, code, pre {
  background-color: #f9f9f9;
  font-family: var(--font-mono);
}
pre {
  border: 1px solid #eee;
  margin: 0;
  padding: 1em;
}
img {
  max-width: 100%;
}
figure {
  margin: 0;
}
figure blockquote {
  margin: 0.8em 0.4em 0.4em;
}
figcaption {
  font-style: italic;
  margin: 0 0 1em 0;
}
@media screen {
  pre {
    overflow-x: auto;
    max-width: 100%;
    max-width: calc(100% - 22px);
  }
}

/* aside, blockquote */
aside, blockquote {
  margin-left: 0;
  padding: 1.2em 2em;
}
blockquote {
  background-color: #f9f9f9;
  color: #111; /* Arlen: WCAG 2019 */
  border: 1px solid #ddd;
  border-radius: 3px;
  margin: 1em 0;
}
cite {
  display: block;
  text-align: right;
  font-style: italic;
}

/* tables */
table {
  width: 100%;
  margin: 0 0 1em;
  border-collapse: collapse;
  border: 1px solid #eee;
}
th, td {
  text-align: left;
  vertical-align: top;
  padding: 0.5em 0.75em;
}
th {
  text-align: left;
  background-color: #e9e9e9;
}
tr:nth-child(2n+1) > td {
  background-color: #f5f5f5;
}
table caption {
  font-style: italic;
  margin: 0;
  padding: 0;
  text-align: left;
}
table p {
  /* XXX to avoid bottom margin on table row signifiers. If paragraphs should
     be allowed within tables more generally, it would be far better to select on a class. */
  margin: 0;
}

/* pilcrow */
a.pilcrow {
  color: #666; /* Arlen: AHDJ 2019 */
  text-decoration: none;
  visibility: hidden;
  user-select: none;
  -ms-user-select: none;
  -o-user-select:none;
  -moz-user-select: none;
  -khtml-user-select: none;
  -webkit-user-select: none;
  -webkit-touch-callout: none;
}
@media screen {
  aside:hover > a.pilcrow,
  p:hover > a.pilcrow,
  blockquote:hover > a.pilcrow,
  div:hover > a.pilcrow,
  li:hover > a.pilcrow,
  pre:hover > a.pilcrow {
    visibility: visible;
  }
  a.pilcrow:hover {
    background-color: transparent;
  }
}

/* misc */
hr {
  border: 0;
  border-top: 1px solid #eee;
}
.bcp14 {
  font-variant: small-caps;
}

.role {
  font-variant: all-small-caps;
}

/* info block */
#identifiers {
  margin: 0;
  font-size: 0.9em;
}
#identifiers dt {
  width: 3em;
  clear: left;
}
#identifiers dd {
  float: left;
  margin-bottom: 0;
}
/* Fix PDF info block run off issue */
@media print {
  #identifiers dd {
    float: none;
  }
}
#identifiers .authors .author {
  display: inline-block;
  margin-right: 1.5em;
}
#identifiers .authors .org {
  font-style: italic;
}

/* The prepared/rendered info at the very bottom of the page */
.docInfo {
  color: #666; /* Arlen: WCAG 2019 */
  font-size: 0.9em;
  font-style: italic;
  margin-top: 2em;
}
.docInfo .prepared {
  float: left;
}
/* Fix: this selector was duplicated as ".prepared" (floating it right and
   overriding the rule above); per the section comment it targets ".rendered". */
.docInfo .rendered {
  float: right;
}

/* table of contents */
#toc  {
  padding: 0.75em 0 2em 0;
  margin-bottom: 1em;
}
/* Bare, unbulleted ToC list ... */
nav.toc ul {
  margin: 0 0.5em 0 0;
  padding: 0;
  list-style: none;
}
/* ... with hanging indents for entries that wrap onto a second line
   (negative text-indent pulls the first line back to the left edge). */
nav.toc li {
  line-height: 1.3em;
  margin: 0.75em 0;
  padding-left: 1.2em;
  text-indent: -1.2em;
}
/* references */
/* Citation anchors ([RFC...] labels) right-aligned in a fixed-width dt
   column, with the reference text in a dd beside it.  NOTE: the
   "overflow: auto" here is overridden to "visible" further down, to avoid
   very wide left margins caused by long floating dt labels. */
.references dt {
  text-align: right;
  font-weight: bold;
  min-width: 7em;
}
.references dd {
  margin-left: 8em;
  overflow: auto;
}

.refInstance {
  margin-bottom: 1.25em;
}

/* Extra spacing after the ASCII rendering of a non-ASCII reference entry. */
.references .ascii {
  margin-bottom: 0.25em;
}

/* index */
/* Bare nested lists for the document index, with hanging indents for
   wrapped entries. */
.index ul {
  margin: 0 0 0 1em;
  padding: 0;
  list-style: none;
}
.index ul ul {
  margin: 0;
}
.index li {
  margin: 0;
  text-indent: -2em;
  padding-left: 2em;
  padding-bottom: 5px;
}
.indexIndex {
  margin: 0.5em 0 1em;
}
.index a {
  font-weight: 700;
}
/* make the index two-column on all but the smallest screens */
/* The original declared only the -moz- prefixed properties, which limited
   the multi-column layout to Firefox; the standard column-count/column-gap
   are added so other browsers get the same two-column rendering. */
@media (min-width: 600px) {
  .index ul {
    -moz-column-count: 2;
    -moz-column-gap: 20px;
    column-count: 2;
    column-gap: 20px;
  }
  .index ul ul {
    -moz-column-count: 1;
    -moz-column-gap: 0;
    column-count: 1;
    column-gap: 0;
  }
}

/* authors */
address.vcard {
  font-style: normal;
  margin: 1em 0;
}

address.vcard .nameRole {
  font-weight: 700;
  margin-left: 0;
}
address.vcard .label {
  font-family: var(--font-sans);
  margin: 0.5em 0;
}
address.vcard .type {
  display: none;
}
.alternative-contact {
  margin: 1.5em 0 1em;
}
hr.addr {
  border-top: 1px dashed;
  margin: 0;
  color: #ddd;
  max-width: calc(100% - 16px);
}

/* temporary notes */
/* Banner injected (via ::before) into blocks the RFC Editor removes before
   publication; positioned in the top-right corner of the note box. */
.rfcEditorRemove::before {
  position: absolute;
  top: 0.2em;
  right: 0.2em;
  padding: 0.2em;
  content: "The RFC Editor will remove this note";
  color: #9e2a00; /* Arlen: WCAG 2019 */
  background-color: #ffd; /* Arlen: WCAG 2019 */
}
/* The note block itself: relative so the ::before banner positions against
   it, with top padding reserving room for the banner text. */
.rfcEditorRemove {
  position: relative;
  padding-top: 1.8em;
  background-color: #ffd; /* Arlen: WCAG 2019 */
  border-radius: 3px;
}
/* Inline editorial comments (<cref>) get the same pale-yellow highlight. */
.cref {
  background-color: #ffd; /* Arlen: WCAG 2019 */
  padding: 2px 4px;
}
.crefSource {
  font-style: italic;
}
/* alternative layout for smaller screens */
/* On narrow screens the ToC becomes a fixed, collapsible panel pinned to
   the top-right, with a CSS-drawn hamburger icon in its header.  The nav
   itself is hidden here and toggled via the #toc.active rule further down. */
@media screen and (max-width: 1023px) {
  body {
    padding-top: 2em;
  }
  #title {
    padding: 1em 0;
  }
  h1 {
    font-size: 24px;
  }
  h2 {
    font-size: 20px;
    margin-top: -18px;  /* provide offset for in-page anchors */
    padding-top: 38px;
  }
  #identifiers dd {
    max-width: 60%;
  }
  #toc {
    position: fixed;
    z-index: 2;
    top: 0;
    right: 0;
    padding: 0;
    margin: 0;
    background-color: inherit;
    border-bottom: 1px solid #ccc;
  }
  /* Dark clickable header bar that toggles the ToC open/closed. */
  #toc h2 {
    margin: -1px 0 0 0;
    padding: 4px 0 4px 6px;
    padding-right: 1em;
    min-width: 190px;
    font-size: 1.1em;
    text-align: right;
    background-color: #444;
    color: white;
    cursor: pointer;
  }
  #toc h2::before { /* css hamburger */
    /* Three white bars: a 1px-high element plus two box-shadow copies
       offset 4px and 8px below it. */
    float: right;
    position: relative;
    width: 1em;
    height: 1px;
    left: -164px;
    margin: 6px 0 0 0;
    background: white none repeat scroll 0 0;
    box-shadow: 0 4px 0 0 white, 0 8px 0 0 white;
    content: "";
  }
  #toc nav {
    display: none;
    padding: 0.5em 1em 1em;
    overflow: auto;
    height: calc(100vh - 48px);
    border-left: 1px solid #ddd;
  }
}

/* alternative layout for wide screens */
/* alternative layout for wide screens */
/* Wide screens: the body column is centered with a large right padding,
   and the ToC sits in that reserved space as a fixed, always-visible,
   independently scrolling sidebar. */
@media screen and (min-width: 1024px) {
  body {
    max-width: 724px;
    margin: 42px auto;
    padding-left: 1.5em;
    padding-right: 29em;
  }
  #toc {
    position: fixed;
    top: 42px;
    right: 42px;
    width: 25%;
    margin: 0;
    padding: 0 1em;
    z-index: 1;
  }
  #toc h2 {
    border-top: none;
    border-bottom: 1px solid #ddd;
    font-size: 1em;
    font-weight: normal;
    margin: 0;
    padding: 0.25em 1em 1em 0;
  }
  #toc nav {
    display: block;
    height: calc(90vh - 84px);
    bottom: 0;
    padding: 0.5em 0 0;
    overflow: auto;
  }
  img { /* future proofing */
    max-width: 100%;
    height: auto;
  }
}

/* pagination */
/* pagination */
/* Print rules: control page breaks around headings, figures, and tables,
   and keep orphan/widow lines together in paragraphs. */
@media print {
  body {

    width: 100%;
  }
  p {
    orphans: 3;
    widows: 3;
  }
  #n-copyright-notice {
    border-bottom: none;
  }
  /* Start the ToC and the Introduction on fresh pages. */
  #toc, #n-introduction {
    page-break-before: always;
  }
  #toc {
    border-top: none;
    padding-top: 0;
  }
  figure, pre {
    page-break-inside: avoid;
  }
  figure {
    overflow: scroll;
  }
  /* Explicitly marked breakable artwork may split across pages. */
  .breakable pre {
    break-inside: auto;
  }
  /* Keep headings attached to the content that follows them. */
  h1, h2, h3, h4, h5, h6 {
    page-break-after: avoid;
  }
  h2+*, h3+*, h4+*, h5+*, h6+* {
    page-break-before: avoid;
  }
  /* Wrap long artwork lines rather than clipping them off the page. */
  pre {
    white-space: pre-wrap;
    word-wrap: break-word;
    font-size: 10pt;
  }
  table {
    border: 1px solid #ddd;
  }
  td {
    border-top: 1px solid #ddd;
  }
}

/* This is commented out here, as the string-set: doesn't
   pass W3C validation currently */
/*
.ears thead .left {
  string-set: ears-top-left content();
}

.ears thead .center {
  string-set: ears-top-center content();
}

.ears thead .right {
  string-set: ears-top-right content();
}

.ears tfoot .left {
  string-set: ears-bottom-left content();
}

.ears tfoot .center {
  string-set: ears-bottom-center content();
}

.ears tfoot .right {
  string-set: ears-bottom-right content();
}
*/

@page :first {
  padding-top: 0;
  @top-left {
    content: normal;
    border: none;
  }
  @top-center {
    content: normal;
    border: none;
  }
  @top-right {
    content: normal;
    border: none;
  }
}

@page {
  size: A4;
  margin-bottom: 45mm;
  padding-top: 20px;
  /* The following is commented out here, but set appropriately by in code, as
     the content depends on the document */
  /*
  @top-left {
    content: 'Internet-Draft';
    vertical-align: bottom;
    border-bottom: solid 1px #ccc;
  }
  @top-left {
    content: string(ears-top-left);
    vertical-align: bottom;
    border-bottom: solid 1px #ccc;
  }
  @top-center {
    content: string(ears-top-center);
    vertical-align: bottom;
    border-bottom: solid 1px #ccc;
  }
  @top-right {
    content: string(ears-top-right);
    vertical-align: bottom;
    border-bottom: solid 1px #ccc;
  }
  @bottom-left {
    content: string(ears-bottom-left);
    vertical-align: top;
    border-top: solid 1px #ccc;
  }
  @bottom-center {
    content: string(ears-bottom-center);
    vertical-align: top;
    border-top: solid 1px #ccc;
  }
  @bottom-right {
      content: '[Page ' counter(page) ']';
      vertical-align: top;
      border-top: solid 1px #ccc;
  }
  */

}

/* Changes introduced to fix issues found during implementation */
/* Make sure links are clickable even if overlapped by following H* */
a {
  z-index: 2;
}
/* Separate body from document info even without intervening H1 */
section {
  clear: both;
}


/* Top align author divs, to avoid names without organization dropping level with org names */
.author {
  vertical-align: top;
}

/* Leave room in document info to show Internet-Draft on one line */
#identifiers dt {
  width: 8em;
}

/* Don't waste quite as much whitespace between label and value in doc info */
#identifiers dd {
  margin-left: 1em;
}

/* Give floating toc a background color (needed when it's a div inside section */
#toc {
  background-color: white;
}

/* Make the collapsed ToC header render white on gray also when it's a link */
@media screen and (max-width: 1023px) {
  #toc h2 a,
  #toc h2 a:link,
  #toc h2 a:focus,
  #toc h2 a:hover,
  #toc a.toplink,
  #toc a.toplink:hover {
    color: white;
    background-color: #444;
    text-decoration: none;
  }
}

/* Give the bottom of the ToC some whitespace */
@media screen and (min-width: 1024px) {
  #toc {
    padding: 0 0 1em 1em;
  }
}

/* Style section numbers with more space between number and title */
.section-number {
  padding-right: 0.5em;
}

/* prevent monospace from becoming overly large */
tt, code, pre {
  font-size: 95%;
}

/* Fix the height/width aspect for ascii art*/
.sourcecode pre,
.art-text pre {
  line-height: 1.12;
}


/* Add styling for a link in the ToC that points to the top of the document */
a.toplink {
  float: right;
  margin-right: 0.5em;
}

/* Fix the dl styling to match the RFC 7992 attributes */
dl > dt,
dl.dlParallel > dt {
  float: left;
  margin-right: 1em;
}
dl.dlNewline > dt {
  float: none;
}

/* Provide styling for table cell text alignment */
table td.text-left,
table th.text-left {
  text-align: left;
}
table td.text-center,
table th.text-center {
  text-align: center;
}
table td.text-right,
table th.text-right {
  text-align: right;
}

/* Make the alternative author contact information look less like just another
   author, and group it closer with the primary author contact information */
.alternative-contact {
  margin: 0.5em 0 0.25em 0;
}
address .non-ascii {
  margin: 0 0 0 2em;
}

/* With it being possible to set tables with alignment
  left, center, and right, { width: 100%; } does not make sense */
table {
  width: auto;
}

/* Avoid reference text that sits in a block with very wide left margin,
   because of a long floating dt label.*/
.references dd {
  overflow: visible;
}

/* Control caption placement */
caption {
  caption-side: bottom;
}

/* Limit the width of the author address vcard, so names in right-to-left
   script don't end up on the other side of the page. */

address.vcard {
  max-width: 30em;
  margin-right: auto;
}

/* For address alignment dependent on LTR or RTL scripts */
address div.left {
  text-align: left;
}
address div.right {
  text-align: right;
}

/* Provide table alignment support.  We can't use the alignX classes above
   since they do unwanted things with caption and other styling. */
table.right {
 margin-left: auto;
 margin-right: 0;
}
table.center {
 margin-left: auto;
 margin-right: auto;
}
table.left {
 margin-left: 0;
 margin-right: auto;
}

/* Give the table caption label the same styling as the figcaption */
caption a[href] {
  color: #222;
}

@media print {
  .toplink {
    display: none;
  }

  /* avoid overwriting the top border line with the ToC header */
  #toc {
    padding-top: 1px;
  }

  /* Avoid page breaks inside dl and author address entries */
  .vcard {
    page-break-inside: avoid;
  }

}
/* Tweak the bcp14 keyword presentation */
.bcp14 {
  font-variant: small-caps;
  font-weight: bold;
  font-size: 0.9em;
}
/* Tweak the invisible space above H* in order not to overlay links in text above */
 h2 {
  margin-top: -18px;  /* provide offset for in-page anchors */
  padding-top: 31px;
 }
 h3 {
  margin-top: -18px;  /* provide offset for in-page anchors */
  padding-top: 24px;
 }
 h4 {
  margin-top: -18px;  /* provide offset for in-page anchors */
  padding-top: 24px;
 }
/* Float artwork pilcrow to the right */
@media screen {
  .artwork a.pilcrow {
    display: block;
    line-height: 0.7;
    margin-top: 0.15em;
  }
}
/* Make pilcrows on dd visible */
@media screen {
  dd:hover > a.pilcrow {
    visibility: visible;
  }
}
/* Make the placement of figcaption match that of a table's caption
   by removing the figure's added bottom margin */
.alignLeft.art-text,
.alignCenter.art-text,
.alignRight.art-text {
   margin-bottom: 0;
}
.alignLeft,
.alignCenter,
.alignRight {
  margin: 1em 0 0 0;
}
/* In print, the pilcrow won't show on hover, so prevent it from taking up space,
   possibly even requiring a new line */
@media print {
  a.pilcrow {
    display: none;
  }
}
/* Styling for the external metadata */
div#external-metadata {
  background-color: #eee;
  padding: 0.5em;
  margin-bottom: 0.5em;
  display: none;
}
div#internal-metadata {
  padding: 0.5em;                       /* to match the external-metadata padding */
}
/* Styling for title RFC Number */
h1#rfcnum {
  clear: both;
  margin: 0 0 -1em;
  padding: 1em 0 0 0;
}
/* Make .olPercent look the same as <ol><li> */
dl.olPercent > dd {
  margin-bottom: 0.25em;
  min-height: initial;
}
/* Give aside some styling to set it apart */
aside {
  border-left: 1px solid #ddd;
  margin: 1em 0 1em 2em;
  padding: 0.2em 2em;
}
aside > dl,
aside > ol,
aside > ul,
aside > table,
aside > p {
  margin-bottom: 0.5em;
}
/* Additional page break settings */
@media print {
  figcaption, table caption {
    page-break-before: avoid;
  }
}
/* Font size adjustments for print */
@media print {
  body  { font-size: 10pt;      line-height: normal; max-width: 96%; }
  h1    { font-size: 1.72em;    padding-top: 1.5em; } /* 1*1.2*1.2*1.2 */
  h2    { font-size: 1.44em;    padding-top: 1.5em; } /* 1*1.2*1.2 */
  h3    { font-size: 1.2em;     padding-top: 1.5em; } /* 1*1.2 */
  h4    { font-size: 1em;       padding-top: 1.5em; }
  h5, h6 { font-size: 1em;      margin: initial; padding: 0.5em 0 0.3em; }
}
/* Sourcecode margin in print, when there's no pilcrow */
@media print {
  .artwork,
  .artwork > pre,
  .sourcecode {
    margin-bottom: 1em;
  }
}
/* Avoid narrow tables forcing too narrow table captions, which may render badly */
table {
  min-width: 20em;
}
/* ol type a */
/* Map xml2rfc ordered-list "type" classes to the corresponding CSS list
   markers.  ol.type-I previously repeated lower-roman (copy-paste bug);
   uppercase I denotes upper-roman numbering. */
ol.type-a { list-style-type: lower-alpha; }
ol.type-A { list-style-type: upper-alpha; }
ol.type-i { list-style-type: lower-roman; }
ol.type-I { list-style-type: upper-roman; }
/* Apply the print table and row borders in general, on request from the RPC,
and increase the contrast between border and odd row background slightly */
table {
  border: 1px solid #ddd;
}
td {
  border-top: 1px solid #ddd;
}
tr {
  break-inside: avoid;
}
tr:nth-child(2n+1) > td {
  background-color: #f8f8f8;
}
/* Use style rules to govern display of the TOC. */
@media screen and (max-width: 1023px) {
  #toc nav { display: none; }
  #toc.active nav { display: block; }
}
/* Add support for keepWithNext */
/* Avoid a page break after elements marked keepWithNext.  (The original
   contained the same declaration twice; the redundant copy is removed.) */
.keepWithNext {
  break-after: avoid-page;
}
/* Add support for keepWithPrevious */
.keepWithPrevious {
  break-before: avoid-page;
}
/* Change the approach to avoiding breaks inside artwork etc. */
figure, pre, table, .artwork, .sourcecode  {
  break-before: auto;
  break-after: auto;
}
/* Avoid breaks between <dt> and <dd> */
dl {
  break-before: auto;
  break-inside: auto;
}
dt {
  break-before: auto;
  break-after: avoid-page;
}
dd {
  break-before: avoid-page;
  break-after: auto;
  orphans: 3;
  widows: 3
}
span.break, dd.break {
  margin-bottom: 0;
  min-height: 0;
  break-before: auto;
  break-inside: auto;
  break-after: auto;
}
/* Undo break-before ToC */
@media print {
  #toc {
    break-before: auto;
  }
}
/* Text in compact lists should not get extra bottom margin space,
   since that would makes the list not compact */
ul.compact p, .ulCompact p,
ol.compact p, .olCompact p {
 margin: 0;
}
/* But the list as a whole needs the extra space at the end */
section ul.compact,
section .ulCompact,
section ol.compact,
section .olCompact {
  margin-bottom: 1em;                    /* same as p not within ul.compact etc. */
}
/* The tt and code background above interferes with for instance table cell
   backgrounds.  Changed to something a bit more selective. */
tt, code {
  background-color: transparent;
}
p tt, p code, li tt, li code {
  background-color: #f8f8f8;
}
/* Tweak the pre margin -- 0px doesn't come out well */
pre {
   margin-top: 0.5px;
}
/* Tweak the compact list text */
ul.compact, .ulCompact,
ol.compact, .olCompact,
dl.compact, .dlCompact {
  line-height: normal;
}
/* Don't add top margin for nested lists */
li > ul, li > ol, li > dl,
dd > ul, dd > ol, dd > dl,
dl > dd > dl {
  margin-top: initial;
}
/* Elements that should not be rendered on the same line as a <dt> */
/* This should match the element list in writer.text.TextWriter.render_dl() */
dd > div.artwork:first-child,
dd > aside:first-child,
dd > figure:first-child,
dd > ol:first-child,
dd > div.sourcecode:first-child,
dd > table:first-child,
dd > ul:first-child {
  clear: left;
}
/* fix for weird browser behaviour when <dd/> is empty */
dt+dd:empty::before{
  content: "\00a0";
}
/* Make paragraph spacing inside <li> smaller than in body text, to fit better within the list */
li > p {
  margin-bottom: 0.5em
}
/* Don't let p margin spill out from inside list items */
li > p:last-of-type:only-child {
  margin-bottom: 0;
}
</style>
<link href="rfc-local.css" rel="stylesheet" type="text/css">
<link href="https://dx.doi.org/10.17487/rfc9332" rel="alternate">
  <link href="urn:issn:2070-1721" rel="alternate">
  <link href="https://datatracker.ietf.org/doc/draft-ietf-tsvwg-aqm-dualq-coupled-25" rel="prev">
  </head>
<body class="xml2rfc">
<script src="https://www.rfc-editor.org/js/metadata.min.js"></script>
<table class="ears">
<thead><tr>
<td class="left">RFC 9332</td>
<td class="center">DualQ Coupled AQMs</td>
<td class="right">January 2023</td>
</tr></thead>
<tfoot><tr>
<td class="left">De Schepper, et al.</td>
<td class="center">Experimental</td>
<td class="right">[Page]</td>
</tr></tfoot>
</table>
<div id="external-metadata" class="document-information"></div>
<div id="internal-metadata" class="document-information">
<dl id="identifiers">
<dt class="label-stream">Stream:</dt>
<dd class="stream">Internet Engineering Task Force (IETF)</dd>
<dt class="label-rfc">RFC:</dt>
<dd class="rfc"><a href="https://www.rfc-editor.org/rfc/rfc9332" class="eref">9332</a></dd>
<dt class="label-category">Category:</dt>
<dd class="category">Experimental</dd>
<dt class="label-published">Published:</dt>
<dd class="published">
<time datetime="2023-01" class="published">January 2023</time>
    </dd>
<dt class="label-issn">ISSN:</dt>
<dd class="issn">2070-1721</dd>
<dt class="label-authors">Authors:</dt>
<dd class="authors">
<div class="author">
      <div class="author-name">K. De Schepper</div>
<div class="org">Nokia Bell Labs</div>
</div>
<div class="author">
      <div class="author-name">B. Briscoe, <span class="editor">Ed.</span>
</div>
<div class="org">Independent</div>
</div>
<div class="author">
      <div class="author-name">G. White</div>
<div class="org">CableLabs</div>
</div>
</dd>
</dl>
</div>
<h1 id="rfcnum">RFC 9332</h1>
<h1 id="title">Dual-Queue Coupled Active Queue Management (AQM) for Low Latency, Low Loss, and Scalable Throughput (L4S)</h1>
<section id="section-abstract">
      <h2 id="abstract"><a href="#abstract" class="selfRef">Abstract</a></h2>
<p id="section-abstract-1">This specification defines a framework for coupling the Active Queue
      Management (AQM) algorithms in two queues intended for flows with
      different responses to congestion. This provides a way for the Internet
      to transition from the scaling problems of standard TCP-Reno-friendly
      ('Classic') congestion controls to the family of 'Scalable' congestion
      controls. These are designed for consistently very low queuing latency,
      very low congestion loss, and scaling of per-flow throughput by
      using Explicit Congestion Notification (ECN) in a modified way. Until
      the Coupled Dual Queue (DualQ), these Scalable L4S congestion controls could only be
      deployed where a clean-slate environment could be arranged, such as in
      private data centres.<a href="#section-abstract-1" class="pilcrow">¶</a></p>
<p id="section-abstract-2">This specification first explains how a Coupled DualQ works. It then
      gives the normative requirements that are necessary for it to work well.
      All this is independent of which two AQMs are used, but pseudocode
      examples of specific AQMs are given in appendices.<a href="#section-abstract-2" class="pilcrow">¶</a></p>
</section>
<div id="status-of-memo">
<section id="section-boilerplate.1">
        <h2 id="name-status-of-this-memo">
<a href="#name-status-of-this-memo" class="section-name selfRef">Status of This Memo</a>
        </h2>
<p id="section-boilerplate.1-1">
            This document is not an Internet Standards Track specification; it is
            published for examination, experimental implementation, and
            evaluation.<a href="#section-boilerplate.1-1" class="pilcrow">¶</a></p>
<p id="section-boilerplate.1-2">
            This document defines an Experimental Protocol for the Internet
            community.  This document is a product of the Internet Engineering
            Task Force (IETF).  It represents the consensus of the IETF community.
            It has received public review and has been approved for publication
            by the Internet Engineering Steering Group (IESG).  Not all documents
            approved by the IESG are candidates for any level of Internet
            Standard; see Section 2 of RFC 7841.<a href="#section-boilerplate.1-2" class="pilcrow">¶</a></p>
<p id="section-boilerplate.1-3">
            Information about the current status of this document, any
            errata, and how to provide feedback on it may be obtained at
            <span><a href="https://www.rfc-editor.org/info/rfc9332">https://www.rfc-editor.org/info/rfc9332</a></span>.<a href="#section-boilerplate.1-3" class="pilcrow">¶</a></p>
</section>
</div>
<div id="copyright">
<section id="section-boilerplate.2">
        <h2 id="name-copyright-notice">
<a href="#name-copyright-notice" class="section-name selfRef">Copyright Notice</a>
        </h2>
<p id="section-boilerplate.2-1">
            Copyright (c) 2023 IETF Trust and the persons identified as the
            document authors. All rights reserved.<a href="#section-boilerplate.2-1" class="pilcrow">¶</a></p>
<p id="section-boilerplate.2-2">
            This document is subject to BCP 78 and the IETF Trust's Legal
            Provisions Relating to IETF Documents
            (<span><a href="https://trustee.ietf.org/license-info">https://trustee.ietf.org/license-info</a></span>) in effect on the date of
            publication of this document. Please review these documents
            carefully, as they describe your rights and restrictions with
            respect to this document. Code Components extracted from this
            document must include Revised BSD License text as described in
            Section 4.e of the Trust Legal Provisions and are provided without
            warranty as described in the Revised BSD License.<a href="#section-boilerplate.2-2" class="pilcrow">¶</a></p>
</section>
</div>
<div id="toc">
<section id="section-toc.1">
        <a href="#" onclick="scroll(0,0)" class="toplink">▲</a><h2 id="name-table-of-contents">
<a href="#name-table-of-contents" class="section-name selfRef">Table of Contents</a>
        </h2>
<nav class="toc"><ul class="compact toc ulBare ulEmpty">
<li class="compact toc ulBare ulEmpty" id="section-toc.1-1.1">
            <p id="section-toc.1-1.1.1"><a href="#section-1" class="auto internal xref">1</a>.  <a href="#name-introduction" class="internal xref">Introduction</a></p>
<ul class="compact toc ulBare ulEmpty">
<li class="compact toc ulBare ulEmpty" id="section-toc.1-1.1.2.1">
                <p id="section-toc.1-1.1.2.1.1" class="keepWithNext"><a href="#section-1.1" class="auto internal xref">1.1</a>.  <a href="#name-outline-of-the-problem" class="internal xref">Outline of the Problem</a></p>
</li>
              <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.1.2.2">
                <p id="section-toc.1-1.1.2.2.1" class="keepWithNext"><a href="#section-1.2" class="auto internal xref">1.2</a>.  <a href="#name-context-scope-and-applicabi" class="internal xref">Context, Scope, and Applicability</a></p>
</li>
              <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.1.2.3">
                <p id="section-toc.1-1.1.2.3.1" class="keepWithNext"><a href="#section-1.3" class="auto internal xref">1.3</a>.  <a href="#name-terminology" class="internal xref">Terminology</a></p>
</li>
              <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.1.2.4">
                <p id="section-toc.1-1.1.2.4.1"><a href="#section-1.4" class="auto internal xref">1.4</a>.  <a href="#name-features" class="internal xref">Features</a></p>
</li>
            </ul>
</li>
          <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.2">
            <p id="section-toc.1-1.2.1"><a href="#section-2" class="auto internal xref">2</a>.  <a href="#name-dualq-coupled-aqm" class="internal xref">DualQ Coupled AQM</a></p>
<ul class="compact toc ulBare ulEmpty">
<li class="compact toc ulBare ulEmpty" id="section-toc.1-1.2.2.1">
                <p id="section-toc.1-1.2.2.1.1"><a href="#section-2.1" class="auto internal xref">2.1</a>.  <a href="#name-coupled-aqm" class="internal xref">Coupled AQM</a></p>
</li>
              <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.2.2.2">
                <p id="section-toc.1-1.2.2.2.1"><a href="#section-2.2" class="auto internal xref">2.2</a>.  <a href="#name-dual-queue" class="internal xref">Dual Queue</a></p>
</li>
              <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.2.2.3">
                <p id="section-toc.1-1.2.2.3.1"><a href="#section-2.3" class="auto internal xref">2.3</a>.  <a href="#name-traffic-classification" class="internal xref">Traffic Classification</a></p>
</li>
              <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.2.2.4">
                <p id="section-toc.1-1.2.2.4.1"><a href="#section-2.4" class="auto internal xref">2.4</a>.  <a href="#name-overall-dualq-coupled-aqm-s" class="internal xref">Overall DualQ Coupled AQM Structure</a></p>
</li>
              <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.2.2.5">
                <p id="section-toc.1-1.2.2.5.1"><a href="#section-2.5" class="auto internal xref">2.5</a>.  <a href="#name-normative-requirements-for-" class="internal xref">Normative Requirements for a DualQ Coupled AQM</a></p>
<ul class="compact toc ulBare ulEmpty">
<li class="compact toc ulBare ulEmpty" id="section-toc.1-1.2.2.5.2.1">
                    <p id="section-toc.1-1.2.2.5.2.1.1"><a href="#section-2.5.1" class="auto internal xref">2.5.1</a>.  <a href="#name-functional-requirements" class="internal xref">Functional Requirements</a></p>
<ul class="compact toc ulBare ulEmpty">
<li class="compact toc ulBare ulEmpty" id="section-toc.1-1.2.2.5.2.1.2.1">
                        <p id="section-toc.1-1.2.2.5.2.1.2.1.1"><a href="#section-2.5.1.1" class="auto internal xref">2.5.1.1</a>.  <a href="#name-requirements-in-unexpected-" class="internal xref">Requirements in Unexpected Cases</a></p>
</li>
                    </ul>
</li>
                  <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.2.2.5.2.2">
                    <p id="section-toc.1-1.2.2.5.2.2.1"><a href="#section-2.5.2" class="auto internal xref">2.5.2</a>.  <a href="#name-management-requirements" class="internal xref">Management Requirements</a></p>
<ul class="compact toc ulBare ulEmpty">
<li class="compact toc ulBare ulEmpty" id="section-toc.1-1.2.2.5.2.2.2.1">
                        <p id="section-toc.1-1.2.2.5.2.2.2.1.1"><a href="#section-2.5.2.1" class="auto internal xref">2.5.2.1</a>.  <a href="#name-configuration" class="internal xref">Configuration</a></p>
</li>
                      <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.2.2.5.2.2.2.2">
                        <p id="section-toc.1-1.2.2.5.2.2.2.2.1"><a href="#section-2.5.2.2" class="auto internal xref">2.5.2.2</a>.  <a href="#name-monitoring" class="internal xref">Monitoring</a></p>
</li>
                      <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.2.2.5.2.2.2.3">
                        <p id="section-toc.1-1.2.2.5.2.2.2.3.1"><a href="#section-2.5.2.3" class="auto internal xref">2.5.2.3</a>.  <a href="#name-anomaly-detection" class="internal xref">Anomaly Detection</a></p>
</li>
                      <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.2.2.5.2.2.2.4">
                        <p id="section-toc.1-1.2.2.5.2.2.2.4.1"><a href="#section-2.5.2.4" class="auto internal xref">2.5.2.4</a>.  <a href="#name-deployment-coexistence-and-" class="internal xref">Deployment, Coexistence, and Scaling</a></p>
</li>
                    </ul>
</li>
                </ul>
</li>
            </ul>
</li>
          <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.3">
            <p id="section-toc.1-1.3.1"><a href="#section-3" class="auto internal xref">3</a>.  <a href="#name-iana-considerations" class="internal xref">IANA Considerations</a></p>
</li>
          <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.4">
            <p id="section-toc.1-1.4.1"><a href="#section-4" class="auto internal xref">4</a>.  <a href="#name-security-considerations" class="internal xref">Security Considerations</a></p>
<ul class="compact toc ulBare ulEmpty">
<li class="compact toc ulBare ulEmpty" id="section-toc.1-1.4.2.1">
                <p id="section-toc.1-1.4.2.1.1"><a href="#section-4.1" class="auto internal xref">4.1</a>.  <a href="#name-low-delay-without-requiring" class="internal xref">Low Delay without Requiring Per-flow Processing</a></p>
</li>
              <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.4.2.2">
                <p id="section-toc.1-1.4.2.2.1"><a href="#section-4.2" class="auto internal xref">4.2</a>.  <a href="#name-handling-unresponsive-flows" class="internal xref">Handling Unresponsive Flows and Overload</a></p>
<ul class="compact toc ulBare ulEmpty">
<li class="compact toc ulBare ulEmpty" id="section-toc.1-1.4.2.2.2.1">
                    <p id="section-toc.1-1.4.2.2.2.1.1"><a href="#section-4.2.1" class="auto internal xref">4.2.1</a>.  <a href="#name-unresponsive-traffic-withou" class="internal xref">Unresponsive Traffic without Overload</a></p>
</li>
                  <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.4.2.2.2.2">
                    <p id="section-toc.1-1.4.2.2.2.2.1"><a href="#section-4.2.2" class="auto internal xref">4.2.2</a>.  <a href="#name-avoiding-short-term-classic" class="internal xref">Avoiding Short-Term Classic Starvation: Sacrifice L4S Throughput or Delay?</a></p>
</li>
                  <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.4.2.2.2.3">
                    <p id="section-toc.1-1.4.2.2.2.3.1"><a href="#section-4.2.3" class="auto internal xref">4.2.3</a>.  <a href="#name-l4s-ecn-saturation-introduc" class="internal xref">L4S ECN Saturation: Introduce Drop or Delay?</a></p>
<ul class="compact toc ulBare ulEmpty">
<li class="compact toc ulBare ulEmpty" id="section-toc.1-1.4.2.2.2.3.2.1">
                        <p id="section-toc.1-1.4.2.2.2.3.2.1.1"><a href="#section-4.2.3.1" class="auto internal xref">4.2.3.1</a>.  <a href="#name-protecting-against-overload" class="internal xref">Protecting against Overload by Unresponsive ECN-Capable Traffic</a></p>
</li>
                    </ul>
</li>
                </ul>
</li>
            </ul>
</li>
          <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.5">
            <p id="section-toc.1-1.5.1"><a href="#section-5" class="auto internal xref">5</a>.  <a href="#name-references" class="internal xref">References</a></p>
<ul class="compact toc ulBare ulEmpty">
<li class="compact toc ulBare ulEmpty" id="section-toc.1-1.5.2.1">
                <p id="section-toc.1-1.5.2.1.1"><a href="#section-5.1" class="auto internal xref">5.1</a>.  <a href="#name-normative-references" class="internal xref">Normative References</a></p>
</li>
              <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.5.2.2">
                <p id="section-toc.1-1.5.2.2.1"><a href="#section-5.2" class="auto internal xref">5.2</a>.  <a href="#name-informative-references" class="internal xref">Informative References</a></p>
</li>
            </ul>
</li>
          <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.6">
            <p id="section-toc.1-1.6.1"><a href="#appendix-A" class="auto internal xref">Appendix A</a>.  <a href="#name-example-dualq-coupled-pi2-a" class="internal xref">Example DualQ Coupled PI2 Algorithm</a></p>
<ul class="compact toc ulBare ulEmpty">
<li class="compact toc ulBare ulEmpty" id="section-toc.1-1.6.2.1">
                <p id="section-toc.1-1.6.2.1.1"><a href="#appendix-A.1" class="auto internal xref">A.1</a>.  <a href="#name-pass-1-core-concepts" class="internal xref">Pass #1: Core Concepts</a></p>
</li>
              <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.6.2.2">
                <p id="section-toc.1-1.6.2.2.1"><a href="#appendix-A.2" class="auto internal xref">A.2</a>.  <a href="#name-pass-2-edge-case-details" class="internal xref">Pass #2: Edge-Case Details</a></p>
</li>
            </ul>
</li>
          <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.7">
            <p id="section-toc.1-1.7.1"><a href="#appendix-B" class="auto internal xref">Appendix B</a>.  <a href="#name-example-dualq-coupled-curvy" class="internal xref">Example DualQ Coupled Curvy RED Algorithm</a></p>
<ul class="compact toc ulBare ulEmpty">
<li class="compact toc ulBare ulEmpty" id="section-toc.1-1.7.2.1">
                <p id="section-toc.1-1.7.2.1.1"><a href="#appendix-B.1" class="auto internal xref">B.1</a>.  <a href="#name-curvy-red-in-pseudocode" class="internal xref">Curvy RED in Pseudocode</a></p>
</li>
              <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.7.2.2">
                <p id="section-toc.1-1.7.2.2.1"><a href="#appendix-B.2" class="auto internal xref">B.2</a>.  <a href="#name-efficient-implementation-of" class="internal xref">Efficient Implementation of Curvy RED</a></p>
</li>
            </ul>
</li>
          <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.8">
            <p id="section-toc.1-1.8.1"><a href="#appendix-C" class="auto internal xref">Appendix C</a>.  <a href="#name-choice-of-coupling-factor-k" class="internal xref">Choice of Coupling Factor, k</a></p>
<ul class="compact toc ulBare ulEmpty">
<li class="compact toc ulBare ulEmpty" id="section-toc.1-1.8.2.1">
                <p id="section-toc.1-1.8.2.1.1"><a href="#appendix-C.1" class="auto internal xref">C.1</a>.  <a href="#name-rtt-dependence" class="internal xref">RTT-Dependence</a></p>
</li>
              <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.8.2.2">
                <p id="section-toc.1-1.8.2.2.1"><a href="#appendix-C.2" class="auto internal xref">C.2</a>.  <a href="#name-guidance-on-controlling-thr" class="internal xref">Guidance on Controlling Throughput Equivalence</a></p>
</li>
            </ul>
</li>
          <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.9">
            <p id="section-toc.1-1.9.1"><a href="#appendix-D" class="auto internal xref"></a><a href="#name-acknowledgements" class="internal xref">Acknowledgements</a></p>
</li>
          <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.10">
            <p id="section-toc.1-1.10.1"><a href="#appendix-E" class="auto internal xref"></a><a href="#name-contributors" class="internal xref">Contributors</a></p>
</li>
          <li class="compact toc ulBare ulEmpty" id="section-toc.1-1.11">
            <p id="section-toc.1-1.11.1"><a href="#appendix-F" class="auto internal xref"></a><a href="#name-authors-addresses" class="internal xref">Authors' Addresses</a></p>
</li>
        </ul>
</nav>
</section>
</div>
<div id="dualq_intro">
<section id="section-1">
      <h2 id="name-introduction">
<a href="#section-1" class="section-number selfRef">1. </a><a href="#name-introduction" class="section-name selfRef">Introduction</a>
      </h2>
<p id="section-1-1">This document specifies a framework for DualQ Coupled AQMs, which can
      serve as the network part of the L4S architecture <span>[<a href="#RFC9330" class="cite xref">RFC9330</a>]</span>. A DualQ Coupled AQM consists of two
      queues: L4S and Classic. The L4S queue is intended for Scalable
      congestion controls that can maintain very low queuing latency
      (sub-millisecond on average) and high throughput at the same time. The
      Coupled DualQ acts like a semi-permeable membrane: the L4S queue
      isolates the sub-millisecond average queuing delay of L4S from Classic
      latency, while the coupling between the queues pools the capacity
      between both queues so that ad hoc numbers of capacity-seeking
      applications all sharing the same capacity can have roughly equivalent
      throughput per flow, whichever queue they use. The DualQ achieves this
      indirectly, without having to inspect transport-layer flow identifiers
      and without compromising the performance of the Classic traffic,
      relative to a single queue. The DualQ design has low complexity and
      requires no configuration for the public Internet.<a href="#section-1-1" class="pilcrow">¶</a></p>
<div id="dualq_problem">
<section id="section-1.1">
        <h3 id="name-outline-of-the-problem">
<a href="#section-1.1" class="section-number selfRef">1.1. </a><a href="#name-outline-of-the-problem" class="section-name selfRef">Outline of the Problem</a>
        </h3>
<p id="section-1.1-1">Latency is becoming the critical performance factor for many
        (perhaps most) applications on the public Internet, e.g., interactive
        web, web services, voice, conversational video, interactive video,
        interactive remote presence, instant messaging, online gaming, remote
        desktop, cloud-based applications, and video-assisted remote control
        of machinery and industrial processes. Once access network bitrates
        reach levels now common in the developed world, further increases
        offer diminishing returns unless latency is also addressed <span>[<a href="#Dukkipati06" class="cite xref">Dukkipati06</a>]</span>. In the last decade or so, much has been done
        to reduce propagation time by placing caches or servers closer to
        users. However, queuing remains a major intermittent component of
        latency.<a href="#section-1.1-1" class="pilcrow">¶</a></p>
<p id="section-1.1-2">Previously, very low latency has only been available for a few
        selected low-rate applications that confine their sending rate within
        a specially carved-off portion of capacity, which is prioritized over
        other traffic, e.g., Diffserv Expedited Forwarding (EF) <span>[<a href="#RFC3246" class="cite xref">RFC3246</a>]</span>. Up
        to now, it has not been possible to allow any number of low-latency,
        high throughput applications to seek to fully utilize available
        capacity, because the capacity-seeking process itself causes too much
        queuing delay.<a href="#section-1.1-2" class="pilcrow">¶</a></p>
<p id="section-1.1-3">To reduce this queuing delay caused by the capacity-seeking
        process, changes either to the network alone or to end systems alone
        are in progress. L4S involves a recognition that both approaches are
        yielding diminishing returns:<a href="#section-1.1-3" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="section-1.1-4.1">Recent state-of-the-art AQM in the
            network, e.g., Flow Queue CoDel <span>[<a href="#RFC8290" class="cite xref">RFC8290</a>]</span>,
            Proportional Integral controller Enhanced (PIE) <span>[<a href="#RFC8033" class="cite xref">RFC8033</a>]</span>, and Adaptive Random Early Detection (ARED) <span>[<a href="#ARED01" class="cite xref">ARED01</a>]</span>, has reduced queuing delay for all traffic, not
            just a select few applications. However, no matter how good the
            AQM, the capacity-seeking (sawtoothing) rate of TCP-like
            congestion controls represents a lower limit that will cause either
            the queuing delay to vary or the link to be
            underutilized.
     These AQMs are tuned to allow a typical
            capacity-seeking TCP-Reno-friendly flow to induce an average queue
            that roughly doubles the base round-trip time (RTT), adding 5-15 ms of queuing on
            average for a mix of long-running flows and web traffic (cf. 500 microseconds with L4S for the same traffic mix <span>[<a href="#L4Seval22" class="cite xref">L4Seval22</a>]</span>). However, for many applications, low
            delay is not useful unless it is consistently low. With these
            AQMs, 99th percentile queuing delay is 20-30 ms (cf. 2 ms with the
          same traffic over L4S).<a href="#section-1.1-4.1" class="pilcrow">¶</a>
</li>
          <li class="normal" id="section-1.1-4.2">Similarly, recent research into using end-to-end congestion control
            without needing an AQM in the network (e.g., Bottleneck Bandwidth and Round-trip propagation time  (BBR) <span>[<a href="#I-D.cardwell-iccrg-bbr-congestion-control" class="cite xref">BBR-CC</a>]</span>) seems to
            have hit a similar queuing delay floor of about 20 ms on
            average, but there are also regular 25 ms delay spikes due to
            bandwidth probes and 60 ms spikes due to flow-starts.<a href="#section-1.1-4.2" class="pilcrow">¶</a>
</li>
        </ul>
<p id="section-1.1-5">L4S learns from the experience of Data Center TCP (DCTCP) <span>[<a href="#RFC8257" class="cite xref">RFC8257</a>]</span>, which shows the power of complementary changes
        both in the network and on end systems. DCTCP teaches us that two
        small but radical changes to congestion control are needed to cut the
        two major outstanding causes of queuing delay variability:<a href="#section-1.1-5" class="pilcrow">¶</a></p>
<ol start="1" type="1" class="normal type-1" id="section-1.1-6">
<li id="section-1.1-6.1">Far smaller rate variations (sawteeth) than Reno-friendly
            congestion controls.<a href="#section-1.1-6.1" class="pilcrow">¶</a>
</li>
          <li id="section-1.1-6.2">A shift of smoothing and hence smoothing delay from network to
            sender.<a href="#section-1.1-6.2" class="pilcrow">¶</a>
</li>
        </ol>
<p id="section-1.1-7">Without the former, a 'Classic' (e.g., Reno-friendly)
        flow's RTT varies between roughly 1 and 2 times the
        base RTT between the machines in question. Without the latter, a
        'Classic' flow's response to changing events is delayed by a
        worst-case (transcontinental) RTT, which could be hundreds of times
        the actual smoothing delay needed for the RTT of typical traffic from
        localized Content Delivery Networks (CDNs).<a href="#section-1.1-7" class="pilcrow">¶</a></p>
<p id="section-1.1-8">These changes are the two main features of the family of so-called
        'Scalable' congestion controls (which include DCTCP, Prague, and
        Self-Clocked Rate Adaptation for Multimedia (SCReAM)). Both of these changes only reduce delay in combination with a
        complementary change in the network, and they are both only feasible
        with ECN, not drop, for the signalling:<a href="#section-1.1-8" class="pilcrow">¶</a></p>
<ol start="1" type="1" class="normal type-1" id="section-1.1-9">
   <li id="section-1.1-9.1">The smaller sawteeth allow an extremely shallow ECN
            packet-marking threshold in the queue.<a href="#section-1.1-9.1" class="pilcrow">¶</a>
</li>
          <li id="section-1.1-9.2">No smoothing in the network means that every fluctuation of
            the queue is signalled immediately.<a href="#section-1.1-9.2" class="pilcrow">¶</a>
</li>
        </ol>
<p id="section-1.1-10">Without ECN, either of these would lead to very high loss
        levels. In contrast, with ECN, the resulting high marking levels are just
        signals, not impairments.
 (Note that BBRv2 <span>[<a href="#BBRv2" class="cite xref">BBRv2</a>]</span>
        combines the best of both worlds -- it works as a Scalable congestion
        control when ECN is available, but it also aims to minimize delay when ECN
        is absent.)<a href="#section-1.1-10" class="pilcrow">¶</a></p>
<p id="section-1.1-11">However, until now, Scalable congestion controls (like DCTCP) did
        not coexist well in a shared ECN-capable queue with existing Classic
        (e.g., Reno <span>[<a href="#RFC5681" class="cite xref">RFC5681</a>]</span> or CUBIC <span>[<a href="#RFC8312" class="cite xref">RFC8312</a>]</span>) congestion controls -- Scalable controls are
        so aggressive that these 'Classic' algorithms would drive themselves
        to a small capacity share. Therefore, until now, L4S controls could
        only be deployed where a clean-slate environment could be arranged,
        such as in private data centres (hence the name DCTCP).<a href="#section-1.1-11" class="pilcrow">¶</a></p>
<p id="section-1.1-12">One way to solve the problem of coexistence between Scalable and
        Classic flows is to use a per-flow-queuing (FQ) approach such as
        FQ-CoDel <span>[<a href="#RFC8290" class="cite xref">RFC8290</a>]</span>. It classifies packets by flow
        identifier into separate queues in order to isolate sparse flows from
        the higher latency in the queues assigned to heavier flows. However,
        if a Classic flow needs both low delay and high throughput, having a
        queue to itself does not isolate it from the harm it causes to itself.
        Also, FQ approaches need to inspect flow identifiers, which is not
        always practical.<a href="#section-1.1-12" class="pilcrow">¶</a></p>
<p id="section-1.1-13">In summary, Scalable congestion controls address the root cause of
        the latency, loss and scaling problems with Classic congestion
        controls. Both FQ and DualQ AQMs can be enablers for this smooth low-latency
        scalable behaviour. The DualQ approach is particularly useful
        because identifying flows is sometimes not practical or desirable.<a href="#section-1.1-13" class="pilcrow">¶</a></p>
</section>
</div>
<div id="dualq_scope">
<section id="section-1.2">
        <h3 id="name-context-scope-and-applicabi">
<a href="#section-1.2" class="section-number selfRef">1.2. </a><a href="#name-context-scope-and-applicabi" class="section-name selfRef">Context, Scope, and Applicability</a>
        </h3>
<p id="section-1.2-1">L4S involves complementary changes in the network and on
        end systems:<a href="#section-1.2-1" class="pilcrow">¶</a></p>
<span class="break"></span><dl class="dlNewline" id="section-1.2-2">
          <dt id="section-1.2-2.1">Network:</dt>
          <dd style="margin-left: 1.5em" id="section-1.2-2.2">A DualQ Coupled AQM (defined in the present
            document) or a modification to flow queue AQMs (described in paragraph "b" in
           Section <a href="https://www.rfc-editor.org/rfc/rfc9330#section-4.2" class="relref">4.2</a> of the L4S architecture <span>[<a href="#RFC9330" class="cite xref">RFC9330</a>]</span>).<a href="#section-1.2-2.2" class="pilcrow">¶</a>
</dd>
          <dd class="break"></dd>
<dt id="section-1.2-2.3">End system:</dt>
          <dd style="margin-left: 1.5em" id="section-1.2-2.4">A Scalable congestion control (defined in Section <a href="https://www.rfc-editor.org/rfc/rfc9331#section-4" class="relref">4</a> of the L4S ECN protocol spec <span>[<a href="#RFC9331" class="cite xref">RFC9331</a>]</span>).<a href="#section-1.2-2.4" class="pilcrow">¶</a>
</dd>
          <dd class="break"></dd>
<dt id="section-1.2-2.5">Packet identifier:</dt>
          <dd style="margin-left: 1.5em" id="section-1.2-2.6">The network and end-system parts
            of L4S can be deployed incrementally, because they both identify
            L4S packets using the experimentally assigned ECN codepoints in the IP header: ECT(1) and
            CE <span>[<a href="#RFC8311" class="cite xref">RFC8311</a>]</span> <span>[<a href="#RFC9331" class="cite xref">RFC9331</a>]</span>.<a href="#section-1.2-2.6" class="pilcrow">¶</a>
</dd>
        <dd class="break"></dd>
</dl>
<p id="section-1.2-3">DCTCP <span>[<a href="#RFC8257" class="cite xref">RFC8257</a>]</span> is an example
        of a Scalable congestion control for controlled environments that has
        been deployed for some time in Linux, Windows, and FreeBSD operating
        systems. During the progress of this document through the IETF, a
        number of other Scalable congestion controls were implemented,
        e.g., Prague over TCP and QUIC <span>[<a href="#I-D.briscoe-iccrg-prague-congestion-control" class="cite xref">PRAGUE-CC</a>]</span> <span>[<a href="#PragueLinux" class="cite xref">PragueLinux</a>]</span>, BBRv2 <span>[<a href="#BBRv2" class="cite xref">BBRv2</a>]</span> <span>[<a href="#I-D.cardwell-iccrg-bbr-congestion-control" class="cite xref">BBR-CC</a>]</span>, and
        the L4S variant of SCReAM for real-time media <span>[<a href="#SCReAM-L4S" class="cite xref">SCReAM-L4S</a>]</span> <span>[<a href="#RFC8298" class="cite xref">RFC8298</a>]</span>.<a href="#section-1.2-3" class="pilcrow">¶</a></p>
<p id="section-1.2-4">The focus of this specification is to enable deployment of the
        network part of the L4S service. Then, without any management
        intervention, applications can exploit this new network capability as
        the applications or their operating systems migrate to Scalable congestion controls, which
        can then evolve <em>while</em> their benefits are
        being enjoyed by everyone on the Internet.<a href="#section-1.2-4" class="pilcrow">¶</a></p>
<p id="section-1.2-5">The DualQ Coupled AQM framework can incorporate any AQM designed
        for a single queue that generates a statistical or deterministic
        mark/drop probability driven by the queue dynamics. Pseudocode
        examples of two different DualQ Coupled AQMs are given in the
        appendices. 
        In many cases the framework simplifies the basic control
        algorithm and requires little extra processing. 
        Therefore, it is
        believed the Coupled AQM would be applicable and easy to deploy in all
        types of buffers such as buffers in cost-reduced mass-market residential
        equipment; buffers in end-system stacks; buffers in carrier-scale
        equipment including remote access servers, routers, firewalls, and
        Ethernet switches; buffers in network interface cards; buffers in
        virtualized network appliances, hypervisors; and so on.<a href="#section-1.2-5" class="pilcrow">¶</a></p>
<p id="section-1.2-6">For the public Internet, nearly all the benefit will typically be
        achieved by deploying the Coupled AQM into either end of the access
        link between a 'site' and the Internet, which is invariably the
        bottleneck (see <span><a href="https://www.rfc-editor.org/rfc/rfc9330#section-6.4" class="relref">Section 6.4</a> of [<a href="#RFC9330" class="cite xref">RFC9330</a>]</span>
        about deployment, which also defines the term 'site' to mean a home,
        an office, a campus, or mobile user equipment).<a href="#section-1.2-6" class="pilcrow">¶</a></p>
<p id="section-1.2-7">Latency is not the only concern of L4S:<a href="#section-1.2-7" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="section-1.2-8.1">The 'Low Loss' part of the name denotes that L4S generally
            achieves zero congestion loss (which would otherwise cause
            retransmission delays), due to its use of ECN.<a href="#section-1.2-8.1" class="pilcrow">¶</a>
</li>
          <li class="normal" id="section-1.2-8.2">The 'Scalable throughput' part of the name denotes that the
            per-flow throughput of Scalable congestion controls should scale
            indefinitely, avoiding the imminent scaling problems with
            'TCP-Friendly' congestion control algorithms <span>[<a href="#RFC3649" class="cite xref">RFC3649</a>]</span>.<a href="#section-1.2-8.2" class="pilcrow">¶</a>
</li>
        </ul>
<p id="section-1.2-9">The former is clearly in scope of this AQM document. However,
        the latter is an outcome of the end-system behaviour and is therefore
        outside the scope of this AQM document, even though the AQM is an
        enabler.<a href="#section-1.2-9" class="pilcrow">¶</a></p>
<p id="section-1.2-10">The overall L4S architecture <span>[<a href="#RFC9330" class="cite xref">RFC9330</a>]</span> gives more detail, including on
        wider deployment aspects such as backwards compatibility of Scalable
        congestion controls in bottlenecks where a DualQ Coupled AQM has not
        been deployed. The supporting papers <span>[<a href="#L4Seval22" class="cite xref">L4Seval22</a>]</span>, <span>[<a href="#DualPI2Linux" class="cite xref">DualPI2Linux</a>]</span>,
        <span>[<a href="#PI2" class="cite xref">PI2</a>]</span>, and <span>[<a href="#PI2param" class="cite xref">PI2param</a>]</span> give the full rationale for the AQM design, both
        discursively and in more precise mathematical form, as well as the
        results of performance evaluations. The main results have been
        validated independently when using the Prague congestion control <span>[<a href="#Boru20" class="cite xref">Boru20</a>]</span> (experiments are run using Prague and DCTCP, but
        only the former is relevant for validation, because Prague fixes a
        number of problems with the Linux DCTCP code that make it unsuitable
        for the public Internet).<a href="#section-1.2-10" class="pilcrow">¶</a></p>
</section>
</div>
<div id="dualq_Terminology">
<section id="section-1.3">
        <h3 id="name-terminology">
<a href="#section-1.3" class="section-number selfRef">1.3. </a><a href="#name-terminology" class="section-name selfRef">Terminology</a>
        </h3>
<p id="section-1.3-1">
    The key words "<span class="bcp14">MUST</span>", "<span class="bcp14">MUST NOT</span>", "<span class="bcp14">REQUIRED</span>", "<span class="bcp14">SHALL</span>", "<span class="bcp14">SHALL NOT</span>", "<span class="bcp14">SHOULD</span>", "<span class="bcp14">SHOULD NOT</span>", "<span class="bcp14">RECOMMENDED</span>", "<span class="bcp14">NOT RECOMMENDED</span>",
    "<span class="bcp14">MAY</span>", and "<span class="bcp14">OPTIONAL</span>" in this document are to be interpreted as
    described in BCP 14 <span>[<a href="#RFC2119" class="cite xref">RFC2119</a>]</span> <span>[<a href="#RFC8174" class="cite xref">RFC8174</a>]</span> 
    when, and only when, they appear in all capitals, as shown here.<a href="#section-1.3-1" class="pilcrow">¶</a></p>
<p id="section-1.3-2">The DualQ Coupled AQM uses two queues for two services:<a href="#section-1.3-2" class="pilcrow">¶</a></p>
<span class="break"></span><dl class="dlParallel" id="section-1.3-3">
          <dt id="section-1.3-3.1">Classic Service/Queue:</dt>
          <dd style="margin-left: 1.5em" id="section-1.3-3.2">The Classic service is
            intended for all the congestion control behaviours that coexist
            with Reno <span>[<a href="#RFC5681" class="cite xref">RFC5681</a>]</span> (e.g., Reno itself,
            CUBIC <span>[<a href="#RFC8312" class="cite xref">RFC8312</a>]</span>, and TFRC <span>[<a href="#RFC5348" class="cite xref">RFC5348</a>]</span>). The term 'Classic queue' means a queue providing the Classic service.<a href="#section-1.3-3.2" class="pilcrow">¶</a>
</dd>
          <dd class="break"></dd>
<dt id="section-1.3-3.3">Low Latency, Low Loss, and Scalable throughput (L4S) Service/Queue:</dt>
          <dd style="margin-left: 1.5em" id="section-1.3-3.4">The
            'L4S' service is intended for traffic from Scalable congestion
            control algorithms, such as the Prague congestion control <span>[<a href="#I-D.briscoe-iccrg-prague-congestion-control" class="cite xref">PRAGUE-CC</a>]</span>, which was
            derived from Data Center TCP <span>[<a href="#RFC8257" class="cite xref">RFC8257</a>]</span>. The
            L4S service is for more general traffic than just Prague
            -- it allows the set of congestion controls with similar
            scaling properties to Prague to evolve, such as the examples listed below (Relentless, SCReAM, etc.). The term 'L4S queue' means a queue providing the L4S service.<a href="#section-1.3-3.4" class="pilcrow">¶</a>
</dd>
          <dd class="break"></dd>
<dt id="section-1.3-3.5">Classic Congestion Control:</dt>
          <dd style="margin-left: 1.5em" id="section-1.3-3.6">A congestion control
            behaviour that can coexist with standard Reno <span>[<a href="#RFC5681" class="cite xref">RFC5681</a>]</span> without causing significantly negative impact
            on its flow rate <span>[<a href="#RFC5033" class="cite xref">RFC5033</a>]</span>. With Classic
            congestion controls, such as Reno or CUBIC, because flow rate has
            scaled since TCP congestion control was first designed in 1988, it
            now takes hundreds of round trips (and growing) to recover after a
            congestion signal (whether a loss or an ECN mark) as shown in the
            examples in Section <a href="https://www.rfc-editor.org/rfc/rfc9330#section-5.1" class="relref">5.1</a> of the L4S architecture <span>[<a href="#RFC9330" class="cite xref">RFC9330</a>]</span> and in <span>[<a href="#RFC3649" class="cite xref">RFC3649</a>]</span>. Therefore, control of queuing and utilization
            becomes very slack, and the slightest disturbances (e.g., from
            new flows starting) prevent a high rate from being attained.<a href="#section-1.3-3.6" class="pilcrow">¶</a>
</dd>
          <dd class="break"></dd>
<dt id="section-1.3-3.7">Scalable Congestion Control:</dt>
          <dd style="margin-left: 1.5em" id="section-1.3-3.8">A congestion control
            where the average time from one congestion signal to the next (the
            recovery time) remains invariant as flow rate scales, all
            other factors being equal. This maintains the same degree of
            control over queuing and utilization whatever the flow rate, as
            well as ensuring that high throughput is robust to disturbances.
            For instance, DCTCP averages 2 congestion signals per round trip,
            whatever the flow rate, as do other recently developed Scalable
            congestion controls, e.g., Relentless TCP <span>[<a href="#I-D.mathis-iccrg-relentless-tcp" class="cite xref">RELENTLESS</a>]</span>, Prague <span>[<a href="#I-D.briscoe-iccrg-prague-congestion-control" class="cite xref">PRAGUE-CC</a>]</span> <span>[<a href="#PragueLinux" class="cite xref">PragueLinux</a>]</span>, BBRv2 <span>[<a href="#BBRv2" class="cite xref">BBRv2</a>]</span> <span>[<a href="#I-D.cardwell-iccrg-bbr-congestion-control" class="cite xref">BBR-CC</a>]</span>, and the L4S
            variant of SCReAM for real-time media <span>[<a href="#SCReAM-L4S" class="cite xref">SCReAM-L4S</a>]</span> <span>[<a href="#RFC8298" class="cite xref">RFC8298</a>]</span>. For the public
            Internet, a Scalable transport has to comply with the requirements
            in <span><a href="https://www.rfc-editor.org/rfc/rfc9331#section-4" class="relref">Section 4</a> of [<a href="#RFC9331" class="cite xref">RFC9331</a>]</span> (a.k.a. the 'Prague L4S requirements').<a href="#section-1.3-3.8" class="pilcrow">¶</a>
</dd>
          <dd class="break"></dd>
<dt id="section-1.3-3.9">C:</dt>
          <dd style="margin-left: 1.5em" id="section-1.3-3.10">Abbreviation for Classic, e.g., when used as
            a subscript.<a href="#section-1.3-3.10" class="pilcrow">¶</a>
</dd>
          <dd class="break"></dd>
<dt id="section-1.3-3.11">L:</dt>
          <dd style="margin-left: 1.5em" id="section-1.3-3.12">
            <p id="section-1.3-3.12.1">Abbreviation for L4S, e.g., when used as a
            subscript.<a href="#section-1.3-3.12.1" class="pilcrow">¶</a></p>
<p id="section-1.3-3.12.2">The terms Classic or L4S can
            also qualify other nouns, such as 'codepoint', 'identifier',
            'classification', 'packet', and 'flow'. For example, an L4S packet
            means a packet with an L4S identifier sent from an L4S congestion
            control.<a href="#section-1.3-3.12.2" class="pilcrow">¶</a></p>
<p id="section-1.3-3.12.3">Both Classic and L4S services can
            cope with a proportion of unresponsive or less-responsive traffic
            as well but, in the L4S case, its rate has to be smooth enough or
            low enough to not build a queue (e.g., DNS, Voice over IP (VoIP), game sync
            datagrams, etc.). The DualQ Coupled AQM behaviour is defined to be
            similar to a single First-In, First-Out (FIFO) queue with respect to unresponsive and
            overload traffic.<a href="#section-1.3-3.12.3" class="pilcrow">¶</a></p>
</dd>
          <dd class="break"></dd>
<dt id="section-1.3-3.13">Reno-friendly:</dt>
          <dd style="margin-left: 1.5em" id="section-1.3-3.14">The subset of Classic traffic that is
            friendly to the standard Reno congestion control defined for TCP
            in <span>[<a href="#RFC5681" class="cite xref">RFC5681</a>]</span>. 
The TFRC spec <span>[<a href="#RFC5348" class="cite xref">RFC5348</a>]</span> indirectly implies that 'friendly' is
defined as "generally within a factor of two of the sending rate
of a TCP flow under the same conditions".  'Reno-friendly' is used here in place of
            'TCP-friendly', given the latter has become imprecise, because the
            TCP protocol is now used with so many different congestion control
            behaviours, and Reno is used in non-TCP transports, such as
            QUIC <span>[<a href="#RFC9000" class="cite xref">RFC9000</a>]</span>.<a href="#section-1.3-3.14" class="pilcrow">¶</a>
</dd>
          <dd class="break"></dd>
<dt id="section-1.3-3.15">DualQ or DualQ AQM:</dt>
          <dd style="margin-left: 1.5em" id="section-1.3-3.16">Used loosely as shorthand for a Dual-Queue Coupled AQM, where the context 
          makes 'Coupled AQM' obvious.<a href="#section-1.3-3.16" class="pilcrow">¶</a>
</dd>
          <dd class="break"></dd>
<dt id="section-1.3-3.17">Classic ECN:</dt>
          <dd style="margin-left: 1.5em" id="section-1.3-3.18">
            <p id="section-1.3-3.18.1">The original Explicit Congestion
            Notification (ECN) protocol <span>[<a href="#RFC3168" class="cite xref">RFC3168</a>]</span> that
            requires ECN signals to be treated as equivalent to drops, both when
            generated in the network and when responded to by the
            sender.<a href="#section-1.3-3.18.1" class="pilcrow">¶</a></p>
<p id="section-1.3-3.18.2">For L4S, the names used for the four codepoints of the 2-bit IP-ECN field are unchanged from those
            defined in the ECN spec <span>[<a href="#RFC3168" class="cite xref">RFC3168</a>]</span>, i.e., Not-ECT, ECT(0), ECT(1), and
            CE, where ECT stands for ECN-Capable Transport and CE stands for
            Congestion Experienced. A packet marked with the CE codepoint is
            termed 'ECN-marked' or sometimes just 'marked' where the context
            makes ECN obvious.<a href="#section-1.3-3.18.2" class="pilcrow">¶</a></p>
</dd>
        <dd class="break"></dd>
</dl>
</section>
</div>
<section id="section-1.4">
        <h3 id="name-features">
<a href="#section-1.4" class="section-number selfRef">1.4. </a><a href="#name-features" class="section-name selfRef">Features</a>
        </h3>
<p id="section-1.4-1">The AQM couples marking and/or dropping from the Classic queue to
        the L4S queue in such a way that a flow will get roughly the same
        throughput whichever it uses. Therefore, both queues can feed into the
        full capacity of a link, and no rates need to be configured for the
        queues.
 The L4S queue enables Scalable congestion controls like DCTCP
        or Prague to give very low and consistently low latency, without
        compromising the performance of competing 'Classic' Internet
        traffic.<a href="#section-1.4-1" class="pilcrow">¶</a></p>
<p id="section-1.4-2">Thousands of tests have been conducted in a typical fixed
        residential broadband setting. Experiments used a range of base round-trip
        delays up to 100 ms and link rates up to 200 Mb/s between the data
        centre and home network, with varying amounts of background traffic in
        both queues. For every L4S packet, the AQM kept the average queuing
        delay below 1 ms (or 2 packets where serialization delay exceeded 1 ms
        on slower links), with the 99th percentile being no worse than 2 ms. No losses at
        all were introduced by the L4S AQM. Details of the extensive
        experiments are available in <span>[<a href="#L4Seval22" class="cite xref">L4Seval22</a>]</span> and <span>[<a href="#DualPI2Linux" class="cite xref">DualPI2Linux</a>]</span>.
 Subjective testing using
        very demanding high-bandwidth low-latency applications over a single
        shared access link is also described in <span>[<a href="#L4Sdemo16" class="cite xref">L4Sdemo16</a>]</span> and summarized in Section <a href="https://www.rfc-editor.org/rfc/rfc9330#section-6.1" class="relref">6.1</a> of the L4S architecture <span>[<a href="#RFC9330" class="cite xref">RFC9330</a>]</span>.<a href="#section-1.4-2" class="pilcrow">¶</a></p>
<p id="section-1.4-3">In all these experiments, the host was connected to the home
        network by fixed Ethernet, in order to quantify the queuing delay that
        can be achieved by a user who cares about delay. It should be
        emphasized that L4S support at the bottleneck link cannot 'undelay'
        bursts introduced by another link on the path, for instance by legacy
        Wi-Fi equipment. However, if L4S support is added to the queue feeding
        the <em>outgoing</em> WAN link of a home gateway,
        it would be counterproductive not to also reduce the burstiness of the
        <em>incoming</em> Wi-Fi. Also, trials of Wi-Fi
        equipment with an L4S DualQ Coupled AQM on the <em>outgoing</em>
        Wi-Fi interface are in progress, and early results of an L4S DualQ
        Coupled AQM in a 5G radio access network testbed with emulated outdoor
        cell edge radio fading are given in <span>[<a href="#L4S_5G" class="cite xref">L4S_5G</a>]</span>.<a href="#section-1.4-3" class="pilcrow">¶</a></p>
<p id="section-1.4-4">Unlike Diffserv EF, the L4S queue does not have
        to be limited to a small proportion of the link capacity in order to
        achieve low delay. The L4S queue can be filled with a heavy load of
        capacity-seeking flows (Prague, BBRv2, etc.) and still achieve low delay.
        The L4S queue does not rely on the presence of other traffic in the
        Classic queue that can be 'overtaken'. 
        It gives low latency to L4S
        traffic whether or not there is Classic traffic. The tail latency of
        traffic served by the Classic AQM is sometimes a little better,
        sometimes a little worse, when a proportion of the traffic is L4S.<a href="#section-1.4-4" class="pilcrow">¶</a></p>
<p id="section-1.4-5">The two queues are only necessary because:<a href="#section-1.4-5" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="section-1.4-6.1">The large variations (sawteeth) of Classic flows need roughly a
            base RTT of queuing delay to ensure full utilization.<a href="#section-1.4-6.1" class="pilcrow">¶</a>
</li>
          <li class="normal" id="section-1.4-6.2">Scalable flows do not need a queue to keep utilization high,
            but they cannot keep latency consistently low if they are mixed
            with Classic traffic.<a href="#section-1.4-6.2" class="pilcrow">¶</a>
</li>
        </ul>
<p id="section-1.4-7">The L4S queue has latency priority within sub-round-trip
        timescales, but over longer periods the coupling from the Classic to
        the L4S AQM (explained below) ensures that it does not have bandwidth
        priority over the Classic queue.<a href="#section-1.4-7" class="pilcrow">¶</a></p>
</section>
</section>
</div>
<div id="dualq_algo">
<section id="section-2">
      <h2 id="name-dualq-coupled-aqm">
<a href="#section-2" class="section-number selfRef">2. </a><a href="#name-dualq-coupled-aqm" class="section-name selfRef">DualQ Coupled AQM</a>
      </h2>
<p id="section-2-1">There are two main aspects to the DualQ Coupled AQM approach:<a href="#section-2-1" class="pilcrow">¶</a></p>
<ol start="1" type="1" class="normal type-1" id="section-2-2">
        <li id="section-2-2.1">The Coupled AQM that addresses throughput equivalence between
          Classic (e.g., Reno or CUBIC) flows and L4S flows (that satisfy
          the Prague L4S requirements).<a href="#section-2-2.1" class="pilcrow">¶</a>
</li>
        <li id="section-2-2.2">The Dual-Queue structure that provides latency separation for L4S
          flows to isolate them from the typically large Classic queue.<a href="#section-2-2.2" class="pilcrow">¶</a>
</li>
      </ol>
<div id="dualq_coupled">
<section id="section-2.1">
        <h3 id="name-coupled-aqm">
<a href="#section-2.1" class="section-number selfRef">2.1. </a><a href="#name-coupled-aqm" class="section-name selfRef">Coupled AQM</a>
        </h3>
<p id="section-2.1-1">In the 1990s, the 'TCP formula' was derived for the relationship
        between the steady-state congestion window, cwnd, and the drop
        probability, p, of standard Reno congestion control <span>[<a href="#RFC5681" class="cite xref">RFC5681</a>]</span>. To a first-order approximation, the steady-state
        cwnd of Reno is inversely proportional to the square root of p.<a href="#section-2.1-1" class="pilcrow">¶</a></p>
<p id="section-2.1-2">The design focuses on Reno as the worst case, because if it does no
        harm to Reno, it will not harm CUBIC or any traffic designed to be
        friendly to Reno. TCP CUBIC implements a Reno-friendly mode,
        which is relevant for typical RTTs under 20 ms as long as the
        throughput of a single flow is less than about 350 Mb/s. In such cases,
        it can be assumed that CUBIC traffic behaves similarly to Reno. The
        term 'Classic' will be used for the collection of Reno-friendly
        traffic including CUBIC and potentially other experimental congestion
        controls intended not to significantly impact the flow rate of
        Reno.<a href="#section-2.1-2" class="pilcrow">¶</a></p>
<p id="section-2.1-3">A supporting paper <span>[<a href="#PI2" class="cite xref">PI2</a>]</span> includes the
        derivation of the equivalent rate equation for DCTCP, for which cwnd
        is inversely proportional to p (not the square root), where in this
        case p is the ECN-marking probability. DCTCP is not the only
        congestion control that behaves like this, so the term 'Scalable' will
        be used for all similar congestion control behaviours (see examples in
        <a href="#dualq_scope" class="auto internal xref">Section 1.2</a>). The term 'L4S' is used for traffic
        driven by a Scalable congestion control that also complies with the
        additional 'Prague L4S requirements' <span>[<a href="#RFC9331" class="cite xref">RFC9331</a>]</span>.<a href="#section-2.1-3" class="pilcrow">¶</a></p>
<p id="section-2.1-4">For safe coexistence, under stationary conditions, a Scalable flow
        has to run at roughly the same rate as a Reno TCP flow (all other
        factors being equal). So the drop or marking probability for Classic
        traffic, p_C, has to be distinct from the marking probability for L4S
        traffic, p_L. The original ECN spec <span>[<a href="#RFC3168" class="cite xref">RFC3168</a>]</span> required these probabilities to be the same, but
        <span>[<a href="#RFC8311" class="cite xref">RFC8311</a>]</span> updates <span>[<a href="#RFC3168" class="cite xref">RFC3168</a>]</span> to enable experiments in
        which these probabilities are different.<a href="#section-2.1-4" class="pilcrow">¶</a></p>
<p id="section-2.1-5">Also, to remain stable, Classic sources need the network to smooth
        p_C so it changes relatively slowly. It is hard for a network node to
        know the RTTs of all the flows, so a Classic AQM adds a <em>worst-case</em> RTT of smoothing delay (about 100-200
        ms). In contrast, L4S shifts responsibility for smoothing ECN feedback
        to the sender, which only delays its response by its <em>own</em> RTT, as well as allowing a more immediate
        response if necessary.<a href="#section-2.1-5" class="pilcrow">¶</a></p>
<p id="section-2.1-6">The Coupled AQM achieves safe coexistence by making the Classic
        drop probability p_C proportional to the square of the coupled L4S
        probability p_CL. p_CL is an input to the instantaneous L4S marking
        probability p_L, but it changes as slowly as p_C. This makes the Reno
        flow rate roughly equal the DCTCP flow rate, because the squaring of
        p_CL counterbalances the square root of p_C in the 'TCP formula' of
        Classic Reno congestion control.<a href="#section-2.1-6" class="pilcrow">¶</a></p>
<p id="section-2.1-7">Stating this as a formula, the relation between Classic drop
        probability, p_C, and the coupled L4S probability p_CL needs to take
        the following form:<a href="#section-2.1-7" class="pilcrow">¶</a></p>
<div class="sourcecode" id="section-2.1-8">
<pre>
    p_C = ( p_CL / k )^2,                 (1)</pre><a href="#section-2.1-8" class="pilcrow">¶</a>
</div>
<p id="section-2.1-9">where k is the constant of proportionality, which is termed the
        'coupling factor'.<a href="#section-2.1-9" class="pilcrow">¶</a></p>
</section>
</div>
<div id="dualq">
<section id="section-2.2">
        <h3 id="name-dual-queue">
<a href="#section-2.2" class="section-number selfRef">2.2. </a><a href="#name-dual-queue" class="section-name selfRef">Dual Queue</a>
        </h3>
<p id="section-2.2-1">Classic traffic needs to build a large queue to prevent
        underutilization. Therefore, a separate queue is provided for L4S
        traffic, and it is scheduled with priority over the Classic queue.
        Priority is conditional to prevent starvation of Classic traffic in
        certain conditions (see <a href="#dualq_coupled_structure" class="auto internal xref">Section 2.4</a>).<a href="#section-2.2-1" class="pilcrow">¶</a></p>
<p id="section-2.2-2">Nonetheless, coupled marking ensures that giving priority to L4S
        traffic still leaves the right amount of spare scheduling time for
        Classic flows to each get equivalent throughput to DCTCP flows (all
        other factors, such as RTT, being equal).<a href="#section-2.2-2" class="pilcrow">¶</a></p>
</section>
</div>
<div id="dualq_classification">
<section id="section-2.3">
        <h3 id="name-traffic-classification">
<a href="#section-2.3" class="section-number selfRef">2.3. </a><a href="#name-traffic-classification" class="section-name selfRef">Traffic Classification</a>
        </h3>
<p id="section-2.3-1">Both the Coupled AQM and DualQ mechanisms need an identifier to
        distinguish L4S (L) and Classic (C) packets. 
        Then the coupling
        algorithm can achieve coexistence without having to inspect flow
        identifiers, because it can apply the appropriate marking or dropping
        probability to all flows of each type. A separate
        specification <span>[<a href="#RFC9331" class="cite xref">RFC9331</a>]</span> requires
        the network to treat the ECT(1) and CE codepoints of the ECN field as
        this identifier. An additional process document has proved necessary
        to make the ECT(1) codepoint available for experimentation <span>[<a href="#RFC8311" class="cite xref">RFC8311</a>]</span>.<a href="#section-2.3-1" class="pilcrow">¶</a></p>
<p id="section-2.3-2">For policy reasons, an operator might choose to steer certain
        packets (e.g., from certain flows or with certain addresses) out
        of the L queue, even though they identify themselves as L4S by their
        ECN codepoints. In such cases, the L4S ECN protocol <span>[<a href="#RFC9331" class="cite xref">RFC9331</a>]</span> states that the device "<span class="bcp14">MUST NOT</span>
        alter the end-to-end L4S ECN identifier" so that it is preserved
        end to end. The aim is that each operator can choose how it treats L4S
        traffic locally, but an individual operator does not alter the
        identification of L4S packets, which would prevent other operators
        downstream from making their own choices on how to treat L4S
        traffic.<a href="#section-2.3-2" class="pilcrow">¶</a></p>
<p id="section-2.3-3">In addition, an operator could use other identifiers to classify
        certain additional packet types into the L queue that it deems will
        not risk harm to the L4S service, for instance, addresses of specific
        applications or hosts; specific Diffserv codepoints such as EF, Voice-Admit, or the Non-Queue-Building (NQB)
        per-hop behaviour; or certain protocols (e.g., ARP and DNS) (see <span><a href="https://www.rfc-editor.org/rfc/rfc9331#section-5.4.1" class="relref">Section 5.4.1</a> of [<a href="#RFC9331" class="cite xref">RFC9331</a>]</span>). Note
        that 
 <span>[<a href="#RFC9331" class="cite xref">RFC9331</a>]</span> states that "a network node <span class="bcp14">MUST NOT</span>
 change Not-ECT or ECT(0) in the IP-ECN field into an L4S identifier."
 Thus, the L queue is not solely an L4S queue; it
        can be considered more generally as a low-latency queue.<a href="#section-2.3-3" class="pilcrow">¶</a></p>
</section>
</div>
<div id="dualq_coupled_structure">
<section id="section-2.4">
        <h3 id="name-overall-dualq-coupled-aqm-s">
<a href="#section-2.4" class="section-number selfRef">2.4. </a><a href="#name-overall-dualq-coupled-aqm-s" class="section-name selfRef">Overall DualQ Coupled AQM Structure</a>
        </h3>
<p id="section-2.4-1"><a href="#dualq_fig_structure" class="auto internal xref">Figure 1</a> shows the overall structure
        that any DualQ Coupled AQM is likely to have. This schematic is
        intended to aid understanding of the current designs of DualQ Coupled
        AQMs. However, it is not intended to preclude other innovative ways of
        satisfying the normative requirements in <a href="#dualq_norm_reqs" class="auto internal xref">Section 2.5</a> that minimally define a DualQ Coupled AQM.
        Also, the schematic only illustrates operation under normally expected
        circumstances; behaviour under overload or with operator-specific
        classifiers is deferred to <a href="#dualq_unexpected" class="auto internal xref">Section 2.5.1.1</a>.<a href="#section-2.4-1" class="pilcrow">¶</a></p>
<p id="section-2.4-2">The classifier on the left separates incoming traffic between the
        two queues (L and C). Each queue has its own AQM that determines the
        likelihood of marking or dropping (p_L and p_C). 
        In <span>[<a href="#PI2" class="cite xref">PI2</a>]</span>, it has been
        proved that it is preferable to control load
        with a linear controller, then square the output before applying it as
        a drop probability to Reno-friendly traffic (because Reno congestion
        control decreases its load proportional to the square root of the
        increase in drop). So, the AQM for Classic traffic needs to be
        implemented in two stages: i) a base stage that outputs an internal
        probability p' (pronounced 'p-prime') and ii) a squaring stage that
        outputs p_C, where<a href="#section-2.4-2" class="pilcrow">¶</a></p>
<div class="sourcecode" id="section-2.4-3">
<pre>
    p_C = (p')^2.                         (2)</pre><a href="#section-2.4-3" class="pilcrow">¶</a>
</div>
<p id="section-2.4-4">Substituting for p_C in equation (1) gives<a href="#section-2.4-4" class="pilcrow">¶</a></p>
<div class="sourcecode" id="section-2.4-5">
<pre>
    p' = p_CL / k.</pre><a href="#section-2.4-5" class="pilcrow">¶</a>
</div>
<p id="section-2.4-6">So the slow-moving input to ECN marking in the L queue (the
        coupled L4S probability) is<a href="#section-2.4-6" class="pilcrow">¶</a></p>
<div class="sourcecode" id="section-2.4-7">
<pre>
    p_CL = k*p'.                          (3)</pre><a href="#section-2.4-7" class="pilcrow">¶</a>
</div>
<p id="section-2.4-8">The actual ECN-marking probability p_L that is applied to the L
        queue needs to track the immediate L queue delay under L-only
        congestion conditions, as well as track p_CL under coupled congestion
        conditions. So the L queue uses a 'Native AQM' that calculates a
        probability p'_L as a function of the instantaneous L queue delay.
        And given the L queue has conditional priority over the C queue,
        whenever the L queue grows, the AQM ought to apply marking probability
        p'_L, but p_L ought not to fall below p_CL. This suggests<a href="#section-2.4-8" class="pilcrow">¶</a></p>
<div class="sourcecode" id="section-2.4-9">
<pre>
    p_L = max(p'_L, p_CL),                (4)</pre><a href="#section-2.4-9" class="pilcrow">¶</a>
</div>
<p id="section-2.4-10">which has also been found to work very well in
        practice.<a href="#section-2.4-10" class="pilcrow">¶</a></p>
<p id="section-2.4-11">The two transformations of p' in equations (2) and (3) implement
        the required coupling given in equation (1) earlier.<a href="#section-2.4-11" class="pilcrow">¶</a></p>
<p id="section-2.4-12">The constant of proportionality or coupling factor, k, in equation
        (1) determines the ratio between the congestion probabilities (loss or
        marking) experienced by L4S and Classic traffic. Thus, k indirectly
        determines the ratio between L4S and Classic flow rates, because flows
        (assuming they are responsive) adjust their rate in response to
        congestion probability. <a href="#dualq_Choosing_k" class="auto internal xref">Appendix C.2</a> gives
        guidance on the choice of k and its effect on relative flow rates.<a href="#section-2.4-12" class="pilcrow">¶</a></p>
<span id="name-dualq-coupled-aqm-schematic"></span><div id="dualq_fig_structure">
<figure id="figure-1">
          <div class="alignLeft art-text artwork" id="section-2.4-13.1">
<pre>
                        _________
                               | |    ,------.
                 L4S (L) queue | |===&gt;| ECN  |
                    ,'| _______|_|    |marker|\
                  &lt;'  |         |     `------'\\
                   //`'         v        ^ p_L \\
                  //       ,-------.     |      \\
                 //        |Native |p'_L |       \\,.
                //         |  L4S  |---&gt;(MAX)    &lt;  |   ___
   ,----------.//          |  AQM  |     ^ p_CL   `\|.'Cond-`.
   |  IP-ECN  |/           `-------'     |          / itional \
==&gt;|Classifier|            ,-------.   (k*p')       [ priority]==&gt;
   |          |\           |  Base |     |          \scheduler/
   `----------'\\          |  AQM  |----&gt;:        ,'|`-.___.-'
                \\         |       |p'   |      &lt;'  |
                 \\        `-------'   (p'^2)    //`'
                  \\            ^        |      //
                   \\,.         |        v p_C //
                   &lt;  | _________     .------.//
                    `\|   |      |    | Drop |/
              Classic (C) |queue |===&gt;|/mark |
                        __|______|    `------'

Legend: ===&gt; traffic flow
        ---&gt; control dependency
</pre>
</div>
<figcaption><a href="#figure-1" class="selfRef">Figure 1</a>:
<a href="#name-dualq-coupled-aqm-schematic" class="selfRef">DualQ Coupled AQM Schematic</a>
          </figcaption></figure>
</div>
<p id="section-2.4-14">After the AQMs have applied their dropping or marking, the
        scheduler forwards their packets to the link. Even though the
        scheduler gives priority to the L queue, it is not as strong as the
        coupling from the C queue. This is because, as the C queue grows, the
        'Base AQM' applies more congestion signals to L traffic (as well as to C).
        As L flows reduce their rate in response, they use less than the
        scheduling share for L traffic. So, because the scheduler is work
        preserving, it schedules any C traffic in the gaps.<a href="#section-2.4-14" class="pilcrow">¶</a></p>
<p id="section-2.4-15">Giving priority to the L queue has the benefit of very low L queue
        delay, because the L queue is kept empty whenever L traffic is
        controlled by the coupling. Also, there only has to be a coupling in
        one direction -- from Classic to L4S. Priority has to be conditional in
        some way to prevent the C queue from being starved in the short term (see
        <a href="#dualq_Overload_Starvation" class="auto internal xref">Section 4.2.2</a>) to give C traffic a means
        to push in, as explained next. With normal responsive L traffic, the
        coupled ECN marking gives C traffic the ability to push back against
        even strict priority, by congestion marking the L traffic to make it
        yield some space. However, if there is just a small finite set of C
        packets (e.g., a DNS request or an initial window of data), some
        Classic AQMs will not induce enough ECN marking in the L queue, no
        matter how long the small set of C packets waits. Then, if the L queue
        happens to remain busy, the C traffic would never get a scheduling
        opportunity from a strict priority scheduler. Ideally, the Classic AQM
        would be designed to increase the coupled marking the longer that C
        packets have been waiting, but this is not always practical -- hence
        the need for L priority to be conditional. Giving a small weight or
        limited waiting time for C traffic improves response times for short
        Classic messages, such as DNS requests, and improves Classic flow
        startup because immediate capacity is available.<a href="#section-2.4-15" class="pilcrow">¶</a></p>
<p id="section-2.4-16">Example DualQ Coupled AQM algorithms called 'DualPI2' and 'Curvy RED'
        are given in Appendices <a href="#dualq_Ex_algo_pi2" class="auto internal xref">A</a> and <a href="#dualq_Ex_algo" class="auto internal xref">B</a>. Either example AQM can be used to couple
        packet marking and dropping across a DualQ:<a href="#section-2.4-16" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="section-2.4-17.1">
            <p id="section-2.4-17.1.1">DualPI2 uses a Proportional Integral (PI) controller as the Base
        AQM. Indeed, this Base AQM with just the squared output and no L4S
        queue can be used as a drop-in replacement for PIE <span>[<a href="#RFC8033" class="cite xref">RFC8033</a>]</span>, in which case it is just called PI2 <span>[<a href="#PI2" class="cite xref">PI2</a>]</span>. 
        PI2 is a principled simplification of PIE that is both
        more responsive and more stable in the face of dynamically varying
        load.<a href="#section-2.4-17.1.1" class="pilcrow">¶</a></p>
</li>
          <li class="normal" id="section-2.4-17.2">
            <p id="section-2.4-17.2.1">Curvy RED is derived from RED <span>[<a href="#RED" class="cite xref">RED</a>]</span>, except
        its configuration parameters are delay-based to make them insensitive
        to link rate, and it requires fewer operations per packet than RED.
        However, DualPI2 is more responsive and stable over a wider range of
        RTTs than Curvy RED. As a consequence, at the time of writing, DualPI2
        has attracted more development and evaluation attention than Curvy
        RED, leaving the Curvy RED design not so fully evaluated.<a href="#section-2.4-17.2.1" class="pilcrow">¶</a></p>
</li>
        </ul>
<p id="section-2.4-18">Both AQMs regulate their queue against targets configured in units
        of time rather than bytes. As already explained, this ensures
        configuration can be invariant for different drain rates. With AQMs in
        a DualQ structure this is particularly important because the drain
        rate of each queue can vary rapidly as flows for the two queues arrive
        and depart, even if the combined link rate is constant.<a href="#section-2.4-18" class="pilcrow">¶</a></p>
<p id="section-2.4-19">It would be possible to control the queues with other alternative
        AQMs, as long as the normative requirements (those expressed in
        capitals) in <a href="#dualq_norm_reqs" class="auto internal xref">Section 2.5</a> are observed.<a href="#section-2.4-19" class="pilcrow">¶</a></p>
<p id="section-2.4-20">The two queues could optionally be part of a larger queuing
        hierarchy, such as the initial example ideas in <span>[<a href="#I-D.briscoe-tsvwg-l4s-diffserv" class="cite xref">L4S-DIFFSERV</a>]</span>.<a href="#section-2.4-20" class="pilcrow">¶</a></p>
</section>
</div>
<div id="dualq_norm_reqs">
<section id="section-2.5">
        <h3 id="name-normative-requirements-for-">
<a href="#section-2.5" class="section-number selfRef">2.5. </a><a href="#name-normative-requirements-for-" class="section-name selfRef">Normative Requirements for a DualQ Coupled AQM</a>
        </h3>
<p id="section-2.5-1">The following requirements are intended to capture only the
        essential aspects of a DualQ Coupled AQM. They are intended to be
        independent of the particular AQMs implemented for each queue but to
        still define the DualQ framework built around those AQMs.<a href="#section-2.5-1" class="pilcrow">¶</a></p>
<div id="dualq_functional_reqs">
<section id="section-2.5.1">
          <h4 id="name-functional-requirements">
<a href="#section-2.5.1" class="section-number selfRef">2.5.1. </a><a href="#name-functional-requirements" class="section-name selfRef">Functional Requirements</a>
          </h4>
<p id="section-2.5.1-1">A DualQ Coupled AQM implementation <span class="bcp14">MUST</span> comply with the
          prerequisite L4S behaviours for any L4S network node (not just a
          DualQ) as specified in <span><a href="https://www.rfc-editor.org/rfc/rfc9331#section-5" class="relref">Section 5</a> of [<a href="#RFC9331" class="cite xref">RFC9331</a>]</span>. These primarily concern
          classification and re-marking as briefly summarized earlier in <a href="#dualq_classification" class="auto internal xref">Section 2.3</a>. But
           <span><a href="https://www.rfc-editor.org/rfc/rfc9331#section-5.5" class="relref">Section 5.5</a> of [<a href="#RFC9331" class="cite xref">RFC9331</a>]</span> also gives guidance on reducing the burstiness of the
          link technology underlying any L4S AQM.<a href="#section-2.5.1-1" class="pilcrow">¶</a></p>
<p id="section-2.5.1-2">A DualQ Coupled AQM implementation <span class="bcp14">MUST</span> utilize two queues,
          each with an AQM algorithm.<a href="#section-2.5.1-2" class="pilcrow">¶</a></p>
<p id="section-2.5.1-3">The AQM algorithm for the low-latency (L) queue <span class="bcp14">MUST</span> be able to
          apply ECN marking to ECN-capable packets.<a href="#section-2.5.1-3" class="pilcrow">¶</a></p>
<p id="section-2.5.1-4">The scheduler draining the two queues <span class="bcp14">MUST</span> give L4S packets
          priority over Classic, although priority <span class="bcp14">MUST</span> be bounded in order
          not to starve Classic traffic (see <a href="#dualq_Overload_Starvation" class="auto internal xref">Section 4.2.2</a>). The scheduler <span class="bcp14">SHOULD</span> be
          work-conserving, or otherwise close to work-conserving. This is
          because Classic traffic needs to be able to efficiently fill any
          space left by L4S traffic even though the scheduler would otherwise
          allocate it to L4S.<a href="#section-2.5.1-4" class="pilcrow">¶</a></p>
<p id="section-2.5.1-5"><span>[<a href="#RFC9331" class="cite xref">RFC9331</a>]</span> defines the meaning of
          an ECN marking on L4S traffic, relative to drop of Classic traffic.
          In order to ensure coexistence of Classic and Scalable L4S traffic,
          it says,
          "the likelihood that the AQM drops a Not-ECT Classic packet
          (p_C) <span class="bcp14">MUST</span> be roughly proportional to the square of the likelihood
          that it would have marked it if it had been an L4S packet (p_L)."
          The term 'likelihood' is used to allow for marking and dropping to
          be either probabilistic or deterministic.<a href="#section-2.5.1-5" class="pilcrow">¶</a></p>
<p id="section-2.5.1-6">For the current specification, this translates into the following
          requirement. A DualQ Coupled AQM <span class="bcp14">MUST</span> apply ECN marking to traffic
          in the L queue that is no lower than that derived from the
          likelihood of drop (or ECN marking) in the Classic queue using equation
          (1).<a href="#section-2.5.1-6" class="pilcrow">¶</a></p>
<p id="section-2.5.1-7">The constant of proportionality, k, in equation (1) determines the
          relative flow rates of Classic and L4S flows when the AQM concerned
          is the bottleneck (all other factors being equal). The L4S ECN
          protocol <span>[<a href="#RFC9331" class="cite xref">RFC9331</a>]</span> says,

          "The
          constant of proportionality (k) does not have to be standardised for
          interoperability, but a value of 2 is <span class="bcp14">RECOMMENDED</span>."<a href="#section-2.5.1-7" class="pilcrow">¶</a></p>
<p id="section-2.5.1-8">Assuming Scalable congestion controls for the Internet will be as
          aggressive as DCTCP, this will ensure their congestion window will
          be roughly the same as that of a Standards Track TCP Reno congestion
          control (Reno) <span>[<a href="#RFC5681" class="cite xref">RFC5681</a>]</span> and other Reno-friendly
          controls, such as TCP CUBIC in its Reno-friendly mode.<a href="#section-2.5.1-8" class="pilcrow">¶</a></p>
<p id="section-2.5.1-9">The choice of k is a matter of operator policy, and operators <span class="bcp14">MAY</span>
          choose a different value using the guidelines in <a href="#dualq_Choosing_k" class="auto internal xref">Appendix C.2</a>.<a href="#section-2.5.1-9" class="pilcrow">¶</a></p>
<p id="section-2.5.1-10">If multiple customers or users share capacity at a bottleneck
          (e.g., in the Internet access link of a campus network), the
          operator's choice of k will determine capacity sharing between the
          flows of different customers. However, on the public Internet,
          access network operators typically isolate customers from each other
          with some form of Layer 2 multiplexing 
(OFDM(A) in DOCSIS 3.1,
 CDMA in 3G, and SC-FDMA in LTE) or Layer 3 scheduling (Weighted Round Robin (WRR) for DSL) rather than
          relying on host congestion controls to share capacity between
          customers <span>[<a href="#RFC0970" class="cite xref">RFC0970</a>]</span>. In such cases, the choice
          of k will solely affect relative flow rates within each customer's
          access capacity, not between customers. Also, k will not affect
          relative flow rates at any times when all flows are Classic or all
          flows are L4S, and it will not affect the relative throughput of
          small flows.<a href="#section-2.5.1-10" class="pilcrow">¶</a></p>
<p id="section-2.5.1-11"></p>
<div id="dualq_unexpected">
<section id="section-2.5.1.1">
            <h5 id="name-requirements-in-unexpected-">
<a href="#section-2.5.1.1" class="section-number selfRef">2.5.1.1. </a><a href="#name-requirements-in-unexpected-" class="section-name selfRef">Requirements in Unexpected Cases</a>
            </h5>
<p id="section-2.5.1.1-1">The flexibility to allow operator-specific classifiers (<a href="#dualq_classification" class="auto internal xref">Section 2.3</a>) leads to the need to specify what
            the AQM in each queue ought to do with packets that do not carry
            the ECN field expected for that queue. It is expected that the AQM
            in each queue will inspect the ECN field to determine what sort of
            congestion notification to signal, then it will decide whether to
            apply congestion notification to this particular packet, as
            follows:<a href="#section-2.5.1.1-1" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="section-2.5.1.1-2.1">
                <p id="section-2.5.1.1-2.1.1">If a packet that does not carry an ECT(1) or a CE codepoint
                is classified into the L queue, then:<a href="#section-2.5.1.1-2.1.1" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="section-2.5.1.1-2.1.2.1">if the packet is ECT(0), the L AQM <span class="bcp14">SHOULD</span> apply
                    CE marking using a probability appropriate to Classic
                    congestion control and appropriate to the target delay in
                    the L queue<a href="#section-2.5.1.1-2.1.2.1" class="pilcrow">¶</a>
</li>
                  <li class="normal" id="section-2.5.1.1-2.1.2.2">
                    <p id="section-2.5.1.1-2.1.2.2.1">if the packet is Not-ECT, the appropriate action
                    depends on whether some other function is protecting the L
                    queue from misbehaving flows (e.g., per-flow queue
                    protection <span>[<a href="#I-D.briscoe-docsis-q-protection" class="cite xref">DOCSIS-Q-PROT</a>]</span> or latency
                    policing):<a href="#section-2.5.1.1-2.1.2.2.1" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="section-2.5.1.1-2.1.2.2.2.1">if separate queue protection is provided, the L AQM
                        <span class="bcp14">SHOULD</span> ignore the packet and forward it unchanged,
                        meaning it should not calculate whether to apply
                        congestion notification, and it should neither drop nor
                        CE mark the packet (for instance, the operator might
                        classify EF traffic that is unresponsive to drop into
                        the L queue, alongside responsive L4S-ECN traffic)<a href="#section-2.5.1.1-2.1.2.2.2.1" class="pilcrow">¶</a>
</li>
                      <li class="normal" id="section-2.5.1.1-2.1.2.2.2.2">if separate queue protection is not provided, the L
                        AQM <span class="bcp14">SHOULD</span> apply drop using a drop probability
                        appropriate to Classic congestion control and
                        to the target delay in the L queue<a href="#section-2.5.1.1-2.1.2.2.2.2" class="pilcrow">¶</a>
</li>
                    </ul>
</li>
                </ul>
</li>
              <li class="normal" id="section-2.5.1.1-2.2">
                <p id="section-2.5.1.1-2.2.1">If a packet that carries an ECT(1) codepoint is classified
                into the C queue:<a href="#section-2.5.1.1-2.2.1" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="section-2.5.1.1-2.2.2.1">the C AQM <span class="bcp14">SHOULD</span> apply CE marking using the Coupled AQM
                    probability p_CL (= k*p').<a href="#section-2.5.1.1-2.2.2.1" class="pilcrow">¶</a>
</li>
                </ul>
</li>
            </ul>
<p id="section-2.5.1.1-3">The above requirements are worded as "<span class="bcp14">SHOULD</span>"s, because
            operator-specific classifiers are for flexibility, by definition.
            Therefore, alternative actions might be appropriate in the
            operator's specific circumstances. 
            An example would be where the
            operator knows that certain legacy traffic set to one
            codepoint actually has a congestion response associated with
            another codepoint.<a href="#section-2.5.1.1-3" class="pilcrow">¶</a></p>
<p id="section-2.5.1.1-4">If the DualQ Coupled AQM has detected overload, it <span class="bcp14">MUST</span>
            introduce Classic drop to both types of ECN-capable traffic until
            the overload episode has subsided. Introducing drop if ECN marking
            is persistently high is recommended in 
            
            Section <a href="https://www.rfc-editor.org/rfc/rfc3168#section-7" class="relref">7</a> of the ECN spec <span>[<a href="#RFC3168" class="cite xref">RFC3168</a>]</span>
            and in Section <a href="https://www.rfc-editor.org/rfc/rfc7567#section-4.2.1" class="relref">4.2.1</a> of
            the AQM Recommendations <span>[<a href="#RFC7567" class="cite xref">RFC7567</a>]</span>.<a href="#section-2.5.1.1-4" class="pilcrow">¶</a></p>
</section>
</div>
</section>
</div>
<section id="section-2.5.2">
          <h4 id="name-management-requirements">
<a href="#section-2.5.2" class="section-number selfRef">2.5.2. </a><a href="#name-management-requirements" class="section-name selfRef">Management Requirements</a>
          </h4>
<p id="section-2.5.2-1"></p>
<div id="dualq_config">
<section id="section-2.5.2.1">
            <h5 id="name-configuration">
<a href="#section-2.5.2.1" class="section-number selfRef">2.5.2.1. </a><a href="#name-configuration" class="section-name selfRef">Configuration</a>
            </h5>
<p id="section-2.5.2.1-1">By default, a DualQ Coupled AQM <span class="bcp14">SHOULD NOT</span> need any
            configuration for use at a bottleneck on the public
            Internet <span>[<a href="#RFC7567" class="cite xref">RFC7567</a>]</span>. The following parameters
            <span class="bcp14">MAY</span> be operator-configurable, e.g., to tune for non-Internet
            settings:<a href="#section-2.5.2.1-1" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="section-2.5.2.1-2.1">Optional packet classifier(s) to use in addition to the ECN
                field (see <a href="#dualq_classification" class="auto internal xref">Section 2.3</a>).<a href="#section-2.5.2.1-2.1" class="pilcrow">¶</a>
</li>
              <li class="normal" id="section-2.5.2.1-2.2">
                <p id="section-2.5.2.1-2.2.1">Expected typical RTT, which can be used to determine the
                queuing delay of the Classic AQM at its operating point, in
                order to prevent typical lone flows from underutilizing
                capacity. For example:<a href="#section-2.5.2.1-2.2.1" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="section-2.5.2.1-2.2.2.1">for the PI2 algorithm (<a href="#dualq_Ex_algo_pi2" class="auto internal xref">Appendix A</a>), the queuing delay target is
                    dependent on the typical RTT.<a href="#section-2.5.2.1-2.2.2.1" class="pilcrow">¶</a>
</li>
                  <li class="normal" id="section-2.5.2.1-2.2.2.2">for the Curvy RED algorithm (<a href="#dualq_Ex_algo" class="auto internal xref">Appendix B</a>), the queuing delay at the desired
                    operating point of the curvy ramp is configured to
                    encompass a typical RTT.<a href="#section-2.5.2.1-2.2.2.2" class="pilcrow">¶</a>
</li>
                  <li class="normal" id="section-2.5.2.1-2.2.2.3">if another Classic AQM was used, it would be likely to
                    need an operating point for the queue based on the typical
                    RTT, and if so, it <span class="bcp14">SHOULD</span> be expressed in units of
                    time.<a href="#section-2.5.2.1-2.2.2.3" class="pilcrow">¶</a>
</li>
                </ul>
<p id="section-2.5.2.1-2.2.3">An operating point that is manually calculated might
                be directly configurable instead, e.g., for links with
                large numbers of flows where underutilization by a single
                flow would be unlikely.<a href="#section-2.5.2.1-2.2.3" class="pilcrow">¶</a></p>
</li>
              <li class="normal" id="section-2.5.2.1-2.3">
                <p id="section-2.5.2.1-2.3.1">Expected maximum RTT, which can be used to set the
                stability parameter(s) of the Classic AQM. For example:<a href="#section-2.5.2.1-2.3.1" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="section-2.5.2.1-2.3.2.1">for the PI2 algorithm (<a href="#dualq_Ex_algo_pi2" class="auto internal xref">Appendix A</a>), the gain parameters of the
                    PI algorithm depend on the maximum RTT.<a href="#section-2.5.2.1-2.3.2.1" class="pilcrow">¶</a>
</li>
                  <li class="normal" id="section-2.5.2.1-2.3.2.2">for the Curvy RED algorithm (<a href="#dualq_Ex_algo" class="auto internal xref">Appendix B</a>), the smoothing parameter is
                    chosen to filter out transients in the queue within a
                    maximum RTT.<a href="#section-2.5.2.1-2.3.2.2" class="pilcrow">¶</a>
</li>
                </ul>
<p id="section-2.5.2.1-2.3.3">Any stability parameter that is manually calculated
                assuming a maximum RTT might be directly configurable
                instead.<a href="#section-2.5.2.1-2.3.3" class="pilcrow">¶</a></p>
</li>
              <li class="normal" id="section-2.5.2.1-2.4">Coupling factor, k (see <a href="#dualq_Choosing_k" class="auto internal xref">Appendix C.2</a>).<a href="#section-2.5.2.1-2.4" class="pilcrow">¶</a>
</li>
              <li class="normal" id="section-2.5.2.1-2.5">
                <p id="section-2.5.2.1-2.5.1">A limit to the conditional priority of L4S. This is
                scheduler-dependent, but it <span class="bcp14">SHOULD</span> be expressed as a relation
                between the max delay of a C packet and an L packet. For
                example:<a href="#section-2.5.2.1-2.5.1" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="section-2.5.2.1-2.5.2.1">for a WRR scheduler, a weight ratio between L and C of
                    w:1 means that the maximum delay of a C packet is w times
                    that of an L packet.<a href="#section-2.5.2.1-2.5.2.1" class="pilcrow">¶</a>
</li>
                  <li class="normal" id="section-2.5.2.1-2.5.2.2">for a time-shifted FIFO (TS-FIFO) scheduler (see <a href="#dualq_Overload_Starvation" class="auto internal xref">Section 4.2.2</a>), a time-shift of
                    tshift means that the maximum delay to a C packet is
                    tshift greater than that of an L packet. tshift could be
                    expressed as a multiple of the typical RTT rather than as
                    an absolute delay.<a href="#section-2.5.2.1-2.5.2.2" class="pilcrow">¶</a>
</li>
                </ul>
</li>
              <li class="normal" id="section-2.5.2.1-2.6">The maximum Classic ECN-marking probability, p_Cmax, before
                introducing drop.<a href="#section-2.5.2.1-2.6" class="pilcrow">¶</a>
</li>
            </ul>
</section>
</div>
<section id="section-2.5.2.2">
            <h5 id="name-monitoring">
<a href="#section-2.5.2.2" class="section-number selfRef">2.5.2.2. </a><a href="#name-monitoring" class="section-name selfRef">Monitoring</a>
            </h5>
<p id="section-2.5.2.2-1">An experimental DualQ Coupled AQM <span class="bcp14">SHOULD</span> allow the operator to
            monitor each of the following operational statistics on demand,
            per queue and per configurable sample interval, for performance
            monitoring and perhaps also for accounting in some cases:<a href="#section-2.5.2.2-1" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="section-2.5.2.2-2.1">bits forwarded, from which utilization can be
                calculated;<a href="#section-2.5.2.2-2.1" class="pilcrow">¶</a>
</li>
              <li class="normal" id="section-2.5.2.2-2.2">total packets in the three categories: arrived, presented
                to the AQM, and forwarded. The difference between the first
                two will measure any non-AQM tail discard. The difference
                between the last two will measure proactive AQM discard;<a href="#section-2.5.2.2-2.2" class="pilcrow">¶</a>
</li>
              <li class="normal" id="section-2.5.2.2-2.3">ECN packets marked, non-ECN packets dropped, and ECN packets
                dropped, which can be combined with the three total packet
                counts above to calculate marking and dropping
                probabilities; and<a href="#section-2.5.2.2-2.3" class="pilcrow">¶</a>
</li>
              <li class="normal" id="section-2.5.2.2-2.4">
                <p id="section-2.5.2.2-2.4.1">queue delay (not including serialization delay of the head
                packet or medium acquisition delay) -- see further notes
                below.<a href="#section-2.5.2.2-2.4.1" class="pilcrow">¶</a></p>
<p id="section-2.5.2.2-2.4.2">Unlike the other statistics,
                queue delay cannot be captured in a simple accumulating
                counter. Therefore, the type of queue delay statistics
                produced (mean, percentiles, etc.) will depend on
                implementation constraints. To facilitate comparative
                evaluation of different implementations and approaches, an
                implementation <span class="bcp14">SHOULD</span> allow mean and 99th percentile queue
                delay to be derived (per queue per sample interval). A
                relatively simple way to do this would be to store a
                coarse-grained histogram of queue delay. This could be done
                with a small number of bins with configurable edges that
                represent contiguous ranges of queue delay. Then, over a
                sample interval, each bin would accumulate a count of the
                number of packets that had fallen within each range. The
                maximum queue delay per queue per interval <span class="bcp14">MAY</span> also be
                recorded, to aid diagnosis of faults and anomalous events.<a href="#section-2.5.2.2-2.4.2" class="pilcrow">¶</a></p>
</li>
            </ul>
</section>
<section id="section-2.5.2.3">
            <h5 id="name-anomaly-detection">
<a href="#section-2.5.2.3" class="section-number selfRef">2.5.2.3. </a><a href="#name-anomaly-detection" class="section-name selfRef">Anomaly Detection</a>
            </h5>
<p id="section-2.5.2.3-1">An experimental DualQ Coupled AQM <span class="bcp14">SHOULD</span> asynchronously report
            the following data about anomalous conditions:<a href="#section-2.5.2.3-1" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="section-2.5.2.3-2.1">
                <p id="section-2.5.2.3-2.1.1">Start time and duration of overload state.<a href="#section-2.5.2.3-2.1.1" class="pilcrow">¶</a></p>
<p id="section-2.5.2.3-2.1.2">A hysteresis mechanism <span class="bcp14">SHOULD</span> be used to
                prevent flapping in and out of overload causing an event
                storm. For instance, exiting from overload state could trigger
                one report but also latch a timer. Then, during that time, if
                the AQM enters and exits overload state any number of times,
                the duration in overload state is accumulated, but no new
                report is generated until the first time the AQM is out of
                overload once the timer has expired.<a href="#section-2.5.2.3-2.1.2" class="pilcrow">¶</a></p>
</li>
            </ul>
</section>
<section id="section-2.5.2.4">
            <h5 id="name-deployment-coexistence-and-">
<a href="#section-2.5.2.4" class="section-number selfRef">2.5.2.4. </a><a href="#name-deployment-coexistence-and-" class="section-name selfRef">Deployment, Coexistence, and Scaling</a>
            </h5>
<p id="section-2.5.2.4-1"><span>[<a href="#RFC5706" class="cite xref">RFC5706</a>]</span> suggests that deployment, coexistence,
            and scaling should also be covered as management requirements. The
            raison d'être of the DualQ Coupled AQM is to enable
            deployment and coexistence of Scalable congestion controls (as
            incremental replacements for today's Reno-friendly controls that
            do not scale with bandwidth-delay product). Therefore, there is no
            need to repeat these motivating issues here given they are already
            explained in the Introduction and detailed in the L4S
            architecture <span>[<a href="#RFC9330" class="cite xref">RFC9330</a>]</span>.<a href="#section-2.5.2.4-1" class="pilcrow">¶</a></p>
<p id="section-2.5.2.4-2">The descriptions of specific DualQ Coupled AQM algorithms in
            the appendices cover scaling of their configuration parameters,
            e.g., with respect to RTT and sampling frequency.<a href="#section-2.5.2.4-2" class="pilcrow">¶</a></p>
</section>
</section>
</section>
</div>
</section>
</div>
<div id="dualq_IANA">
<section id="section-3">
      <h2 id="name-iana-considerations">
<a href="#section-3" class="section-number selfRef">3. </a><a href="#name-iana-considerations" class="section-name selfRef">IANA Considerations</a>
      </h2>
<p id="section-3-1">This document has no IANA actions.<a href="#section-3-1" class="pilcrow">¶</a></p>
</section>
</div>
<div id="dualq_Security_Considerations">
<section id="section-4">
      <h2 id="name-security-considerations">
<a href="#section-4" class="section-number selfRef">4. </a><a href="#name-security-considerations" class="section-name selfRef">Security Considerations</a>
      </h2>
<p id="section-4-1"></p>
<section id="section-4.1">
        <h3 id="name-low-delay-without-requiring">
<a href="#section-4.1" class="section-number selfRef">4.1. </a><a href="#name-low-delay-without-requiring" class="section-name selfRef">Low Delay without Requiring Per-flow Processing</a>
        </h3>
<p id="section-4.1-1">The L4S architecture <span>[<a href="#RFC9330" class="cite xref">RFC9330</a>]</span>
        compares the DualQ and FQ approaches to L4S. The
        privacy considerations section in that document motivates the DualQ on
        the grounds that users who want to encrypt application flow
        identifiers, e.g., in IPsec or other encrypted VPN tunnels, don't
        have to sacrifice low delay (<span>[<a href="#RFC8404" class="cite xref">RFC8404</a>]</span> encourages
        avoidance of such privacy compromises).<a href="#section-4.1-1" class="pilcrow">¶</a></p>
<p id="section-4.1-2">The security considerations section of the L4S architecture <span>[<a href="#RFC9330" class="cite xref">RFC9330</a>]</span> also
        includes subsections on policing of relative flow rates (Section <a href="https://www.rfc-editor.org/rfc/rfc9330#section-8.1" class="relref">8.1</a>) and on
        policing of flows that cause excessive queuing delay (Section <a href="https://www.rfc-editor.org/rfc/rfc9330#section-8.2" class="relref">8.2</a>). It explains
        that the interests of users do not collide in the same way for delay
        as they do for bandwidth. For someone to get more of the bandwidth of
        a shared link, someone else necessarily gets less (a 'zero-sum game'),
        whereas queuing delay can be reduced for everyone, without any need
        for someone else to lose out. It also explains that, on the current
        Internet, scheduling usually enforces separation of bandwidth between
        'sites' (e.g., households, businesses, or mobile users), but it is not
        common to need to schedule or police the bandwidth used by individual
        application flows.<a href="#section-4.1-2" class="pilcrow">¶</a></p>
<p id="section-4.1-3">By the above arguments, per-flow rate policing might not be
        necessary, and in trusted environments (e.g., private data centres),
        it is certainly unlikely to be needed. Therefore, because it is hard
        to avoid complexity and unintended side effects with per-flow rate
        policing, it needs to be separable from a basic AQM, as an option,
        under policy control. On this basis, the DualQ Coupled AQM provides
        low delay without prejudging the question of per-flow rate
        policing.<a href="#section-4.1-3" class="pilcrow">¶</a></p>
<p id="section-4.1-4">Nonetheless, the interests of users or flows might conflict,
        e.g., in case of accident or malice. Then per-flow rate control
        could be necessary. If per-flow rate control is needed, it can be provided
        as a modular addition to a DualQ. And similarly, if protection against
        excessive queue delay is needed, a per-flow queue protection option
        can be added to a DualQ (e.g., <span>[<a href="#I-D.briscoe-docsis-q-protection" class="cite xref">DOCSIS-Q-PROT</a>]</span>).<a href="#section-4.1-4" class="pilcrow">¶</a></p>
</section>
<div id="dualq_Overload">
<section id="section-4.2">
        <h3 id="name-handling-unresponsive-flows">
<a href="#section-4.2" class="section-number selfRef">4.2. </a><a href="#name-handling-unresponsive-flows" class="section-name selfRef">Handling Unresponsive Flows and Overload</a>
        </h3>
<p id="section-4.2-1">In the absence of any per-flow control, it is important that the
        basic DualQ Coupled AQM gives unresponsive flows no more throughput
        advantage than a single-queue AQM would, and that it at least handles
        overload situations. Overload means that incoming load significantly
        or persistently exceeds output capacity, but it is not intended to be
        a precise term -- significant and persistent are matters of
        degree.<a href="#section-4.2-1" class="pilcrow">¶</a></p>
<p id="section-4.2-2">A trade-off needs to be made between complexity and the risk of
        either traffic class harming the other. In overloaded conditions, the
        higher priority L4S service will have to sacrifice some aspect of its
        performance. Depending on the degree of overload, alternative
        solutions may relax a different factor: for example, throughput, delay,
        or drop. These choices need to be made either by the developer or by
        operator policy, rather than by the IETF. 
        Subsequent subsections
        discuss handling different degrees of overload:<a href="#section-4.2-2" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="section-4.2-3.1">
            <p id="section-4.2-3.1.1">Unresponsive flows (L and/or C) but not overloaded,
            i.e., the sum of unresponsive load before adding any
            responsive traffic is below capacity.<a href="#section-4.2-3.1.1" class="pilcrow">¶</a></p>
<ul class="normal ulEmpty">
<li class="normal ulEmpty" id="section-4.2-3.1.2.1">This case is handled by the regular Coupled DualQ (<a href="#dualq_coupled" class="auto internal xref">Section 2.1</a>) but not discussed there. So below,
                <a href="#dualq_unresponsive_wo_overload" class="auto internal xref">Section 4.2.1</a> explains the
                design goal and how it is achieved in practice.<a href="#section-4.2-3.1.2.1" class="pilcrow">¶</a>
</li>
            </ul>
</li>
          <li class="normal" id="section-4.2-3.2">
            <p id="section-4.2-3.2.1">Unresponsive flows (L and/or C) causing persistent overload,
            i.e., the sum of unresponsive load even before adding any
            responsive traffic persistently exceeds capacity.<a href="#section-4.2-3.2.1" class="pilcrow">¶</a></p>
<ul class="normal ulEmpty">
<li class="normal ulEmpty" id="section-4.2-3.2.2.1">This case is not covered by the regular Coupled DualQ
                mechanism (<a href="#dualq_coupled" class="auto internal xref">Section 2.1</a>), but the last paragraph
                in <a href="#dualq_unexpected" class="auto internal xref">Section 2.5.1.1</a> sets out a requirement to
                handle the case where ECN-capable traffic could starve
                non-ECN-capable traffic. <a href="#dualq_Overload_Saturation" class="auto internal xref">Section 4.2.3</a> below discusses the
                general options and gives specific examples.<a href="#section-4.2-3.2.2.1" class="pilcrow">¶</a>
</li>
            </ul>
</li>
          <li class="normal" id="section-4.2-3.3">
            <p id="section-4.2-3.3.1">Short-term overload that lies between the 'not overloaded' and
            'persistently overloaded' cases.<a href="#section-4.2-3.3.1" class="pilcrow">¶</a></p>
<ul class="normal ulEmpty">
<li class="normal ulEmpty" id="section-4.2-3.3.2.1">For the period before overload is deemed persistent, <a href="#dualq_Overload_Starvation" class="auto internal xref">Section 4.2.2</a> discusses options for
                more immediate mechanisms at the scheduler timescale. These
                prevent short-term starvation of the C queue by making the
                priority of the L queue conditional, as required in <a href="#dualq_functional_reqs" class="auto internal xref">Section 2.5.1</a>.<a href="#section-4.2-3.3.2.1" class="pilcrow">¶</a>
</li>
            </ul>
</li>
        </ul>
<div id="dualq_unresponsive_wo_overload">
<section id="section-4.2.1">
          <h4 id="name-unresponsive-traffic-withou">
<a href="#section-4.2.1" class="section-number selfRef">4.2.1. </a><a href="#name-unresponsive-traffic-withou" class="section-name selfRef">Unresponsive Traffic without Overload</a>
          </h4>
<p id="section-4.2.1-1">When one or more L flows and/or C flows are unresponsive, but
          their total load is within the link capacity so that they do not
          saturate the coupled marking (below 100%), the goal of a DualQ AQM
          is to behave no worse than a single-queue AQM.<a href="#section-4.2.1-1" class="pilcrow">¶</a></p>
<p id="section-4.2.1-2">Tests have shown that this is indeed the case with no additional
          mechanism beyond the regular Coupled DualQ of <a href="#dualq_coupled" class="auto internal xref">Section 2.1</a> (see the results of 'overload experiments'
          in <span>[<a href="#L4Seval22" class="cite xref">L4Seval22</a>]</span>). Perhaps counterintuitively, whether
          the unresponsive flow classifies itself into the L or the C queue,
          the DualQ system behaves as if it has subtracted from the overall
          link capacity. Then, the coupling shares out the remaining capacity
          between any competing responsive flows (in either queue). See also
          <a href="#dualq_Overload_Starvation" class="auto internal xref">Section 4.2.2</a>, which discusses
          scheduler-specific details.<a href="#section-4.2.1-2" class="pilcrow">¶</a></p>
</section>
</div>
<div id="dualq_Overload_Starvation">
<section id="section-4.2.2">
          <h4 id="name-avoiding-short-term-classic">
<a href="#section-4.2.2" class="section-number selfRef">4.2.2. </a><a href="#name-avoiding-short-term-classic" class="section-name selfRef">Avoiding Short-Term Classic Starvation: Sacrifice L4S Throughput or Delay?</a>
          </h4>
<p id="section-4.2.2-1">Priority of L4S is required to be conditional (see Sections <a href="#dualq_coupled_structure" class="auto internal xref">2.4</a> and <a href="#dualq_functional_reqs" class="auto internal xref">2.5.1</a>) to avoid short-term starvation of
          Classic. Otherwise, as explained in <a href="#dualq_coupled_structure" class="auto internal xref">Section 2.4</a>, even a lone responsive L4S flow
          could temporarily block a small finite set of C packets
          (e.g., an initial window or DNS request). The blockage would
          only be brief, but it could be longer for certain AQM
          implementations that can only increase the congestion signal coupled
          from the C queue when C packets are actually being dequeued. There
          is then the question of whether to sacrifice L4S throughput or L4S
          delay (or some other policy) to make the priority conditional:<a href="#section-4.2.2-1" class="pilcrow">¶</a></p>
<span class="break"></span><dl class="dlNewline" id="section-4.2.2-2">
            <dt id="section-4.2.2-2.1">Sacrifice L4S throughput: </dt>
            <dd id="section-4.2.2-2.2" style="margin-left: 1.5em">
<div id="dualq_Minimum_Service">
              <p id="section-4.2.2-2.2.1">By using WRR as the conditional priority scheduler, the L4S
              service can sacrifice some throughput during overload. This can
              be thought of as guaranteeing either a minimum throughput
              service for Classic traffic or a maximum delay
              for a packet at the head of the Classic queue.<a href="#section-4.2.2-2.2.1" class="pilcrow">¶</a></p>
<aside id="section-4.2.2-2.2.2">
                <p id="section-4.2.2-2.2.2.1">Cautionary note: a WRR scheduler can only
              guarantee Classic throughput if Classic sources are sending
              enough to use it -- congestion signals can undermine
              scheduling because they determine how much responsive traffic of
              each class arrives for scheduling in the first place. This is
              why scheduling is only relied on to handle short-term
              starvation, until congestion signals build up and the sources
              react. Even during long-term overload (discussed more fully in
              <a href="#dualq_Overload_Saturation" class="auto internal xref">Section 4.2.3</a>), it's pragmatic to
              discard packets from both queues, which again thins the traffic
              before it reaches the scheduler. This is because a scheduler
              cannot be relied on to handle long-term overload since the right
              scheduler weight cannot be known for every scenario.<a href="#section-4.2.2-2.2.2.1" class="pilcrow">¶</a></p>
</aside>
<p id="section-4.2.2-2.2.3">The scheduling weight of the Classic queue
              should be small (e.g., 1/16). In most traffic scenarios, the
              scheduler will not interfere and it will not need to, because
              the coupling mechanism and the end systems will determine the
              share of capacity across both queues as if it were a single
              pool. However, if L4S traffic is over-aggressive or
              unresponsive, the scheduler weight for Classic traffic will at
              least be large enough to ensure it does not starve in the
              short term.<a href="#section-4.2.2-2.2.3" class="pilcrow">¶</a></p>
<p id="section-4.2.2-2.2.4">Although WRR scheduling is
              only expected to address short-term overload, there are
              (somewhat rare) cases when WRR has an effect on capacity shares
              over longer timescales. But its effect is minor, and it
              certainly does no harm. Specifically, in cases where the ratio
              of L4S to Classic flows (e.g., 19:1) is greater than the
              ratio of their scheduler weights (e.g., 15:1), the L4S flows
              will get less than an equal share of the capacity, but only
              slightly. For instance, with the example numbers given, each L4S
              flow will get (15/16)/19 = 4.9% when ideally each would get
              1/20 = 5%. In the rather specific case of an unresponsive flow
              taking up just less than the capacity set aside for L4S
              (e.g., 14/16 in the above example), using WRR could
              significantly reduce the capacity left for any responsive L4S
              flows.<a href="#section-4.2.2-2.2.4" class="pilcrow">¶</a></p>
<p id="section-4.2.2-2.2.5">The scheduling weight of the
              Classic queue should not be too small, otherwise a C packet at
              the head of the queue could be excessively delayed by a
              continually busy L queue. For instance, if the Classic weight is
              1/16, the maximum that a Classic packet at the head of the queue
              can be delayed by L traffic is the serialization delay of 15
              MTU-sized packets.<a href="#section-4.2.2-2.2.5" class="pilcrow">¶</a></p>
</div>
            </dd>
<dd class="break"></dd>
<dt id="section-4.2.2-2.3">Sacrifice L4S delay:</dt>
            <dd id="section-4.2.2-2.4" style="margin-left: 1.5em">
<div id="dualq_Delay_Overload">
              <p id="section-4.2.2-2.4.1">The operator could choose to
              control overload of the Classic queue by allowing some delay to
              'leak' across to the L4S queue. The scheduler can be made to
              behave like a single FIFO queue with
              different service times by implementing a very simple
              conditional priority scheduler that could be called a
              "time-shifted FIFO" (TS-FIFO) (see the Modifier Earliest Deadline First
              (MEDF) scheduler <span>[<a href="#MEDF" class="cite xref">MEDF</a>]</span>). This scheduler
              adds tshift to the queue delay of the next L4S packet, before
              comparing it with the queue delay of the next Classic packet,
              then it selects the packet with the greater adjusted queue
              delay.<a href="#section-4.2.2-2.4.1" class="pilcrow">¶</a></p>
<p id="section-4.2.2-2.4.2">Under regular conditions, the
              TS-FIFO scheduler behaves just like a strict priority
              scheduler. But under moderate or high overload, it prevents
              starvation of the Classic queue, because the time-shift (tshift)
              defines the maximum extra queuing delay of Classic packets
              relative to L4S. 
              This would control milder overload of
              responsive traffic by introducing delay to defer invoking the
              overload mechanisms in <a href="#dualq_Overload_Saturation" class="auto internal xref">Section 4.2.3</a>, particularly when close to
              the maximum congestion signal.<a href="#section-4.2.2-2.4.2" class="pilcrow">¶</a></p>
</div>
          </dd>
<dd class="break"></dd>
</dl>
<p id="section-4.2.2-3">The example implementations in Appendices <a href="#dualq_Ex_algo_pi2" class="auto internal xref">A</a>
          and <a href="#dualq_Ex_algo" class="auto internal xref">B</a> could both be implemented with
          either policy.<a href="#section-4.2.2-3" class="pilcrow">¶</a></p>
</section>
</div>
<div id="dualq_Overload_Saturation">
<section id="section-4.2.3">
          <h4 id="name-l4s-ecn-saturation-introduc">
<a href="#section-4.2.3" class="section-number selfRef">4.2.3. </a><a href="#name-l4s-ecn-saturation-introduc" class="section-name selfRef">L4S ECN Saturation: Introduce Drop or Delay?</a>
          </h4>
<p id="section-4.2.3-1">This section concerns persistent overload caused by unresponsive
          L and/or C flows. To keep the throughput of both L4S and Classic
          flows roughly equal over the full load range, a different control
          strategy needs to be defined above the point where the L4S AQM
          persistently saturates to an ECN marking probability of 100%, leaving
          no room to push back the load any harder. L4S ECN marking will
          saturate first (assuming the coupling factor k&gt;1), even though
          saturation could be caused by the sum of unresponsive traffic in
          either or both queues exceeding the link capacity.<a href="#section-4.2.3-1" class="pilcrow">¶</a></p>
<p id="section-4.2.3-2">The term 'unresponsive' includes cases where a flow becomes
          temporarily unresponsive, for instance, a real-time flow that takes
          a while to adapt its rate in response to congestion, or a standard
          Reno flow that is normally responsive, but above a certain
          congestion level it will not be able to reduce its congestion window
          below the allowed minimum of 2 segments <span>[<a href="#RFC5681" class="cite xref">RFC5681</a>]</span>, effectively becoming unresponsive. (Note that
          L4S traffic ought to remain responsive below a window of 2 segments.
          See the L4S requirements <span>[<a href="#RFC9331" class="cite xref">RFC9331</a>]</span>.)<a href="#section-4.2.3-2" class="pilcrow">¶</a></p>
<p id="section-4.2.3-3">Saturation raises the question of whether to relieve congestion
          by introducing some drop into the L4S queue or by allowing delay to
          grow in both queues (which could eventually lead to drop due to
          buffer exhaustion anyway):<a href="#section-4.2.3-3" class="pilcrow">¶</a></p>
<span class="break"></span><dl class="dlNewline" id="section-4.2.3-4">
            <dt id="section-4.2.3-4.1">Drop on Saturation:</dt>
            <dd style="margin-left: 1.5em" id="section-4.2.3-4.2">Persistent saturation can be
              defined by a maximum threshold for coupled L4S ECN marking
              (assuming k&gt;1) before saturation starts to make the flow
              rates of the different traffic types diverge. Above that, the
              drop probability of Classic traffic is applied to all packets of
              all traffic types. Then experiments have shown that queuing
              delay can be kept at the target in any overload situation,
              including with unresponsive traffic, and no further measures are
              required (<a href="#dualq_overload_unresp_ect" class="auto internal xref">Section 4.2.3.1</a>).<a href="#section-4.2.3-4.2" class="pilcrow">¶</a>
</dd>
            <dd class="break"></dd>
<dt id="section-4.2.3-4.3">Delay on Saturation:</dt>
            <dd style="margin-left: 1.5em" id="section-4.2.3-4.4">When L4S marking saturates,
              instead of introducing L4S drop, the drop and marking
              probabilities of both queues could be capped. Beyond that, delay
              will grow either solely in the queue with unresponsive traffic
              (if WRR is used) or in both queues (if TS-FIFO is
              used). In either case, the higher delay ought to control
              temporary high congestion. If the overload is more persistent,
              eventually the combined DualQ will overflow and tail drop will
              control congestion.<a href="#section-4.2.3-4.4" class="pilcrow">¶</a>
</dd>
          <dd class="break"></dd>
</dl>
<p id="section-4.2.3-5">The example implementation in <a href="#dualq_Ex_algo_pi2" class="auto internal xref">Appendix A</a>
          solely applies the "drop on saturation" policy. The DOCSIS
          specification of a DualQ Coupled AQM <span>[<a href="#DOCSIS3.1" class="cite xref">DOCSIS3.1</a>]</span>
          also implements the 'drop on saturation' policy with a very shallow
          L buffer. However, the addition of DOCSIS per-flow Queue Protection
          <span>[<a href="#I-D.briscoe-docsis-q-protection" class="cite xref">DOCSIS-Q-PROT</a>]</span> turns this into
          'delay on saturation' by redirecting some packets of the flow or flows
          that are most responsible for L queue overload into the C queue, which has a
          higher delay target. If overload continues, this again becomes 'drop
          on saturation' as the level of drop in the C queue rises to maintain
          the target delay of the C queue.<a href="#section-4.2.3-5" class="pilcrow">¶</a></p>
<div id="dualq_overload_unresp_ect">
<section id="section-4.2.3.1">
            <h5 id="name-protecting-against-overload">
<a href="#section-4.2.3.1" class="section-number selfRef">4.2.3.1. </a><a href="#name-protecting-against-overload" class="section-name selfRef">Protecting against Overload by Unresponsive ECN-Capable Traffic</a>
            </h5>
<p id="section-4.2.3.1-1">Without a specific overload mechanism, unresponsive traffic
            would have a greater advantage if it were also ECN-capable. The
            advantage is undetectable at normal low levels of marking.
            However, it would become significant with the higher levels of
            marking typical during overload, when it could evade a significant
            degree of drop. This is an issue whether the ECN-capable traffic
            is L4S or Classic.<a href="#section-4.2.3.1-1" class="pilcrow">¶</a></p>
<p id="section-4.2.3.1-2">This raises the question of whether and when to introduce drop
            of ECN-capable traffic, as required by both Section <a href="https://www.rfc-editor.org/rfc/rfc3168#section-7" class="relref">7</a> of the ECN spec <span>[<a href="#RFC3168" class="cite xref">RFC3168</a>]</span> and Section <a href="https://www.rfc-editor.org/rfc/rfc7567#section-4.2.1" class="relref">4.2.1</a> of the AQM
            recommendations <span>[<a href="#RFC7567" class="cite xref">RFC7567</a>]</span>.<a href="#section-4.2.3.1-2" class="pilcrow">¶</a></p>
<p id="section-4.2.3.1-3">As an example, experiments with the DualPI2 AQM (<a href="#dualq_Ex_algo_pi2" class="auto internal xref">Appendix A</a>) have shown that introducing 'drop on
            saturation' at 100% coupled L4S marking addresses this problem
            with unresponsive ECN, and it also addresses the saturation
            problem. At saturation, DualPI2 switches into overload mode, where
            the Base AQM is driven by the max delay of both queues, and it
            introduces probabilistic drop to both queues equally. It leaves
            only a small range of congestion levels just below saturation
            where unresponsive traffic gains any advantage from using the ECN
            capability (relative to being unresponsive without ECN), and the
            advantage is hardly detectable (see <span>[<a href="#DualQ-Test" class="cite xref">DualQ-Test</a>]</span>
            and section IV-G of <span>[<a href="#L4Seval22" class="cite xref">L4Seval22</a>]</span>). Also, overload with
            an unresponsive ECT(1) flow gets no more bandwidth advantage than
            with ECT(0).<a href="#section-4.2.3.1-3" class="pilcrow">¶</a></p>
</section>
</div>
</section>
</div>
</section>
</div>
</section>
</div>
<section id="section-5">
      <h2 id="name-references">
<a href="#section-5" class="section-number selfRef">5. </a><a href="#name-references" class="section-name selfRef">References</a>
      </h2>
<section id="section-5.1">
        <h3 id="name-normative-references">
<a href="#section-5.1" class="section-number selfRef">5.1. </a><a href="#name-normative-references" class="section-name selfRef">Normative References</a>
        </h3>
<dl class="references">
<dt id="RFC2119">[RFC2119]</dt>
        <dd>
<span class="refAuthor">Bradner, S.</span>, <span class="refTitle">"Key words for use in RFCs to Indicate Requirement Levels"</span>, <span class="seriesInfo">BCP 14</span>, <span class="seriesInfo">RFC 2119</span>, <span class="seriesInfo">DOI 10.17487/RFC2119</span>, <time datetime="1997-03" class="refDate">March 1997</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc2119">https://www.rfc-editor.org/info/rfc2119</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC3168">[RFC3168]</dt>
        <dd>
<span class="refAuthor">Ramakrishnan, K.</span>, <span class="refAuthor">Floyd, S.</span>, and <span class="refAuthor">D. Black</span>, <span class="refTitle">"The Addition of Explicit Congestion Notification (ECN) to IP"</span>, <span class="seriesInfo">RFC 3168</span>, <span class="seriesInfo">DOI 10.17487/RFC3168</span>, <time datetime="2001-09" class="refDate">September 2001</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc3168">https://www.rfc-editor.org/info/rfc3168</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC8311">[RFC8311]</dt>
        <dd>
<span class="refAuthor">Black, D.</span>, <span class="refTitle">"Relaxing Restrictions on Explicit Congestion Notification (ECN) Experimentation"</span>, <span class="seriesInfo">RFC 8311</span>, <span class="seriesInfo">DOI 10.17487/RFC8311</span>, <time datetime="2018-01" class="refDate">January 2018</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc8311">https://www.rfc-editor.org/info/rfc8311</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC9331">[RFC9331]</dt>
      <dd>
<span class="refAuthor">De Schepper, K.</span> and <span class="refAuthor">B. Briscoe, Ed.</span>, <span class="refTitle">"The Explicit Congestion Notification (ECN) Protocol for Low Latency, Low Loss, and Scalable Throughput (L4S)"</span>, <span class="seriesInfo">RFC 9331</span>, <span class="seriesInfo">DOI 10.17487/RFC9331</span>, <time datetime="2023-01" class="refDate">January 2023</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc9331">https://www.rfc-editor.org/info/rfc9331</a>&gt;</span>. </dd>
<dd class="break"></dd>
</dl>
</section>
<section id="section-5.2">
        <h3 id="name-informative-references">
<a href="#section-5.2" class="section-number selfRef">5.2. </a><a href="#name-informative-references" class="section-name selfRef">Informative References</a>
        </h3>
<dl class="references">
<dt id="Alizadeh-stability">[Alizadeh-stability]</dt>
        <dd>
<span class="refAuthor">Alizadeh, M.</span>, <span class="refAuthor">Javanmard, A.</span>, and <span class="refAuthor">B. Prabhakar</span>, <span class="refTitle">"Analysis of DCTCP: Stability, Convergence, and Fairness"</span>, <span class="refContent">SIGMETRICS '11: Proceedings of the ACM SIGMETRICS Joint International Conference on Measurement and Modeling of Computer Systems, pp. 73-84</span>, <span class="seriesInfo">DOI 10.1145/1993744.1993753</span>, <time datetime="2011-06" class="refDate">June 2011</time>, <span>&lt;<a href="https://dl.acm.org/citation.cfm?id=1993753">https://dl.acm.org/citation.cfm?id=1993753</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="AQMmetrics">[AQMmetrics]</dt>
        <dd>
<span class="refAuthor">Kwon, M.</span> and <span class="refAuthor">S. Fahmy</span>, <span class="refTitle">"A Comparison of Load-based and Queue-based Active Queue Management Algorithms"</span>, <span class="refContent">Proc. Int'l Soc. for Optical Engineering (SPIE), Vol. 4866, pp. 35-46</span>, <span class="seriesInfo">DOI 10.1117/12.473021</span>, <time datetime="2002" class="refDate">2002</time>, <span>&lt;<a href="https://www.cs.purdue.edu/homes/fahmy/papers/ldc.pdf">https://www.cs.purdue.edu/homes/fahmy/papers/ldc.pdf</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="ARED01">[ARED01]</dt>
        <dd>
<span class="refAuthor">Floyd, S.</span>, <span class="refAuthor">Gummadi, R.</span>, and <span class="refAuthor">S. Shenker</span>, <span class="refTitle">"Adaptive RED: An Algorithm for Increasing the Robustness of RED's Active Queue Management"</span>, <span class="refContent">ACIRI Technical Report 301</span>, <time datetime="2001-08" class="refDate">August 2001</time>, <span>&lt;<a href="https://www.icsi.berkeley.edu/icsi/node/2032">https://www.icsi.berkeley.edu/icsi/node/2032</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="I-D.cardwell-iccrg-bbr-congestion-control">[BBR-CC]</dt>
        <dd>
<span class="refAuthor">Cardwell, N.</span>, <span class="refAuthor">Cheng, Y.</span>, <span class="refAuthor">Hassas Yeganeh, S.</span>, <span class="refAuthor">Swett, I.</span>, and <span class="refAuthor">V. Jacobson</span>, <span class="refTitle">"BBR Congestion Control"</span>, <span class="refContent">Work in Progress</span>, <span class="seriesInfo">Internet-Draft, draft-cardwell-iccrg-bbr-congestion-control-02</span>, <time datetime="2022-03-07" class="refDate">7 March 2022</time>, <span>&lt;<a href="https://datatracker.ietf.org/doc/html/draft-cardwell-iccrg-bbr-congestion-control-02">https://datatracker.ietf.org/doc/html/draft-cardwell-iccrg-bbr-congestion-control-02</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="BBRv2">[BBRv2]</dt>
        <dd>
<span class="refTitle">"TCP BBR v2 Alpha/Preview Release"</span>, <span class="refContent">commit 17700ca</span>, <time datetime="2022-06" class="refDate">June 2022</time>, <span>&lt;<a href="https://github.com/google/bbr">https://github.com/google/bbr</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="Boru20">[Boru20]</dt>
        <dd>
<span class="refAuthor">Boru Oljira, D.</span>, <span class="refAuthor">Grinnemo, K-J.</span>, <span class="refAuthor">Brunstrom, A.</span>, and <span class="refAuthor">J. Taheri</span>, <span class="refTitle">"Validating the Sharing Behavior and Latency Characteristics of the L4S Architecture"</span>, <span class="refContent">ACM SIGCOMM Computer Communication Review, Vol. 50, Issue 2, pp. 37-44</span>, <span class="seriesInfo">DOI 10.1145/3402413.3402419</span>, <time datetime="2020-05" class="refDate">May 2020</time>, <span>&lt;<a href="https://dl.acm.org/doi/abs/10.1145/3402413.3402419">https://dl.acm.org/doi/abs/10.1145/3402413.3402419</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="CCcensus19">[CCcensus19]</dt>
        <dd>
<span class="refAuthor">Mishra, A.</span>, <span class="refAuthor">Sun, X.</span>, <span class="refAuthor">Jain, A.</span>, <span class="refAuthor">Pande, S.</span>, <span class="refAuthor">Joshi, R.</span>, and <span class="refAuthor">B. Leong</span>, <span class="refTitle">"The Great Internet TCP Congestion Control Census"</span>, <span class="refContent">Proceedings of the ACM on Measurement and Analysis of Computing Systems, Vol. 3, Issue 3, Article No. 45, pp. 1-24</span>, <span class="seriesInfo">DOI 10.1145/3366693</span>, <time datetime="2019-12" class="refDate">December 2019</time>, <span>&lt;<a href="https://doi.org/10.1145/3366693">https://doi.org/10.1145/3366693</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="CoDel">[CoDel]</dt>
        <dd>
<span class="refAuthor">Nichols, K.</span> and <span class="refAuthor">V. Jacobson</span>, <span class="refTitle">"Controlling Queue Delay"</span>, <span class="refContent">ACM Queue, Vol. 10, Issue 5</span>, <time datetime="2012-05" class="refDate">May 2012</time>, <span>&lt;<a href="https://queue.acm.org/issuedetail.cfm?issue=2208917">https://queue.acm.org/issuedetail.cfm?issue=2208917</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="CRED_Insights">[CRED_Insights]</dt>
        <dd>
<span class="refAuthor">Briscoe, B.</span> and <span class="refAuthor">K. De Schepper</span>, <span class="refTitle">"Insights from Curvy RED (Random Early Detection)"</span>, <span class="refContent">BT Technical Report, TR-TUB8-2015-003</span>, <span class="seriesInfo">DOI 10.48550/arXiv.1904.07339</span>, <time datetime="2015-08" class="refDate">August 2015</time>, <span>&lt;<a href="https://arxiv.org/abs/1904.07339">https://arxiv.org/abs/1904.07339</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="I-D.briscoe-docsis-q-protection">[DOCSIS-Q-PROT]</dt>
        <dd>
<span class="refAuthor">Briscoe, B., Ed.</span> and <span class="refAuthor">G. White</span>, <span class="refTitle">"The DOCSIS® Queue Protection to Preserve Low Latency"</span>, <span class="refContent">Work in Progress</span>, <span class="seriesInfo">Internet-Draft, draft-briscoe-docsis-q-protection-06</span>, <time datetime="2022-05-13" class="refDate">13 May 2022</time>, <span>&lt;<a href="https://datatracker.ietf.org/doc/html/draft-briscoe-docsis-q-protection-06">https://datatracker.ietf.org/doc/html/draft-briscoe-docsis-q-protection-06</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="DOCSIS3.1">[DOCSIS3.1]</dt>
        <dd>
<span class="refAuthor">CableLabs</span>, <span class="refTitle">"DOCSIS 3.1 MAC and Upper Layer Protocols Interface Specification"</span>, <span class="refContent">CM-SP-MULPIv3.1, Data-Over-Cable Service Interface Specifications DOCSIS 3.1 Version I17 or later</span>, <time datetime="2019-01" class="refDate">January 2019</time>, <span>&lt;<a href="https://specification-search.cablelabs.com/CM-SP-MULPIv3.1">https://specification-search.cablelabs.com/CM-SP-MULPIv3.1</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="DualPI2Linux">[DualPI2Linux]</dt>
        <dd>
<span class="refAuthor">Albisser, O.</span>, <span class="refAuthor">De Schepper, K.</span>, <span class="refAuthor">Briscoe, B.</span>, <span class="refAuthor">Tilmans, O.</span>, and <span class="refAuthor">H. Steen</span>, <span class="refTitle">"DUALPI2 - Low Latency, Low Loss and Scalable (L4S) AQM"</span>, <span class="refContent">Proceedings of Linux Netdev 0x13</span>, <time datetime="2019-03" class="refDate">March 2019</time>, <span>&lt;<a href="https://www.netdevconf.org/0x13/session.html?talk-DUALPI2-AQM">https://www.netdevconf.org/0x13/session.html?talk-DUALPI2-AQM</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="DualQ-Test">[DualQ-Test]</dt>
        <dd>
<span class="refAuthor">Steen, H.</span>, <span class="refTitle">"Destruction Testing: Ultra-Low Delay using Dual Queue Coupled Active Queue Management"</span>, <span class="refContent">Master's Thesis, Department of Informatics, University of Oslo</span>, <time datetime="2017-05" class="refDate">May 2017</time>. </dd>
<dd class="break"></dd>
<dt id="Dukkipati06">[Dukkipati06]</dt>
        <dd>
<span class="refAuthor">Dukkipati, N.</span> and <span class="refAuthor">N. McKeown</span>, <span class="refTitle">"Why Flow-Completion Time is the Right Metric for Congestion Control"</span>, <span class="refContent">ACM SIGCOMM Computer Communication Review, Vol. 36, Issue 1, pp. 59-62</span>, <span class="seriesInfo">DOI 10.1145/1111322.1111336</span>, <time datetime="2006-01" class="refDate">January 2006</time>, <span>&lt;<a href="https://dl.acm.org/doi/10.1145/1111322.1111336">https://dl.acm.org/doi/10.1145/1111322.1111336</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="Heist21">[Heist21]</dt>
        <dd>
<span class="refTitle">"L4S Tests"</span>, <span class="refContent">commit e21cd91</span>, <time datetime="2021-08" class="refDate">August 2021</time>, <span>&lt;<a href="https://github.com/heistp/l4s-tests">https://github.com/heistp/l4s-tests</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="I-D.briscoe-tsvwg-l4s-diffserv">[L4S-DIFFSERV]</dt>
        <dd>
<span class="refAuthor">Briscoe, B.</span>, <span class="refTitle">"Interactions between Low Latency, Low Loss, Scalable Throughput (L4S) and Differentiated Services"</span>, <span class="refContent">Work in Progress</span>, <span class="seriesInfo">Internet-Draft, draft-briscoe-tsvwg-l4s-diffserv-02</span>, <time datetime="2018-11-04" class="refDate">4 November 2018</time>, <span>&lt;<a href="https://datatracker.ietf.org/doc/html/draft-briscoe-tsvwg-l4s-diffserv-02">https://datatracker.ietf.org/doc/html/draft-briscoe-tsvwg-l4s-diffserv-02</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="L4Sdemo16">[L4Sdemo16]</dt>
        <dd>
<span class="refAuthor">Bondarenko, O.</span>, <span class="refAuthor">De Schepper, K.</span>, <span class="refAuthor">Tsang, I.</span>, <span class="refAuthor">Briscoe, B.</span>, <span class="refAuthor">Petlund, A.</span>, and <span class="refAuthor">C. Griwodz</span>, <span class="refTitle">"Ultra-Low Delay for All: Live Experience, Live Analysis"</span>, <span class="refContent">Proceedings of the 7th International Conference on Multimedia Systems, Article No. 33, pp. 1-4</span>, <span class="seriesInfo">DOI 10.1145/2910017.2910633</span>, <time datetime="2016-05" class="refDate">May 2016</time>, <span>&lt;<a href="https://dl.acm.org/citation.cfm?doid=2910017.2910633">https://dl.acm.org/citation.cfm?doid=2910017.2910633</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="L4Seval22">[L4Seval22]</dt>
        <dd>
<span class="refAuthor">De Schepper, K.</span>, <span class="refAuthor">Albisser, O.</span>, <span class="refAuthor">Tilmans, O.</span>, and <span class="refAuthor">B. Briscoe</span>, <span class="refTitle">"Dual Queue Coupled AQM: Deployable Very Low Queuing Delay for All"</span>, <span class="refContent">Preprint submitted to IEEE/ACM Transactions on Networking</span>, <span class="seriesInfo">DOI 10.48550/arXiv.2209.01078</span>, <time datetime="2022-09" class="refDate">September 2022</time>, <span>&lt;<a href="https://arxiv.org/abs/2209.01078">https://arxiv.org/abs/2209.01078</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="L4S_5G">[L4S_5G]</dt>
        <dd>
<span class="refAuthor">Willars, P.</span>, <span class="refAuthor">Wittenmark, E.</span>, <span class="refAuthor">Ronkainen, H.</span>, <span class="refAuthor">Östberg, C.</span>, <span class="refAuthor">Johansson, I.</span>, <span class="refAuthor">Strand, J.</span>, <span class="refAuthor">Lédl, P.</span>, and <span class="refAuthor">D. Schnieders</span>, <span class="refTitle">"Enabling time-critical applications over 5G with rate adaptation"</span>, <span class="refContent">Ericsson - Deutsche Telekom White Paper, BNEW-21:025455</span>, <time datetime="2021-05" class="refDate">May 2021</time>, <span>&lt;<a href="https://www.ericsson.com/en/reports-and-papers/white-papers/enabling-time-critical-applications-over-5g-with-rate-adaptation">https://www.ericsson.com/en/reports-and-papers/white-papers/enabling-time-critical-applications-over-5g-with-rate-adaptation</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="Labovitz10">[Labovitz10]</dt>
        <dd>
<span class="refAuthor">Labovitz, C.</span>, <span class="refAuthor">Iekel-Johnson, S.</span>, <span class="refAuthor">McPherson, D.</span>, <span class="refAuthor">Oberheide, J.</span>, and <span class="refAuthor">F. Jahanian</span>, <span class="refTitle">"Internet Inter-Domain Traffic"</span>, <span class="refContent">ACM SIGCOMM Computer Communication Review, Vol. 40, Issue 4, pp. 75-86</span>, <span class="seriesInfo">DOI 10.1145/1851275.1851194</span>, <time datetime="2010-08" class="refDate">August 2010</time>, <span>&lt;<a href="https://doi.org/10.1145/1851275.1851194">https://doi.org/10.1145/1851275.1851194</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="LLD">[LLD]</dt>
        <dd>
<span class="refAuthor">White, G.</span>, <span class="refAuthor">Sundaresan, K.</span>, and <span class="refAuthor">B. Briscoe</span>, <span class="refTitle">"Low Latency DOCSIS: Technology Overview"</span>, <span class="refContent">CableLabs White Paper</span>, <time datetime="2019-02" class="refDate">February 2019</time>, <span>&lt;<a href="https://cablela.bs/low-latency-docsis-technology-overview-february-2019">https://cablela.bs/low-latency-docsis-technology-overview-february-2019</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="MEDF">[MEDF]</dt>
        <dd>
<span class="refAuthor">Menth, M.</span>, <span class="refAuthor">Schmid, M.</span>, <span class="refAuthor">Heiss, H.</span>, and <span class="refAuthor">T. Reim</span>, <span class="refTitle">"MEDF - A Simple Scheduling Algorithm for Two Real-Time Transport Service Classes with Application in the UTRAN"</span>, <span class="refContent">Proc. IEEE Conference on Computer Communications (INFOCOM'03), Vol. 2, pp. 1116-1122</span>, <span class="seriesInfo">DOI 10.1109/INFCOM.2003.1208948</span>, <time datetime="2003-03" class="refDate">March 2003</time>, <span>&lt;<a href="https://doi.org/10.1109/INFCOM.2003.1208948">https://doi.org/10.1109/INFCOM.2003.1208948</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="PI2">[PI2]</dt>
        <dd>
<span class="refAuthor">De Schepper, K.</span>, <span class="refAuthor">Bondarenko, O.</span>, <span class="refAuthor">Briscoe, B.</span>, and <span class="refAuthor">I. Tsang</span>, <span class="refTitle">"PI2: A Linearized AQM for both Classic and Scalable TCP"</span>, <span class="refContent">ACM CoNEXT'16</span>, <span class="seriesInfo">DOI 10.1145/2999572.2999578</span>, <time datetime="2016-12" class="refDate">December 2016</time>, <span>&lt;<a href="https://dl.acm.org/doi/10.1145/2999572.2999578">https://dl.acm.org/doi/10.1145/2999572.2999578</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="PI2param">[PI2param]</dt>
        <dd>
<span class="refAuthor">Briscoe, B.</span>, <span class="refTitle">"PI2 Parameters"</span>, <span class="refContent">Technical Report, TR-BB-2021-001, arXiv:2107.01003 [cs.NI]</span>, <span class="seriesInfo">DOI 10.48550/arXiv.2107.01003</span>, <time datetime="2021-07" class="refDate">July 2021</time>, <span>&lt;<a href="https://arxiv.org/abs/2107.01003">https://arxiv.org/abs/2107.01003</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="I-D.briscoe-iccrg-prague-congestion-control">[PRAGUE-CC]</dt>
        <dd>
<span class="refAuthor">De Schepper, K.</span>, <span class="refAuthor">Tilmans, O.</span>, and <span class="refAuthor">B. Briscoe</span>, <span class="refTitle">"Prague Congestion Control"</span>, <span class="refContent">Work in Progress</span>, <span class="seriesInfo">Internet-Draft, draft-briscoe-iccrg-prague-congestion-control-01</span>, <time datetime="2022-07-11" class="refDate">11 July 2022</time>, <span>&lt;<a href="https://datatracker.ietf.org/doc/html/draft-briscoe-iccrg-prague-congestion-control-01">https://datatracker.ietf.org/doc/html/draft-briscoe-iccrg-prague-congestion-control-01</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="PragueLinux">[PragueLinux]</dt>
        <dd>
<span class="refAuthor">Briscoe, B.</span>, <span class="refAuthor">De Schepper, K.</span>, <span class="refAuthor">Albisser, O.</span>, <span class="refAuthor">Misund, J.</span>, <span class="refAuthor">Tilmans, O.</span>, <span class="refAuthor">Kuehlewind, M.</span>, and <span class="refAuthor">A. Ahmed</span>, <span class="refTitle">"Implementing the 'TCP Prague' Requirements for L4S"</span>, <span class="refContent">Proceedings of Linux Netdev 0x13</span>, <time datetime="2019-03" class="refDate">March 2019</time>, <span>&lt;<a href="https://www.netdevconf.org/0x13/session.html?talk-tcp-prague-l4s">https://www.netdevconf.org/0x13/session.html?talk-tcp-prague-l4s</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RED">[RED]</dt>
        <dd>
<span class="refAuthor">Floyd, S.</span> and <span class="refAuthor">V. Jacobson</span>, <span class="refTitle">"Random Early Detection Gateways for Congestion Avoidance"</span>, <span class="refContent">IEEE/ACM Transactions on Networking, Volume 1, Issue 4, pp. 397-413</span>, <span class="seriesInfo">DOI 10.1109/90.251892</span>, <time datetime="1993-08" class="refDate">August 1993</time>, <span>&lt;<a href="https://dl.acm.org/doi/10.1109/90.251892">https://dl.acm.org/doi/10.1109/90.251892</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="I-D.mathis-iccrg-relentless-tcp">[RELENTLESS]</dt>
        <dd>
<span class="refAuthor">Mathis, M.</span>, <span class="refTitle">"Relentless Congestion Control"</span>, <span class="refContent">Work in Progress</span>, <span class="seriesInfo">Internet-Draft, draft-mathis-iccrg-relentless-tcp-00</span>, <time datetime="2009-03-04" class="refDate">4 March 2009</time>, <span>&lt;<a href="https://datatracker.ietf.org/doc/html/draft-mathis-iccrg-relentless-tcp-00">https://datatracker.ietf.org/doc/html/draft-mathis-iccrg-relentless-tcp-00</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC0970">[RFC0970]</dt>
        <dd>
<span class="refAuthor">Nagle, J.</span>, <span class="refTitle">"On Packet Switches With Infinite Storage"</span>, <span class="seriesInfo">RFC 970</span>, <span class="seriesInfo">DOI 10.17487/RFC0970</span>, <time datetime="1985-12" class="refDate">December 1985</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc970">https://www.rfc-editor.org/info/rfc970</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC2914">[RFC2914]</dt>
        <dd>
<span class="refAuthor">Floyd, S.</span>, <span class="refTitle">"Congestion Control Principles"</span>, <span class="seriesInfo">BCP 41</span>, <span class="seriesInfo">RFC 2914</span>, <span class="seriesInfo">DOI 10.17487/RFC2914</span>, <time datetime="2000-09" class="refDate">September 2000</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc2914">https://www.rfc-editor.org/info/rfc2914</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC3246">[RFC3246]</dt>
        <dd>
<span class="refAuthor">Davie, B.</span>, <span class="refAuthor">Charny, A.</span>, <span class="refAuthor">Bennet, J.C.R.</span>, <span class="refAuthor">Benson, K.</span>, <span class="refAuthor">Le Boudec, J.Y.</span>, <span class="refAuthor">Courtney, W.</span>, <span class="refAuthor">Davari, S.</span>, <span class="refAuthor">Firoiu, V.</span>, and <span class="refAuthor">D. Stiliadis</span>, <span class="refTitle">"An Expedited Forwarding PHB (Per-Hop Behavior)"</span>, <span class="seriesInfo">RFC 3246</span>, <span class="seriesInfo">DOI 10.17487/RFC3246</span>, <time datetime="2002-03" class="refDate">March 2002</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc3246">https://www.rfc-editor.org/info/rfc3246</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC3649">[RFC3649]</dt>
        <dd>
<span class="refAuthor">Floyd, S.</span>, <span class="refTitle">"HighSpeed TCP for Large Congestion Windows"</span>, <span class="seriesInfo">RFC 3649</span>, <span class="seriesInfo">DOI 10.17487/RFC3649</span>, <time datetime="2003-12" class="refDate">December 2003</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc3649">https://www.rfc-editor.org/info/rfc3649</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC5033">[RFC5033]</dt>
        <dd>
<span class="refAuthor">Floyd, S.</span> and <span class="refAuthor">M. Allman</span>, <span class="refTitle">"Specifying New Congestion Control Algorithms"</span>, <span class="seriesInfo">BCP 133</span>, <span class="seriesInfo">RFC 5033</span>, <span class="seriesInfo">DOI 10.17487/RFC5033</span>, <time datetime="2007-08" class="refDate">August 2007</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc5033">https://www.rfc-editor.org/info/rfc5033</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC5348">[RFC5348]</dt>
        <dd>
<span class="refAuthor">Floyd, S.</span>, <span class="refAuthor">Handley, M.</span>, <span class="refAuthor">Padhye, J.</span>, and <span class="refAuthor">J. Widmer</span>, <span class="refTitle">"TCP Friendly Rate Control (TFRC): Protocol Specification"</span>, <span class="seriesInfo">RFC 5348</span>, <span class="seriesInfo">DOI 10.17487/RFC5348</span>, <time datetime="2008-09" class="refDate">September 2008</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc5348">https://www.rfc-editor.org/info/rfc5348</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC5681">[RFC5681]</dt>
        <dd>
<span class="refAuthor">Allman, M.</span>, <span class="refAuthor">Paxson, V.</span>, and <span class="refAuthor">E. Blanton</span>, <span class="refTitle">"TCP Congestion Control"</span>, <span class="seriesInfo">RFC 5681</span>, <span class="seriesInfo">DOI 10.17487/RFC5681</span>, <time datetime="2009-09" class="refDate">September 2009</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc5681">https://www.rfc-editor.org/info/rfc5681</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC5706">[RFC5706]</dt>
        <dd>
<span class="refAuthor">Harrington, D.</span>, <span class="refTitle">"Guidelines for Considering Operations and Management of New Protocols and Protocol Extensions"</span>, <span class="seriesInfo">RFC 5706</span>, <span class="seriesInfo">DOI 10.17487/RFC5706</span>, <time datetime="2009-11" class="refDate">November 2009</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc5706">https://www.rfc-editor.org/info/rfc5706</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC7567">[RFC7567]</dt>
        <dd>
<span class="refAuthor">Baker, F., Ed.</span> and <span class="refAuthor">G. Fairhurst, Ed.</span>, <span class="refTitle">"IETF Recommendations Regarding Active Queue Management"</span>, <span class="seriesInfo">BCP 197</span>, <span class="seriesInfo">RFC 7567</span>, <span class="seriesInfo">DOI 10.17487/RFC7567</span>, <time datetime="2015-07" class="refDate">July 2015</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc7567">https://www.rfc-editor.org/info/rfc7567</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC8033">[RFC8033]</dt>
        <dd>
<span class="refAuthor">Pan, R.</span>, <span class="refAuthor">Natarajan, P.</span>, <span class="refAuthor">Baker, F.</span>, and <span class="refAuthor">G. White</span>, <span class="refTitle">"Proportional Integral Controller Enhanced (PIE): A Lightweight Control Scheme to Address the Bufferbloat Problem"</span>, <span class="seriesInfo">RFC 8033</span>, <span class="seriesInfo">DOI 10.17487/RFC8033</span>, <time datetime="2017-02" class="refDate">February 2017</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc8033">https://www.rfc-editor.org/info/rfc8033</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC8034">[RFC8034]</dt>
        <dd>
<span class="refAuthor">White, G.</span> and <span class="refAuthor">R. Pan</span>, <span class="refTitle">"Active Queue Management (AQM) Based on Proportional Integral Controller Enhanced (PIE) for Data-Over-Cable Service Interface Specifications (DOCSIS) Cable Modems"</span>, <span class="seriesInfo">RFC 8034</span>, <span class="seriesInfo">DOI 10.17487/RFC8034</span>, <time datetime="2017-02" class="refDate">February 2017</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc8034">https://www.rfc-editor.org/info/rfc8034</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC8174">[RFC8174]</dt>
        <dd>
<span class="refAuthor">Leiba, B.</span>, <span class="refTitle">"Ambiguity of Uppercase vs Lowercase in RFC 2119 Key Words"</span>, <span class="seriesInfo">BCP 14</span>, <span class="seriesInfo">RFC 8174</span>, <span class="seriesInfo">DOI 10.17487/RFC8174</span>, <time datetime="2017-05" class="refDate">May 2017</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc8174">https://www.rfc-editor.org/info/rfc8174</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC8257">[RFC8257]</dt>
        <dd>
<span class="refAuthor">Bensley, S.</span>, <span class="refAuthor">Thaler, D.</span>, <span class="refAuthor">Balasubramanian, P.</span>, <span class="refAuthor">Eggert, L.</span>, and <span class="refAuthor">G. Judd</span>, <span class="refTitle">"Data Center TCP (DCTCP): TCP Congestion Control for Data Centers"</span>, <span class="seriesInfo">RFC 8257</span>, <span class="seriesInfo">DOI 10.17487/RFC8257</span>, <time datetime="2017-10" class="refDate">October 2017</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc8257">https://www.rfc-editor.org/info/rfc8257</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC8290">[RFC8290]</dt>
        <dd>
<span class="refAuthor">Hoeiland-Joergensen, T.</span>, <span class="refAuthor">McKenney, P.</span>, <span class="refAuthor">Taht, D.</span>, <span class="refAuthor">Gettys, J.</span>, and <span class="refAuthor">E. Dumazet</span>, <span class="refTitle">"The Flow Queue CoDel Packet Scheduler and Active Queue Management Algorithm"</span>, <span class="seriesInfo">RFC 8290</span>, <span class="seriesInfo">DOI 10.17487/RFC8290</span>, <time datetime="2018-01" class="refDate">January 2018</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc8290">https://www.rfc-editor.org/info/rfc8290</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC8298">[RFC8298]</dt>
        <dd>
<span class="refAuthor">Johansson, I.</span> and <span class="refAuthor">Z. Sarker</span>, <span class="refTitle">"Self-Clocked Rate Adaptation for Multimedia"</span>, <span class="seriesInfo">RFC 8298</span>, <span class="seriesInfo">DOI 10.17487/RFC8298</span>, <time datetime="2017-12" class="refDate">December 2017</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc8298">https://www.rfc-editor.org/info/rfc8298</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC8312">[RFC8312]</dt>
        <dd>
<span class="refAuthor">Rhee, I.</span>, <span class="refAuthor">Xu, L.</span>, <span class="refAuthor">Ha, S.</span>, <span class="refAuthor">Zimmermann, A.</span>, <span class="refAuthor">Eggert, L.</span>, and <span class="refAuthor">R. Scheffenegger</span>, <span class="refTitle">"CUBIC for Fast Long-Distance Networks"</span>, <span class="seriesInfo">RFC 8312</span>, <span class="seriesInfo">DOI 10.17487/RFC8312</span>, <time datetime="2018-02" class="refDate">February 2018</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc8312">https://www.rfc-editor.org/info/rfc8312</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC8404">[RFC8404]</dt>
        <dd>
<span class="refAuthor">Moriarty, K., Ed.</span> and <span class="refAuthor">A. Morton, Ed.</span>, <span class="refTitle">"Effects of Pervasive Encryption on Operators"</span>, <span class="seriesInfo">RFC 8404</span>, <span class="seriesInfo">DOI 10.17487/RFC8404</span>, <time datetime="2018-07" class="refDate">July 2018</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc8404">https://www.rfc-editor.org/info/rfc8404</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC9000">[RFC9000]</dt>
        <dd>
<span class="refAuthor">Iyengar, J., Ed.</span> and <span class="refAuthor">M. Thomson, Ed.</span>, <span class="refTitle">"QUIC: A UDP-Based Multiplexed and Secure Transport"</span>, <span class="seriesInfo">RFC 9000</span>, <span class="seriesInfo">DOI 10.17487/RFC9000</span>, <time datetime="2021-05" class="refDate">May 2021</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc9000">https://www.rfc-editor.org/info/rfc9000</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="RFC9330">[RFC9330]</dt>
        <dd>
<span class="refAuthor">Briscoe, B., Ed.</span>, <span class="refAuthor">De Schepper, K.</span>, <span class="refAuthor">Bagnulo, M.</span>, and <span class="refAuthor">G. White</span>, <span class="refTitle">"Low Latency, Low Loss, and Scalable Throughput (L4S) Internet Service: Architecture"</span>, <span class="seriesInfo">RFC 9330</span>, <span class="seriesInfo">DOI 10.17487/RFC9330</span>, <time datetime="2023-01" class="refDate">January 2023</time>, <span>&lt;<a href="https://www.rfc-editor.org/info/rfc9330">https://www.rfc-editor.org/info/rfc9330</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="SCReAM-L4S">[SCReAM-L4S]</dt>
        <dd>
<span class="refTitle">"SCReAM"</span>, <span class="refContent">commit fda6c53</span>, <time datetime="2022-06" class="refDate">June 2022</time>, <span>&lt;<a href="https://github.com/EricssonResearch/scream">https://github.com/EricssonResearch/scream</a>&gt;</span>. </dd>
<dd class="break"></dd>
<dt id="SigQ-Dyn">[SigQ-Dyn]</dt>
      <dd>
<span class="refAuthor">Briscoe, B.</span>, <span class="refTitle">"Rapid Signalling of Queue Dynamics"</span>, <span class="refContent">Technical Report, TR-BB-2017-001</span>, <span class="seriesInfo">DOI 10.48550/arXiv.1904.07044</span>, <time datetime="2017-09" class="refDate">September 2017</time>, <span>&lt;<a href="https://arxiv.org/abs/1904.07044">https://arxiv.org/abs/1904.07044</a>&gt;</span>. </dd>
<dd class="break"></dd>
</dl>
</section>
</section>
<div id="dualq_Ex_algo_pi2">
<section id="appendix-A">
      <h2 id="name-example-dualq-coupled-pi2-a">
<a href="#appendix-A" class="section-number selfRef">Appendix A. </a><a href="#name-example-dualq-coupled-pi2-a" class="section-name selfRef">Example DualQ Coupled PI2 Algorithm</a>
      </h2>
<p id="appendix-A-1">As a first concrete example, the pseudocode below gives the DualPI2
      algorithm. DualPI2 follows the structure of the DualQ Coupled AQM
      framework in <a href="#dualq_fig_structure" class="auto internal xref">Figure 1</a>. A simple ramp
      function (configured in units of queuing time) with unsmoothed ECN
      marking is used for the Native L4S AQM. The ramp can also be configured
      as a step function. The PI2 algorithm <span>[<a href="#PI2" class="cite xref">PI2</a>]</span> is used
      for the Classic AQM. PI2 is an improved variant of the PIE
      AQM <span>[<a href="#RFC8033" class="cite xref">RFC8033</a>]</span>.<a href="#appendix-A-1" class="pilcrow">¶</a></p>
<p id="appendix-A-2">The pseudocode will be introduced in two passes. The first pass
      explains the core concepts, deferring handling of edge-cases like
      overload to the second pass. To aid comparison, line numbers are kept in
      step between the two passes by using letter suffixes where the longer
      code needs extra lines.<a href="#appendix-A-2" class="pilcrow">¶</a></p>
<p id="appendix-A-3">All variables are assumed to be floating point in their basic units
      (size in bytes, time in seconds, rates in bytes/second, alpha and beta
      in Hz, and probabilities from 0 to 1). Constants expressed in k (kilo), M
      (mega), G (giga), u (micro), m (milli), %, and so forth, are assumed to be
      converted to their appropriate multiple or fraction to represent the
      basic units. A real implementation that wants to use integer values
      needs to handle appropriate scaling factors and allow
      appropriate resolution of its integer types (including temporary
      internal values during calculations).<a href="#appendix-A-3" class="pilcrow">¶</a></p>
<p id="appendix-A-4">A full open source implementation for Linux is available at
      <span>&lt;<a href="https://github.com/L4STeam/sch_dualpi2_upstream">https://github.com/L4STeam/sch_dualpi2_upstream</a>&gt;</span> and explained in <span>[<a href="#DualPI2Linux" class="cite xref">DualPI2Linux</a>]</span>. The specification of the DualQ Coupled AQM for
      DOCSIS cable modems and cable modem termination systems (CMTSs) is available in <span>[<a href="#DOCSIS3.1" class="cite xref">DOCSIS3.1</a>]</span>
      and explained in <span>[<a href="#LLD" class="cite xref">LLD</a>]</span>.<a href="#appendix-A-4" class="pilcrow">¶</a></p>
<div id="dualq_Ex_algo_pi2-1">
<section id="appendix-A.1">
        <h3 id="name-pass-1-core-concepts">
<a href="#appendix-A.1" class="section-number selfRef">A.1. </a><a href="#name-pass-1-core-concepts" class="section-name selfRef">Pass #1: Core Concepts</a>
        </h3>
<p id="appendix-A.1-1">The pseudocode manipulates three main structures of variables: the
        packet (pkt), the L4S queue (lq), and the Classic queue (cq). The
        pseudocode consists of the following six functions:<a href="#appendix-A.1-1" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="appendix-A.1-2.1">The initialization function dualpi2_params_init(...) (<a href="#dualq_fig_Algo_pi2_core_header" class="auto internal xref">Figure 2</a>) that sets parameter
            defaults (the API for setting non-default values is omitted for
            brevity).<a href="#appendix-A.1-2.1" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-A.1-2.2">The enqueue function dualpi2_enqueue(lq, cq, pkt) (<a href="#dualq_fig_Algo_pi2_enqueue" class="auto internal xref">Figure 3</a>).<a href="#appendix-A.1-2.2" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-A.1-2.3">The dequeue function dualpi2_dequeue(lq, cq, pkt) (<a href="#dualq_fig_Algo_pi2_dequeue" class="auto internal xref">Figure 4</a>).<a href="#appendix-A.1-2.3" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-A.1-2.4">The recurrence function recur(q, likelihood) for de-randomized
            ECN marking (shown at the end of <a href="#dualq_fig_Algo_pi2_dequeue" class="auto internal xref">Figure 4</a>).<a href="#appendix-A.1-2.4" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-A.1-2.5">The L4S AQM function laqm(qdelay) (<a href="#dualq_fig_Algo_laqm_core" class="auto internal xref">Figure 5</a>) used to calculate the
            ECN-marking probability for the L4S queue.<a href="#appendix-A.1-2.5" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-A.1-2.6">The Base AQM function that implements the PI algorithm
            dualpi2_update(lq, cq) (<a href="#dualq_fig_Algo_pi2_core" class="auto internal xref">Figure 6</a>)
            used to regularly update the base probability (p'), which is
            squared for the Classic AQM as well as being coupled across to the
            L4S queue.<a href="#appendix-A.1-2.6" class="pilcrow">¶</a>
</li>
        </ul>
<p id="appendix-A.1-3">It also uses the following functions that are not shown in
        full here:<a href="#appendix-A.1-3" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="appendix-A.1-4.1">scheduler(), which selects between the head packets of the two
            queues. The choice of scheduler technology is discussed later.<a href="#appendix-A.1-4.1" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-A.1-4.2">cq.byt() or lq.byt() returns the current length
            (a.k.a. backlog) of the relevant queue in bytes.<a href="#appendix-A.1-4.2" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-A.1-4.3">cq.len() or lq.len() returns the current length of the relevant
            queue in packets.<a href="#appendix-A.1-4.3" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-A.1-4.4">cq.time() or lq.time() returns the current queuing delay of the
            relevant queue in units of time (see <a href="#note_qdelay" class="internal xref">Note a</a> below).<a href="#appendix-A.1-4.4" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-A.1-4.5">mark(pkt) and drop(pkt) for ECN marking and dropping a
            packet.<a href="#appendix-A.1-4.5" class="pilcrow">¶</a>
</li>
        </ul>
<p id="appendix-A.1-5">In experiments so far (building on experiments with PIE) on
        broadband access links ranging from 4 Mb/s to 200 Mb/s with base RTTs
        from 5 ms to 100 ms, DualPI2 achieves good results with the default
        parameters in <a href="#dualq_fig_Algo_pi2_core_header" class="auto internal xref">Figure 2</a>. The
        parameters are categorised by whether they relate to the PI2 AQM,
        the L4S AQM, or the framework coupling them together. Constants and
        variables derived from these parameters are also included at the end
        of each category. Each parameter is explained as it is encountered in
        the walk-through of the pseudocode below, and the rationale for the
        chosen defaults are given so that sensible values can be used in
        scenarios other than the regular public Internet.<a href="#appendix-A.1-5" class="pilcrow">¶</a></p>
<span id="name-example-header-pseudocode-f"></span><div id="dualq_fig_Algo_pi2_core_header">
<figure id="figure-2">
          <div class="sourcecode" id="appendix-A.1-6.1">
<pre>
1:  dualpi2_params_init(...) {         % Set input parameter defaults
2:    % DualQ Coupled framework parameters
3:    limit = MAX_LINK_RATE * 250 ms               % Dual buffer size
4:    k = 2                                         % Coupling factor
5:    % NOT SHOWN % scheduler-dependent weight or equival't parameter
6:
7:    % PI2 Classic AQM parameters
8:    target = 15 ms                             % Queue delay target
9:    RTT_max = 100 ms                      % Worst case RTT expected
10:   % PI2 constants derived from above PI2 parameters
11:   p_Cmax = min(1/k^2, 1)             % Max Classic drop/mark prob
12:   Tupdate = min(target, RTT_max/3)        % PI sampling interval
13:   alpha = 0.1 * Tupdate / RTT_max^2      % PI integral gain in Hz
14:   beta = 0.3 / RTT_max               % PI proportional gain in Hz
15:
16:   % L4S ramp AQM parameters
17:   minTh = 800 us        % L4S min marking threshold in time units
18:   range = 400 us                % Range of L4S ramp in time units
19:   Th_len = 1 pkt           % Min L4S marking threshold in packets
20:   % L4S constants
21:   p_Lmax = 1                               % Max L4S marking prob
22: }</pre>
</div>
<figcaption><a href="#figure-2" class="selfRef">Figure 2</a>:
<a href="#name-example-header-pseudocode-f" class="selfRef">Example Header Pseudocode for DualQ Coupled PI2 AQM</a>
          </figcaption></figure>
</div>
<p id="appendix-A.1-7">The overall goal of the code is to apply the marking and dropping
        probabilities for L4S and Classic traffic (p_L and p_C). These are
        derived from the underlying base probabilities p'_L and p' driven,
        respectively, by the traffic in the L and C queues. The marking
        probability for the L queue (p_L) depends on both the base probability
        in its own queue (p'_L) and a probability called p_CL, which is
        coupled across from p' in the C queue (see <a href="#dualq_coupled_structure" class="auto internal xref">Section 2.4</a> for the derivation of the specific
        equations and dependencies).<a href="#appendix-A.1-7" class="pilcrow">¶</a></p>
<p id="appendix-A.1-8">The probabilities p_CL and p_C are derived in lines 4 and 5 of the
        dualpi2_update() function (<a href="#dualq_fig_Algo_pi2_core" class="auto internal xref">Figure 6</a>)
        then used in the dualpi2_dequeue() function where p_L is also derived
        from p_CL at line 6 (<a href="#dualq_fig_Algo_pi2_dequeue" class="auto internal xref">Figure 4</a>). The
        code walk-through below builds up to explaining that part of the code
        eventually, but it starts from packet arrival.<a href="#appendix-A.1-8" class="pilcrow">¶</a></p>
<span id="name-example-enqueue-pseudocode-"></span><div id="dualq_fig_Algo_pi2_enqueue">
<figure id="figure-3">
          <div class="sourcecode" id="appendix-A.1-9.1">
<pre>
1:  dualpi2_enqueue(lq, cq, pkt) { % Test limit and classify lq or cq
2:    if ( lq.byt() + cq.byt() + MTU &gt; limit)
3:      drop(pkt)                     % drop packet if buffer is full
4:    timestamp(pkt)     % only needed if using the sojourn technique
5:    % Packet classifier
6:    if ( ecn(pkt) modulo 2 == 1 )         % ECN bits = ECT(1) or CE
7:      lq.enqueue(pkt)
8:    else                             % ECN bits = not-ECT or ECT(0)
9:      cq.enqueue(pkt)
10: }</pre>
</div>
<figcaption><a href="#figure-3" class="selfRef">Figure 3</a>:
<a href="#name-example-enqueue-pseudocode-" class="selfRef">Example Enqueue Pseudocode for DualQ Coupled PI2 AQM</a>
          </figcaption></figure>
</div>
<span id="name-example-dequeue-pseudocode-"></span><div id="dualq_fig_Algo_pi2_dequeue">
<figure id="figure-4">
          <div class="sourcecode" id="appendix-A.1-10.1">
<pre>
1:  dualpi2_dequeue(lq, cq, pkt) {     % Couples L4S &amp; Classic queues
2:    while ( lq.byt() + cq.byt() &gt; 0 ) {
3:      if ( scheduler() == lq ) {
4:        lq.dequeue(pkt)                      % Scheduler chooses lq
5:        p'_L = laqm(lq.time())                        % Native LAQM
6:        p_L = max(p'_L, p_CL)                  % Combining function
7:        if ( recur(lq, p_L) )                      % Linear marking
8:          mark(pkt)
9:      } else {
10:       cq.dequeue(pkt)                      % Scheduler chooses cq
11:       if ( recur(cq, p_C) ) {            % probability p_C = p'^2
12:         if ( ecn(pkt) == 0 ) {           % if ECN field = not-ECT
13:           drop(pkt)                                % squared drop
14:           continue        % continue to the top of the while loop
15:         }
16:         mark(pkt)                                  % squared mark
17:       }
18:     }
19:     return(pkt)                      % return the packet and stop
20:   }
21:   return(NULL)                             % no packet to dequeue
22: }

23: recur(q, likelihood) {   % Returns TRUE with a certain likelihood
24:   q.count += likelihood
25:   if (q.count &gt; 1) {
26:     q.count -= 1
27:     return TRUE
28:   }
29:   return FALSE
30: }</pre>
</div>
<figcaption><a href="#figure-4" class="selfRef">Figure 4</a>:
<a href="#name-example-dequeue-pseudocode-" class="selfRef">Example Dequeue Pseudocode for DualQ Coupled PI2 AQM</a>
          </figcaption></figure>
</div>
<p id="appendix-A.1-11">When packets arrive, a common queue limit is checked first as shown
        in line 2 of the enqueuing pseudocode in <a href="#dualq_fig_Algo_pi2_enqueue" class="auto internal xref">Figure 3</a>. This assumes a shared buffer
        for the two queues (<a href="#note_separate_buffers" class="internal xref">Note b</a> discusses the merits of separate buffers).
        In order to avoid any bias against larger packets, 1 MTU of space is
        always allowed, and the limit is deliberately tested before
        enqueue.<a href="#appendix-A.1-11" class="pilcrow">¶</a></p>
<p id="appendix-A.1-12">If limit is not exceeded, the packet is timestamped in line 4 (only
        if the sojourn time technique is being used to measure queue delay;
        see <a href="#note_qdelay" class="internal xref">Note a</a> below for alternatives).<a href="#appendix-A.1-12" class="pilcrow">¶</a></p>
<p id="appendix-A.1-13">At lines 5-9, the packet is classified and enqueued to the Classic
        or L4S queue dependent on the least significant bit (LSB) of the ECN field
        in the IP header (line 6). Packets with a codepoint having an LSB of 0
        (Not-ECT and ECT(0)) will be enqueued in the Classic queue. Otherwise,
        ECT(1) and CE packets will be enqueued in the L4S queue. Optional
        additional packet classification flexibility is omitted for brevity
        (see the L4S ECN protocol <span>[<a href="#RFC9331" class="cite xref">RFC9331</a>]</span>).<a href="#appendix-A.1-13" class="pilcrow">¶</a></p>
<p id="appendix-A.1-14">The dequeue pseudocode (<a href="#dualq_fig_Algo_pi2_dequeue" class="auto internal xref">Figure 4</a>) is repeatedly called whenever
        the lower layer is ready to forward a packet. It schedules one packet
        for dequeuing (or zero if the queue is empty) then returns control to
        the caller so that it does not block while that packet is being
        forwarded. While making this dequeue decision, it also makes the
        necessary AQM decisions on dropping or marking. The alternative of
        applying the AQMs at enqueue would shift some processing from the
        critical time when each packet is dequeued. However, it would also add
        a whole queue of delay to the control signals, making the control loop
        sloppier (for a typical RTT, it would double the Classic queue's
        feedback delay).<a href="#appendix-A.1-14" class="pilcrow">¶</a></p>
<p id="appendix-A.1-15">All the dequeue code is contained within a large while loop so that
        if it decides to drop a packet, it will continue until it selects a
        packet to schedule. Line 3 of the dequeue pseudocode is where the
        scheduler chooses between the L4S queue (lq) and the Classic queue
        (cq). Detailed implementation of the scheduler is not shown (see
        discussion later).<a href="#appendix-A.1-15" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="appendix-A.1-16.1">
            <p id="appendix-A.1-16.1.1">If an L4S packet is scheduled, in lines 7 and 8 the packet is
            ECN-marked with likelihood p_L. The recur() function at the end of
            <a href="#dualq_fig_Algo_pi2_dequeue" class="auto internal xref">Figure 4</a> is used, which is
            preferred over random marking because it avoids delay due to
            randomization when interpreting congestion signals, but it still
            desynchronizes the sawteeth of the flows. Line 6 calculates p_L
            as the maximum of the coupled L4S probability p_CL and the
            probability from the Native L4S AQM p'_L. This implements the
            max() function shown in <a href="#dualq_fig_structure" class="auto internal xref">Figure 1</a> to
            couple the outputs of the two AQMs together. Of the two
            probabilities input to p_L in line 6:<a href="#appendix-A.1-16.1.1" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="appendix-A.1-16.1.2.1">p'_L is calculated per packet in line 5 by the laqm()
                function (see <a href="#dualq_fig_Algo_laqm_core" class="auto internal xref">Figure 5</a>), whereas<a href="#appendix-A.1-16.1.2.1" class="pilcrow">¶</a>
</li>
              <li class="normal" id="appendix-A.1-16.1.2.2">p_CL is maintained by the dualpi2_update() function,
                which runs every Tupdate (Tupdate is set in line 12 of <a href="#dualq_fig_Algo_pi2_core_header" class="auto internal xref">Figure 2</a>).<a href="#appendix-A.1-16.1.2.2" class="pilcrow">¶</a>
</li>
            </ul>
</li>
          <li class="normal" id="appendix-A.1-16.2">If a Classic packet is scheduled, lines 10 to 17 drop or mark
            the packet with probability p_C.<a href="#appendix-A.1-16.2" class="pilcrow">¶</a>
</li>
        </ul>
<p id="appendix-A.1-17">The Native L4S AQM algorithm (<a href="#dualq_fig_Algo_laqm_core" class="auto internal xref">Figure 5</a>) is a ramp function, similar to
        the RED algorithm, but simplified as follows:<a href="#appendix-A.1-17" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="appendix-A.1-18.1">The extent of the ramp is defined in units of queuing delay,
            not bytes, so that configuration remains invariant as the queue
            departure rate varies.<a href="#appendix-A.1-18.1" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-A.1-18.2">It uses instantaneous queuing delay, which avoids the
            complexity of smoothing, but also avoids embedding a worst-case
            RTT of smoothing delay in the network (see <a href="#dualq_coupled" class="auto internal xref">Section 2.1</a>).<a href="#appendix-A.1-18.2" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-A.1-18.3">The ramp rises linearly directly from 0 to 1, not to an
            intermediate value of p'_L as RED would, because there is no need
            to keep ECN-marking probability low.<a href="#appendix-A.1-18.3" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-A.1-18.4">Marking does not have to be randomized. Determinism is used
            instead of randomness to reduce the delay necessary to smooth out
            the noise of randomness from the signal.<a href="#appendix-A.1-18.4" class="pilcrow">¶</a>
</li>
        </ul>
<p id="appendix-A.1-19">The ramp function requires two configuration parameters, the
        minimum threshold (minTh) and the width of the ramp (range), both in
        units of queuing time, as shown in lines 17 and 18 of the
        initialization function in <a href="#dualq_fig_Algo_pi2_core_header" class="auto internal xref">Figure 2</a>. The ramp function can be
        configured as a step (see <a href="#note_ramp" class="internal xref">Note c</a>).<a href="#appendix-A.1-19" class="pilcrow">¶</a></p>
<p id="appendix-A.1-20">Although the DCTCP paper <span>[<a href="#Alizadeh-stability" class="cite xref">Alizadeh-stability</a>]</span>
        recommends an ECN-marking threshold of 0.17*RTT_typ, it also shows
        that the threshold can be much shallower with hardly any worse
        underutilization of the link (because the amplitude of DCTCP's
        sawteeth is so small). Based on extensive experiments, for the public
Internet the default minimum ECN-marking threshold (minTh) in <a href="#dualq_fig_Algo_pi2_core_header" class="auto internal xref">Figure 2</a> is considered a good
        compromise, even though it is a significantly smaller fraction of
        RTT_typ.<a href="#appendix-A.1-20" class="pilcrow">¶</a></p>
<span id="name-example-pseudocode-for-the-"></span><div id="dualq_fig_Algo_laqm_core">
<figure id="figure-5">
          <div class="sourcecode" id="appendix-A.1-21.1">
<pre>
1:  laqm(qdelay) {               % Returns Native L4S AQM probability
2:    if (qdelay &gt;= maxTh)
3:      return 1
4:    else if (qdelay &gt; minTh)
5:      return (qdelay - minTh)/range  % Divide could use a bit-shift
6:    else
7:      return 0
8:  }</pre>
</div>
<figcaption><a href="#figure-5" class="selfRef">Figure 5</a>:
<a href="#name-example-pseudocode-for-the-" class="selfRef">Example Pseudocode for the Native L4S AQM</a>
          </figcaption></figure>
</div>
<p id="appendix-A.1-22"></p>
<span id="name-example-pi-update-pseudocod"></span><div id="dualq_fig_Algo_pi2_core">
<figure id="figure-6">
          <div class="sourcecode" id="appendix-A.1-23.1">
<pre>
1:  dualpi2_update(lq, cq) {                % Update p' every Tupdate
2:    curq = cq.time()  % use queuing time of first-in Classic packet
3:    p' = p' + alpha * (curq - target) + beta * (curq - prevq)
4:    p_CL = k * p'  % Coupled L4S prob = base prob * coupling factor
5:    p_C = p'^2                       % Classic prob = (base prob)^2
6:    prevq = curq
7:  }</pre>
</div>
<figcaption><a href="#figure-6" class="selfRef">Figure 6</a>:
<a href="#name-example-pi-update-pseudocod" class="selfRef">Example PI-update Pseudocode for DualQ Coupled PI2 AQM</a>
          </figcaption></figure>
</div>
<p style="margin-left: 1.5em" id="appendix-A.1-24" class="keepWithPrevious">(Note: Clamping p' within the range [0,1] omitted for clarity -- see below.)<a href="#appendix-A.1-24" class="pilcrow">¶</a></p>
<p id="appendix-A.1-25">The coupled marking probability p_CL depends on the base
        probability (p'), which is kept up to date by executing the core PI algorithm in
        <a href="#dualq_fig_Algo_pi2_core" class="auto internal xref">Figure 6</a> every Tupdate.<a href="#appendix-A.1-25" class="pilcrow">¶</a></p>
<p id="appendix-A.1-26">Note that p' solely depends on the queuing time in the Classic
        queue. In line 2, the current queuing delay (curq) is evaluated from
        how long the head packet was in the Classic queue (cq). The function
        cq.time() (not shown) subtracts the time stamped at enqueue from the
        current time (see <a href="#note_qdelay" class="internal xref">Note a</a> below) and implicitly takes the current queuing
        delay as 0 if the queue is empty.<a href="#appendix-A.1-26" class="pilcrow">¶</a></p>
<p id="appendix-A.1-27">The algorithm centres on line 3, which is a classical
        PI controller that alters p' dependent on: a)
        the error between the current queuing delay (curq) and the target
        queuing delay (target) and b) the change in queuing delay since the
        last sample. The name 'PI' represents the fact that the second factor
        (how fast the queue is growing) is Proportional
        to load while the first is the Integral of
        the load (so it removes any standing queue in excess of the
        target).<a href="#appendix-A.1-27" class="pilcrow">¶</a></p>
<p id="appendix-A.1-28">The target parameter can be set based on local knowledge, but the
        aim is for the default to be a good compromise for anywhere in the
        intended deployment environment -- the public Internet. According
        to <span>[<a href="#PI2param" class="cite xref">PI2param</a>]</span>, the target queuing delay on line 8 of
        <a href="#dualq_fig_Algo_pi2_core_header" class="auto internal xref">Figure 2</a> is related to the
        typical base RTT worldwide, RTT_typ, by two factors: target = RTT_typ
        * g * f. Below, we summarize the rationale behind these factors and
        introduce a further adjustment. The two factors ensure that, in a
        large proportion of cases (say 90%), the sawtooth variations in RTT of
        a single flow will fit within the buffer without underutilizing the
        link. Frankly, these factors are educated guesses, but with the
        emphasis closer to 'educated' than to 'guess' (see <span>[<a href="#PI2param" class="cite xref">PI2param</a>]</span> for the full background):<a href="#appendix-A.1-28" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="appendix-A.1-29.1">RTT_typ is taken as 25 ms. This is based on an average CDN
            latency measured in each country weighted by the number of
            Internet users in that country to produce an overall weighted
            average for the Internet <span>[<a href="#PI2param" class="cite xref">PI2param</a>]</span>. Countries
            were ranked by number of Internet users, and once 90% of Internet
            users were covered, smaller countries were excluded to avoid
            small sample sizes that would be less representative. Also, importantly, the data
            for the average CDN latency in China (with the largest number of
            Internet users) has been removed, because the CDN latency was a
            significant outlier and, on reflection, the experimental technique
            seemed inappropriate to the CDN market in China.<a href="#appendix-A.1-29.1" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-A.1-29.2">g is taken as 0.38. The factor g is a geometry factor that
            characterizes the shape of the sawteeth of prevalent Classic
            congestion controllers. The geometry factor is the fraction of the
            amplitude of the sawtooth variability in queue delay that lies
            below the AQM's target. 
            For instance, at low bitrates, the
            geometry factor of standard Reno is 0.5, but at higher rates, it
            tends towards just under 1. According to the census of congestion
            controllers conducted by Mishra et al. in Jul-Oct
            2019 <span>[<a href="#CCcensus19" class="cite xref">CCcensus19</a>]</span>, most Classic TCP traffic
            uses CUBIC. And, according to the analysis in <span>[<a href="#PI2param" class="cite xref">PI2param</a>]</span>, if running over a PI2 AQM, a large proportion
            of this CUBIC traffic would be in its Reno-friendly mode, which
            has a geometry factor of ~0.39 (for all known implementations). The
            rest of the CUBIC traffic would be in true CUBIC mode, which has a
            geometry factor of ~0.36. Without modelling the sawtooth profiles
            from all the other less prevalent congestion controllers, we
            estimate a 7:3 weighted average of these two, resulting in an
            average geometry factor of 0.38.<a href="#appendix-A.1-29.2" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-A.1-29.3">f is taken as 2. The factor f is a safety factor that increases
            the target queue to allow for the distribution of RTT_typ around
            its mean. Otherwise, the target queue would only avoid
            underutilization for those users below the mean. It also provides
            a safety margin for the proportion of paths in use that span
            beyond the distance between a user and their local CDN. Currently,
            no data is available on the variance of queue delay around the
            mean in each region, so there is plenty of room for this guess to
            become more educated.<a href="#appendix-A.1-29.3" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-A.1-29.4">
            <span>[<a href="#PI2param" class="cite xref">PI2param</a>]</span> recommends target = RTT_typ * g * f =
            25 ms * 0.38 * 2 = 19 ms. However, a further adjustment is
            warranted, because target is moving year-on-year. 
            The paper is
            based on data collected in 2019, and it mentions evidence from the Speedtest Global Index
            that suggests RTT_typ reduced by 17% (fixed) or 12%
            (mobile) between 2020 and 2021. Therefore, we recommend a default
            of target = 15 ms at the time of writing (2021).<a href="#appendix-A.1-29.4" class="pilcrow">¶</a>
</li>
        </ul>
<p id="appendix-A.1-30">Operators can always use the data and discussion in <span>[<a href="#PI2param" class="cite xref">PI2param</a>]</span> to configure a more appropriate target for their
        environment. For instance, an operator might wish to question the
        assumptions called out in that paper, such as the goal of no
        underutilization for a large majority of single flow transfers (given
        many large transfers use multiple flows to avoid the scaling
        limitations of Classic flows).<a href="#appendix-A.1-30" class="pilcrow">¶</a></p>
<p id="appendix-A.1-31">The two 'gain factors' in line 3 of <a href="#dualq_fig_Algo_pi2_core" class="auto internal xref">Figure 6</a>, alpha and beta, respectively
        weight how strongly each of the two elements (Integral and
        Proportional) alters p'. They are in units of 'per second of delay' or
        Hz, because they transform differences in queuing delay into changes
        in probability (assuming probability has a value from 0 to 1).<a href="#appendix-A.1-31" class="pilcrow">¶</a></p>
<p id="appendix-A.1-32">Alpha and beta determine how much p' ought to change after each
        update interval (Tupdate). For a smaller Tupdate, p' should change by
        the same amount per second but in finer more frequent steps. So alpha
        depends on Tupdate (see line 13 of the initialization function in
        <a href="#dualq_fig_Algo_pi2_core_header" class="auto internal xref">Figure 2</a>). It is best to update
        p' as frequently as possible, but Tupdate will probably be constrained
        by hardware performance. As shown in line 12, the update interval
        should be frequent enough to update at least once in the time taken
        for the target queue to drain ('target') as long as it updates at
        least three times per maximum RTT. Tupdate defaults to 16 ms in the
        reference Linux implementation because it has to be rounded to a
        multiple of 4 ms. For link rates from 4 to 200 Mb/s and a maximum RTT
        of 100 ms, it has been verified through extensive testing that
        Tupdate = 16 ms (as also recommended in the PIE spec <span>[<a href="#RFC8033" class="cite xref">RFC8033</a>]</span>) is sufficient.<a href="#appendix-A.1-32" class="pilcrow">¶</a></p>
<p id="appendix-A.1-33">The choice of alpha and beta also determines the AQM's stable
        operating range. The AQM ought to change p' as fast as possible in
        response to changes in load without overcompensating and therefore
        causing oscillations in the queue. Therefore, the values of alpha and
        beta also depend on the RTT of the expected worst-case flow
        (RTT_max).<a href="#appendix-A.1-33" class="pilcrow">¶</a></p>
<p id="appendix-A.1-34">The maximum RTT of a PI controller (RTT_max in line 9 of <a href="#dualq_fig_Algo_pi2_core_header" class="auto internal xref">Figure 2</a>) is not an absolute maximum,
        but more instability (more queue variability) sets in for long-running
        flows with an RTT above this value. The propagation delay halfway
        round the planet and back in glass fibre is 200 ms. However, hardly
        any traffic traverses such extreme paths and, since the significant
        consolidation of Internet traffic between 2007 and 2009 <span>[<a href="#Labovitz10" class="cite xref">Labovitz10</a>]</span>, a high and growing proportion of all Internet
        traffic (roughly two-thirds at the time of writing) has been served
        from CDNs or 'cloud' services
        distributed close to end users. The Internet might change again, but
        for now, designing for a maximum RTT of 100 ms is a good compromise
        between faster queue control at low RTT and some instability on the
        occasions when a longer path is necessary.<a href="#appendix-A.1-34" class="pilcrow">¶</a></p>
<p id="appendix-A.1-35">Recommended derivations of the gain constants alpha and beta can be
        approximated for Reno over a PI2 AQM as:
 alpha = 0.1 * Tupdate / RTT_max^2;
 beta = 0.3 / RTT_max,
 as shown in lines 13 and 14 of
        <a href="#dualq_fig_Algo_pi2_core_header" class="auto internal xref">Figure 2</a>. These are derived
        from the stability analysis in <span>[<a href="#PI2" class="cite xref">PI2</a>]</span>. For the default
        values of Tupdate = 16 ms and RTT_max = 100 ms, they result in alpha =
        0.16; beta = 3.2 (discrepancies are due to rounding). These defaults
        have been verified with a wide range of link rates, target delays, and
        traffic models with mixed and similar RTTs, short and long
        flows, etc.<a href="#appendix-A.1-35" class="pilcrow">¶</a></p>
<p id="appendix-A.1-36">In corner cases, p' can overflow the range [0,1] so the resulting
        value of p' has to be bounded (omitted from the pseudocode). Then, as
        already explained, the coupled and Classic probabilities are derived
        from the new p' in lines 4 and 5 of <a href="#dualq_fig_Algo_pi2_core" class="auto internal xref">Figure 6</a> as p_CL = k*p' and p_C = p'^2.<a href="#appendix-A.1-36" class="pilcrow">¶</a></p>
<p id="appendix-A.1-37">Because the coupled L4S marking probability (p_CL) is factored up
        by k, the dynamic gain parameters alpha and beta are also inherently
        factored up by k for the L4S queue. So, the effective gain factor for
        the L4S queue is k*alpha (with defaults alpha = 0.16 Hz and k = 2,
        effective L4S alpha = 0.32 Hz).<a href="#appendix-A.1-37" class="pilcrow">¶</a></p>
<p id="appendix-A.1-38">Unlike in PIE <span>[<a href="#RFC8033" class="cite xref">RFC8033</a>]</span>, alpha and beta do not
        need to be tuned every Tupdate dependent on p'. Instead, in PI2, alpha
        and beta are independent of p' because the squaring applied to Classic
        traffic tunes them inherently. This is explained in <span>[<a href="#PI2" class="cite xref">PI2</a>]</span>, which also explains why this more principled approach
        removes the need for most of the heuristics that had to be added to
        PIE.<a href="#appendix-A.1-38" class="pilcrow">¶</a></p>
<p id="appendix-A.1-39">Nonetheless, an implementer might wish to add selected details to
        either AQM. For instance, the Linux reference DualPI2 implementation
        includes the following (not shown in the pseudocode above):<a href="#appendix-A.1-39" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="appendix-A.1-40.1">Classic and coupled marking or dropping (i.e., based on p_C
            and p_CL from the PI controller) is not applied to a packet if the
            aggregate queue length in bytes is &lt; 2 MTU (prior to enqueuing
            the packet or dequeuing it, depending on whether the AQM is
            configured to be applied at enqueue or dequeue); and<a href="#appendix-A.1-40.1" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-A.1-40.2">in the WRR scheduler, the 'credit' indicating which queue
            should transmit is only changed if there are packets in both
            queues (i.e., if there is actual resource contention). This
            means that a properly paced L flow might never be delayed by the
            WRR. The WRR credit is reset in favour of the L queue when the
            link is idle.<a href="#appendix-A.1-40.2" class="pilcrow">¶</a>
</li>
        </ul>
<p id="appendix-A.1-41">An implementer might also wish to add other heuristics,
        e.g., burst protection <span>[<a href="#RFC8033" class="cite xref">RFC8033</a>]</span> or enhanced
        burst protection <span>[<a href="#RFC8034" class="cite xref">RFC8034</a>]</span>.<a href="#appendix-A.1-41" class="pilcrow">¶</a></p>
<p id="appendix-A.1-42">Notes:<a href="#appendix-A.1-42" class="pilcrow">¶</a></p>
<ol start="1" type="a" class="normal type-a" id="appendix-A.1-43">
   <li id="appendix-A.1-43.1">
<div id="note_qdelay">
            <p id="appendix-A.1-43.1.1">The drain rate of the queue can vary
            if it is scheduled relative to other queues or if it accommodates
            fluctuations in a wireless medium. To auto-adjust to changes in
            drain rate, the queue needs to be measured in time, not bytes or
            packets <span>[<a href="#AQMmetrics" class="cite xref">AQMmetrics</a>]</span> <span>[<a href="#CoDel" class="cite xref">CoDel</a>]</span>.
            Queuing delay could be measured directly as the sojourn time (a.k.a.
            service time) of the queue by storing a per-packet timestamp as
            each packet is enqueued and subtracting it from the system time
            when the packet is dequeued. If timestamping is not easy to
            introduce with certain hardware, queuing delay could be predicted
            indirectly by dividing the size of the queue by the predicted
            departure rate, which might be known precisely for some link
            technologies (see, for example, DOCSIS PIE <span>[<a href="#RFC8034" class="cite xref">RFC8034</a>]</span>).<a href="#appendix-A.1-43.1.1" class="pilcrow">¶</a></p>
<p id="appendix-A.1-43.1.2">However, sojourn time is slow to detect bursts.
            For instance, if a burst arrives at an empty queue, the sojourn
            time only fully measures the burst's delay when its last packet is
            dequeued, even though the queue has known the size of the burst
            since its last packet was enqueued -- so it could have signalled
            congestion earlier. To remedy this, each head packet can be marked
            when it is dequeued based on the expected delay of the tail packet
            behind it, as explained below, rather than based on the head
            packet's own delay due to the packets in front of it. "Underutilization with Bursty Traffic" in <span>[<a href="#Heist21" class="cite xref">Heist21</a>]</span> identifies a specific scenario where bursty
            traffic significantly hits utilization of the L queue. If this
            effect proves to be more widely applicable, using the delay behind
            the head could improve performance.<a href="#appendix-A.1-43.1.2" class="pilcrow">¶</a></p>
<p id="appendix-A.1-43.1.3">The
            delay behind the head can be implemented by dividing the backlog
            at dequeue by the link rate or equivalently multiplying the
            backlog by the delay per unit of backlog. The implementation
            details will depend on whether the link rate is known; if it is
            not, a moving average of the delay per unit backlog can be
            maintained. This delay consists of serialization as well as media
            acquisition for shared media. So the details will depend strongly
            on the specific link technology. This approach should be less
            sensitive to timing errors and cost less in operations and memory
            than the otherwise equivalent 'scaled sojourn time' metric, which
            is the sojourn time of a packet scaled by the ratio of the queue
            sizes when the packet departed and arrived <span>[<a href="#SigQ-Dyn" class="cite xref">SigQ-Dyn</a>]</span>.<a href="#appendix-A.1-43.1.3" class="pilcrow">¶</a></p>
</div>
          </li>
<li id="appendix-A.1-43.2">
<div id="note_separate_buffers">Line 2 of the dualpi2_enqueue() function (<a href="#dualq_fig_Algo_pi2_enqueue" class="auto internal xref">Figure 3</a>) assumes an implementation
            where lq and cq share common buffer memory. An alternative
            implementation could use separate buffers for each queue, in which
            case the arriving packet would have to be classified first to
            determine which buffer to check for available space. The choice is
            a trade-off; a shared buffer can use less memory whereas separate
            buffers isolate the L4S queue from tail drop due to large bursts
            of Classic traffic (e.g., a Classic Reno TCP during slow-start
            over a long RTT).<a href="#note_separate_buffers" class="pilcrow">¶</a>
</div>
          </li>
<li id="appendix-A.1-43.3">
<div id="note_ramp">
            <p id="appendix-A.1-43.3.1">There has been some concern that using the step function of
            DCTCP for the Native L4S AQM requires end systems to smooth the
            signal for an unnecessarily large number of round trips to ensure
            sufficient fidelity. A ramp is no worse than a step in initial
            experiments with existing DCTCP. Therefore, it is recommended that
            a ramp is configured in place of a step, which will allow
            congestion control algorithms to investigate faster smoothing
            algorithms.<a href="#appendix-A.1-43.3.1" class="pilcrow">¶</a></p>
<p id="appendix-A.1-43.3.2">A ramp is more general than a
            step, because an operator can effectively turn the ramp into a
            step function, as used by DCTCP, by setting the range to zero.
            There will not be a divide by zero problem at line 5 of <a href="#dualq_fig_Algo_laqm_core" class="auto internal xref">Figure 5</a> because, if minTh is equal to
            maxTh, the condition for this ramp calculation cannot arise.<a href="#appendix-A.1-43.3.2" class="pilcrow">¶</a></p>
</div>
        </li>
</ol>
</section>
</div>
<div id="dualq_Ex_algo_pi2-2">
<section id="appendix-A.2">
        <h3 id="name-pass-2-edge-case-details">
<a href="#appendix-A.2" class="section-number selfRef">A.2. </a><a href="#name-pass-2-edge-case-details" class="section-name selfRef">Pass #2: Edge-Case Details</a>
        </h3>
<p id="appendix-A.2-1">This section takes a second pass through the pseudocode to add
        details of two edge-cases: low link rate and overload. <a href="#dualq_fig_Algo_pi2_full_dequeue" class="auto internal xref">Figure 7</a> repeats the dequeue
        function of <a href="#dualq_fig_Algo_pi2_dequeue" class="auto internal xref">Figure 4</a>, but with
        details of both edge-cases added. Similarly, <a href="#dualq_fig_Algo_pi2_full_core" class="auto internal xref">Figure 8</a> repeats the core PI algorithm
        of <a href="#dualq_fig_Algo_pi2_core" class="auto internal xref">Figure 6</a>, but with overload details
        added. The initialization, enqueue, L4S AQM, and recur functions are
        unchanged.<a href="#appendix-A.2-1" class="pilcrow">¶</a></p>
<p id="appendix-A.2-2">The link rate can be so low that it takes a single packet queue
        longer to serialize than the threshold delay at which ECN marking
        starts to be applied in the L queue. Therefore, a minimum marking
        threshold parameter in units of packets rather than time is necessary
        (Th_len, default 1 packet in line 19 of <a href="#dualq_fig_Algo_pi2_core_header" class="auto internal xref">Figure 2</a>) to ensure that the ramp
        does not trigger excessive marking on slow links. Where an
        implementation knows the link rate, it can set up this minimum at the
        time it is configured. 
        For instance, it would divide 1 MTU by the link
        rate to convert it into a serialization time, then if the lower
        threshold of the Native L AQM ramp was lower than this serialization
        time, it could increase the thresholds to shift the bottom of the ramp
        to 2 MTU. This is the approach used in DOCSIS <span>[<a href="#DOCSIS3.1" class="cite xref">DOCSIS3.1</a>]</span>, because the configured link rate is dedicated to
        the DualQ.<a href="#appendix-A.2-2" class="pilcrow">¶</a></p>
<p id="appendix-A.2-3">The pseudocode given here applies where the link rate is unknown,
        which is more common for software implementations that might be
        deployed in scenarios where the link is shared with other queues. In
        lines 5a to 5d in <a href="#dualq_fig_Algo_pi2_full_dequeue" class="auto internal xref">Figure 7</a>, the
        native L4S marking probability, p'_L, is zeroed if the queue is only 1
        packet (in the default configuration).<a href="#appendix-A.2-3" class="pilcrow">¶</a></p>
<aside id="appendix-A.2-4">
          <p id="appendix-A.2-4.1">Linux implementation note: In Linux, the check that the
          queue exceeds Th_len before marking with the Native L4S AQM is
          actually at enqueue, not dequeue; otherwise, it would exempt the last
          packet of a burst from being marked. The result of the check is
          conveyed from enqueue to the dequeue function via a boolean in the
          packet metadata.<a href="#appendix-A.2-4.1" class="pilcrow">¶</a></p>
</aside>
<p id="appendix-A.2-5">Persistent overload is deemed to have occurred when Classic
        drop/marking probability reaches p_Cmax. Above this point, the Classic
        drop probability is applied to both the L and C queues, irrespective of
        whether any packet is ECN-capable. ECT packets that are not dropped
        can still be ECN-marked.<a href="#appendix-A.2-5" class="pilcrow">¶</a></p>
<p id="appendix-A.2-6">In line 11 of the initialization function (<a href="#dualq_fig_Algo_pi2_core_header" class="auto internal xref">Figure 2</a>), the maximum Classic drop
        probability p_Cmax = min(1/k^2, 1) or 1/4 for the default coupling
        factor k = 2. In practice, 25% has been found to be a good threshold to
        preserve fairness between ECN-capable and non-ECN-capable traffic.
        This protects the queues against both temporary overload from
        responsive flows and more persistent overload from any unresponsive
        traffic that falsely claims to be responsive to ECN.<a href="#appendix-A.2-6" class="pilcrow">¶</a></p>
<p id="appendix-A.2-7">When the Classic ECN-marking probability reaches the p_Cmax
        threshold (1/k^2), the marking probability that is coupled to the L4S queue,
        p_CL, will always be 100% for any k (by equation (1) in <a href="#dualq_coupled" class="auto internal xref">Section 2.1</a>). So, for readability, the constant p_Lmax is
        defined as 1 in line 21 of the initialization function (<a href="#dualq_fig_Algo_pi2_core_header" class="auto internal xref">Figure 2</a>). This is intended to ensure
        that the L4S queue starts to introduce dropping once ECN marking
        saturates at 100% and can rise no further. The 'Prague L4S
        requirements' <span>[<a href="#RFC9331" class="cite xref">RFC9331</a>]</span> state
        that when an L4S congestion control detects a drop, it falls back to
        a response that coexists with 'Classic' Reno congestion control. So, it
        is correct that when the L4S queue drops packets, it drops them
        proportional to p'^2, as if they are Classic packets.<a href="#appendix-A.2-7" class="pilcrow">¶</a></p>
<p id="appendix-A.2-8">The two queues each test for overload in lines 4b and 12b of the
        dequeue function (<a href="#dualq_fig_Algo_pi2_full_dequeue" class="auto internal xref">Figure 7</a>).
        Lines 8c to 8g drop L4S packets with probability p'^2. Lines 8h to 8i
        mark the remaining packets with probability p_CL. Given p_Lmax = 1,
        all remaining packets will be marked because, to have reached the else
        block at line 8b, p_CL &gt;= 1.<a href="#appendix-A.2-8" class="pilcrow">¶</a></p>
<p id="appendix-A.2-9">Line 2a in the core PI algorithm (<a href="#dualq_fig_Algo_pi2_full_core" class="auto internal xref">Figure 8</a>) deals with overload of the
        L4S queue when there is little or no Classic traffic. This is
        necessary, because the core PI algorithm maintains the appropriate
        drop probability to regulate overload, but it depends on the length of
        the Classic queue. If there is little or no Classic queue, the naive PI-update function 
        (<a href="#dualq_fig_Algo_pi2_core" class="auto internal xref">Figure 6</a>) would drop
        nothing, even if the L4S queue were overloaded -- so tail drop would
        have to take over (lines 2 and 3 of <a href="#dualq_fig_Algo_pi2_enqueue" class="auto internal xref">Figure 3</a>).<a href="#appendix-A.2-9" class="pilcrow">¶</a></p>
<p id="appendix-A.2-10">Instead, line 2a of the full PI-update function (<a href="#dualq_fig_Algo_pi2_full_core" class="auto internal xref">Figure 8</a>) ensures that the Base PI AQM
        in line 3 is driven by whichever of the two queue delays is greater,
        but line 3 still always uses the same Classic target (default 15 ms).
        If L queue delay is greater just because there is little or no Classic
        traffic, normally it will still be well below the Base AQM target.
        This is because L4S traffic is also governed by the shallow threshold
        of its own Native AQM (lines 5a to 6 of the dequeue algorithm in <a href="#dualq_fig_Algo_pi2_full_dequeue" class="auto internal xref">Figure 7</a>). So the Base AQM will be
        driven to zero and not contribute.
 However, if the L queue is
        overloaded by traffic that is unresponsive to its marking, the max()
        in line 2a of <a href="#dualq_fig_Algo_pi2_full_core" class="auto internal xref">Figure 8</a> enables the L queue to smoothly take over driving the Base
        AQM into overload mode even if there is little or no Classic traffic.
        Then the Base AQM will keep the L queue to the Classic target (default
        15 ms) by shedding L packets.<a href="#appendix-A.2-10" class="pilcrow">¶</a></p>
<span id="name-example-dequeue-pseudocode-f"></span><div id="dualq_fig_Algo_pi2_full_dequeue">
<figure id="figure-7">
          <div class="sourcecode" id="appendix-A.2-11.1">
<pre>
1:  dualpi2_dequeue(lq, cq, pkt) {     % Couples L4S &amp; Classic queues
2:    while ( lq.byt() + cq.byt() &gt; 0 ) {
3:      if ( scheduler() == lq ) {
4a:       lq.dequeue(pkt)                             % L4S scheduled
4b:       if ( p_CL &lt; p_Lmax ) {      % Check for overload saturation
5a:         if (lq.len()&gt;Th_len)                   % &gt;1 packet queued
5b:           p'_L = laqm(lq.time())                    % Native LAQM
5c:         else
5d:           p'_L = 0                 % Suppress marking 1 pkt queue
6:          p_L = max(p'_L, p_CL)                % Combining function
7:          if ( recur(lq, p_L) )                    % Linear marking
8a:           mark(pkt)
8b:       } else {                              % overload saturation
8c:         if ( recur(lq, p_C) ) {          % probability p_C = p'^2
8e:           drop(pkt)      % revert to Classic drop due to overload
8f:           continue        % continue to the top of the while loop
8g:         }
8h:         if ( recur(lq, p_CL) )        % probability p_CL = k * p'
8i:           mark(pkt)         % linear marking of remaining packets
8j:       }
9:      } else {
10:       cq.dequeue(pkt)                         % Classic scheduled
11:       if ( recur(cq, p_C) ) {            % probability p_C = p'^2
12a:        if ( (ecn(pkt) == 0)                % ECN field = not-ECT
12b:             OR (p_C &gt;= p_Cmax) ) {       % Overload disables ECN
13:           drop(pkt)                     % squared drop, redo loop
14:           continue        % continue to the top of the while loop
15:         }
16:         mark(pkt)                                  % squared mark
17:       }
18:     }
19:     return(pkt)                      % return the packet and stop
20:   }
21:   return(NULL)                             % no packet to dequeue
22: }
</pre>
</div>
<figcaption><a href="#figure-7" class="selfRef">Figure 7</a>:
<a href="#name-example-dequeue-pseudocode-f" class="selfRef">Example Dequeue Pseudocode for DualQ Coupled PI2 AQM (Including Code for Edge-Cases)</a>
          </figcaption></figure>
</div>
<span id="name-example-pi-update-pseudocode"></span><div id="dualq_fig_Algo_pi2_full_core">
<figure id="figure-8">
          <div class="sourcecode" id="appendix-A.2-12.1">
<pre>
1:  dualpi2_update(lq, cq) {                % Update p' every Tupdate
2a:   curq = max(cq.time(), lq.time())    % use greatest queuing time
3:    p' = p' + alpha * (curq - target) + beta * (curq - prevq)
4:    p_CL = p' * k  % Coupled L4S prob = base prob * coupling factor
5:    p_C = p'^2                       % Classic prob = (base prob)^2
6:    prevq = curq
7:  }
</pre>
</div>
<figcaption><a href="#figure-8" class="selfRef">Figure 8</a>:
<a href="#name-example-pi-update-pseudocode" class="selfRef">Example PI-update Pseudocode for DualQ Coupled PI2 AQM (Including Overload Code)</a>
          </figcaption></figure>
</div>
<p id="appendix-A.2-13"></p>
<p id="appendix-A.2-14">The choice of scheduler technology is critical to overload
        protection (see <a href="#dualq_Overload_Starvation" class="auto internal xref">Section 4.2.2</a>).<a href="#appendix-A.2-14" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="appendix-A.2-15.1">A well-understood weighted scheduler such as WRR is recommended. As long as the scheduler weight
            for Classic is small (e.g., 1/16), its exact value is
            unimportant, because it does not normally determine capacity
            shares. The weight is only important to prevent unresponsive L4S
            traffic starving Classic traffic in the short term (see <a href="#dualq_Overload_Starvation" class="auto internal xref">Section 4.2.2</a>). This is because capacity
            sharing between the queues is normally determined by the coupled
            congestion signal, which overrides the scheduler, by making L4S
            sources leave roughly equal per-flow capacity available for
            Classic flows.<a href="#appendix-A.2-15.1" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-A.2-15.2">
            <p id="appendix-A.2-15.2.1">Alternatively, a time-shifted FIFO (TS-FIFO) could be used. It
            works by selecting the head packet that has waited the longest,
            biased against the Classic traffic by a time-shift of tshift. To
            implement TS-FIFO, the scheduler() function in line 3 of
            the dequeue code would simply be implemented as the scheduler()
            function at the bottom of <a href="#dualq_fig_Algo_Real" class="auto internal xref">Figure 10</a> in
            <a href="#dualq_Ex_algo" class="auto internal xref">Appendix B</a>. For the public Internet, a good
            value for tshift is 50 ms. For private networks with smaller
            diameter, about 4*target would be reasonable. TS-FIFO is a very
            simple scheduler, but complexity might need to be added to address
            some deficiencies (which is why it is not recommended over
            WRR):<a href="#appendix-A.2-15.2.1" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="appendix-A.2-15.2.2.1">TS-FIFO does not fully isolate latency in the L4S queue
                from uncontrolled bursts in the Classic queue;<a href="#appendix-A.2-15.2.2.1" class="pilcrow">¶</a>
</li>
              <li class="normal" id="appendix-A.2-15.2.2.2">using sojourn time for TS-FIFO is only appropriate if
                timestamping of packets is feasible; and<a href="#appendix-A.2-15.2.2.2" class="pilcrow">¶</a>
</li>
              <li class="normal" id="appendix-A.2-15.2.2.3">even if timestamping is supported, the sojourn time of the
                head packet is always stale, so a more instantaneous measure
                of queue delay could be used (see <a href="#note_qdelay" class="internal xref">Note a</a> in <a href="#dualq_Ex_algo_pi2-1" class="auto internal xref">Appendix A.1</a>).<a href="#appendix-A.2-15.2.2.3" class="pilcrow">¶</a>
</li>
            </ul>
</li>
          <li class="normal" id="appendix-A.2-15.3">A strict priority scheduler would be inappropriate as discussed
            in <a href="#dualq_Overload_Starvation" class="auto internal xref">Section 4.2.2</a>.<a href="#appendix-A.2-15.3" class="pilcrow">¶</a>
</li>
        </ul>
</section>
</div>
</section>
</div>
<div id="dualq_Ex_algo">
<section id="appendix-B">
      <h2 id="name-example-dualq-coupled-curvy">
<a href="#appendix-B" class="section-number selfRef">Appendix B. </a><a href="#name-example-dualq-coupled-curvy" class="section-name selfRef">Example DualQ Coupled Curvy RED Algorithm</a>
      </h2>
<p id="appendix-B-1">As another example of a DualQ Coupled AQM algorithm, the pseudocode
      below gives the Curvy-RED-based algorithm. Although the AQM was designed
      to be efficient in integer arithmetic, to aid understanding it is first
      given using floating point arithmetic (<a href="#dualq_fig_Algo_Real" class="auto internal xref">Figure 10</a>). Then, one possible optimization for
      integer arithmetic is given, also in pseudocode (<a href="#dualq_fig_Algo_Int" class="auto internal xref">Figure 11</a>). To aid comparison, the line numbers are
      kept in step between the two by using letter suffixes where the longer
      code needs extra lines.<a href="#appendix-B-1" class="pilcrow">¶</a></p>
<div id="dualq_Ex_algo_float">
<section id="appendix-B.1">
        <h3 id="name-curvy-red-in-pseudocode">
<a href="#appendix-B.1" class="section-number selfRef">B.1. </a><a href="#name-curvy-red-in-pseudocode" class="section-name selfRef">Curvy RED in Pseudocode</a>
        </h3>
<p id="appendix-B.1-1">The pseudocode manipulates three main structures of variables: the
        packet (pkt), the L4S queue (lq), and the Classic queue (cq). It is defined 
        and described below in the following three functions:<a href="#appendix-B.1-1" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="appendix-B.1-2.1">the initialization function cred_params_init(...) (<a href="#dualq_fig_Algo_pi2_core_header" class="auto internal xref">Figure 2</a>) that sets parameter
            defaults (the API for setting non-default values is omitted for
            brevity);<a href="#appendix-B.1-2.1" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-B.1-2.2">the dequeue function cred_dequeue(lq, cq, pkt) (<a href="#dualq_fig_Algo_pi2_dequeue" class="auto internal xref">Figure 4</a>); and<a href="#appendix-B.1-2.2" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-B.1-2.3">the scheduling function scheduler(), which selects between the
            head packets of the two queues.<a href="#appendix-B.1-2.3" class="pilcrow">¶</a>
</li>
        </ul>
<p id="appendix-B.1-3">It also uses the following functions that are either shown
        elsewhere or not shown in full here:<a href="#appendix-B.1-3" class="pilcrow">¶</a></p>
<ul class="normal">
<li class="normal" id="appendix-B.1-4.1">the enqueue function, which is identical to that used for
            DualPI2, dualpi2_enqueue(lq, cq, pkt) in <a href="#dualq_fig_Algo_pi2_enqueue" class="auto internal xref">Figure 3</a>;<a href="#appendix-B.1-4.1" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-B.1-4.2">mark(pkt) and drop(pkt) for ECN marking and dropping a
            packet;<a href="#appendix-B.1-4.2" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-B.1-4.3">cq.byt() or lq.byt() returns the current length
            (a.k.a. backlog) of the relevant queue in bytes; and<a href="#appendix-B.1-4.3" class="pilcrow">¶</a>
</li>
          <li class="normal" id="appendix-B.1-4.4">cq.time() or lq.time() returns the current queuing delay of the
            relevant queue in units of time (see <a href="#note_qdelay" class="internal xref">Note a</a> in <a href="#dualq_Ex_algo_pi2-1" class="auto internal xref">Appendix A.1</a>).<a href="#appendix-B.1-4.4" class="pilcrow">¶</a>
</li>
        </ul>
<p id="appendix-B.1-5">Because Curvy RED was evaluated before DualPI2, certain
        improvements introduced for DualPI2 were not evaluated for Curvy RED.
        In the pseudocode below, the straightforward improvements have been
        added on the assumption they will provide similar benefits, but that
        has not been proven experimentally. They are: i) a conditional
        priority scheduler instead of strict priority; ii) a time-based
        threshold for the Native L4S AQM; and iii) ECN support for the Classic
        AQM. A recent evaluation has proved that a minimum ECN-marking
        threshold (minTh) greatly improves performance, so this is also
        included in the pseudocode.<a href="#appendix-B.1-5" class="pilcrow">¶</a></p>
<p id="appendix-B.1-6">Overload protection has not been added to the Curvy RED pseudocode
        below so as not to detract from the main features. It would be added
        in exactly the same way as in <a href="#dualq_Ex_algo_pi2-2" class="auto internal xref">Appendix A.2</a> for
        the DualPI2 pseudocode. The Native L4S AQM uses a step threshold, but
        a ramp like that described for DualPI2 could be used instead. The
        scheduler uses the simple TS-FIFO algorithm, but it could be replaced
        with WRR.<a href="#appendix-B.1-6" class="pilcrow">¶</a></p>
<p id="appendix-B.1-7">The Curvy RED algorithm has not been maintained or evaluated to the
        same degree as the DualPI2 algorithm. In initial experiments on
        broadband access links ranging from 4 Mb/s to 200 Mb/s with base RTTs
        from 5 ms to 100 ms, Curvy RED achieved good results with the default
        parameters in <a href="#dualq_fig_Algo_cred_core_header" class="auto internal xref">Figure 9</a>.<a href="#appendix-B.1-7" class="pilcrow">¶</a></p>
<p id="appendix-B.1-8">The parameters are categorized by whether they relate to the
        Classic AQM, the L4S AQM, or the framework coupling them together.
        Constants and variables derived from these parameters are also
        included at the end of each category. These are the raw input
        parameters for the algorithm. A configuration front-end could accept
        more meaningful parameters (e.g., RTT_max and RTT_typ) and convert
        them into these raw parameters, as has been done for DualPI2 in <a href="#dualq_Ex_algo_pi2" class="auto internal xref">Appendix A</a>. Where necessary, parameters are
        explained further in the walk-through of the pseudocode below.<a href="#appendix-B.1-8" class="pilcrow">¶</a></p>
<span id="name-example-header-pseudocode-fo"></span><div id="dualq_fig_Algo_cred_core_header">
<figure id="figure-9">
          <div class="sourcecode" id="appendix-B.1-9.1">
<pre>
1:  cred_params_init(...) {            % Set input parameter defaults
2:    % DualQ Coupled framework parameters
3:    limit = MAX_LINK_RATE * 250 ms               % Dual buffer size
4:    k' = 1                        % Coupling factor as a power of 2
5:    tshift = 50 ms                % Time-shift of TS-FIFO scheduler
6a:   % Constants derived from framework parameters
6b:   k = 2^k'                    % Coupling factor from equation (1)
6c:
7:    % Classic AQM parameters
8:    g_C = 5            % EWMA smoothing parameter as a power of 1/2
9:    S_C = -1          % Classic ramp scaling factor as a power of 2
10:   minTh = 500 ms    % No Classic drop/mark below this queue delay
11:   % Constants derived from Classic AQM parameters
12:   gamma = 2^(-g_C)                     % EWMA smoothing parameter
13:   range_C = 2^S_C                         % Range of Classic ramp
14:
15:   % L4S AQM parameters
16:   T = 1 ms             % Queue delay threshold for Native L4S AQM
17:   % Constants derived from above parameters
18:   S_L = S_C - k'        % L4S ramp scaling factor as a power of 2
19:   range_L = 2^S_L                             % Range of L4S ramp
20: }
</pre>
</div>
<figcaption><a href="#figure-9" class="selfRef">Figure 9</a>:
<a href="#name-example-header-pseudocode-fo" class="selfRef">Example Header Pseudocode for DualQ Coupled Curvy RED AQM</a>
          </figcaption></figure>
</div>
<span id="name-example-dequeue-pseudocode-fo"></span><div id="dualq_fig_Algo_Real">
<figure id="figure-10">
          <div class="sourcecode" id="appendix-B.1-10.1">
<pre>
1:  cred_dequeue(lq, cq, pkt) {       % Couples L4S &amp; Classic queues
2:    while ( lq.byt() + cq.byt() &gt; 0 ) {
3:      if ( scheduler() == lq ) {
4:        lq.dequeue(pkt)                            % L4S scheduled
5a:       p_CL = (Q_C - minTh) / range_L
5b:       if (  ( lq.time() &gt; T )
5c:          OR ( p_CL &gt; maxrand(U) ) )
6:          mark(pkt)
7:      } else {
8:        cq.dequeue(pkt)                        % Classic scheduled
9a:       Q_C = gamma * cq.time() + (1-gamma) * Q_C % Classic Q EWMA
10a:      sqrt_p_C = (Q_C - minTh) / range_C
10b:      if ( sqrt_p_C &gt; maxrand(2*U) ) {
11:         if ( ecn(pkt) == 0 ) {             % ECN field = not-ECT
12:           drop(pkt)                    % Squared drop, redo loop
13:           continue       % continue to the top of the while loop
14:         }
15:         mark(pkt)
16:       }
17:     }
18:     return(pkt)                % return the packet and stop here
19:   }
20:   return(NULL)                            % no packet to dequeue
21: }

22: maxrand(u) {                % return the max of u random numbers
23:   maxr=0
24:   while (u-- &gt; 0)
25:     maxr = max(maxr, rand())                   % 0 &lt;= rand() &lt; 1
26:   return(maxr)
27: }

28: scheduler() {
29:   if ( lq.time() + tshift &gt;= cq.time() )
30:     return lq;
31:   else
32:     return cq;
33: }
</pre>
</div>
<figcaption><a href="#figure-10" class="selfRef">Figure 10</a>:
<a href="#name-example-dequeue-pseudocode-fo" class="selfRef">Example Dequeue Pseudocode for DualQ Coupled Curvy RED AQM</a>
          </figcaption></figure>
</div>
<p id="appendix-B.1-11">The dequeue pseudocode (<a href="#dualq_fig_Algo_Real" class="auto internal xref">Figure 10</a>) is
        repeatedly called whenever the lower layer is ready to forward a
        packet. It schedules one packet for dequeuing (or zero if the queue is
        empty) then returns control to the caller so that it does not block
        while that packet is being forwarded. While making this dequeue
        decision, it also makes the necessary AQM decisions on dropping or
        marking. The alternative of applying the AQMs at enqueue would shift
        some processing from the critical time when each packet is dequeued.
        However, it would also add a whole queue of delay to the control
        signals, making the control loop very sloppy.<a href="#appendix-B.1-11" class="pilcrow">¶</a></p>
<p id="appendix-B.1-12">The code is written assuming the AQMs are applied on dequeue
        (<a href="#dualq_note_dequeue" class="internal xref">Note 1</a>). All the dequeue
        code is contained within a large while loop so that if it decides to
        drop a packet, it will continue until it selects a packet to schedule.
        If both queues are empty, the routine returns NULL at line 20. Line 3
        of the dequeue pseudocode is where the conditional priority scheduler
        chooses between the L4S queue (lq) and the Classic queue (cq). The
        TS-FIFO scheduler is shown at lines 28-33, which would be
        suitable if simplicity is paramount (see <a href="#dualq_note_conditional_priority" class="internal xref">Note 2</a>).<a href="#appendix-B.1-12" class="pilcrow">¶</a></p>
<p id="appendix-B.1-13">Within each queue, the decision whether to forward, drop, or mark is
        taken as follows (to simplify the explanation, it is assumed that
        U = 1):<a href="#appendix-B.1-13" class="pilcrow">¶</a></p>
<span class="break"></span><dl class="dlNewline" id="appendix-B.1-14">
          <dt id="appendix-B.1-14.1">L4S:</dt>
          <dd style="margin-left: 1.5em" id="appendix-B.1-14.2">
            <p id="appendix-B.1-14.2.1">If the test at line 3 determines there is an
            L4S packet to dequeue, the tests at lines 5b and 5c determine
            whether to mark it. The first is a simple test of whether the L4S
            queue delay (lq.time()) is greater than a step threshold T 
            (<a href="#dualq_note_step" class="internal xref">Note 3</a>). The second
            test is similar to the random ECN marking in RED but with the
            following differences: i) marking depends on queuing time, not
            bytes, in order to scale for any link rate without being
            reconfigured; ii) marking of the L4S queue depends on a logical OR
            of two tests: one against its own queuing time and one against the
            queuing time of the <em>other</em> (Classic)
            queue; iii) the tests are against the instantaneous queuing time
            of the L4S queue but against a smoothed average of the other (Classic)
            queue; and iv) the queue is compared with the maximum of U random
            numbers (but if U = 1, this is the same as the single random number
            used in RED).<a href="#appendix-B.1-14.2.1" class="pilcrow">¶</a></p>
<p id="appendix-B.1-14.2.2">Specifically, in line 5a, the
            coupled marking probability p_CL is set to the amount by which the
            averaged Classic queuing delay Q_C exceeds the minimum queuing
            delay threshold (minTh), all divided by the L4S scaling parameter
            range_L. range_L represents the queuing delay (in seconds) added
            to minTh at which marking probability would hit 100%. Then, in line
            5c (if U = 1), the result is compared with a uniformly distributed
            random number between 0 and 1, which ensures that, over range_L,
            marking probability will linearly increase with queuing time.<a href="#appendix-B.1-14.2.2" class="pilcrow">¶</a></p>
</dd>
          <dd class="break"></dd>
<dt id="appendix-B.1-14.3">Classic:</dt>
          <dd style="margin-left: 1.5em" id="appendix-B.1-14.4">
            <p id="appendix-B.1-14.4.1">If the scheduler at line 3 chooses to
            dequeue a Classic packet and jumps to line 7, the test at line 10b
            determines whether to drop or mark it. But before that, line 9a
            updates Q_C, which is an exponentially weighted moving average
            (Note <a href="#dualq_note_non-EWMA" class="auto internal xref">4</a>) of
            the queuing time of the Classic queue, where cq.time() is the
            current instantaneous queuing time of the packet at the head of
            the Classic queue (zero if empty), and gamma is the exponentially weighted moving average (EWMA) constant
            (default 1/32; see line 12 of the initialization function).<a href="#appendix-B.1-14.4.1" class="pilcrow">¶</a></p>
<p id="appendix-B.1-14.4.2">Lines 10a and 10b implement the Classic
            AQM. In line 10a, the averaged queuing time Q_C is divided by the
            Classic scaling parameter range_C, in the same way that queuing
            time was scaled for L4S marking. This scaled queuing time will be
            squared to compute Classic drop probability. So, before it is
            squared, it is effectively the square root of the drop
            probability; hence, it is given the variable name sqrt_p_C. The
            squaring is done by comparing it with the maximum out of two
            random numbers (assuming U = 1). Comparing it with the maximum out
            of two is the same as the logical 'AND' of two tests, which
            ensures drop probability rises with the square of queuing
            time.<a href="#appendix-B.1-14.4.2" class="pilcrow">¶</a></p>
</dd>
        <dd class="break"></dd>
</dl>
<p id="appendix-B.1-15">The AQM functions in each queue (lines 5c and 10b) are two cases
        of a new generalization of RED called 'Curvy RED', motivated as follows.
        When the performance of this AQM was compared with FQ-CoDel and PIE,
        their goal of holding queuing delay to a fixed target seemed
        misguided <span>[<a href="#CRED_Insights" class="cite xref">CRED_Insights</a>]</span>. As the number of flows
        increases, if the AQM does not allow host congestion controllers to
        increase queuing delay, it has to introduce abnormally high levels of
        loss. Then loss rather than queuing becomes the dominant cause of
        delay for short flows, due to timeouts and tail losses.<a href="#appendix-B.1-15" class="pilcrow">¶</a></p>
<p id="appendix-B.1-16">Curvy RED constrains delay with a softened target that allows some
        increase in delay as load increases. This is achieved by increasing
        drop probability on a convex curve relative to queue growth (the
        square curve in the Classic queue, if U = 1). Like RED, the curve hugs
        the zero axis while the queue is shallow. Then, as load increases, it
        introduces a growing barrier to higher delay. But, unlike RED, it
        requires only two parameters, not three. The disadvantage of Curvy RED
        (compared to a PI controller, for example) is that it is not adapted to
        a wide range of RTTs. Curvy RED can be used as is when the RTT range
        to be supported is limited; otherwise, an adaptation mechanism is
        needed.<a href="#appendix-B.1-16" class="pilcrow">¶</a></p>
<p id="appendix-B.1-17">From our limited experiments with Curvy RED so far, recommended
        values of these parameters are: S_C = -1; g_C = 5; T = 5 * MTU at the
        link rate (about 1 ms at 60 Mb/s) for the range of base RTTs typical on
        the public Internet. <span>[<a href="#CRED_Insights" class="cite xref">CRED_Insights</a>]</span> explains why these
        parameters are applicable whatever rate link this AQM implementation
        is deployed on and how the parameters would need to be adjusted for a
        scenario with a different range of RTTs (e.g., a data centre). The
        setting of k depends on policy (see <a href="#dualq_norm_reqs" class="auto internal xref">Section 2.5</a>
        and <a href="#dualq_Choosing_k" class="auto internal xref">Appendix C.2</a>, respectively, for its recommended
        setting and guidance on alternatives).<a href="#appendix-B.1-17" class="pilcrow">¶</a></p>
<p id="appendix-B.1-18">There is also a cUrviness parameter, U, which is a small positive
        integer. It is likely to take the same hard-coded value for all
        implementations, once experiments have determined a good value. Only
        U = 1 has been used in experiments so far, but results might be even
        better with U = 2 or higher.<a href="#appendix-B.1-18" class="pilcrow">¶</a></p>
<p id="appendix-B.1-19">Notes:<a href="#appendix-B.1-19" class="pilcrow">¶</a></p>
<ol start="1" type="1" class="normal type-1" id="appendix-B.1-20">
   <li id="appendix-B.1-20.1">
<div id="dualq_note_dequeue">The alternative of applying the
            AQMs at enqueue would shift some processing from the critical time
            when each packet is dequeued. However, it would also add a whole
            queue of delay to the control signals, making the control loop
            sloppier (for a typical RTT, it would double the Classic queue's
            feedback delay). On a platform where packet timestamping is
            feasible, e.g., Linux, it is also easiest to apply the AQMs at
            dequeue, because that is where queuing time is also measured.<a href="#dualq_note_dequeue" class="pilcrow">¶</a>
</div>
          </li>
<li id="appendix-B.1-20.2">
<div id="dualq_note_conditional_priority">WRR better isolates
            the L4S queue from large delay bursts in the Classic queue, but it
            is slightly less simple than TS-FIFO. If WRR were used, a low
            default Classic weight (e.g., 1/16) would need to be
            configured in place of the time-shift in line 5 of the
            initialization function (<a href="#dualq_fig_Algo_cred_core_header" class="auto internal xref">Figure 9</a>).<a href="#dualq_note_conditional_priority" class="pilcrow">¶</a>
</div>
          </li>
<li id="appendix-B.1-20.3">
<div id="dualq_note_step">A step function is shown for
            simplicity. A ramp function (see <a href="#dualq_fig_Algo_laqm_core" class="auto internal xref">Figure 5</a> and the discussion around it
            in <a href="#dualq_Ex_algo_pi2-1" class="auto internal xref">Appendix A.1</a>) is recommended, because
            it is more general than a step and has the potential to enable L4S
            congestion controls to converge more rapidly.<a href="#dualq_note_step" class="pilcrow">¶</a>
</div>
          </li>
<li id="appendix-B.1-20.4">
<div id="dualq_note_non-EWMA">An EWMA is only one possible way
            to filter bursts; other more adaptive smoothing methods could be
            valid, and it might be appropriate to decrease the EWMA faster than
            it increases, e.g., by using the minimum of the smoothed and
            instantaneous queue delays, min(Q_C, cq.time()).<a href="#dualq_note_non-EWMA" class="pilcrow">¶</a>
</div>
        </li>
</ol>
</section>
</div>
<section id="appendix-B.2">
        <h3 id="name-efficient-implementation-of">
<a href="#appendix-B.2" class="section-number selfRef">B.2. </a><a href="#name-efficient-implementation-of" class="section-name selfRef">Efficient Implementation of Curvy RED</a>
        </h3>
<p id="appendix-B.2-1">Although code optimization depends on the platform, the following
        notes explain where the design of Curvy RED was particularly motivated
        by efficient implementation.<a href="#appendix-B.2-1" class="pilcrow">¶</a></p>
<p id="appendix-B.2-2">The Classic AQM at line 10b in <a href="#dualq_fig_Algo_Real" class="auto internal xref">Figure 10</a> calls maxrand(2*U), which gives twice
        as much curviness as the call to maxrand(U) in the marking function at
        line 5c. This is the trick that implements the square rule in equation
        (1) (<a href="#dualq_coupled" class="auto internal xref">Section 2.1</a>). This is based on the fact that,
        given a number X from 1 to 6, the probability that two dice throws
        will both be less than X is the square of the probability that one
        throw will be less than X. 
        So, when U = 1, the L4S marking function is
        linear and the Classic dropping function is squared. If U = 2, L4S would
        be a square function and Classic would be quartic. And so on.<a href="#appendix-B.2-2" class="pilcrow">¶</a></p>
<p id="appendix-B.2-3">The maxrand(u) function in lines 22-27 simply generates u random
        numbers and returns the maximum. Typically, maxrand(u) could be run in
        parallel out of band. For instance, if U = 1, the Classic queue would
        require the maximum of two random numbers. So, instead of calling
        maxrand(2*U) in-band, the maximum of every pair of values from a
        pseudorandom number generator could be generated out of band and held
        in a buffer ready for the Classic queue to consume.<a href="#appendix-B.2-3" class="pilcrow">¶</a></p>
<span id="name-optimised-example-dequeue-p"></span><div id="dualq_fig_Algo_Int">
<figure id="figure-11">
          <div class="sourcecode" id="appendix-B.2-4.1">
<pre>
1:  cred_dequeue(lq, cq, pkt) {       % Couples L4S &amp; Classic queues
2:    while ( lq.byt() + cq.byt() &gt; 0 ) {
3:      if ( scheduler() == lq ) {
4:        lq.dequeue(pkt)                            % L4S scheduled
5:        if ((lq.time() &gt; T) OR (Q_C &gt;&gt; (S_L-2) &gt; maxrand(U)))
6:          mark(pkt)
7:      } else {
8:        cq.dequeue(pkt)                        % Classic scheduled
9:    Q_C += (cq.ns() - Q_C) &gt;&gt; g_C             % Classic Q EWMA
10:       if ( (Q_C &gt;&gt; (S_C-2) ) &gt; maxrand(2*U) ) {
11:         if ( ecn(pkt) == 0 ) {             % ECN field = not-ECT
12:           drop(pkt)                    % Squared drop, redo loop
13:           continue       % continue to the top of the while loop
14:         }
15:         mark(pkt)
16:       }
17:     }
18:     return(pkt)                % return the packet and stop here
19:   }
20:   return(NULL)                            % no packet to dequeue
21: }
</pre>
</div>
<figcaption><a href="#figure-11" class="selfRef">Figure 11</a>:
<a href="#name-optimised-example-dequeue-p" class="selfRef">Optimised Example Dequeue Pseudocode for DualQ Coupled AQM using Integer Arithmetic</a>
          </figcaption></figure>
</div>
<p id="appendix-B.2-5">The two ranges, range_L and range_C, are expressed as powers of 2 so
        that division can be implemented as a right bit-shift (&gt;&gt;) in
        lines 5 and 10 of the integer variant of the pseudocode (<a href="#dualq_fig_Algo_Int" class="auto internal xref">Figure 11</a>).<a href="#appendix-B.2-5" class="pilcrow">¶</a></p>
<p id="appendix-B.2-6">For the integer variant of the pseudocode, an integer version of
        the rand() function used at line 25 of the maxrand() function in <a href="#dualq_fig_Algo_Real" class="auto internal xref">Figure 10</a> would be arranged to return an integer
        in the range 0 &lt;= maxrand() &lt; 2^32 (not shown). This would scale
        up all the floating point probabilities in the range [0,1] by
        2^32.<a href="#appendix-B.2-6" class="pilcrow">¶</a></p>
<p id="appendix-B.2-7">Queuing delays are also scaled up by 2^32, but in two stages: i) in
        line 9, queuing time cq.ns() is returned in integer nanoseconds, making
        the value about 2^30 times larger than when the units were seconds, and then
        ii) in lines 5 and 10, an adjustment of -2 to the right bit-shift
        multiplies the result by 2^2, to complete the scaling by 2^32.<a href="#appendix-B.2-7" class="pilcrow">¶</a></p>
<p id="appendix-B.2-8">In line 8 of the initialization function, the EWMA constant gamma
        is represented as an integer power of 2, g_C, so that in line 9 of the
        integer code (<a href="#dualq_fig_Algo_Int" class="auto internal xref">Figure 11</a>), the division needed to weight the moving average can be
        implemented by a right bit-shift (&gt;&gt; g_C).<a href="#appendix-B.2-8" class="pilcrow">¶</a></p>
</section>
</section>
</div>
<section id="appendix-C">
      <h2 id="name-choice-of-coupling-factor-k">
<a href="#appendix-C" class="section-number selfRef">Appendix C. </a><a href="#name-choice-of-coupling-factor-k" class="section-name selfRef">Choice of Coupling Factor, k</a>
      </h2>
<p id="appendix-C-1"></p>
<div id="dualq_rtt-dependence">
<section id="appendix-C.1">
        <h3 id="name-rtt-dependence">
<a href="#appendix-C.1" class="section-number selfRef">C.1. </a><a href="#name-rtt-dependence" class="section-name selfRef">RTT-Dependence</a>
        </h3>
<p id="appendix-C.1-1">Where Classic flows compete for the same capacity, their relative
        flow rates depend not only on the congestion probability but also on
        their end-to-end RTT (= base RTT + queue delay). The rates of
        Reno <span>[<a href="#RFC5681" class="cite xref">RFC5681</a>]</span> flows competing over an AQM are
        roughly inversely proportional to their RTTs. CUBIC exhibits similar
        RTT-dependence when in Reno-friendly mode, but it is less
        RTT-dependent otherwise.<a href="#appendix-C.1-1" class="pilcrow">¶</a></p>
<p id="appendix-C.1-2">Until the early experiments with the DualQ Coupled AQM, the
        importance of the reasonably large Classic queue in mitigating
        RTT-dependence when the base RTT is low had not been appreciated.
        Appendix <a href="https://www.rfc-editor.org/rfc/rfc9331#appendix-A.1.6" class="relref">A.1.6</a> 
        of the L4S ECN Protocol <span>[<a href="#RFC9331" class="cite xref">RFC9331</a>]</span> uses numerical examples to
        explain why bloated buffers had concealed the RTT-dependence of
        Classic congestion controls before that time.
 Then, it explains why,
        the more that queuing delays have reduced, the more that
        RTT-dependence has surfaced as a potential starvation problem for long
        RTT flows, when competing against very short RTT flows.<a href="#appendix-C.1-2" class="pilcrow">¶</a></p>
<p id="appendix-C.1-3">Given that congestion control on end systems is voluntary, there is
        no reason why it has to be voluntarily RTT-dependent. The
        RTT-dependence of existing Classic traffic cannot be 'undeployed'.
        Therefore, <span>[<a href="#RFC9331" class="cite xref">RFC9331</a>]</span> requires L4S
        congestion controls to be significantly less RTT-dependent than the
        standard Reno congestion control <span>[<a href="#RFC5681" class="cite xref">RFC5681</a>]</span>, at
        least at low RTT. Then RTT-dependence ought to be no worse than it is
        with appropriately sized Classic buffers. Following this approach
        means there is no need for network devices to address RTT-dependence,
        although there would be no harm if they did, which per-flow queuing
        inherently does.<a href="#appendix-C.1-3" class="pilcrow">¶</a></p>
</section>
</div>
<div id="dualq_Choosing_k">
<section id="appendix-C.2">
        <h3 id="name-guidance-on-controlling-thr">
<a href="#appendix-C.2" class="section-number selfRef">C.2. </a><a href="#name-guidance-on-controlling-thr" class="section-name selfRef">Guidance on Controlling Throughput Equivalence</a>
        </h3>
<p id="appendix-C.2-1">The coupling factor, k, determines the balance between L4S and
        Classic flow rates (see <a href="#dualq_config" class="auto internal xref">Section 2.5.2.1</a> and equation
        (1) in <a href="#dualq_coupled" class="auto internal xref">Section 2.1</a>).<a href="#appendix-C.2-1" class="pilcrow">¶</a></p>
<p id="appendix-C.2-2">For the public Internet, a coupling factor of k = 2 is recommended
        and justified below. For scenarios other than the public Internet, a
        good coupling factor can be derived by plugging the appropriate
        numbers into the same working.<a href="#appendix-C.2-2" class="pilcrow">¶</a></p>
<p id="appendix-C.2-3">To summarize the maths below, from equation (7) it can be seen that
        choosing k = 1.64 would theoretically make L4S throughput roughly the
        same as Classic, <em>if their actual end-to-end RTTs were the same</em>.
        However, even if the base RTTs are the same, the actual RTTs are
        unlikely to be the same, because Classic traffic needs a fairly large
        queue to avoid underutilization and excess drop, whereas L4S does
        not.<a href="#appendix-C.2-3" class="pilcrow">¶</a></p>
<p id="appendix-C.2-4">Therefore, to determine the appropriate coupling factor policy, the
        operator needs to decide at what base RTT it wants L4S and Classic
        flows to have roughly equal throughput, once the effect of the
        additional Classic queue on Classic throughput has been taken into
        account. With this approach, a network operator can determine a good
        coupling factor without knowing the precise L4S algorithm for reducing
        RTT-dependence -- or even in the absence of any algorithm.<a href="#appendix-C.2-4" class="pilcrow">¶</a></p>
<p id="appendix-C.2-5">The following additional terminology will be used, with appropriate
        subscripts:<a href="#appendix-C.2-5" class="pilcrow">¶</a></p>
<span class="break"></span><dl class="dlParallel" id="appendix-C.2-6">
          <dt id="appendix-C.2-6.1">r:</dt>
          <dd style="margin-left: 1.5em" id="appendix-C.2-6.2">Packet rate [pkt/s]<a href="#appendix-C.2-6.2" class="pilcrow">¶</a>
</dd>
          <dd class="break"></dd>
<dt id="appendix-C.2-6.3">R:</dt>
          <dd style="margin-left: 1.5em" id="appendix-C.2-6.4">RTT [s/round]<a href="#appendix-C.2-6.4" class="pilcrow">¶</a>
</dd>
          <dd class="break"></dd>
<dt id="appendix-C.2-6.5">p:</dt>
          <dd style="margin-left: 1.5em" id="appendix-C.2-6.6">ECN-marking probability []<a href="#appendix-C.2-6.6" class="pilcrow">¶</a>
</dd>
        <dd class="break"></dd>
</dl>
<p id="appendix-C.2-7">On the Classic side, we consider Reno as the most sensitive and
        therefore worst-case Classic congestion control. We will also consider
        CUBIC in its Reno-friendly mode ('CReno') as the most prevalent
        congestion control, according to the references and analysis in <span>[<a href="#PI2param" class="cite xref">PI2param</a>]</span>. In either case, the Classic packet rate in steady
        state is given by the well-known square root formula for Reno
        congestion control:<a href="#appendix-C.2-7" class="pilcrow">¶</a></p>
<div class="sourcecode" id="appendix-C.2-8">
<pre>
    r_C = 1.22 / (R_C * p_C^0.5)          (5)</pre><a href="#appendix-C.2-8" class="pilcrow">¶</a>
</div>
<p id="appendix-C.2-9">On the L4S side, we consider the Prague congestion
        control <span>[<a href="#I-D.briscoe-iccrg-prague-congestion-control" class="cite xref">PRAGUE-CC</a>]</span> as the
        reference for steady-state dependence on congestion. Prague conforms
        to the same equation as DCTCP, but we do not use the equation derived
        in the DCTCP paper, which is only appropriate for step marking. The
        coupled marking, p_CL, is the appropriate one when considering
        throughput equivalence with Classic flows. Unlike step marking,
        coupled markings are inherently spaced out, so we use the formula for
        DCTCP packet rate with probabilistic marking derived in Appendix A of
        <span>[<a href="#PI2" class="cite xref">PI2</a>]</span>. We use the equation without RTT-independence
        enabled, which will be explained later.<a href="#appendix-C.2-9" class="pilcrow">¶</a></p>
<div class="sourcecode" id="appendix-C.2-10">
<pre>
    r_L = 2 / (R_L * p_CL)                (6)</pre><a href="#appendix-C.2-10" class="pilcrow">¶</a>
</div>
<p id="appendix-C.2-11">For packet rate equivalence, we equate the two packet rates and
        rearrange the equation into the same form as equation (1) (copied from <a href="#dualq_coupled" class="auto internal xref">Section 2.1</a>) so the two can be
        equated and simplified to produce a formula for a theoretical coupling
        factor, which we shall call k*:<a href="#appendix-C.2-11" class="pilcrow">¶</a></p>
<div class="sourcecode" id="appendix-C.2-12">
<pre>
    r_C = r_L
=&gt;  p_C = (p_CL/1.64 * R_L/R_C)^2.

    p_C = ( p_CL / k )^2.                 (1)

    k* = 1.64 * (R_C / R_L).              (7)
</pre><a href="#appendix-C.2-12" class="pilcrow">¶</a>
</div>
<p id="appendix-C.2-13">We say that this coupling factor is theoretical, because it is in
        terms of two RTTs, which raises two practical questions: i) for
        multiple flows with different RTTs, the RTT for each traffic class
        would have to be derived from the RTTs of all the flows in that class
        (actually the harmonic mean would be needed) and ii) a network node
        cannot easily know the RTT of the flows anyway.<a href="#appendix-C.2-13" class="pilcrow">¶</a></p>
<p id="appendix-C.2-14">RTT-dependence is caused by window-based congestion control, so it
        ought to be reversed there, not in the network. Therefore, we use a
        fixed coupling factor in the network and reduce RTT-dependence in L4S
        senders. We cannot expect Classic senders to all be updated to reduce
        their RTT-dependence. But solely addressing the problem in L4S senders
        at least makes RTT-dependence no worse -- not just between L4S senders,
        but also between L4S and Classic senders.<a href="#appendix-C.2-14" class="pilcrow">¶</a></p>
<p id="appendix-C.2-15">Throughput equivalence is defined for flows
        under comparable conditions, including with the same base
        RTT <span>[<a href="#RFC2914" class="cite xref">RFC2914</a>]</span>. So if we assume the same base RTT,
        R_b, for comparable flows, we can put both R_C and R_L in terms of
        R_b.<a href="#appendix-C.2-15" class="pilcrow">¶</a></p>
<p id="appendix-C.2-16">We can approximate the L4S RTT to be hardly greater than the base
        RTT, i.e., R_L ~= R_b. And we can replace R_C with (R_b + q_C),
        where the Classic queue, q_C, depends on the target queue delay that
        the operator has configured for the Classic AQM.<a href="#appendix-C.2-16" class="pilcrow">¶</a></p>
<p id="appendix-C.2-17">Taking PI2 as an example Classic AQM, it seems that we could just
        take R_C = R_b + target (recommended 15 ms by default in <a href="#dualq_Ex_algo_pi2-1" class="auto internal xref">Appendix A.1</a>). However, target is roughly the queue
        depth reached by the tips of the sawteeth of a congestion control, not
        the average <span>[<a href="#PI2param" class="cite xref">PI2param</a>]</span>. That is R_max = R_b +
        target.<a href="#appendix-C.2-17" class="pilcrow">¶</a></p>
<p id="appendix-C.2-18">The position of the average in relation to the max depends on the
        amplitude and geometry of the sawteeth. We consider two examples:
        Reno <span>[<a href="#RFC5681" class="cite xref">RFC5681</a>]</span>, as the most sensitive worst case,
        and CUBIC <span>[<a href="#RFC8312" class="cite xref">RFC8312</a>]</span> in its Reno-friendly mode
        ('CReno') as the most prevalent congestion control algorithm on the
        Internet according to the references in <span>[<a href="#PI2param" class="cite xref">PI2param</a>]</span>.
        Both are Additive Increase Multiplicative Decrease (AIMD), so we will generalize using b as the multiplicative
        decrease factor (b_r = 0.5 for Reno, b_c = 0.7 for CReno). Then<a href="#appendix-C.2-18" class="pilcrow">¶</a></p>
<div class="sourcecode" id="appendix-C.2-19">
<pre>
  R_C  = (R_max + b*R_max) / 2
       = R_max * (1+b)/2.

R_reno = 0.75 * (R_b + target);    R_creno = 0.85 * (R_b + target).
                                                                  (8)
</pre><a href="#appendix-C.2-19" class="pilcrow">¶</a>
</div>
<p id="appendix-C.2-20">Plugging all this into equation (7), at any particular base RTT, R_b, we get a fixed coupling factor
        for each:<a href="#appendix-C.2-20" class="pilcrow">¶</a></p>
<div class="sourcecode" id="appendix-C.2-21">
<pre>
k_reno = 1.64*0.75*(R_b+target)/R_b
       = 1.23*(1 + target/R_b);    k_creno = 1.39 * (1 + target/R_b).
</pre><a href="#appendix-C.2-21" class="pilcrow">¶</a>
</div>
<p id="appendix-C.2-22">An operator can then choose the base RTT at which it wants
        throughput to be equivalent. For instance, if we recommend that the
        operator chooses R_b = 25 ms, as a typical base RTT between Internet
        users and CDNs <span>[<a href="#PI2param" class="cite xref">PI2param</a>]</span>, then these coupling
        factors become:<a href="#appendix-C.2-22" class="pilcrow">¶</a></p>
<div class="sourcecode" id="appendix-C.2-23">
<pre>
k_reno = 1.23 * (1 + 15/25)        k_creno  = 1.39 * (1 + 15/25)
       = 1.97                               = 2.22
       ~= 2.                                ~= 2.                 (9)
</pre><a href="#appendix-C.2-23" class="pilcrow">¶</a>
</div>
<p id="appendix-C.2-24">The approximation is relevant to any of the above example DualQ
        Coupled algorithms, which use a coupling factor that is an integer
        power of 2 to aid efficient implementation. It also fits best for the
        worst case (Reno).<a href="#appendix-C.2-24" class="pilcrow">¶</a></p>
<p id="appendix-C.2-25">To check the outcome of this coupling factor, we can express the
        ratio of L4S to Classic throughput by substituting from their rate
        equations (5) and (6), then also substituting for p_C in terms of
        p_CL using equation (1) with k = 2 as just determined for the
        Internet:<a href="#appendix-C.2-25" class="pilcrow">¶</a></p>
<div class="sourcecode" id="appendix-C.2-26">
<pre>
r_L / r_C  = 2 (R_C * p_C^0.5) / 1.22 (R_L * p_CL)
           = (R_C * p_CL) / (1.22 * R_L * p_CL)
           = R_C / (1.22 * R_L).                                 (10)
</pre><a href="#appendix-C.2-26" class="pilcrow">¶</a>
</div>
<p id="appendix-C.2-27">As an example, we can then consider single competing CReno and
        Prague flows, by expressing both their RTTs in (10) in terms of their
        base RTTs, R_bC and R_bL. So R_C is replaced by equation (8) for
        CReno. And R_L is replaced by the max() function below, which
        represents the effective RTT of the current Prague congestion
        control <span>[<a href="#I-D.briscoe-iccrg-prague-congestion-control" class="cite xref">PRAGUE-CC</a>]</span> in its
        (default) RTT-independent mode, because it sets a floor to the
        effective RTT that it uses for additive increase:<a href="#appendix-C.2-27" class="pilcrow">¶</a></p>
<div class="sourcecode" id="appendix-C.2-28">
<pre>
r_L / r_C ~= 0.85 * (R_bC + target) / (1.22 * max(R_bL, R_typ))
          ~= (R_bC + target) / (1.4 * max(R_bL, R_typ)).
</pre><a href="#appendix-C.2-28" class="pilcrow">¶</a>
</div>
<p id="appendix-C.2-29">It can be seen that, for base RTTs below target (15 ms), both the
        numerator and the denominator plateau, which has the desired effect of
        limiting RTT-dependence.<a href="#appendix-C.2-29" class="pilcrow">¶</a></p>
<p id="appendix-C.2-30">At the start of the above derivations, an explanation was promised
        for why the L4S throughput equation in equation (6) did not need to
        model RTT-independence. This is because we only use one point -- at the
        typical base RTT where the operator chooses to calculate the coupling
        factor. Then throughput equivalence will at least hold at that chosen
        point. Nonetheless, assuming Prague senders implement RTT-independence
        over a range of RTTs below this, the throughput equivalence will then
        extend over that range as well.<a href="#appendix-C.2-30" class="pilcrow">¶</a></p>
<p id="appendix-C.2-31">Congestion control designers can choose different ways to reduce
        RTT-dependence. And each operator can make a policy choice to decide
        on a different base RTT, and therefore a different k, at which it
        wants throughput equivalence. Nonetheless, for the Internet, it makes
        sense to choose what is believed to be the typical RTT most users
        experience, because a Classic AQM's target queuing delay is also
        derived from a typical RTT for the Internet.<a href="#appendix-C.2-31" class="pilcrow">¶</a></p>
<p id="appendix-C.2-32">As a non-Internet example, for localized traffic from a particular
        ISP's data centre, using the measured RTTs, it was calculated that a
        value of k = 8 would achieve throughput equivalence, and experiments
        verified the formula very closely.<a href="#appendix-C.2-32" class="pilcrow">¶</a></p>
<p id="appendix-C.2-33">But, for a typical mix of RTTs across the general Internet, a value
        of k = 2 is recommended as a good workable compromise.<a href="#appendix-C.2-33" class="pilcrow">¶</a></p>
</section>
</div>
</section>
<section id="appendix-D">
      <h2 id="name-acknowledgements">
<a href="#name-acknowledgements" class="section-name selfRef">Acknowledgements</a>
      </h2>
<p id="appendix-D-1">Thanks to <span class="contact-name">Anil Agarwal</span>, <span class="contact-name">Sowmini Varadhan</span>, <span class="contact-name">Gabi Bracha</span>,
      <span class="contact-name">Nicolas Kuhn</span>, <span class="contact-name">Greg Skinner</span>,
      <span class="contact-name">Tom Henderson</span>, <span class="contact-name">David Pullen</span>,
      <span class="contact-name">Mirja Kühlewind</span>, <span class="contact-name">Gorry       Fairhurst</span>, <span class="contact-name">Pete Heist</span>, <span class="contact-name">Ermin       Sakic</span>, and <span class="contact-name">Martin Duke</span> for detailed review
      comments, particularly of the appendices, and suggestions on how to make
      the explanations clearer. Thanks also to <span class="contact-name">Tom       Henderson</span> for insight on the choice of schedulers and queue delay
      measurement techniques. And thanks to the area reviewers <span class="contact-name">Christer Holmberg</span>, <span class="contact-name">Lars Eggert</span>, and
      <span class="contact-name">Roman Danyliw</span>.<a href="#appendix-D-1" class="pilcrow">¶</a></p>
<p id="appendix-D-2">The early contributions of <span class="contact-name">Koen De Schepper</span>, <span class="contact-name">Bob Briscoe</span>, <span class="contact-name">Olga       Bondarenko</span>, and <span class="contact-name">Inton Tsang</span> were partly funded by the European Community
      under its Seventh Framework Programme through the Reducing Internet
      Transport Latency (RITE) project (ICT-317700). Contributions of <span class="contact-name">Koen De       Schepper</span> and <span class="contact-name">Olivier Tilmans</span> were also partly funded by the 5Growth and
      DAEMON EU H2020 projects. <span class="contact-name">Bob Briscoe</span>'s contribution was also
      partly funded by the Comcast Innovation Fund and the Research Council of
      Norway through the TimeIn project. The views expressed here are solely
      those of the authors.<a href="#appendix-D-2" class="pilcrow">¶</a></p>
</section>
<section id="appendix-E">
      <h2 id="name-contributors">
<a href="#name-contributors" class="section-name selfRef">Contributors</a>
      </h2>
<p id="appendix-E-1">The following contributed implementations and evaluations that
      validated and helped to improve this specification:<a href="#appendix-E-1" class="pilcrow">¶</a></p>
<p id="appendix-E-2"><span class="contact-name">Olga Albisser</span> &lt;olga@albisser.org&gt; of Simula Research Lab,
          Norway (Olga Bondarenko during early draft versions) implemented the
          prototype DualPI2 AQM for Linux with Koen De Schepper and conducted
          extensive evaluations as well as implementing the live performance
          visualization GUI <span>[<a href="#L4Sdemo16" class="cite xref">L4Sdemo16</a>]</span>.<a href="#appendix-E-2" class="pilcrow">¶</a></p>
<p id="appendix-E-3"><span class="contact-name">Olivier Tilmans</span> &lt;olivier.tilmans@nokia-bell-labs.com&gt; of
          Nokia Bell Labs, Belgium prepared and maintains the Linux
          implementation of DualPI2 for upstreaming.<a href="#appendix-E-3" class="pilcrow">¶</a></p>
<p id="appendix-E-4"><span class="contact-name">Shravya K.S.</span> wrote a model for the ns-3 simulator based on draft-ietf-tsvwg-aqm-dualq-coupled-01 (a draft version of this document). Based on this initial work, <span class="contact-name">Tom           Henderson</span> &lt;tomh@tomh.org&gt; updated that earlier model and
          created a model for the DualQ variant specified as part of the Low Latency
          DOCSIS specification, as well as conducting extensive
          evaluations.<a href="#appendix-E-4" class="pilcrow">¶</a></p>
<p id="appendix-E-5"><span class="contact-name">Ing Jyh (Inton) Tsang</span> of Nokia, Belgium built the End-to-End Data
          Centre to the Home broadband testbed on which DualQ Coupled AQM
          implementations were tested.<a href="#appendix-E-5" class="pilcrow">¶</a></p>
</section>
<div id="authors-addresses">
<section id="appendix-F">
      <h2 id="name-authors-addresses">
<a href="#name-authors-addresses" class="section-name selfRef">Authors' Addresses</a>
      </h2>
<address class="vcard">
        <div dir="auto" class="left"><span class="fn nameRole">Koen De Schepper</span></div>
<div dir="auto" class="left"><span class="org">Nokia Bell Labs</span></div>
<div dir="auto" class="left"><span class="locality">Antwerp</span></div>
<div dir="auto" class="left"><span class="country-name">Belgium</span></div>
<div class="email">
<span>Email:</span>
<a href="mailto:koen.de_schepper@nokia.com" class="email">koen.de_schepper@nokia.com</a>
</div>
<div class="url">
<span>URI:</span>
<a href="https://www.bell-labs.com/about/researcher-profiles/koende_schepper/" class="url">https://www.bell-labs.com/about/researcher-profiles/koende_schepper/</a>
</div>
</address>
<address class="vcard">
        <div dir="auto" class="left"><span class="fn nameRole">Bob Briscoe (<span class="role">editor</span>)</span></div>
<div dir="auto" class="left"><span class="org">Independent</span></div>
<div dir="auto" class="left"><span class="country-name">United Kingdom</span></div>
<div class="email">
<span>Email:</span>
<a href="mailto:ietf@bobbriscoe.net" class="email">ietf@bobbriscoe.net</a>
</div>
<div class="url">
<span>URI:</span>
<a href="https://bobbriscoe.net/" class="url">https://bobbriscoe.net/</a>
</div>
</address>
<address class="vcard">
        <div dir="auto" class="left"><span class="fn nameRole">Greg White</span></div>
<div dir="auto" class="left"><span class="org">CableLabs</span></div>
<div dir="auto" class="left">
<span class="locality">Louisville</span>, <span class="region">CO</span> </div>
<div dir="auto" class="left"><span class="country-name">United States of America</span></div>
<div class="email">
<span>Email:</span>
<a href="mailto:G.White@CableLabs.com" class="email">G.White@CableLabs.com</a>
</div>
</address>
</section>
</div>
<script>const toc = document.getElementById("toc");
toc.querySelector("h2").addEventListener("click", e => {
  toc.classList.toggle("active");
});
toc.querySelector("nav").addEventListener("click", e => {
  toc.classList.remove("active");
});
</script>
</body>
</html>