File: cubic_test.go

package congestion

import (
	"math"
	"time"

	"github.com/quic-go/quic-go/internal/protocol"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

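// Constants for the n-connection emulation used by these tests: with
// numConnections flows, the post-loss backoff factor becomes
// (n - 1 + beta) / n, and the Reno-style additive increase becomes
// 3 * n * n * (1 - betaN) / (1 + betaN) datagrams per window of acks.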
const (
	numConnections         uint32  = 2
	nConnectionBeta        float32 = (float32(numConnections) - 1 + beta) / float32(numConnections)
	nConnectionBetaLastMax float32 = (float32(numConnections) - 1 + betaLastMax) / float32(numConnections)
	nConnectionAlpha       float32 = 3 * float32(numConnections) * float32(numConnections) * (1 - nConnectionBeta) / (1 + nConnectionBeta)
	maxCubicTimeInterval           = 30 * time.Millisecond
)

var _ = Describe("Cubic", func() {
	var (
		clock mockClock
		cubic *Cubic
	)

	BeforeEach(func() {
		clock = mockClock{}
		cubic = NewCubic(&clock)
		cubic.SetNumConnections(int(numConnections))
	})

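	// renoCwnd returns the window expected after a single ack under the
	// byte-wise Reno-style increase: alpha * MSS * MSS / cwnd bytes per ack,
	// which adds up to roughly nConnectionAlpha datagrams per window of acks.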
	renoCwnd := func(currentCwnd protocol.ByteCount) protocol.ByteCount {
		return currentCwnd + protocol.ByteCount(float32(maxDatagramSize)*nConnectionAlpha*float32(maxDatagramSize)/float32(currentCwnd))
	}

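	// cubicConvexCwnd mirrors the fixed-point CUBIC calculation: the elapsed
	// time (plus one RTT) is converted into 1/1024-second units, and the window
	// grows by 410*offset^3*maxDatagramSize >> 40, i.e. about 0.4*t^3 datagrams.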
	cubicConvexCwnd := func(initialCwnd protocol.ByteCount, rtt, elapsedTime time.Duration) protocol.ByteCount {
		offset := protocol.ByteCount((elapsedTime+rtt)/time.Microsecond) << 10 / 1000000
		deltaCongestionWindow := 410 * offset * offset * offset * maxDatagramSize >> 40
		return initialCwnd + deltaCongestionWindow
	}

	It("works above origin (with tighter bounds)", func() {
		// Convex growth.
		const rttMin = 100 * time.Millisecond
		const rttMinS = float32(rttMin/time.Millisecond) / 1000.0
		currentCwnd := 10 * maxDatagramSize
		initialCwnd := currentCwnd

		clock.Advance(time.Millisecond)
		initialTime := clock.Now()
		expectedFirstCwnd := renoCwnd(currentCwnd)
		currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, initialTime)
		Expect(expectedFirstCwnd).To(Equal(currentCwnd))

		// Normal TCP phase.
		// The maximum number of expected reno RTTs can be calculated by
		// finding the point where the cubic curve and the reno curve meet.
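		// Reno adds nConnectionAlpha datagrams per RTT, while cubic adds about
		// 0.4*t^3 datagrams after t seconds, so the two curves cross after
		// sqrt(nConnectionAlpha / (0.4 * rttMinS^3)) RTTs; the -2 below stops
		// the loop slightly before that crossing point.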
		maxRenoRtts := int(math.Sqrt(float64(nConnectionAlpha/(0.4*rttMinS*rttMinS*rttMinS))) - 2)
		for i := 0; i < maxRenoRtts; i++ {
			// Alternatively, we expect the window to increase by one datagram
			// every time we receive currentCwnd/maxDatagramSize/nConnectionAlpha
			// acks back. (This is another way of saying we expect cwnd to grow
			// by approximately nConnectionAlpha datagrams once a full window's
			// worth of acks has been received.)
			numAcksThisEpoch := int(float32(currentCwnd/maxDatagramSize) / nConnectionAlpha)

			initialCwndThisEpoch := currentCwnd
			for n := 0; n < numAcksThisEpoch; n++ {
				// Call once per ACK.
				expectedNextCwnd := renoCwnd(currentCwnd)
				currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
				Expect(currentCwnd).To(Equal(expectedNextCwnd))
			}
			// Our byte-wise Reno implementation is an estimate.  We expect
			// the cwnd to increase by approximately one datagram every
			// cwnd/maxDatagramSize/nConnectionAlpha acks, but it may be off by
			// as much as half a packet for smaller values of currentCwnd.
			cwndChangeThisEpoch := currentCwnd - initialCwndThisEpoch
			Expect(cwndChangeThisEpoch).To(BeNumerically("~", maxDatagramSize, maxDatagramSize/2))
			clock.Advance(100 * time.Millisecond)
		}

		for i := 0; i < 54; i++ {
			maxAcksThisEpoch := currentCwnd / maxDatagramSize
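			// Spread this epoch's acks evenly across the 100ms RTT.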
			interval := time.Duration(100*1000/maxAcksThisEpoch) * time.Microsecond
			for n := 0; n < int(maxAcksThisEpoch); n++ {
				clock.Advance(interval)
				currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
				expectedCwnd := cubicConvexCwnd(initialCwnd, rttMin, clock.Now().Sub(initialTime))
				// If we allow per-ack updates, every update is a small cubic update.
				Expect(currentCwnd).To(Equal(expectedCwnd))
			}
		}
		expectedCwnd := cubicConvexCwnd(initialCwnd, rttMin, clock.Now().Sub(initialTime))
		currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
		Expect(currentCwnd).To(Equal(expectedCwnd))
	})

	It("works above the origin with fine grained cubing", func() {
		// Start the test with an artificially large cwnd to prevent Reno
		// from over-taking cubic.
		currentCwnd := 1000 * maxDatagramSize
		initialCwnd := currentCwnd
		rttMin := 100 * time.Millisecond
		clock.Advance(time.Millisecond)
		initialTime := clock.Now()

		currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
		clock.Advance(600 * time.Millisecond)
		currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())

		// We expect the algorithm to perform only non-zero, fine-grained cubic
		// increases on every ack in this case.
		for i := 0; i < 100; i++ {
			clock.Advance(10 * time.Millisecond)
			expectedCwnd := cubicConvexCwnd(initialCwnd, rttMin, clock.Now().Sub(initialTime))
			nextCwnd := cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
			// Make sure we are performing cubic increases.
			Expect(nextCwnd).To(Equal(expectedCwnd))
			// Make sure that these are non-zero, less-than-packet sized increases.
			Expect(nextCwnd).To(BeNumerically(">", currentCwnd))
			cwndDelta := nextCwnd - currentCwnd
			Expect(maxDatagramSize / 10).To(BeNumerically(">", cwndDelta))
			currentCwnd = nextCwnd
		}
	})

	It("handles per ack updates", func() {
		// Start the test with a large cwnd and RTT, to force the first
		// increase to be a cubic increase.
		initialCwndPackets := 150
		currentCwnd := protocol.ByteCount(initialCwndPackets) * maxDatagramSize
		rttMin := 350 * time.Millisecond

		// Initialize the epoch
		clock.Advance(time.Millisecond)
		// Keep track of the growth of the reno-equivalent cwnd.
		rCwnd := renoCwnd(currentCwnd)
		currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
		initialCwnd := currentCwnd

		// Simulate the return of cwnd packets in less than
		// maxCubicTimeInterval.
		maxAcks := int(float32(initialCwndPackets) / nConnectionAlpha)
		interval := maxCubicTimeInterval / time.Duration(maxAcks+1)

		// In this scenario, the first increase is dictated by the cubic
		// equation, but it is less than one byte, so the cwnd doesn't
		// change.  Normally, without per-ack increases, any cwnd plateau
		// will cause the cwnd to be pinned for maxCubicTimeInterval.  If
		// we enable per-ack updates, the cwnd will continue to grow,
		// regardless of the temporary plateau.
		clock.Advance(interval)
		rCwnd = renoCwnd(rCwnd)
		Expect(cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())).To(Equal(currentCwnd))
		for i := 1; i < maxAcks; i++ {
			clock.Advance(interval)
			nextCwnd := cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
			rCwnd = renoCwnd(rCwnd)
			// The window should increase on every ack.
			Expect(nextCwnd).To(BeNumerically(">", currentCwnd))
			Expect(nextCwnd).To(Equal(rCwnd))
			currentCwnd = nextCwnd
		}

		// After all the acks are returned from the epoch, we expect the
		// cwnd to have increased by nearly one packet.  (Not exactly one
		// packet, because our byte-wise Reno algorithm always slightly
		// underestimates).  Without per-ack updates, currentCwnd would
		// otherwise be unchanged.
		minimumExpectedIncrease := maxDatagramSize * 9 / 10
		Expect(currentCwnd).To(BeNumerically(">", initialCwnd+minimumExpectedIncrease))
	})

	It("handles loss events", func() {
		rttMin := 100 * time.Millisecond
		currentCwnd := 422 * maxDatagramSize
		expectedCwnd := renoCwnd(currentCwnd)
		// Initialize the state.
		clock.Advance(time.Millisecond)
		Expect(cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())).To(Equal(expectedCwnd))

		// On the first loss, the last max congestion window is set to the
		// congestion window before the loss.
		preLossCwnd := currentCwnd
		Expect(cubic.lastMaxCongestionWindow).To(BeZero())
		expectedCwnd = protocol.ByteCount(float32(currentCwnd) * nConnectionBeta)
		Expect(cubic.CongestionWindowAfterPacketLoss(currentCwnd)).To(Equal(expectedCwnd))
		Expect(cubic.lastMaxCongestionWindow).To(Equal(preLossCwnd))
		currentCwnd = expectedCwnd

		// On the second loss, the current congestion window has not yet
		// reached the last max congestion window.  The last max congestion
		// window will be reduced by an additional backoff factor to allow
		// for competition.
		preLossCwnd = currentCwnd
		expectedCwnd = protocol.ByteCount(float32(currentCwnd) * nConnectionBeta)
		Expect(cubic.CongestionWindowAfterPacketLoss(currentCwnd)).To(Equal(expectedCwnd))
		currentCwnd = expectedCwnd
		Expect(preLossCwnd).To(BeNumerically(">", cubic.lastMaxCongestionWindow))
		expectedLastMax := protocol.ByteCount(float32(preLossCwnd) * nConnectionBetaLastMax)
		Expect(cubic.lastMaxCongestionWindow).To(Equal(expectedLastMax))
		Expect(expectedCwnd).To(BeNumerically("<", cubic.lastMaxCongestionWindow))
		// Simulate an increase, and check that we are below the origin.
		currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
		Expect(cubic.lastMaxCongestionWindow).To(BeNumerically(">", currentCwnd))

		// On the final loss, simulate the condition where the congestion
		// window had a chance to grow nearly to the last congestion window.
		currentCwnd = cubic.lastMaxCongestionWindow - 1
		preLossCwnd = currentCwnd
		expectedCwnd = protocol.ByteCount(float32(currentCwnd) * nConnectionBeta)
		Expect(cubic.CongestionWindowAfterPacketLoss(currentCwnd)).To(Equal(expectedCwnd))
		expectedLastMax = preLossCwnd
		Expect(cubic.lastMaxCongestionWindow).To(Equal(expectedLastMax))
	})

	It("works below origin", func() {
		// Concave growth.
		rttMin := 100 * time.Millisecond
		currentCwnd := 422 * maxDatagramSize
		expectedCwnd := renoCwnd(currentCwnd)
		// Initialize the state.
		clock.Advance(time.Millisecond)
		Expect(cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())).To(Equal(expectedCwnd))

		expectedCwnd = protocol.ByteCount(float32(currentCwnd) * nConnectionBeta)
		Expect(cubic.CongestionWindowAfterPacketLoss(currentCwnd)).To(Equal(expectedCwnd))
		currentCwnd = expectedCwnd
		// First update after loss to initialize the epoch.
		currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
		// Cubic phase.
		for i := 0; i < 40; i++ {
			clock.Advance(100 * time.Millisecond)
			currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
		}
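		// 553632 appears to be the reference window computed for 1460-byte
		// packets; dividing by 1460 rescales it to maxDatagramSize.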
		expectedCwnd = 553632 * maxDatagramSize / 1460
		Expect(currentCwnd).To(Equal(expectedCwnd))
	})
})