File: server_selector.go

// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

package description

import (
	"encoding/json"
	"fmt"
	"math"
	"time"

	"go.mongodb.org/mongo-driver/mongo/readpref"
	"go.mongodb.org/mongo-driver/tag"
)

// ServerSelector is an interface implemented by types that can perform server selection given a topology description
// and a list of candidate servers. The selector should filter the provided candidate list and return the subset that
// matches its criteria.
type ServerSelector interface {
	SelectServer(Topology, []Server) ([]Server, error)
}

// ServerSelectorFunc is a function that can be used as a ServerSelector.
type ServerSelectorFunc func(Topology, []Server) ([]Server, error)

// SelectServer implements the ServerSelector interface.
func (ssf ServerSelectorFunc) SelectServer(t Topology, s []Server) ([]Server, error) {
	return ssf(t, s)
}
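
// exampleMongosOnlySelector is a minimal usage sketch, not part of the driver
// API: it shows how ServerSelectorFunc adapts an ordinary function into a
// ServerSelector. This hypothetical selector keeps only mongos servers.
var exampleMongosOnlySelector ServerSelector = ServerSelectorFunc(func(_ Topology, candidates []Server) ([]Server, error) {
	var result []Server
	for _, s := range candidates {
		if s.Kind == Mongos {
			result = append(result, s)
		}
	}
	return result, nil
})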

// serverSelectorInfo contains metadata concerning the server selector for the
// purpose of publication.
type serverSelectorInfo struct {
	Type      string
	Data      string               `json:",omitempty"`
	Selectors []serverSelectorInfo `json:",omitempty"`
}

// String returns the JSON string representation of the serverSelectorInfo.
func (sss serverSelectorInfo) String() string {
	bytes, _ := json.Marshal(sss)

	return string(bytes)
}

// serverSelectorInfoGetter is an interface that defines an info() method to
// get the serverSelectorInfo.
type serverSelectorInfoGetter interface {
	info() serverSelectorInfo
}

type compositeSelector struct {
	selectors []ServerSelector
}

func (cs *compositeSelector) info() serverSelectorInfo {
	csInfo := serverSelectorInfo{Type: "compositeSelector"}

	for _, sel := range cs.selectors {
		if getter, ok := sel.(serverSelectorInfoGetter); ok {
			csInfo.Selectors = append(csInfo.Selectors, getter.info())
		}
	}

	return csInfo
}

// String returns the JSON string representation of the compositeSelector.
func (cs *compositeSelector) String() string {
	return cs.info().String()
}

// CompositeSelector combines multiple selectors into a single selector by applying them in order to the candidates
// list.
//
// For example, if the initial candidates list is [s0, s1, s2, s3] and two selectors are provided where the first
// matches s0 and s1 and the second matches s1 and s2, the following would occur during server selection:
//
// 1. firstSelector([s0, s1, s2, s3]) -> [s0, s1]
// 2. secondSelector([s0, s1]) -> [s1]
//
// The final list of candidates returned by the composite selector would be [s1].
func CompositeSelector(selectors []ServerSelector) ServerSelector {
	return &compositeSelector{selectors: selectors}
}

func (cs *compositeSelector) SelectServer(t Topology, candidates []Server) ([]Server, error) {
	var err error
	for _, sel := range cs.selectors {
		candidates, err = sel.SelectServer(t, candidates)
		if err != nil {
			return nil, err
		}
	}
	return candidates, nil
}
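
// exampleCompositeSelection is a hypothetical sketch, not part of the driver
// API: it chains WriteSelector and LatencySelector so that candidates are
// first narrowed to writable servers and then to those within a 15ms latency
// window of the fastest survivor.
func exampleCompositeSelection(t Topology, candidates []Server) ([]Server, error) {
	sel := CompositeSelector([]ServerSelector{
		WriteSelector(),
		LatencySelector(15 * time.Millisecond),
	})
	return sel.SelectServer(t, candidates)
}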

type latencySelector struct {
	latency time.Duration
}

// LatencySelector creates a ServerSelector which selects servers based on their average RTT values.
func LatencySelector(latency time.Duration) ServerSelector {
	return &latencySelector{latency: latency}
}

func (latencySelector) info() serverSelectorInfo {
	return serverSelectorInfo{Type: "latencySelector"}
}

func (selector latencySelector) String() string {
	return selector.info().String()
}

func (selector *latencySelector) SelectServer(t Topology, candidates []Server) ([]Server, error) {
	if selector.latency < 0 {
		return candidates, nil
	}
	if t.Kind == LoadBalanced {
		// In LoadBalanced mode, there should only be one server in the topology and it must be selected.
		return candidates, nil
	}

	switch len(candidates) {
	case 0, 1:
		return candidates, nil
	default:
		min := time.Duration(math.MaxInt64)
		for _, candidate := range candidates {
			if candidate.AverageRTTSet && candidate.AverageRTT < min {
				min = candidate.AverageRTT
			}
		}

		if min == math.MaxInt64 {
			return candidates, nil
		}

		max := min + selector.latency

		viableIndexes := make([]int, 0, len(candidates))
		for i, candidate := range candidates {
			if candidate.AverageRTTSet && candidate.AverageRTT <= max {
				viableIndexes = append(viableIndexes, i)
			}
		}
		if len(viableIndexes) == len(candidates) {
			return candidates, nil
		}
		result := make([]Server, len(viableIndexes))
		for i, idx := range viableIndexes {
			result[i] = candidates[idx]
		}
		return result, nil
	}
}
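
// exampleLatencyWindow is a hypothetical sketch, not part of the driver API,
// of the window computed above: with measured RTTs of 10ms, 20ms, and 40ms
// and a 15ms latency argument, the window is [10ms, 25ms], so only the first
// two servers are returned.
func exampleLatencyWindow() ([]Server, error) {
	t := Topology{Kind: ReplicaSetWithPrimary}
	candidates := []Server{
		{Kind: RSSecondary, AverageRTT: 10 * time.Millisecond, AverageRTTSet: true},
		{Kind: RSSecondary, AverageRTT: 20 * time.Millisecond, AverageRTTSet: true},
		{Kind: RSSecondary, AverageRTT: 40 * time.Millisecond, AverageRTTSet: true},
	}
	return LatencySelector(15 * time.Millisecond).SelectServer(t, candidates)
}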

type writeServerSelector struct{}

// WriteSelector selects all the writable servers.
func WriteSelector() ServerSelector {
	return writeServerSelector{}
}

func (writeServerSelector) info() serverSelectorInfo {
	return serverSelectorInfo{Type: "writeSelector"}
}

func (selector writeServerSelector) String() string {
	return selector.info().String()
}

func (writeServerSelector) SelectServer(t Topology, candidates []Server) ([]Server, error) {
	switch t.Kind {
	case Single, LoadBalanced:
		return candidates, nil
	default:
		// Determine the capacity of the results slice.
		selected := 0
		for _, candidate := range candidates {
			switch candidate.Kind {
			case Mongos, RSPrimary, Standalone:
				selected++
			}
		}

		// Append candidates to the results slice.
		result := make([]Server, 0, selected)
		for _, candidate := range candidates {
			switch candidate.Kind {
			case Mongos, RSPrimary, Standalone:
				result = append(result, candidate)
			}
		}
		return result, nil
	}
}

type readPrefServerSelector struct {
	rp                *readpref.ReadPref
	isOutputAggregate bool
}

// ReadPrefSelector selects servers based on the provided read preference.
func ReadPrefSelector(rp *readpref.ReadPref) ServerSelector {
	return readPrefServerSelector{
		rp:                rp,
		isOutputAggregate: false,
	}
}

func (selector readPrefServerSelector) info() serverSelectorInfo {
	return serverSelectorInfo{
		Type: "readPrefSelector",
		Data: selector.rp.String(),
	}
}

func (selector readPrefServerSelector) String() string {
	return selector.info().String()
}

func (selector readPrefServerSelector) SelectServer(t Topology, candidates []Server) ([]Server, error) {
	if t.Kind == LoadBalanced {
		// In LoadBalanced mode, there should only be one server in the topology and it must be selected. We check
		// this before checking MaxStaleness support because there's no monitoring in this mode, so the candidate
		// server wouldn't have a wire version set, which would result in an error.
		return candidates, nil
	}

	switch t.Kind {
	case Single:
		return candidates, nil
	case ReplicaSetNoPrimary, ReplicaSetWithPrimary:
		return selectForReplicaSet(selector.rp, selector.isOutputAggregate, t, candidates)
	case Sharded:
		return selectByKind(candidates, Mongos), nil
	}

	return nil, nil
}
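
// exampleReadPrefSelection is a hypothetical sketch, not part of the driver
// API: it pairs a secondary-preferred read preference selector with a latency
// selector, which is how read selectors are typically composed during server
// selection.
func exampleReadPrefSelection(t Topology, candidates []Server) ([]Server, error) {
	sel := CompositeSelector([]ServerSelector{
		ReadPrefSelector(readpref.SecondaryPreferred()),
		LatencySelector(15 * time.Millisecond),
	})
	return sel.SelectServer(t, candidates)
}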

// OutputAggregateSelector selects servers based on the provided read preference
// given that the underlying operation is aggregate with an output stage.
func OutputAggregateSelector(rp *readpref.ReadPref) ServerSelector {
	return readPrefServerSelector{
		rp:                rp,
		isOutputAggregate: true,
	}
}

func selectForReplicaSet(rp *readpref.ReadPref, isOutputAggregate bool, t Topology, candidates []Server) ([]Server, error) {
	if err := verifyMaxStaleness(rp, t); err != nil {
		return nil, err
	}

	// If the underlying operation is an aggregate with an output stage, only apply the read
	// preference if all candidates are 5.0+ (wire version 13+). Otherwise, fall back to the
	// primary read preference.
	if isOutputAggregate {
		for _, s := range candidates {
			if s.WireVersion.Max < 13 {
				return selectByKind(candidates, RSPrimary), nil
			}
		}
	}

	switch rp.Mode() {
	case readpref.PrimaryMode:
		return selectByKind(candidates, RSPrimary), nil
	case readpref.PrimaryPreferredMode:
		selected := selectByKind(candidates, RSPrimary)

		if len(selected) == 0 {
			selected = selectSecondaries(rp, candidates)
			return selectByTagSet(selected, rp.TagSets()), nil
		}

		return selected, nil
	case readpref.SecondaryPreferredMode:
		selected := selectSecondaries(rp, candidates)
		selected = selectByTagSet(selected, rp.TagSets())
		if len(selected) > 0 {
			return selected, nil
		}
		return selectByKind(candidates, RSPrimary), nil
	case readpref.SecondaryMode:
		selected := selectSecondaries(rp, candidates)
		return selectByTagSet(selected, rp.TagSets()), nil
	case readpref.NearestMode:
		selected := selectByKind(candidates, RSPrimary)
		selected = append(selected, selectSecondaries(rp, candidates)...)
		return selectByTagSet(selected, rp.TagSets()), nil
	}

	return nil, fmt.Errorf("unsupported mode: %d", rp.Mode())
}

func selectSecondaries(rp *readpref.ReadPref, candidates []Server) []Server {
	secondaries := selectByKind(candidates, RSSecondary)
	if len(secondaries) == 0 {
		return secondaries
	}
	if maxStaleness, set := rp.MaxStaleness(); set {
		primaries := selectByKind(candidates, RSPrimary)
		if len(primaries) == 0 {
			baseTime := secondaries[0].LastWriteTime
			for i := 1; i < len(secondaries); i++ {
				if secondaries[i].LastWriteTime.After(baseTime) {
					baseTime = secondaries[i].LastWriteTime
				}
			}

			var selected []Server
			for _, secondary := range secondaries {
				// Without a primary, staleness is estimated relative to the
				// secondary with the most recent last write time.
				estimatedStaleness := baseTime.Sub(secondary.LastWriteTime) + secondary.HeartbeatInterval
				if estimatedStaleness <= maxStaleness {
					selected = append(selected, secondary)
				}
			}

			return selected
		}

		primary := primaries[0]

		var selected []Server
		for _, secondary := range secondaries {
			// With a primary, staleness is estimated from how far the secondary's
			// last write lags behind the primary's, plus the heartbeat interval.
			estimatedStaleness := secondary.LastUpdateTime.Sub(secondary.LastWriteTime) -
				primary.LastUpdateTime.Sub(primary.LastWriteTime) +
				secondary.HeartbeatInterval
			if estimatedStaleness <= maxStaleness {
				selected = append(selected, secondary)
			}
		}
		return selected
	}

	return secondaries
}
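
// exampleStalenessEstimate is a hypothetical sketch, not part of the driver
// API, of the primary-based estimate above: with a 90s max staleness and a
// 10s heartbeat interval, a secondary lagging the primary's write time by 40s
// has an estimated staleness of 50s and is kept, while one lagging by 100s
// has an estimated staleness of 110s and is filtered out.
func exampleStalenessEstimate() []Server {
	now := time.Now()
	rp := readpref.Secondary(readpref.WithMaxStaleness(90 * time.Second))
	candidates := []Server{
		{Kind: RSPrimary, LastUpdateTime: now, LastWriteTime: now},
		{Kind: RSSecondary, LastUpdateTime: now, LastWriteTime: now.Add(-40 * time.Second), HeartbeatInterval: 10 * time.Second},
		{Kind: RSSecondary, LastUpdateTime: now, LastWriteTime: now.Add(-100 * time.Second), HeartbeatInterval: 10 * time.Second},
	}
	return selectSecondaries(rp, candidates)
}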

func selectByTagSet(candidates []Server, tagSets []tag.Set) []Server {
	if len(tagSets) == 0 {
		return candidates
	}

	for _, ts := range tagSets {
		// If this tag set is empty, we can take a fast path: every server's tag set contains the
		// empty set, so all candidate servers match and are selected.
		if len(ts) == 0 {
			return candidates
		}

		var results []Server
		for _, s := range candidates {
			// ts is non-empty, so only servers with a non-empty set of tags need to be checked.
			if len(s.Tags) > 0 && s.Tags.ContainsAll(ts) {
				results = append(results, s)
			}
		}

		if len(results) > 0 {
			return results
		}
	}

	return []Server{}
}

func selectByKind(candidates []Server, kind ServerKind) []Server {
	// As an optimization, record the indices of viable candidates first and then copy only those
	// Server structs into the result, rather than appending the relatively large structs directly.
	viableIndexes := make([]int, 0, len(candidates))
	for i, s := range candidates {
		if s.Kind == kind {
			viableIndexes = append(viableIndexes, i)
		}
	}
	if len(viableIndexes) == len(candidates) {
		return candidates
	}
	result := make([]Server, len(viableIndexes))
	for i, idx := range viableIndexes {
		result[i] = candidates[idx]
	}
	return result
}

func verifyMaxStaleness(rp *readpref.ReadPref, t Topology) error {
	maxStaleness, set := rp.MaxStaleness()
	if !set {
		return nil
	}

	if maxStaleness < 90*time.Second {
		return fmt.Errorf("max staleness (%s) must be greater than or equal to 90s", maxStaleness)
	}

	if len(t.Servers) < 1 {
		// Maybe we should return an error here instead?
		return nil
	}

	// we'll assume all candidates have the same heartbeat interval.
	s := t.Servers[0]
	idleWritePeriod := 10 * time.Second

	if maxStaleness < s.HeartbeatInterval+idleWritePeriod {
		return fmt.Errorf(
			"max staleness (%s) must be greater than or equal to the heartbeat interval (%s) plus idle write period (%s)",
			maxStaleness, s.HeartbeatInterval, idleWritePeriod,
		)
	}

	return nil
}
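
// exampleMaxStalenessCheck is a hypothetical sketch, not part of the driver
// API, of the validation above: 100s clears the 90s floor and also satisfies
// heartbeat interval (10s) + idle write period (10s), so the check passes.
func exampleMaxStalenessCheck() error {
	rp := readpref.Secondary(readpref.WithMaxStaleness(100 * time.Second))
	t := Topology{Servers: []Server{{HeartbeatInterval: 10 * time.Second}}}
	return verifyMaxStaleness(rp, t)
}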