File: logfile.go

Package: docker.io 28.5.2+dfsg3-2
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.23

package loggerutils

import (
	"compress/gzip"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"io/fs"
	"math"
	"os"
	"slices"
	"strconv"
	"sync"
	"time"

	"github.com/containerd/containerd/v2/pkg/tracing"
	"github.com/containerd/log"
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/pkg/pools"
	"github.com/pkg/errors"
	"go.opentelemetry.io/otel/attribute"
)

// rotateFileMetadata is the metadata stored in the gzip header of a compressed log file.
type rotateFileMetadata struct {
	LastTime time.Time `json:"lastTime,omitempty"`
}

// LogFile is the Logger implementation for default Docker logging.
type LogFile struct {
	mu       sync.Mutex // protects the logfile access
	closed   chan struct{}
	rotateMu sync.Mutex // blocks the next rotation until the current rotation is completed
	// Lock out readers while performing a non-atomic sequence of filesystem
	// operations (RLock: open, Lock: rename, delete).
	//
	// fsopMu should be locked for writing only while holding rotateMu.
	fsopMu sync.RWMutex

	// Logger configuration

	capacity int64 // maximum size of each file
	maxFiles int   // maximum number of files
	compress bool  // whether old versions of log files are compressed
	perms    os.FileMode

	// Log file codec

	createDecoder MakeDecoderFn
	getTailReader GetTailReaderFunc

	// Log reader state in a 1-buffered channel.
	//
	// Share memory by communicating: receive to acquire, send to release.
	// The state struct is passed around by value so that use-after-send
	// bugs cannot escalate to data races.
	//
	// A method which receives the state value takes ownership of it. The
	// owner is responsible for either passing ownership along or sending
	// the state back to the channel. By convention, the semantics of
	// passing along ownership is expressed with function argument types.
	// Methods which take a pointer *logReadState argument borrow the state,
	// analogous to functions which require a lock to be held when calling.
	// The caller retains ownership. Calling a method which takes a
	// value logReadState argument gives ownership to the callee. A short
	// sketch of the acquire/release protocol follows this struct.
	read chan logReadState

	decompress *sharedTempFileConverter

	pos           logPos    // Current log file write position.
	f             *os.File  // Current log file for writing.
	lastTimestamp time.Time // timestamp of the last log
}
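
// readPosSketch is a hypothetical helper (illustrative only; nothing in this
// file uses it) demonstrating the acquire/release protocol on the read
// channel described above: receive from the channel to take ownership of the
// state, send the value back to release it.
func readPosSketch(w *LogFile) logPos {
	st := <-w.read // receive to acquire ownership of the state
	pos := st.pos  // safe to inspect while owned
	w.read <- st   // send to release ownership back to the channel
	return pos
}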

type logPos struct {
	// Size of the current file.
	size int64
	// File rotation sequence number (modulo 2**16).
	rotation uint16
}

type logReadState struct {
	// Current log file position.
	pos logPos
	// Wait list to be notified of the value of pos next time it changes.
	wait []chan<- logPos
}

// MakeDecoderFn creates a decoder for reading log entries from rdr.
type MakeDecoderFn func(rdr io.Reader) Decoder

// Decoder is for reading logs.
// It is created by the log reader by calling the configured MakeDecoderFn.
// An illustrative implementation follows the interface definition.
type Decoder interface {
	// Reset resets the decoder
	// Reset is called for certain events, such as log rotations
	Reset(io.Reader)
	// Decode decodes the next log message from the stream
	Decode() (*logger.Message, error)
	// Close signals to the decoder that it can release whatever resources it was using.
	Close()
}
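
// jsonLineDecoder is an illustrative Decoder sketch, loosely modelled on the
// json-file log format (the real decoder lives in the json-file log driver,
// not in this file): each log entry is a JSON object on its own line.
type jsonLineDecoder struct {
	dec *json.Decoder
}

var _ Decoder = (*jsonLineDecoder)(nil)

func (d *jsonLineDecoder) Reset(rdr io.Reader) { d.dec = json.NewDecoder(rdr) }

func (d *jsonLineDecoder) Decode() (*logger.Message, error) {
	var entry struct {
		Log  string    `json:"log"`
		Time time.Time `json:"time"`
	}
	if err := d.dec.Decode(&entry); err != nil {
		return nil, err // io.EOF at the end of the stream, as callers expect
	}
	return &logger.Message{Line: []byte(entry.Log), Timestamp: entry.Time}, nil
}

func (d *jsonLineDecoder) Close() { d.dec = nil }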

// SizeReaderAt defines a ReaderAt that also reports its size.
// This is used for tailing log files.
type SizeReaderAt interface {
	io.Reader
	io.ReaderAt
	Size() int64
}
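
// *io.SectionReader already provides Read, ReadAt, and Size, so it satisfies
// SizeReaderAt; the compile-time assertion below documents this. It is also
// how the current log file is wrapped for tailing in readLogsLocked.
var _ SizeReaderAt = (*io.SectionReader)(nil)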

// GetTailReaderFunc is used to truncate a reader to read only as much as is
// required to get the requested number of log lines.
// It returns the sectioned reader, the number of lines that the section
// contains, and any error that occurs. A trivial sketch follows the type
// definition.
type GetTailReaderFunc func(ctx context.Context, f SizeReaderAt, nLogLines int) (rdr SizeReaderAt, nLines int, err error)
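
// wholeFileTail is a hypothetical GetTailReaderFunc sketch: it does no line
// counting at all and simply hands back the whole file, claiming to contain
// the requested number of lines. A real implementation scans backwards from
// the end of f for nLogLines message boundaries and returns a section reader
// over just that suffix.
func wholeFileTail(_ context.Context, f SizeReaderAt, nLogLines int) (SizeReaderAt, int, error) {
	return f, nLogLines, nil
}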

// NewLogFile creates a new LogFile.
func NewLogFile(logPath string, capacity int64, maxFiles int, compress bool, decodeFunc MakeDecoderFn, perms os.FileMode, getTailReader GetTailReaderFunc) (*LogFile, error) {
	logFile, err := openFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, perms)
	if err != nil {
		return nil, err
	}

	size, err := logFile.Seek(0, io.SeekEnd)
	if err != nil {
		return nil, err
	}

	pos := logPos{
		size: size,
		// Force a wraparound on first rotation to shake out any
		// modular-arithmetic bugs.
		rotation: math.MaxUint16,
	}
	st := make(chan logReadState, 1)
	st <- logReadState{pos: pos}

	return &LogFile{
		f:             logFile,
		read:          st,
		pos:           pos,
		closed:        make(chan struct{}),
		capacity:      capacity,
		maxFiles:      maxFiles,
		compress:      compress,
		decompress:    newSharedTempFileConverter(decompress),
		createDecoder: decodeFunc,
		perms:         perms,
		getTailReader: getTailReader,
	}, nil
}
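
// newLogFileSketch shows, illustratively, how a log driver might construct a
// LogFile: 64 kB per file, at most five files, and gzip compression of
// rotated files. The decoder and tail-reader arguments are assumed to come
// from the driver's log-format code (for example, a decoder along the lines
// of jsonLineDecoder above).
func newLogFileSketch(path string, mkDec MakeDecoderFn, tail GetTailReaderFunc) (*LogFile, error) {
	return NewLogFile(path, 64*1024, 5, true, mkDec, 0o600, tail)
}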

// WriteLogEntry writes the provided log message to the current log file.
// This may trigger a rotation event if the max file/capacity limits are hit.
func (w *LogFile) WriteLogEntry(timestamp time.Time, marshalled []byte) error {
	select {
	case <-w.closed:
		return errors.New("cannot write because the output file was closed")
	default:
	}
	w.mu.Lock()
	defer w.mu.Unlock()

	// Are we due for a rotation?
	if w.capacity != -1 && w.pos.size >= w.capacity {
		if err := w.rotate(); err != nil {
			return errors.Wrap(err, "error rotating log file")
		}
	}

	n, err := w.f.Write(marshalled)
	if err != nil {
		return errors.Wrap(err, "error writing log entry")
	}
	w.pos.size += int64(n)
	w.lastTimestamp = timestamp

	// Notify any waiting readers that there is a new log entry to read.
	st := <-w.read
	defer func() { w.read <- st }()
	st.pos = w.pos

	for _, c := range st.wait {
		c <- st.pos
	}
	// Optimization: retain the backing array to save a heap allocation next
	// time a reader appends to the list.
	if st.wait != nil {
		st.wait = st.wait[:0]
	}
	return nil
}
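
// writeEntrySketch illustrates the caller's side of WriteLogEntry; it is a
// hypothetical example, as each driver marshals messages in its own format.
// The caller encodes the message and hands the bytes and timestamp over,
// leaving rotation entirely to the LogFile.
func writeEntrySketch(w *LogFile, msg *logger.Message) error {
	b, err := json.Marshal(map[string]any{
		"log":  string(msg.Line),
		"time": msg.Timestamp,
	})
	if err != nil {
		return err
	}
	return w.WriteLogEntry(msg.Timestamp, append(b, '\n'))
}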

func (w *LogFile) rotate() (retErr error) {
	w.rotateMu.Lock()
	noCompress := w.maxFiles <= 1 || !w.compress
	defer func() {
		// If we aren't going to run the goroutine to compress the log file, then we need to unlock in this function.
		// Otherwise the lock will be released in the goroutine that handles compression.
		if retErr != nil || noCompress {
			w.rotateMu.Unlock()
		}
	}()

	fname := w.f.Name()
	if err := w.f.Close(); err != nil {
		// if there was an error during a prior rotate, the file could already be closed
		if !errors.Is(err, fs.ErrClosed) {
			return errors.Wrap(err, "error closing file")
		}
	}

	file, err := func() (*os.File, error) {
		w.fsopMu.Lock()
		defer w.fsopMu.Unlock()

		if err := rotate(fname, w.maxFiles, w.compress); err != nil {
			log.G(context.TODO()).WithError(err).Warn("Error rotating log file, log data may have been lost")
		} else {
			// We may have readers working their way through the
			// current log file, so we can't truncate it. New logs
			// have to go to an empty file with the same name as
			// the current one, which means the current file must
			// be rotated out of the way first.
			if w.maxFiles < 2 {
				if err := unlink(fname); err != nil && !errors.Is(err, fs.ErrNotExist) {
					log.G(context.TODO()).WithError(err).Error("Error unlinking current log file")
				}
			} else {
				if err := os.Rename(fname, fname+".1"); err != nil && !errors.Is(err, fs.ErrNotExist) {
					log.G(context.TODO()).WithError(err).Error("Error renaming current log file")
				}
			}
		}

		// Notwithstanding the above, open with the truncate flag anyway
		// in case rotation didn't work out as planned.
		return openFile(fname, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, w.perms)
	}()
	if err != nil {
		return err
	}
	w.f = file
	w.pos = logPos{rotation: w.pos.rotation + 1}

	if noCompress {
		return nil
	}

	ts := w.lastTimestamp
	go func() {
		defer w.rotateMu.Unlock()
		// No need to hold fsopMu as at no point will the filesystem be
		// in a state which would cause problems for readers. Opening
		// the uncompressed file is tried first, falling back to the
		// compressed one. compressFile only deletes the uncompressed
		// file once the compressed one is fully written out, so at no
		// point during the compression process will a reader fail to
		// open a complete copy of the file.
		if err := compressFile(fname+".1", ts); err != nil {
			log.G(context.TODO()).WithError(err).Error("Error compressing log file after rotation")
		}
	}()

	return nil
}

func rotate(name string, maxFiles int, compress bool) error {
	if maxFiles < 2 {
		return nil
	}

	var extension string
	if compress {
		extension = ".gz"
	}

	lastFile := fmt.Sprintf("%s.%d%s", name, maxFiles-1, extension)
	err := unlink(lastFile)
	if err != nil && !errors.Is(err, fs.ErrNotExist) {
		return errors.Wrap(err, "error removing oldest log file")
	}

	for i := maxFiles - 1; i > 1; i-- {
		toPath := name + "." + strconv.Itoa(i) + extension
		fromPath := name + "." + strconv.Itoa(i-1) + extension
		err := os.Rename(fromPath, toPath)
		log.G(context.TODO()).WithError(err).WithField("source", fromPath).WithField("target", toPath).Trace("Rotating log file")
		if err != nil && !errors.Is(err, fs.ErrNotExist) {
			return err
		}
	}

	return nil
}
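
// To illustrate: with name="log", maxFiles=4, and compress=true, a single
// call performs, in order:
//
//	unlink log.3.gz
//	rename log.2.gz -> log.3.gz
//	rename log.1.gz -> log.2.gz
//
// after which (*LogFile).rotate renames the live file to log.1 and a
// background goroutine compresses it into log.1.gz.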

func compressFile(fileName string, lastTimestamp time.Time) (retErr error) {
	file, err := open(fileName)
	if err != nil {
		if errors.Is(err, fs.ErrNotExist) {
			log.G(context.TODO()).WithField("file", fileName).WithError(err).Debug("Could not open log file to compress")
			return nil
		}
		return errors.Wrap(err, "failed to open log file")
	}
	defer func() {
		file.Close()
		if retErr == nil {
			err := unlink(fileName)
			if err != nil && !errors.Is(err, fs.ErrNotExist) {
				retErr = errors.Wrap(err, "failed to remove source log file")
			}
		}
	}()

	outFile, err := openFile(fileName+".gz", os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0o640)
	if err != nil {
		return errors.Wrap(err, "failed to open or create gzip log file")
	}
	defer func() {
		outFile.Close()
		if retErr != nil {
			if err := unlink(fileName + ".gz"); err != nil && !errors.Is(err, fs.ErrNotExist) {
				log.G(context.TODO()).WithError(err).Error("Error cleaning up after failed log compression")
			}
		}
	}()

	compressWriter := gzip.NewWriter(outFile)
	defer compressWriter.Close()

	// Add the last log entry timestamp to the gzip header
	extra := rotateFileMetadata{}
	extra.LastTime = lastTimestamp
	compressWriter.Header.Extra, err = json.Marshal(&extra)
	if err != nil {
		// Log the error but don't return it; the header metadata is only an
		// optimization for readers.
		log.G(context.TODO()).Warningf("Failed to marshal gzip header as JSON: %v", err)
	}

	_, err = pools.Copy(compressWriter, file)
	if err != nil {
		return errors.Wrapf(err, "error compressing log file %s", fileName)
	}

	return nil
}
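
// readGzipLastTimeSketch is a hypothetical inverse of the header written by
// compressFile: it recovers the last log timestamp from the gzip Extra field
// of a compressed rotation, the same decoding that
// (*compressedFileOpener).ReaderAt performs below.
func readGzipLastTimeSketch(r io.Reader) (time.Time, error) {
	gzr, err := gzip.NewReader(r)
	if err != nil {
		return time.Time{}, err
	}
	defer gzr.Close()
	var meta rotateFileMetadata
	if err := json.Unmarshal(gzr.Header.Extra, &meta); err != nil {
		return time.Time{}, err
	}
	return meta.LastTime, nil
}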

// MaxFiles returns the maximum number of files.
func (w *LogFile) MaxFiles() int {
	return w.maxFiles
}

// Close closes underlying file and signals all readers to stop.
func (w *LogFile) Close() error {
	w.mu.Lock()
	defer w.mu.Unlock()
	select {
	case <-w.closed:
		return nil
	default:
	}
	if err := w.f.Close(); err != nil && !errors.Is(err, fs.ErrClosed) {
		return err
	}
	close(w.closed)
	// Wait until any in-progress rotation is complete.
	w.rotateMu.Lock()
	defer w.rotateMu.Unlock()
	return nil
}

// ReadLogs decodes entries from log files.
//
// It is the caller's responsibility to call ConsumerGone on the LogWatcher.
func (w *LogFile) ReadLogs(ctx context.Context, config logger.ReadConfig) *logger.LogWatcher {
	ctx, span := tracing.StartSpan(ctx, "logger.LogFile.ReadLogs")
	defer span.End()

	span.SetAttributes(tracing.Attribute("config", config))

	watcher := logger.NewLogWatcher()
	// Lock out filesystem operations so that we can capture the read
	// position and atomically open the corresponding log file, without the
	// file getting rotated out from under us.
	w.fsopMu.RLock()
	// Capture the read position synchronously to ensure that we start
	// following from the last entry logged before ReadLogs was called,
	// which is required for flake-free unit testing.
	st := <-w.read
	pos := st.pos
	w.read <- st
	go w.readLogsLocked(ctx, pos, config, watcher)
	return watcher
}
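
// consumeLogsSketch shows, illustratively, the consumer side of ReadLogs:
// read from watcher.Msg until it is closed, then check for a pending error,
// and always signal ConsumerGone when done.
func consumeLogsSketch(ctx context.Context, w *LogFile) error {
	watcher := w.ReadLogs(ctx, logger.ReadConfig{Tail: 100})
	defer watcher.ConsumerGone()
	for msg := range watcher.Msg {
		fmt.Printf("%s %s", msg.Timestamp, msg.Line)
	}
	select {
	case err := <-watcher.Err:
		return err
	default:
		return nil
	}
}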

// tailFiles must be called with w.fsopMu locked for reads.
// w.fsopMu.RUnlock() is called before returning.
func (w *LogFile) tailFiles(ctx context.Context, config logger.ReadConfig, watcher *logger.LogWatcher, current SizeReaderAt, dec Decoder, fwd *forwarder) (cont bool) {
	if config.Tail == 0 {
		w.fsopMu.RUnlock()
		return true
	}

	ctx, span := tracing.StartSpan(ctx, "logger.Logfile.TailLogs")
	defer func() {
		span.SetAttributes(attribute.Bool("continue", cont))
		span.End()
	}()

	files, err := w.openRotatedFiles(ctx, config)
	w.fsopMu.RUnlock()

	if err != nil {
		// TODO: Should we allow this to continue (as in set `cont=true`) and not error out the log stream?
		err = errors.Wrap(err, "error opening rotated log files")
		span.SetStatus(err)
		watcher.Err <- err
		return false
	}

	if current.Size() > 0 {
		files = append(files, &sizeReaderAtOpener{current, "current"})
	}

	return tailFiles(ctx, files, watcher, dec, w.getTailReader, config.Tail, fwd)
}

type sizeReaderAtOpener struct {
	SizeReaderAt
	ref string
}

func (o *sizeReaderAtOpener) ReaderAt(context.Context) (sizeReaderAtCloser, error) {
	return &sizeReaderAtWithCloser{o, nil}, nil
}

func (o *sizeReaderAtOpener) Close() {}

func (o *sizeReaderAtOpener) Ref() string {
	return o.ref
}

type sizeReaderAtWithCloser struct {
	SizeReaderAt
	close func() error
}

func (r *sizeReaderAtWithCloser) ReadAt(p []byte, offset int64) (int, error) {
	if r.SizeReaderAt == nil {
		return 0, io.EOF
	}
	return r.SizeReaderAt.ReadAt(p, offset)
}

func (r *sizeReaderAtWithCloser) Read(p []byte) (int, error) {
	if r.SizeReaderAt == nil {
		return 0, io.EOF
	}
	return r.SizeReaderAt.Read(p)
}

func (r *sizeReaderAtWithCloser) Size() int64 {
	if r.SizeReaderAt == nil {
		return 0
	}
	return r.SizeReaderAt.Size()
}

func (r *sizeReaderAtWithCloser) Close() error {
	if r.close != nil {
		return r.close()
	}
	return nil
}

// readLogsLocked is the bulk of the implementation of ReadLogs.
//
// w.fsopMu must be locked for reading when calling this method.
// w.fsopMu.RUnlock() is called before returning.
func (w *LogFile) readLogsLocked(ctx context.Context, currentPos logPos, config logger.ReadConfig, watcher *logger.LogWatcher) {
	ctx, span := tracing.StartSpan(ctx, "logger.Logfile.ReadLogsLocked")
	defer span.End()

	defer close(watcher.Msg)

	currentFile, err := open(w.f.Name())
	if err != nil {
		w.fsopMu.RUnlock()
		span.SetStatus(err)
		watcher.Err <- err
		return
	}
	defer currentFile.Close()

	dec := w.createDecoder(nil)
	defer dec.Close()

	fwd := newForwarder(config)

	// At this point, w.tailFiles is responsible for unlocking w.fsopMu.
	ok := w.tailFiles(ctx, config, watcher, io.NewSectionReader(currentFile, 0, currentPos.size), dec, fwd)

	if !ok {
		return
	}

	if !config.Follow {
		return
	}

	(&follow{
		LogFile:   w,
		Watcher:   watcher,
		Decoder:   dec,
		Forwarder: fwd,
	}).Do(ctx, currentFile, currentPos)
}

type fileOpener interface {
	ReaderAt(context.Context) (ra sizeReaderAtCloser, err error)
	Close()
	Ref() string
}

// simpleFileOpener just holds a reference to an already open file
type simpleFileOpener struct {
	f      *os.File
	sz     int64
	closed bool
}

func (o *simpleFileOpener) ReaderAt(context.Context) (sizeReaderAtCloser, error) {
	if o.closed {
		return nil, errors.New("file is closed")
	}

	if o.sz == 0 {
		stat, err := o.f.Stat()
		if err != nil {
			return nil, errors.Wrap(err, "error stating file")
		}
		o.sz = stat.Size()
	}
	return &sizeReaderAtWithCloser{io.NewSectionReader(o.f, 0, o.sz), nil}, nil
}

func (o *simpleFileOpener) Ref() string {
	return o.f.Name()
}

func (o *simpleFileOpener) Close() {
	_ = o.f.Close()
	o.closed = true
}

// decompress is the converter function used by sharedTempFileConverter.
func decompress(dst io.WriteSeeker, src io.ReadSeeker) error {
	if _, err := src.Seek(0, io.SeekStart); err != nil {
		return err
	}
	rc, err := gzip.NewReader(src)
	if err != nil {
		return err
	}
	_, err = pools.Copy(dst, rc)
	if err != nil {
		return err
	}
	return rc.Close()
}

// compressedFileOpener holds a reference to a compressed log file and will
// lazily open a decompressed version of it.
type compressedFileOpener struct {
	closed bool

	f *os.File

	lf       *LogFile
	ifBefore time.Time
}

func (cfo *compressedFileOpener) ReaderAt(ctx context.Context) (_ sizeReaderAtCloser, retErr error) {
	_, span := tracing.StartSpan(ctx, "logger.Logfile.Compressed.ReaderAt")
	defer func() {
		if retErr != nil {
			span.SetStatus(retErr)
		}
		span.End()
	}()

	span.SetAttributes(attribute.String("file", cfo.f.Name()))

	if cfo.closed {
		return nil, errors.New("compressed file closed")
	}

	gzr, err := gzip.NewReader(cfo.f)
	if err != nil {
		return nil, err
	}
	defer gzr.Close()

	// Extract the last log entry timestamp from the gzip header.
	// Use it to decide whether this file needs to be read at all, given the
	// requested time window.
	extra := &rotateFileMetadata{}
	err = json.Unmarshal(gzr.Header.Extra, extra)
	if err == nil && !extra.LastTime.IsZero() && extra.LastTime.Before(cfo.ifBefore) {
		span.SetAttributes(attribute.Bool("skip", true))
		return &sizeReaderAtWithCloser{}, nil
	}
	if err == nil {
		span.SetAttributes(attribute.Stringer("lastLogTime", extra.LastTime))
	}

	span.AddEvent("Start decompress")
	return cfo.lf.decompress.Do(cfo.f)
}

func (cfo *compressedFileOpener) Close() {
	cfo.closed = true
	cfo.f.Close()
}

func (cfo *compressedFileOpener) Ref() string {
	return cfo.f.Name()
}

type emptyFileOpener struct{}

func (emptyFileOpener) ReaderAt(context.Context) (sizeReaderAtCloser, error) {
	return &sizeReaderAtWithCloser{}, nil
}

func (emptyFileOpener) Close() {}

func (emptyFileOpener) Ref() string {
	return "null"
}

// openRotatedFiles returns a slice of files open for reading, in order from
// oldest to newest.
//
// This method must only be called with w.fsopMu locked for reading; the
// caller remains responsible for releasing the lock.
func (w *LogFile) openRotatedFiles(ctx context.Context, config logger.ReadConfig) (_ []fileOpener, retErr error) {
	var out []fileOpener

	defer func() {
		if retErr != nil {
			for _, fo := range out {
				fo.Close()
			}
		}
	}()

	for i := w.maxFiles; i > 1; i-- {
		fo, err := w.openRotatedFile(ctx, i-1, config)
		if err != nil {
			return nil, err
		}
		out = append(out, fo)
	}

	return out, nil
}

func (w *LogFile) openRotatedFile(ctx context.Context, i int, config logger.ReadConfig) (fileOpener, error) {
	f, err := open(fmt.Sprintf("%s.%d", w.f.Name(), i))
	if err == nil {
		return &simpleFileOpener{
			f: f,
		}, nil
	}

	if !errors.Is(err, fs.ErrNotExist) {
		return nil, errors.Wrap(err, "error opening rotated log file")
	}

	f, err = open(fmt.Sprintf("%s.%d.gz", w.f.Name(), i))
	if err != nil {
		if !errors.Is(err, fs.ErrNotExist) {
			return nil, errors.Wrap(err, "error opening file for decompression")
		}
		return &emptyFileOpener{}, nil
	}

	return &compressedFileOpener{
		f:        f,
		lf:       w,
		ifBefore: config.Since,
	}, nil
}

// sizeReaderAtCloser is used to improve type safety around tailing logs.
// Some log readers require the log file to be closed, so this interface
// ensures every implementation has a Close method, even if it is a no-op,
// rather than relying on type assertions.
type sizeReaderAtCloser interface {
	SizeReaderAt
	io.Closer
}

func getTailFiles(ctx context.Context, files []fileOpener, nLines int, getTailReader GetTailReaderFunc) (_ []sizeReaderAtCloser, retErr error) {
	ctx, span := tracing.StartSpan(ctx, "logger.Logfile.CollectTailFiles")
	span.SetAttributes(attribute.Int("requested_lines", nLines))

	defer func() {
		if retErr != nil {
			span.SetStatus(retErr)
		}
		span.End()
	}()
	out := make([]sizeReaderAtCloser, 0, len(files))

	defer func() {
		if retErr != nil {
			for _, ra := range out {
				if err := ra.Close(); err != nil {
					log.G(ctx).WithError(err).Warn("Error closing log reader")
				}
			}
		}
	}()

	if nLines <= 0 {
		for _, fo := range files {
			span.AddEvent("Open file", attribute.String("file", fo.Ref()))

			ra, err := fo.ReaderAt(ctx)
			if err != nil {
				return nil, err
			}
			out = append(out, ra)
		}
		return out, nil
	}

	for i := len(files) - 1; i >= 0 && nLines > 0; i-- {
		if err := ctx.Err(); err != nil {
			return nil, errors.Wrap(err, "stopping parsing files to tail due to error")
		}

		fo := files[i]

		fileAttr := attribute.String("file", fo.Ref())
		span.AddEvent("Open file", fileAttr)

		ra, err := fo.ReaderAt(ctx)
		if err != nil {
			return nil, err
		}

		span.AddEvent("Scan file to tail", fileAttr, attribute.Int("remaining_lines", nLines))

		tail, n, err := getTailReader(ctx, ra, nLines)
		if err != nil {
			ra.Close()
			log.G(ctx).WithError(err).Warn("Error scanning log file for tail file request, skipping")
			continue
		}
		nLines -= n
		out = append(out, &sizeReaderAtWithCloser{tail, ra.Close})
	}

	slices.Reverse(out)

	return out, nil
}

func tailFiles(ctx context.Context, files []fileOpener, watcher *logger.LogWatcher, dec Decoder, getTailReader GetTailReaderFunc, nLines int, fwd *forwarder) (cont bool) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	go func() {
		select {
		case <-ctx.Done():
		case <-watcher.WatchConsumerGone():
			cancel()
		}
	}()

	readers, err := getTailFiles(ctx, files, nLines, getTailReader)
	if err != nil {
		watcher.Err <- err
		return false
	}

	var idx int
	defer func() {
		// Make sure all are released if there is an early return.
		if !cont {
			for _, r := range readers[idx:] {
				if err := r.Close(); err != nil {
					log.G(ctx).WithError(err).Debug("Error closing log reader")
				}
			}
		}
	}()

	for _, ra := range readers {
		select {
		case <-watcher.WatchConsumerGone():
			return false
		case <-ctx.Done():
			return false
		default:
		}

		dec.Reset(ra)

		cancel := context.AfterFunc(ctx, func() {
			if err := ra.Close(); err != nil {
				log.G(ctx).WithError(err).Debug("Error closing log reader")
			}
		})

		ok := fwd.Do(ctx, watcher, func() (*logger.Message, error) {
			msg, err := dec.Decode()
			if err != nil && !errors.Is(err, io.EOF) {
				// We have an error decoding the stream, but we don't want to error out
				// the whole log reader.
				// If we return anything other than EOF then the forwarder will return
				// false and we'll exit the loop.
				// Instead just log the error here and return an EOF so we can move to
				// the next file.
				log.G(ctx).WithError(err).Warn("Error decoding log file")
				return nil, io.EOF
			}
			return msg, err
		})
		cancel()
		idx++
		if !ok {
			return false
		}
	}

	return true
}

type forwarder struct {
	since, until time.Time
}

func newForwarder(config logger.ReadConfig) *forwarder {
	return &forwarder{since: config.Since, until: config.Until}
}

// Do reads log messages from dec and sends the messages matching the filter
// conditions to watcher. Do returns cont=true iff it has read all messages from
// dec without encountering a message with a timestamp which is after the
// configured until time.
func (fwd *forwarder) Do(ctx context.Context, watcher *logger.LogWatcher, next func() (*logger.Message, error)) (cont bool) {
	ctx, span := tracing.StartSpan(ctx, "logger.Logfile.Forward")
	defer func() {
		span.SetAttributes(attribute.Bool("continue", cont))
		span.End()
	}()

	for {
		select {
		case <-watcher.WatchConsumerGone():
			span.AddEvent("watch consumer gone")
			return false
		case <-ctx.Done():
			span.AddEvent(ctx.Err().Error())
			return false
		default:
		}

		msg, err := next()
		if err != nil {
			if errors.Is(err, io.EOF) {
				span.AddEvent("EOF")
				return true
			}
			span.SetStatus(err)
			log.G(ctx).WithError(err).Debug("Error while decoding log entry, not continuing")
			return false
		}

		if !fwd.since.IsZero() {
			if msg.Timestamp.Before(fwd.since) {
				continue
			}
			// We've found our first message with a timestamp >= since. As message
			// timestamps might not be monotonic, we need to skip the since check for all
			// subsequent messages so we do not filter out later messages which happen to
			// have timestamps before since.
			fwd.since = time.Time{}
		}
		if !fwd.until.IsZero() && msg.Timestamp.After(fwd.until) {
			log.G(ctx).Debug("Log is newer than requested window, skipping remaining logs")
			return false
		}

		select {
		case <-ctx.Done():
			span.AddEvent(ctx.Err().Error())
			return false
		case <-watcher.WatchConsumerGone():
			span.AddEvent("watch consumer gone")
			return false
		case watcher.Msg <- msg:
		}
	}
}