File: attachment_interfaces.go

package info (click to toggle)
golang-github-henrybear327-go-proton-api 1.0.0-4
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid, trixie
  • size: 1,088 kB
  • sloc: sh: 55; makefile: 26
file content (91 lines) | stat: -rw-r--r-- 2,635 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
package proton

import (
	"bytes"
	"context"

	"github.com/ProtonMail/gluon/async"
	"github.com/bradenaw/juniper/parallel"
)

// AttachmentAllocator abstracts the creation of the buffers that attachment
// downloads are written into, allowing callers to control allocation
// strategy (e.g. pooling).
type AttachmentAllocator interface {
	// NewBuffer should return a new byte buffer for use. Note that this function may be called from multiple go-routines.
	NewBuffer() *bytes.Buffer
}

type DefaultAttachmentAllocator struct{}

// NewDefaultAttachmentAllocator returns an allocator whose buffers are plain,
// empty in-memory bytes.Buffers.
func NewDefaultAttachmentAllocator() *DefaultAttachmentAllocator {
	return new(DefaultAttachmentAllocator)
}

// NewBuffer returns a fresh, empty buffer. No state is shared between calls,
// so it is safe to invoke from multiple goroutines.
func (DefaultAttachmentAllocator) NewBuffer() *bytes.Buffer {
	var buf bytes.Buffer

	return &buf
}

// Scheduler allows the user to specify how the attachment data for the message should be downloaded.
type Scheduler interface {
	// Schedule downloads the data for each attachment ID: it obtains one
	// buffer per attachment from storageProvider, fills it via the downloader
	// callback, and returns the buffers in the same order as attachmentIDs.
	Schedule(ctx context.Context, attachmentIDs []string, storageProvider AttachmentAllocator, downloader func(context.Context, string, *bytes.Buffer) error) ([]*bytes.Buffer, error)
}

// SequentialScheduler downloads the attachments one by one, in order, with no
// concurrency.
type SequentialScheduler struct{}

// NewSequentialScheduler returns a scheduler that downloads attachments
// strictly one after another.
func NewSequentialScheduler() *SequentialScheduler {
	return new(SequentialScheduler)
}

// Schedule downloads the attachment for each ID in order, one at a time. It
// stops at the first failure — either a downloader error or context
// cancellation — and in that case returns a nil slice together with the error.
func (SequentialScheduler) Schedule(ctx context.Context, attachmentIDs []string, storageProvider AttachmentAllocator, downloader func(context.Context, string, *bytes.Buffer) error) ([]*bytes.Buffer, error) {
	buffers := make([]*bytes.Buffer, 0, len(attachmentIDs))

	for _, id := range attachmentIDs {
		// Bail out before starting the next download if the caller cancelled.
		if err := ctx.Err(); err != nil {
			return nil, err
		}

		buf := storageProvider.NewBuffer()
		if err := downloader(ctx, id, buf); err != nil {
			return nil, err
		}

		buffers = append(buffers, buf)
	}

	return buffers, nil
}

// ParallelScheduler downloads attachments concurrently using a bounded pool
// of worker goroutines.
type ParallelScheduler struct {
	workers      int                // maximum number of concurrent downloads
	panicHandler async.PanicHandler // invoked if a download goroutine panics
}

// NewParallelScheduler returns a scheduler that downloads attachments with up
// to the given number of concurrent workers. A non-positive worker count is
// clamped to 1. The panicHandler is invoked if a download goroutine panics.
func NewParallelScheduler(workers int, panicHandler async.PanicHandler) *ParallelScheduler {
	if workers <= 0 {
		workers = 1
	}

	// Bug fix: the panicHandler argument was previously dropped, leaving the
	// scheduler's handler nil so goroutine panics went unhandled.
	return &ParallelScheduler{
		workers:      workers,
		panicHandler: panicHandler,
	}
}

// Schedule downloads all attachments concurrently with at most p.workers
// goroutines, returning the filled buffers in the same order as attachmentIDs.
// The first error (or context cancellation) aborts the whole batch.
func (p ParallelScheduler) Schedule(ctx context.Context, attachmentIDs []string, storageProvider AttachmentAllocator, downloader func(context.Context, string, *bytes.Buffer) error) ([]*bytes.Buffer, error) {
	// Never spin up more workers than there are attachments to fetch.
	workerCount := p.workers
	if n := len(attachmentIDs); n < workerCount {
		workerCount = n
	}

	fetchOne := func(ctx context.Context, id string) (*bytes.Buffer, error) {
		defer async.HandlePanic(p.panicHandler)

		buf := storageProvider.NewBuffer()
		if err := downloader(ctx, id, buf); err != nil {
			return nil, err
		}

		return buf, nil
	}

	return parallel.MapContext(ctx, workerCount, attachmentIDs, fetchOne)
}