package tools
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"hash"
"io"
"os"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/tr"
)
const (
	// memoryBufferLimit is the number of bytes (1 KiB) to buffer in memory
	// before spooling the contents of an `io.Reader` in `Spool()` to a
	// temporary file on disk.
	memoryBufferLimit = 1024
)
// CopyWithCallback copies reader to writer while performing a progress callback
func CopyWithCallback(writer io.Writer, reader io.Reader, totalSize int64, cb CopyCallback) (int64, error) {
	// Fast path: if the data could be cloned outright (e.g. via an OS-level
	// file clone), report the whole transfer as done in a single callback.
	if cloned, _ := CloneFile(writer, reader); cloned {
		if cb != nil {
			cb(totalSize, totalSize, 0)
		}
		return totalSize, nil
	}

	// Otherwise stream the data, interposing a CallbackReader only when
	// the caller actually wants progress reports.
	src := reader
	if cb != nil {
		src = &CallbackReader{
			C:         cb,
			TotalSize: totalSize,
			Reader:    reader,
		}
	}
	return io.Copy(writer, src)
}
// Get a new Hash instance of the type used to hash LFS content
func NewLfsContentHash() hash.Hash {
return sha256.New()
}
// HashingReader wraps a reader and calculates the hash of the data as it is read
type HashingReader struct {
reader io.Reader
hasher hash.Hash
}
func NewHashingReader(r io.Reader) *HashingReader {
return &HashingReader{r, NewLfsContentHash()}
}
func NewHashingReaderPreloadHash(r io.Reader, hash hash.Hash) *HashingReader {
return &HashingReader{r, hash}
}
func (r *HashingReader) Hash() string {
return hex.EncodeToString(r.hasher.Sum(nil))
}
func (r *HashingReader) Read(b []byte) (int, error) {
w, err := r.reader.Read(b)
if err == nil || err == io.EOF {
_, e := r.hasher.Write(b[0:w])
if e != nil && err == nil {
return w, e
}
}
return w, err
}
// RetriableReader wraps a error response of reader as RetriableError()
type RetriableReader struct {
reader io.Reader
}
func NewRetriableReader(r io.Reader) io.Reader {
return &RetriableReader{r}
}
// Read reads from the underlying reader, wrapping any non-retriable error
// as a retriable one.
//
// io.EOF passes through untouched since it signals a graceful end of input
// (c.f. https://git.io/v6riQ), and errors that are already retriable (e.g.
// when the wrapped reader is itself a *RetriableReader) are returned
// wholesale.
func (r *RetriableReader) Read(b []byte) (int, error) {
	n, err := r.reader.Read(b)
	if err != nil && err != io.EOF && !errors.IsRetriableError(err) {
		return n, errors.NewRetriableError(err)
	}
	return n, err
}
// Spool spools the contents from 'from' to 'to' by buffering the entire
// contents of 'from' into a temporary file created in the directory "dir".
// That buffer is held in memory until the file grows to larger than
// 'memoryBufferLimit`, then the remaining contents are spooled to disk.
//
// The temporary file is cleaned up after the copy is complete.
//
// The number of bytes written to "to", as well as any error encountered are
// returned.
func Spool(to io.Writer, from io.Reader, dir string) (n int64, err error) {
	// First, buffer up to `memoryBufferLimit` in memory.
	buf := make([]byte, memoryBufferLimit)

	// Assign into the named return "err" (rather than a `:=`-shadowed
	// local) so the io.EOF check below actually observes the result of
	// this read. With the shadowed form, the outer err was always nil
	// and a temporary file was created even when the whole stream fit
	// in memory.
	var bn int
	bn, err = from.Read(buf)
	if err != nil && err != io.EOF {
		return int64(bn), err
	}
	buf = buf[:bn]

	var spool io.Reader = bytes.NewReader(buf)
	if err != io.EOF {
		// If we weren't at the end of the stream, create a temporary
		// file, and spool the remaining contents there.
		tmp, terr := os.CreateTemp(dir, "")
		if terr != nil {
			return 0, errors.Wrap(terr, tr.Tr.Get("Unable to create temporary file for spooling"))
		}
		defer func() {
			tmp.Close()
			os.Remove(tmp.Name())
		}()

		if n, err = io.Copy(tmp, from); err != nil {
			return n, errors.Wrap(err, tr.Tr.Get("unable to spool"))
		}

		// Rewind so the spooled tail can be read back out.
		if _, err = tmp.Seek(0, io.SeekStart); err != nil {
			return 0, errors.Wrap(err, tr.Tr.Get("unable to seek"))
		}

		// The spooled contents will now be the concatenation of the
		// contents we stored in memory, then the remainder of the
		// contents on disk.
		spool = io.MultiReader(spool, tmp)
	}

	return io.Copy(to, spool)
}
// SplitOnNul splits the input on the NUL character. Usable with bufio.Scanner.
//
// Note: any trailing bytes after the final NUL are never emitted as a token;
// input is expected to be NUL-terminated.
func SplitOnNul(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if i := bytes.IndexByte(data, '\x00'); i >= 0 {
		return i + 1, data[:i], nil
	}
	return 0, nil, nil
}