// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package packet

import (
	"compress/bzip2"
	"compress/flate"
	"compress/zlib"
	"io"
	"strconv"

	"github.com/ProtonMail/go-crypto/openpgp/errors"
)

// Compressed represents a compressed OpenPGP packet. The decompressed contents
// will contain more OpenPGP packets. See RFC 4880, section 5.6.
type Compressed struct {
	Body io.Reader
}

const (
	NoCompression      = flate.NoCompression
	BestSpeed          = flate.BestSpeed
	BestCompression    = flate.BestCompression
	DefaultCompression = flate.DefaultCompression
)

// CompressionConfig contains compressor configuration settings.
type CompressionConfig struct {
	// Level is the compression level to use. It must be set to
	// between -1 and 9, with -1 causing the compressor to use the
	// default compression level, 0 causing the compressor to use
	// no compression and 1 to 9 representing increasing (better,
	// slower) compression levels. If Level is less than -1 or
	// more than 9, a non-nil error will be returned during
	// encryption. See the constants above for convenient common
	// settings for Level.
	Level int
}

// decompressionReader ensures that the whole compression packet is read.
type decompressionReader struct {
	compressed   io.Reader
	decompressed io.ReadCloser
	readAll      bool
}

func newDecompressionReader(r io.Reader, decompressor io.ReadCloser) *decompressionReader {
	return &decompressionReader{
		compressed:   r,
		decompressed: decompressor,
	}
}

func (dr *decompressionReader) Read(data []byte) (n int, err error) {
	if dr.readAll {
		return 0, io.EOF
	}

	n, err = dr.decompressed.Read(data)
	if err == io.EOF {
		dr.readAll = true
		// Close the decompressor.
		if errDec := dr.decompressed.Close(); errDec != nil {
			return n, errDec
		}
		// Consume all remaining data from the compressed packet.
		consumeAll(dr.compressed)
	}
	return n, err
}

func (c *Compressed) parse(r io.Reader) error {
	var buf [1]byte
	_, err := readFull(r, buf[:])
	if err != nil {
		return err
	}

	// Compression algorithm IDs per RFC 4880, section 9.3.
	switch buf[0] {
	case 0: // Uncompressed
		c.Body = r
	case 1: // ZIP (DEFLATE, RFC 1951)
		c.Body = newDecompressionReader(r, flate.NewReader(r))
	case 2: // ZLIB (RFC 1950)
		decompressor, err := zlib.NewReader(r)
		if err != nil {
			return err
		}
		c.Body = newDecompressionReader(r, decompressor)
	case 3: // BZip2
		c.Body = newDecompressionReader(r, io.NopCloser(bzip2.NewReader(r)))
	default:
		err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
	}

	return err
}

// LimitedBodyReader wraps the provided body reader with a limiter that restricts
// the number of bytes read to the specified limit.
// If limit is nil, the reader is unbounded.
func (c *Compressed) LimitedBodyReader(limit *int64) io.Reader {
	if limit == nil {
		return c.Body
	}
	return &LimitReader{R: c.Body, N: *limit}
}
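
// Illustrative sketch (not part of the package): assuming c is a parsed
// *Compressed packet, a caller can use LimitedBodyReader to guard against
// decompression bombs. The 1 MiB bound below is an arbitrary value chosen
// for the example, not a library default.
//
//	limit := int64(1 << 20)
//	body := c.LimitedBodyReader(&limit)
//	data, err := io.ReadAll(body) // err is ErrMessageTooLarge once the limit is reached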

// compressedWriteCloser represents the serialized compression stream
// header and the compressor. Its Close() method ensures that both the
// compressor and serialized stream header are closed. Its Write()
// method writes to the compressor.
type compressedWriteCloser struct {
	sh io.Closer      // Stream Header
	c  io.WriteCloser // Compressor
}

func (cwc compressedWriteCloser) Write(p []byte) (int, error) {
	return cwc.c.Write(p)
}

func (cwc compressedWriteCloser) Close() (err error) {
	err = cwc.c.Close()
	if err != nil {
		return err
	}

	return cwc.sh.Close()
}

// SerializeCompressed serializes a compressed data packet to w and
// returns a WriteCloser to which the literal data packets themselves
// can be written and which MUST be closed on completion. If cc is
// nil, sensible defaults will be used to configure the compression
// algorithm.
func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) {
	compressed, err := serializeStreamHeader(w, packetTypeCompressed)
	if err != nil {
		return
	}

	_, err = compressed.Write([]byte{uint8(algo)})
	if err != nil {
		return
	}

	level := DefaultCompression
	if cc != nil {
		level = cc.Level
	}

	var compressor io.WriteCloser
	switch algo {
	case CompressionZIP:
		compressor, err = flate.NewWriter(compressed, level)
	case CompressionZLIB:
		compressor, err = zlib.NewWriterLevel(compressed, level)
	default:
		s := strconv.Itoa(int(algo))
		err = errors.UnsupportedError("Unsupported compression algorithm: " + s)
	}
	if err != nil {
		return
	}

	literaldata = compressedWriteCloser{compressed, compressor}

	return
}
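
// Illustrative sketch (not part of the package): a typical caller writes the
// literal data packets through the returned WriteCloser and must close it so
// that both the compressor and the stream header are flushed. Here `out` is a
// placeholder io.WriteCloser supplied by the caller.
//
//	literal, err := packet.SerializeCompressed(out, packet.CompressionZLIB, nil)
//	if err != nil {
//		// handle error
//	}
//	// ... serialize literal data packets to `literal` ...
//	if err := literal.Close(); err != nil { // Close is mandatory
//		// handle error
//	}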

// LimitReader is an io.Reader that fails with ErrMessageTooLarge once the
// number of bytes read reaches N.
type LimitReader struct {
	R io.Reader // underlying reader
	N int64     // max bytes allowed
}

func (l *LimitReader) Read(p []byte) (int, error) {
	if l.N <= 0 {
		return 0, errors.ErrMessageTooLarge
	}

	n, err := l.R.Read(p)
	l.N -= int64(n)
	if err == nil && l.N <= 0 {
		err = errors.ErrMessageTooLarge
	}

	return n, err
}