File: chunk.py

"""
Copyright (C) 2023  Michael Ablassmeier <abi@grinser.de>

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <https://www.gnu.org/licenses/>.
"""

from typing import List, Any, Tuple, IO, Union
from nbd import Error as nbdError
from libvirtnbdbackup import block
from libvirtnbdbackup import lz4
from libvirtnbdbackup.exceptions import DiskBackupFailed

# pylint: disable=too-many-arguments
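
# Illustrative sketch, not part of the original module: block.step() is
# assumed to split an extent into request-sized pieces, yielding
# (length, offset) tuples as consumed by write() and read() below.
# A minimal stand-in under that assumption could look like:
#
#     def step(offset: int, length: int, maxRequestSize: int):
#         while length > 0:
#             chunk = min(length, maxRequestSize)
#             yield chunk, offset
#             offset += chunk
#             length -= chunk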


def write(
    writer: IO[Any], blk, nbdCon, btype: str, compress: Union[bool, int], pbar
) -> Tuple[int, List[int]]:
    """During extent processing, consecutive blocks with
    the same type(data or zeroed) are unified into one big chunk.
    This helps to reduce requests to the NBD Server.

    But in cases where the block to be saved exceeds the maximum
    recommended request size (nbdClient.maxRequestSize), we
    need to split one big request into multiple not exceeding
    the limit

    If compression is enabled, function returns a list of
    offsets for the compressed frames, which is appended
    to the end of the stream.
    """
    wSize = 0
    cSizes = []
    for blocklen, blockOffset in block.step(
        blk.offset, blk.length, nbdCon.maxRequestSize
    ):
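        # For raw targets the data is written at its original offset in the
        # target image; otherwise it is appended to the stream sequentially.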
        if btype == "raw":
            writer.seek(blockOffset)

        try:
            data = nbdCon.nbd.pread(blocklen, blockOffset)
        except nbdError as e:
            raise DiskBackupFailed(e) from e

        if compress is not False:
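            # Each chunk becomes its own lz4 frame; its compressed size is
            # recorded so the restore path can locate the frame in the stream.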
            compressed = lz4.compressFrame(data, compress)
            wSize += writer.write(compressed)
            cSizes.append(len(compressed))
        else:
            wSize += writer.write(data)

        pbar.update(blocklen)

    return wSize, cSizes
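

# Usage sketch with assumed caller-side names (streamWriter, progress): during
# extent processing the backup loop might call write() roughly like this,
# where blk carries .offset/.length, nbdCon wraps an open NBD handle and
# exposes .maxRequestSize, and pbar provides an update() method:
#
#     written, frameSizes = write(
#         streamWriter, blk, nbdCon, btype="stream",
#         compress=9,  # configured lz4 level, or False to disable
#         pbar=progress,
#     )
#     # With compression enabled, frameSizes is appended to the end of the
#     # stream so restore can locate the individual lz4 frames.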


def read(
    reader: IO[Any],
    offset: int,
    length: int,
    nbdCon,
    compression: bool,
    pbar,
) -> int:
    """Read data from reader and write to nbd connection

    If Compression is enabled function receives length information
    as dict, which contains the stream offsets for the compressed
    lz4 frames.

    Frames are read from the stream at the compressed size information
    (offset in the stream).

    After decompression, data is written back to original offset
    in the virtual machine disk image.

    If no compression is enabled, data is read from the regular
    data header at its position and written to nbd target
    directly.
    """
    wSize = 0
    for blocklen, blockOffset in block.step(offset, length, nbdCon.maxRequestSize):
        if compression is True:
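            # blocklen refers to a compressed lz4 frame in the stream here;
            # blockOffset is not used, the write offset advances by the
            # decompressed size instead.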
            data = lz4.decompressFrame(reader.read(blocklen))
            nbdCon.nbd.pwrite(data, offset)
            offset += len(data)
            wSize += len(data)
        else:
            data = reader.read(blocklen)
            nbdCon.nbd.pwrite(data, blockOffset)
            wSize += len(data)

        pbar.update(blocklen)

    return wSize
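

# Usage sketch with assumed caller-side names (streamReader, progress): on
# restore, the caller replays each saved block through read(); length is
# either the compressed lz4 frame data length or the raw block length,
# depending on whether compression was used during backup:
#
#     restored = read(
#         streamReader,
#         offset=blk.offset,
#         length=blk.length,
#         nbdCon=nbdCon,
#         compression=True,
#         pbar=progress,
#     )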