File: block.py

"""
Copyright (C) 2023  Michael Ablassmeier <abi@grinser.de>

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <https://www.gnu.org/licenses/>.
"""

from typing import Generator, IO, Any, Union
from nbd import Error as nbdError
from libvirtnbdbackup import lz4
from libvirtnbdbackup.exceptions import BackupException


def step(offset: int, length: Union[int, dict], maxRequestSize: int) -> Generator:
    """Process a block and ensure the maximum request size of the
    NBD server is not exceeded.

    If the length parameter is a dict, compression was enabled during
    backup; in that case the offsets and sizes of the original data
    cannot be used, and the compressed offsets and sizes must be used
    instead to read the correct lz4 frames from the stream.
    """
    blockOffset = offset
    if isinstance(length, dict):
        # Compression was enabled during backup: iterate over the saved lz4
        # frame sizes and yield each size together with the advanced offset.
        compressOffset = list(length.keys())[0]
        for part in length[compressOffset]:
            blockOffset += part
            yield part, blockOffset
    else:
        # Regular block: split it into chunks that do not exceed the NBD
        # server's maximum request size.
        while blockOffset < offset + length:
            blocklen = min(offset + length - blockOffset, maxRequestSize)
            yield blocklen, blockOffset
            blockOffset += blocklen
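
# Illustrative sketch, not part of the original module: how step() splits a
# block into NBD-sized requests. The numbers below are hypothetical, assuming
# a 4 MiB maxRequestSize reported by the NBD server.
#
#   >>> list(step(0, 5 * 1024 * 1024, 4 * 1024 * 1024))
#   [(4194304, 0), (1048576, 4194304)]
#
# With compression enabled, length is a dict whose value lists the sizes of
# the saved lz4 frames; step() then yields those frame sizes together with
# the running offset in the stream:
#
#   >>> list(step(0, {5242880: [2097152, 3145728]}, 4 * 1024 * 1024))
#   [(2097152, 2097152), (3145728, 5242880)]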


def write(
    writer: IO[Any], block, nbdCon, btype: str, compress: Union[bool, int]
) -> int:
    """Write a single block that does not exceed the NBD maxRequestSize
    setting. If compression is enabled, each block is compressed as an
    lz4 frame before it is written.
    """
    if btype == "raw":
        # Raw target: place the data at its original offset within the image.
        writer.seek(block.offset)

    try:
        # Read the block data from the NBD server.
        data = nbdCon.nbd.pread(block.length, block.offset)
    except nbdError as e:
        raise BackupException(e) from e

    if compress is not False:
        # Compress the block as a single lz4 frame before writing it.
        data = lz4.compressFrame(data, compress)

    return writer.write(data)
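
# Minimal usage sketch, not part of the original module. The names targetFile,
# extents and client are hypothetical stand-ins: an open binary writer, an
# iterable of block objects exposing .offset and .length, and the NBD
# connection wrapper expected by write().
#
#   with open("sda.data", "wb") as targetFile:
#       for extent in extents:
#           written = write(targetFile, extent, client, "raw", compress=False)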