File: test_verify_content.py

package info (click to toggle)
torf 4.3.0-1
  • links: PTS, VCS
  • area: main
  • in suites: sid
  • size: 988 kB
  • sloc: python: 10,054; makefile: 15; sh: 8
file content (642 lines) | stat: -rw-r--r-- 29,493 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
import collections
import errno
import itertools
import os
import random
from unittest import mock

import pytest

import torf

from . import (
    ComparableException,
    calc_corruptions,
    calc_good_pieces,
    calc_piece_indexes,
    calc_pieces_done,
    change_file_size,
    display_filespecs,
    file_piece_indexes,
    file_range,
    fuzzylist,
    pos2file,
    pos2files,
    random_positions,
    round_down_to_multiple,
)

import logging  # isort:skip
debug = logging.getLogger('test').debug


# Allow arbitrary small piece sizes to make debugging easier.
@pytest.fixture(autouse=True)
def ignore_piece_size_check(mocker):
    """Make torf accept arbitrarily small piece sizes for all tests in this module."""
    target = 'torf._utils.is_divisible_by_16_kib'
    mocker.patch(target, return_value=True)


class CollectingCallback():
    """
    Record every argument that Torrent.verify() passes to its callback and
    make basic sanity assertions about each call
    """

    def __init__(self, torrent):
        super().__init__()
        self.torrent = torrent
        # Every pieces_done value, in call order
        self.seen_pieces_done = []
        # Per-file collections; fuzzylist tolerates nondeterministic ordering
        self._seen_piece_indexes = collections.defaultdict(lambda: fuzzylist())  # noqa: F405
        self._seen_good_pieces = collections.defaultdict(lambda: fuzzylist())  # noqa: F405
        self._seen_skipped_pieces = collections.defaultdict(lambda: fuzzylist())  # noqa: F405
        self.seen_exceptions = fuzzylist()  # noqa: F405

    def __call__(self, t, path, pieces_done, pieces_total, piece_index, piece_hash, exc):
        assert t is self.torrent
        assert pieces_total == t.pieces
        assert 1 <= pieces_done <= pieces_total
        self.seen_pieces_done.append(pieces_done)
        self._seen_piece_indexes[path.name].append(piece_index)
        # Dispatch on (exc, piece_hash): error / skipped piece / good piece
        if exc is not None:
            self._record_exception(piece_hash, exc)
        elif piece_hash is None:
            self._record_skipped_piece(path, piece_index, exc)
        else:
            self._record_good_piece(path, piece_index, piece_hash, exc)

    def _record_exception(self, piece_hash, exc):
        # Only hash mismatches come with the computed 20-byte SHA1 piece hash
        if isinstance(exc, torf.VerifyContentError):
            assert type(piece_hash) is bytes and len(piece_hash) == 20  # noqa: E721
        else:
            assert piece_hash is None
        self.seen_exceptions.append(ComparableException(exc))  # noqa: F405
        debug(f'--- Seeing exception: {self.seen_exceptions[-1]}')

    def _record_skipped_piece(self, path, piece_index, exc):
        assert exc is None
        self._seen_skipped_pieces[path.name].append(piece_index)
        debug(f'--- Seeing skipped piece of {path.name}: {piece_index}')

    def _record_good_piece(self, path, piece_index, piece_hash, exc):
        assert exc is None
        assert type(piece_hash) is bytes and len(piece_hash) == 20  # noqa: E721
        debug(f'--- Seeing good piece of {path.name}: {piece_index}')
        self._seen_good_pieces[path.name].append(piece_index)

    @property
    def seen_piece_indexes(self):
        """Map of file name to every piece_index reported for that file"""
        return dict(self._seen_piece_indexes)

    @property
    def seen_good_pieces(self):
        """Map of file name to successfully verified piece_indexes"""
        return dict(self._seen_good_pieces)

    @property
    def seen_skipped_pieces(self):
        """Map of file name to skipped piece_indexes"""
        return dict(self._seen_skipped_pieces)

class _TestCaseBase():
    """
    This class runs most of the tests while the test_*() functions mostly
    collect parametrized test values

    Subclasses introduce errors into the content (corruptions, missing files,
    missized files) and this base class computes which exceptions, piece
    indexes and progress values Torrent.verify() is expected to report.
    Expectation values are computed lazily and cached in `_exp_*` attributes.
    """
    def __init__(self, create_dir, create_file, create_torrent_file, forced_piece_size):
        # Fixtures from conftest; stored so setup() in subclasses can use them
        self.create_dir = create_dir
        self.create_file = create_file
        self.create_torrent_file = create_torrent_file
        self.forced_piece_size = forced_piece_size
        self.reset()

    def reset(self):
        """Forget all introduced errors and drop cached expectation values."""
        self.corruption_positions = set()
        self.files_corrupt = []
        self.files_missing = []
        self.files_missized = []
        # Remove cached lazy properties so they are recomputed on next access
        for attr in ('_exp_exceptions', '_exp_pieces_done',
                     '_exp_piece_indexes', '_exp_good_pieces',
                     '_exp_exc_corruptions', '_exp_exc_files_missing', '_exp_exc_files_missized'):
            if hasattr(self, attr):
                delattr(self, attr)

    def run(self, *_, with_callback, exp_return_value=None, skip_on_error=False):
        """
        Run Torrent.verify() and assert it behaves as expected.

        with_callback: Whether verify() is called with a callback (errors are
                       reported via callback) or without (first error raises)
        exp_return_value: Expected return value of verify(), or None to skip
                          checking the return value
        skip_on_error: Currently unused; kept for the disabled skip_on_error
                       feature (see commented-out code below)
        """
        debug(f'Original stream: {self.stream_original.hex()}')
        debug(f' Corrupt stream: {self.stream_corrupt.hex()}')
        debug(f'Corruption positions: {sorted(self.corruption_positions)}')
        debug(f'Corrupt piece indexes: {sorted(set(corrpos // self.piece_size for corrpos in self.corruption_positions))}')

        self.skip_on_error = skip_on_error
        kwargs = {
            # 'skip_on_error': skip_on_error,
            'exp_return_value': exp_return_value,
        }
        if not with_callback:
            exp_exceptions = self.exp_exceptions
            if not exp_exceptions:
                debug('Expecting no exceptions')
                self._run_without_callback(**kwargs)
            else:
                debug(f'Expected exceptions: {exp_exceptions}')
                exp_exception_types = tuple(set(type(exc) for exc in exp_exceptions))
                debug(f'Expected exception types: {exp_exception_types}')
                with pytest.raises(exp_exception_types) as e:
                    self._run_without_callback(**kwargs)
                # Usually the first error in the stream is reported, but not
                # always, so we expect one of the possible exceptions to be
                # raised.
                assert e.value in exp_exceptions
        else:
            return self._run_with_callback(**kwargs)

    def _run_without_callback(self, exp_return_value, **kwargs):
        """Call verify() without callback; optionally check its return value."""
        debug(f'################ VERIFY WITHOUT CALLBACK: kwargs={kwargs}')
        if exp_return_value is not None:
            assert self.torrent.verify(self.content_path, **kwargs) is exp_return_value
        else:
            self.torrent.verify(self.content_path, **kwargs)

    def _run_with_callback(self, exp_return_value, **kwargs):
        """Call verify() with a CollectingCallback and assert everything it saw."""
        debug(f'################ VERIFY WITH CALLBACK: kwargs={kwargs}')
        cb = CollectingCallback(self.torrent)
        kwargs['callback'] = cb
        # interval=0 means the callback is called for every piece
        kwargs['interval'] = 0
        if exp_return_value is not None:
            return_value = self.torrent.verify(self.content_path, **kwargs)
            assert return_value == exp_return_value
        else:
            self.torrent.verify(self.content_path, **kwargs)

        debug(f'seen_exceptions: {cb.seen_exceptions}')
        assert cb.seen_exceptions == self.exp_exceptions

        debug(f'seen_piece_indexes: {cb.seen_piece_indexes}')
        assert cb.seen_piece_indexes == self.exp_piece_indexes

        debug(f'seen_pieces_done: {cb.seen_pieces_done}')
        assert cb.seen_pieces_done == self.exp_pieces_done
        # Last pieces_done value must be the total number of pieces so progress
        # is finalized correctly, e.g. progress bar ends at 100%
        assert cb.seen_pieces_done[-1] == self.torrent.pieces

        debug(f'seen_good_pieces: {cb.seen_good_pieces}')
        assert cb.seen_good_pieces == self.exp_good_pieces

    @property
    def exp_pieces_done(self):
        """Expected sequence of pieces_done values (computed once, then cached)."""
        if not hasattr(self, '_exp_pieces_done'):
            self._exp_pieces_done = calc_pieces_done(self.filespecs_abspath, self.piece_size,  # noqa: F405
                                                     self.files_missing, self.files_missized)
            debug(f'Expected pieces done: {self._exp_pieces_done}')
        return self._exp_pieces_done

    @property
    def exp_piece_indexes(self):
        """Expected piece_index values per file (computed once, then cached)."""
        if not hasattr(self, '_exp_piece_indexes'):
            self._exp_piece_indexes = calc_piece_indexes(self.filespecs, self.piece_size,  # noqa: F405
                                                         self.files_missing, self.files_missized)
            debug(f'Expected piece indexes: {dict(self._exp_piece_indexes)}')
        return self._exp_piece_indexes

    @property
    def exp_good_pieces(self):
        """Expected successfully verified pieces per file (computed once, then cached)."""
        if not hasattr(self, '_exp_good_pieces'):
            self._exp_good_pieces = calc_good_pieces(self.filespecs,  # noqa: F405
                                                     self.piece_size,
                                                     self.files_missing,
                                                     self.corruption_positions,
                                                     self.files_missized)
            # This is disabled because the skip_on_error option for
            # Torrent.verify() was removed. Feel free to re-implement and
            # re-enable.
            # if self.skip_on_error:
            #     self._exp_good_pieces = skip_good_pieces(self._exp_good_pieces,  # noqa: F405
            #                                              self.filespecs,
            #                                              self.piece_size,
            #                                              self.corruption_positions)
            debug(f'Expected good pieces: {self._exp_good_pieces}')
        return self._exp_good_pieces

    @property
    def exp_exc_corruptions(self):
        """Expected VerifyContentError exceptions from corrupted pieces (cached)."""
        if not hasattr(self, '_exp_exc_corruptions'):
            self._exp_exc_corruptions = calc_corruptions(self.filespecs_abspath,  # noqa: F405
                                                         self.piece_size,
                                                         self.corruption_positions)
            # This is disabled because the skip_on_error option for
            # Torrent.verify() was removed. Feel free to re-implement and
            # re-enable.
            # if self.skip_on_error:
            #     self._exp_exc_corruptions = skip_corruptions(self._exp_exc_corruptions, self.filespecs_abspath,  # noqa: F405
            #                                                  self.piece_size, self.corruption_positions,
            #                                                  self.files_missing, self.files_missized)
            debug('Expected corruptions:')
            for exc in self._exp_exc_corruptions:
                debug(f'  {exc}')
        return self._exp_exc_corruptions

    @property
    def exp_exc_files_missing(self):
        """Expected ReadError(ENOENT) exceptions for missing files (cached)."""
        if not hasattr(self, '_exp_exc_files_missing'):
            self._exp_exc_files_missing = fuzzylist(*(ComparableException(torf.ReadError(errno.ENOENT, filepath))  # noqa: F405
                                                      for filepath in self.files_missing))
            debug(f'Expected files missing: {self._exp_exc_files_missing}')
        return self._exp_exc_files_missing

    @property
    def exp_exc_files_missized(self):
        """Expected VerifyFileSizeError exceptions for missized files (cached)."""
        if not hasattr(self, '_exp_exc_files_missized'):
            def mkexc(filepath):
                # Subclasses provide the original/actual file sizes
                fsize_orig = self.get_original_filesize(filepath)
                fsize_actual = self.get_actual_filesize(filepath)
                return ComparableException(torf.VerifyFileSizeError(  # noqa: F405
                    filepath, actual_size=fsize_actual, expected_size=fsize_orig))
            self._exp_exc_files_missized = fuzzylist(*(mkexc(filepath) for filepath in self.files_missized))  # noqa: F405
            debug(f'Expected files missized: {self._exp_exc_files_missized}')
        return self._exp_exc_files_missized

    @property
    def exp_exceptions(self):
        """
        All expected exceptions as a fuzzylist: mandatory ones that must be
        reported plus tolerated ones that may or may not be reported (cached).
        """
        if not hasattr(self, '_exp_exceptions'):
            debug('Calculating expected exceptions:')

            # Exceptions that must be reported
            mandatory = set(self.exp_exc_files_missing)
            maybe = set()

            # Files with wrong size must be reported if they are not also missing
            # NOTE(review): ReadError exposes `.path` while VerifyFileSizeError
            # exposes `.filepath` — presumably matching torf's exception API;
            # confirm if either attribute name changes upstream.
            mandatory_files = set(exc.path for exc in mandatory)
            for exc in self.exp_exc_files_missized:
                if exc.filepath not in mandatory_files:
                    mandatory.add(exc)

            # If there are no missing or missized files, corruptions are mandatory
            if not mandatory:
                debug('all corruption exceptions are mandatory')
                mandatory.update(self.exp_exc_corruptions)
                maybe.update(self.exp_exc_corruptions.maybe)
            else:
                debug('not all corruption exceptions are mandatory')
                # Corrupt files are only reported if their piece_indexes aren't
                # already covered by missing or missized files
                missing_missized_pis = set()
                for filepath in itertools.chain(self.files_missing, self.files_missized):
                    filename = os.path.basename(filepath)
                    file_pis = file_piece_indexes(filename, self.filespecs, self.piece_size, exclusive=False)  # noqa: F405
                    missing_missized_pis.update(file_pis)
                for exc in self.exp_exc_corruptions:
                    if exc.piece_index not in missing_missized_pis:
                        debug(f'  expecting non-missing/missized: {str(exc)}')
                        mandatory.add(exc)
                    else:
                        debug(f'  not expecting missing/missized: {str(exc)}')

                # Also allow corruptions that are already classified as optional.
                for exc in self.exp_exc_corruptions.maybe:
                    debug(f'  also allowing {str(exc)}')
                    maybe.add(exc)

            self._exp_exceptions = fuzzylist(*mandatory, maybe=maybe)  # noqa: F405
            debug('Expected exceptions:')
            for e in self._exp_exceptions:
                debug(repr(e))
            debug('Tolerated exceptions:')
            for e in self._exp_exceptions.maybe:
                debug(repr(e))

        return self._exp_exceptions

class _TestCaseSinglefile(_TestCaseBase):
    """Test case for a torrent that contains a single file."""

    @property
    def filespecs_abspath(self):
        # (absolute file path, file size) pairs; always exactly one entry here
        return ((str(self.content_path), self.filesize),)

    def setup(self, filespecs, piece_size):
        """Create the content file and a matching torrent from `filespecs`."""
        self.filespecs = filespecs
        self.piece_size = piece_size
        self.filename = filespecs[0][0]
        self.filesize = filespecs[0][1]
        debug(f'Filename: {self.filename}, size: {self.filesize}, piece size: {piece_size}')
        self.stream_original = b'\x00' * self.filesize
        self.stream_corrupt = bytearray(self.stream_original)
        self.content_path = self.create_file(self.filename, self.stream_original)
        with self.forced_piece_size(piece_size):
            with self.create_torrent_file(path=self.content_path) as torrent_filepath:
                self.torrent = torf.Torrent.read(torrent_filepath)

    def corrupt_stream(self, *positions):
        """Flip one byte at each given position (random positions if none given)."""
        # Check if this file already has other errors
        if self.files_missing or self.files_missized:
            return
        # Introduce random number of corruptions without changing stream length
        corruption_positions = set(random_positions(self.stream_corrupt) if not positions else positions)  # noqa: F405
        for corrpos in corruption_positions:
            debug(f'* Introducing corruption at index {corrpos}')
            self.stream_corrupt[corrpos] = (self.stream_corrupt[corrpos] + 1) % 256
        if corruption_positions:
            # Write the corrupted file once after all corruptions are applied
            # instead of rewriting the whole file once per corruption
            self.content_path.write_bytes(self.stream_corrupt)
        self.corruption_positions.update(corruption_positions)

    def delete_file(self, index=None):
        """Remove the content file from the file system (index is ignored)."""
        # Check if this file already has other errors
        if self.corruption_positions or self.files_missized:
            return
        debug(f'* Removing file from file system: {os.path.basename(self.content_path)}')
        os.rename(self.content_path, str(self.content_path) + '.deleted')
        self.files_missing = [self.content_path]
        self.stream_corrupt = b'\xCC' * self.torrent.size
        # No need to update self.corruption_positions.  A missing single file
        # does not produce any corruption errors because the "No such file"
        # error is enough.

    def change_file_size(self, index=None):
        """Grow or shrink the content file (index is ignored)."""
        # Check if this file already has other errors
        if self.corruption_positions or self.files_missing:
            return
        debug(f'* Changing file size in file system: {os.path.basename(self.content_path)}')
        self.stream_corrupt = change_file_size(self.content_path, self.torrent.size)  # noqa: F405
        self.files_missized.append(self.content_path)
        debug(f'  Corruption positions after changing file size: {self.corruption_positions}')

    def get_original_filesize(self, filepath):
        # Only one file, so `filepath` is irrelevant
        return len(self.stream_original)

    def get_actual_filesize(self, filepath):
        # Only one file, so `filepath` is irrelevant
        return len(self.stream_corrupt)

class _TestCaseMultifile(_TestCaseBase):
    """Test case for a torrent that contains multiple files in a directory."""

    @property
    def filespecs_abspath(self):
        # (absolute file path, file size) pairs for every file in the torrent
        return tuple((str(self.content_path / filename), filesize)
                     for filename,filesize in self.filespecs)

    def setup(self, filespecs, piece_size):
        """Create the content directory and a matching torrent from `filespecs`."""
        debug(f'File sizes: {", ".join(f"{n}={s}" for n,s in filespecs)}')
        debug(f'Stream size: {sum(s for _,s in filespecs)}')
        debug(f'Piece size: {piece_size}')
        self.filespecs = filespecs
        self.piece_size = piece_size
        self.content_original = {}
        self.content_corrupt = {}
        create_dir_args = []
        for filename,filesize in filespecs:
            data = b'\x00' * filesize
            self.content_original[filename] = data
            self.content_corrupt[filename] = bytearray(data)
            create_dir_args.append((filename, data))
        self.content_path = self.create_dir('content', *create_dir_args)
        debug(f'Content: {self.content_original}')
        with self.forced_piece_size(piece_size):
            with self.create_torrent_file(path=self.content_path) as torrent_filepath:
                self.torrent = torf.Torrent.read(torrent_filepath)

    @property
    def stream_original(self):
        """Concatenation of all original file contents."""
        return b''.join((data for data in self.content_original.values()))

    @property
    def stream_corrupt(self):
        """Concatenation of all possibly corrupted file contents."""
        return b''.join((data for data in self.content_corrupt.values()))

    def corrupt_stream(self, *positions):
        """Flip one byte at each given stream position (random positions if none given)."""
        # Introduce corruptions without changing stream length
        error_files = set(os.path.basename(f) for f in itertools.chain(
            self.files_missing, self.files_missized))
        corruption_positions = set(random_positions(self.stream_original) if not positions else positions)  # noqa: F405
        for corrpos_in_stream in corruption_positions:
            filename,corrpos_in_file = pos2file(corrpos_in_stream, self.filespecs, self.piece_size)  # noqa: F405
            # Don't corrupt files that are already missing or missized
            if filename in error_files:
                continue
            else:
                # Fixed: the debug message contained literal "(unknown)"
                # placeholders although `filename` is resolved above
                debug(f'* Introducing corruption in {filename} at index {corrpos_in_stream} in stream, '
                      f'{corrpos_in_file} in file')
                self.corruption_positions.add(corrpos_in_stream)
                data = self.content_corrupt[filename]
                data[corrpos_in_file] = (data[corrpos_in_file] + 1) % 256
                (self.content_path / filename).write_bytes(data)
                self.files_corrupt.append(str(self.content_path / filename))
        debug(f'  Corruption positions after corrupting stream: {self.corruption_positions}')

    def delete_file(self, index=None):
        """Remove the file at `index` in filespecs (random file if None)."""
        if index is None:
            index = random.choice(range(len(self.filespecs)))
        # Remove file at `index` in filespecs from file system
        filename,filesize = self.filespecs[index]

        # Don't delete corrupt/missing file
        error_files = set(os.path.basename(f) for f in itertools.chain(
            self.files_corrupt, self.files_missized))
        if filename in error_files:
            return

        debug(f'* Removing file from file system: {os.path.basename(filename)}')
        filepath = self.content_path / filename
        os.rename(filepath, str(filepath) + '.deleted')
        self.files_missing.append(filepath)
        self.content_corrupt[os.path.basename(filename)] = b'\xCC' * filesize

        # Re-calculate corruptions for adjacent files of all missing files
        corruption_positions = set()
        for removed_filepath in self.files_missing:
            # Find the first byte of the first affected piece and the first byte
            # of the last affected piece and mark them as corrupt
            removed_filename = os.path.basename(removed_filepath)
            file_beg,file_end = file_range(removed_filename, self.filespecs)  # noqa: F405
            debug(f'  {removed_filename} starts at {file_beg} and ends at {file_end} in stream')
            first_affected_piece_pos = round_down_to_multiple(file_beg, self.piece_size)  # noqa: F405
            last_affected_piece_pos = round_down_to_multiple(file_end, self.piece_size)  # noqa: F405
            debug(f'  First affected piece starts at {first_affected_piece_pos} '
                  f'and last affected piece starts at {last_affected_piece_pos}')
            corruption_positions.add(first_affected_piece_pos)
            corruption_positions.add(last_affected_piece_pos)

        self.corruption_positions.update(corruption_positions)
        self._remove_skipped_corruptions()
        debug(f'  Corruption positions after removing file: {self.corruption_positions}')

    def _remove_skipped_corruptions(self):
        """Drop corruption positions whose pieces only touch missing/missized files."""
        # Finally, remove corruptions that exclusively belong to
        # missing/missized files because they are always skipped
        skipped_files = {str(filepath) for filepath in itertools.chain(self.files_missing, self.files_missized)}
        debug(f'  skipped_files: {skipped_files}')
        for corrpos in tuple(self.corruption_positions):
            affected_files = pos2files(corrpos, self.filespecs_abspath, self.piece_size)  # noqa: F405
            if all(f in skipped_files for f in affected_files):
                debug(f'  only skipped files are affected by corruption at position {corrpos}')
                self.corruption_positions.remove(corrpos)

    def change_file_size(self, index=None):
        """Grow or shrink the file at `index` in filespecs (random file if None)."""
        # Pick random file
        if index is None:
            filename = random.choice(tuple(self.content_original))
        else:
            filename = tuple(self.content_original)[index]
        filepath = self.content_path / filename

        # Don't change corrupt/missing file
        error_files = set(os.path.basename(f) for f in itertools.chain(
            self.files_missing, self.files_corrupt))
        if filename in error_files:
            return

        debug(f'* Changing file size in file system: {filepath}')

        # Change file size
        self.content_corrupt[filename] = change_file_size(  # noqa: F405
            filepath, len(self.content_original[filename]))
        self.files_missized.append(filepath)

        # Check if the beginning of adjacent files will be corrupted
        file_beg,file_end = file_range(filename, self.filespecs)  # noqa: F405
        debug(f'  Original file beginning and end in stream: {file_beg}, {file_end}')
        if file_beg % self.piece_size != 0:
            debug(f'  Beginning corrupts previous file at piece_index {file_beg // self.piece_size}')
            self.corruption_positions.add(file_beg)

        # Check if the end of adjacent files will be corrupted
        if (file_end + 1) % self.piece_size != 0:
            filepath,_ = pos2file(file_end, self.filespecs_abspath, self.piece_size)  # noqa: F405
            if (filepath not in self.files_missing and
                filepath not in self.files_missized and
                filepath != self.filespecs_abspath[-1][0]):
                debug(f'  End corrupts next file at piece_index {(file_end + 1) // self.piece_size}')
                self.corruption_positions.add(file_end)

        self._remove_skipped_corruptions()
        debug(f'  Corruption positions after changing file size: {self.corruption_positions}')

    def get_original_filesize(self, filepath):
        return len(self.content_original[os.path.basename(filepath)])

    def get_actual_filesize(self, filepath):
        return len(self.content_corrupt[os.path.basename(filepath)])

@pytest.fixture
def mktestcase(create_dir, create_file, forced_piece_size, create_torrent_file):
    """Return a factory that builds a _TestCaseSinglefile or _TestCaseMultifile."""
    def mktestcase_(filespecs, piece_size):
        # A single filespec means a single-file torrent
        testcls = _TestCaseSinglefile if len(filespecs) == 1 else _TestCaseMultifile
        testcase = testcls(create_dir, create_file, create_torrent_file, forced_piece_size)
        testcase.setup(filespecs, piece_size)
        debug(f'################ TEST TORRENT CREATED: {testcase.torrent}')
        return testcase
    return mktestcase_


def test_validate_is_called_first(monkeypatch):
    """verify() must call validate() before doing anything else."""
    torrent = torf.Torrent()
    failing_validate = mock.Mock(side_effect=torf.MetainfoError('Mock error'))
    monkeypatch.setattr(torrent, 'validate', failing_validate)
    with pytest.raises(torf.MetainfoError) as excinfo:
        torrent.verify('some/path')
    assert str(excinfo.value) == 'Invalid metainfo: Mock error'
    failing_validate.assert_called_once_with()

def test_verify_singlefile_torrent_with_directory(generated_singlefile_torrent, create_dir):
    """Verifying a single-file torrent against a directory must fail."""
    torrent = generated_singlefile_torrent
    content_path = create_dir('multifile')
    exp_exception = torf.VerifyIsDirectoryError(content_path)

    # Without callback: the error is raised directly
    with pytest.raises(type(exp_exception)) as excinfo:
        torrent.verify(content_path)
    assert str(excinfo.value) == str(exp_exception)

    # With callback: the error is delivered via exactly one callback call
    cb = mock.Mock()
    torrent.verify(content_path, callback=cb)
    exp_call = mock.call(
        torrent,                             # torrent
        content_path,                        # path
        0,                                   # pieces_done
        torrent.pieces,                      # pieces_total
        0,                                   # piece_index
        None,                                # piece_hash
        ComparableException(exp_exception),  # exception
    )
    assert cb.call_args_list == [exp_call]

def test_verify_multifile_torrent_with_file(generated_multifile_torrent, tmp_path):
    """Verifying a multifile torrent against a regular file must fail."""
    torrent = generated_multifile_torrent
    content_path = tmp_path / 'singlefile'
    content_path.write_text('some file data')
    exp_exception = torf.VerifyNotDirectoryError(content_path)

    # Without callback: the error is raised directly
    with pytest.raises(type(exp_exception)) as excinfo:
        torrent.verify(content_path)
    assert str(excinfo.value) == str(exp_exception)

    # With callback: the error is delivered via exactly one callback call
    cb = mock.Mock()
    torrent.verify(content_path, callback=cb)
    exp_call = mock.call(
        torrent,                             # torrent
        content_path,                        # path
        0,                                   # pieces_done
        torrent.pieces,                      # pieces_total
        0,                                   # piece_index
        None,                                # piece_hash
        ComparableException(exp_exception),  # exception
    )
    assert cb.call_args_list == [exp_call]

def test_verify_content_successfully(mktestcase, piece_size, callback, filespecs):
    """Unmodified content must verify successfully."""
    display_filespecs(filespecs, piece_size)  # noqa: F405
    testcase = mktestcase(filespecs, piece_size)
    testcase.run(with_callback=callback['enabled'], exp_return_value=True)

def test_verify_content_with_random_corruptions_and_no_skipping(mktestcase, piece_size, callback, filespecs):
    """Randomly corrupted content must fail verification."""
    display_filespecs(filespecs, piece_size)  # noqa: F405
    testcase = mktestcase(filespecs, piece_size)
    testcase.corrupt_stream()
    testcase.run(with_callback=callback['enabled'], exp_return_value=False)

# def test_verify_content_with_random_corruptions_and_skipping(mktestcase, piece_size, callback, filespecs):
#     display_filespecs(filespecs, piece_size)  # noqa: F405
#     tc = mktestcase(filespecs, piece_size)
#     tc.corrupt_stream()
#     tc.run(with_callback=callback['enabled'],
#            skip_on_error=True,
#            exp_return_value=False)

def test_verify_content_with_missing_files_and_no_skipping(mktestcase, piece_size, callback, filespecs, filespec_indexes):
    """Content with deleted files must fail verification."""
    display_filespecs(filespecs, piece_size)  # noqa: F405
    testcase = mktestcase(filespecs, piece_size)
    for index in filespec_indexes:
        testcase.delete_file(index)
    testcase.run(with_callback=callback['enabled'], exp_return_value=False)

# def test_verify_content_with_missing_files_and_skipping(mktestcase, piece_size, callback, filespecs, filespec_indexes):
#     display_filespecs(filespecs, piece_size)  # noqa: F405
#     tc = mktestcase(filespecs, piece_size)
#     for index in filespec_indexes:
#         tc.delete_file(index)
#     tc.run(with_callback=callback['enabled'],
#            skip_on_error=True,
#            exp_return_value=False)

def test_verify_content_with_changed_file_size_and_no_skipping(mktestcase, piece_size, callback, filespecs):
    """Content with a grown/shrunk file must fail verification."""
    display_filespecs(filespecs, piece_size)  # noqa: F405
    testcase = mktestcase(filespecs, piece_size)
    testcase.change_file_size()
    testcase.run(with_callback=callback['enabled'], exp_return_value=False)

# def test_verify_content_with_changed_file_size_and_skipping(mktestcase, piece_size, callback, filespecs):
#     display_filespecs(filespecs, piece_size)  # noqa: F405
#     tc = mktestcase(filespecs, piece_size)
#     tc.change_file_size()
#     tc.run(with_callback=callback['enabled'],
#            skip_on_error=True,
#            exp_return_value=False)

def test_verify_content_with_multiple_error_types(mktestcase, piece_size, callback, filespecs):
    """Content with a random mix of error types must fail verification."""
    display_filespecs(filespecs, piece_size)  # noqa: F405
    testcase = mktestcase(filespecs, piece_size)
    # Introduce 2 or 3 errors in random order
    errorizers = [testcase.corrupt_stream, testcase.delete_file, testcase.change_file_size]
    num_errors = random.randint(2, len(errorizers))
    for _ in range(num_errors):
        index = random.choice(range(len(errorizers)))
        errorizers.pop(index)()
    testcase.run(with_callback=callback['enabled'],
                 # skip_on_error=random.choice((True, False)),
                 exp_return_value=False)