File: preprocess-bench.py

Package: pytorch-vision 0.21.0-3 (Debian suites: sid, trixie)
import argparse
import os
from timeit import default_timer as timer

import torch
import torch.utils.data
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from tqdm import tqdm  # use the tqdm package directly rather than the private re-export in torch.utils.model_zoo


parser = argparse.ArgumentParser(description="torchvision ImageNet preprocessing benchmark")
parser.add_argument("--data", metavar="PATH", required=True, help="path to dataset")
parser.add_argument(
    "--nThreads", "-j", default=2, type=int, metavar="N", help="number of data loading workers (default: 2)"
)
parser.add_argument(
    "--batchSize", "-b", default=256, type=int, metavar="N", help="mini-batch size (1 = pure stochastic); default: 256"
)
parser.add_argument("--accimage", action="store_true", help="use accimage")


if __name__ == "__main__":
    args = parser.parse_args()

    if args.accimage:
        torchvision.set_image_backend("accimage")
    print(f"Using {torchvision.get_image_backend()}")

    # Data loading code
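    # Standard ImageNet-style augmentation: random resized crop to 224x224, random
    # horizontal flip, conversion to a float tensor in [0, 1], then channel-wise
    # normalization with the usual ImageNet mean/std.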
    transform = transforms.Compose(
        [
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.PILToTensor(),
            transforms.ConvertImageDtype(torch.float),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    traindir = os.path.join(args.data, "train")
    valdir = os.path.join(args.data, "val")
    train = datasets.ImageFolder(traindir, transform)
    val = datasets.ImageFolder(valdir, transform)
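    # The validation set is built for completeness; only the training loader is timed below.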
    train_loader = torch.utils.data.DataLoader(
        train, batch_size=args.batchSize, shuffle=True, num_workers=args.nThreads
    )
    train_iter = iter(train_loader)
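    # Creating the iterator spawns the DataLoader worker processes, so the timed loop
    # below also absorbs any worker start-up and prefetch cost that has not finished yet.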

    start_time = timer()
    # Time 20 batches per data-loading worker, driving a single progress bar.
    batch_count = 20 * args.nThreads
    with tqdm(total=batch_count) as pbar:
        for _ in range(batch_count):
            batch = next(train_iter)
            pbar.update(1)
    end_time = timer()
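    # The first figure extrapolates the measured per-batch time to one full pass over the
    # training set; the remaining figures are per-batch, per-image, and images/sec rates.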
    print(
        "Performance: {dataset:.0f} minutes/dataset, {batch:.1f} ms/batch,"
        " {image:.2f} ms/image {rate:.0f} images/sec".format(
            dataset=(end_time - start_time) * (float(len(train_loader)) / batch_count / 60.0),
            batch=(end_time - start_time) / float(batch_count) * 1.0e3,
            image=(end_time - start_time) / (batch_count * args.batchSize) * 1.0e3,
            rate=(batch_count * args.batchSize) / (end_time - start_time),
        )
    )