File: utils.py

import argparse
import importlib.util
import os
import random

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import torch

from .test.configs.test_config import CommonConfig

SEED_VAL = 42
DNN_LIB = "DNN"
# common root path for saved models; formatted with the producing library's name
MODEL_PATH_ROOT = os.path.join(CommonConfig().output_data_root_dir, "{}/models")


def get_full_model_path(lib_name, model_full_name):
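    """Return the save directory and the full file path for a converted model.

    Example (hypothetical arguments; "<output_root>" stands for the configured
    output_data_root_dir):
        get_full_model_path("pytorch", "resnet50.onnx")
        -> {"path": "<output_root>/pytorch/models",
            "full_path": "<output_root>/pytorch/models/resnet50.onnx"}
    """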
    model_path = MODEL_PATH_ROOT.format(lib_name)
    return {
        "path": model_path,
        "full_path": os.path.join(model_path, model_full_name)
    }


def plot_acc(data_list, experiment_name):
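    """Plot per-iteration inference time of the original model against its
    converted DNN counterpart and save the figure as "<experiment_name>.png".

    ``data_list`` is expected to be an (N, 2) array: column 0 holds the
    original model timings, column 1 the converted model timings (in ms).
    """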
    plt.figure(figsize=[8, 6])
    plt.plot(data_list[:, 0], "r", linewidth=2.5, label="Original Model")
    plt.plot(data_list[:, 1], "b", linewidth=2.5, label="Converted DNN Model")
    plt.xlabel("Iterations", fontsize=15)
    plt.ylabel("Time (ms)", fontsize=15)
    plt.title(experiment_name, fontsize=15)
    plt.legend()
    full_path_to_fig = os.path.join(CommonConfig().output_data_root_dir, experiment_name + ".png")
    plt.savefig(full_path_to_fig, bbox_inches="tight")


def get_final_summary_info(general_quality_metric, general_inference_time, metric_name):
    general_quality_metric = np.array(general_quality_metric)
    general_inference_time = np.array(general_inference_time)
    # column 0 refers to the original model, column 1 to the converted DNN model;
    # the literals are parenthesized so they concatenate into a single string
    summary_line = (
        "===== End of processing. General results:\n"
        "\t* mean {} for the original model: {}\t"
        "\t* mean time (min) for the original model inferences: {}\n"
        "\t* mean {} for the DNN model: {}\t"
        "\t* mean time (min) for the DNN model inferences: {}\n"
    ).format(
        metric_name, np.mean(general_quality_metric[:, 0]),
        np.mean(general_inference_time[:, 0]) / 60000,  # ms -> min
        metric_name, np.mean(general_quality_metric[:, 1]),
        np.mean(general_inference_time[:, 1]) / 60000,  # ms -> min
    )
    return summary_line


def set_common_reproducibility():
    random.seed(SEED_VAL)
    np.random.seed(SEED_VAL)


def set_pytorch_env():
    set_common_reproducibility()
    torch.manual_seed(SEED_VAL)
    torch.set_printoptions(precision=10)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(SEED_VAL)
        # disable the cuDNN autotuner and force deterministic kernel selection
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True


def set_tf_env(is_use_gpu=True):
    set_common_reproducibility()
    tf.random.set_seed(SEED_VAL)
    os.environ["TF_DETERMINISTIC_OPS"] = "1"

    gpu_devices = tf.config.list_physical_devices("GPU")
    if gpu_devices and is_use_gpu:
        # restrict TF to the first GPU and allocate its memory on demand
        tf.config.experimental.set_visible_devices(gpu_devices[0], "GPU")
        tf.config.experimental.set_memory_growth(gpu_devices[0], True)
        os.environ["TF_USE_CUDNN"] = "1"
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"


def str_bool(input_val):
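    """Parse a boolean-like CLI string; meant to be used as an argparse
    ``type=`` callable, e.g. ``--test yes`` or ``--test 0``.
    """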
    if input_val.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif input_val.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value was expected')


def get_formatted_model_list(model_list):
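    """Return a help string listing the available models, one per line."""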
    note_line = 'Please choose a model from the list below:\n'
    indent = ' ' * (len(note_line) - 2)
    return note_line + ''.join('{}{} \n'.format(indent, model) for model in model_list)


def model_str(model_list):
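    """Return an argparse ``type=`` validator that only accepts names from
    ``model_list`` (matching is done on the lowercased input).
    """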
    def type_model_list(input_val):
        if input_val.lower() in model_list:
            return input_val.lower()
        else:
            raise argparse.ArgumentTypeError(
                'This model is currently unavailable for testing.\n' +
                get_formatted_model_list(model_list)
            )

    return type_model_list


def get_test_module(test_module_name, test_module_path):
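    """Dynamically import a test module from an explicit file path.

    Example (hypothetical path): get_test_module("test_resnet", "tests/test_resnet.py")
    """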
    module_spec = importlib.util.spec_from_file_location(test_module_name, test_module_path)
    test_module = importlib.util.module_from_spec(module_spec)
    module_spec.loader.exec_module(test_module)
    return test_module


def create_parser():
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        "--test",
        type=str_bool,
        help="Define whether you'd like to run the model with OpenCV for testing.",
        default=False
    )
    parser.add_argument(
        "--default_img_preprocess",
        type=str_bool,
        help="Define whether you'd like to preprocess the input image with the predefined"
             " PyTorch or TF functions for the model test with OpenCV.",
        default=False
    )
    parser.add_argument(
        "--evaluate",
        type=str_bool,
        help="Define whether you'd like to run evaluation of the models (e.g. TF vs. OpenCV networks).",
        default=True
    )
    return parser


def create_extended_parser(model_list):
    parser = create_parser()
    parser.add_argument(
        "--model_name",
        type=model_str(model_list=model_list),
        help="\nDefine the model name to test.\n" +
             get_formatted_model_list(model_list),
        required=True
    )
    return parser
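

# Minimal usage sketch (the model names below are hypothetical; the actual
# runner scripts in this directory build their own model lists):
#
#     parser = create_extended_parser(["resnet50", "mobilenet"])
#     args = parser.parse_args()
#     set_pytorch_env()
#     model_paths = get_full_model_path(DNN_LIB, args.model_name + ".onnx")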