File: __init__.py

package info (click to toggle)
pytorch 1.7.1-7
  • links: PTS, VCS
  • area: main
  • in suites: bullseye
  • size: 80,340 kB
  • sloc: cpp: 670,830; python: 343,991; ansic: 67,845; asm: 5,503; sh: 2,924; java: 2,888; xml: 266; makefile: 244; ruby: 148; yacc: 144; objc: 51; lex: 44
file content (63 lines) | stat: -rw-r--r-- 2,432 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
from .quantize import *
from .observer import *
from .qconfig import *
from .fake_quantize import *
from .fuse_modules import fuse_modules
from .stubs import *
from .quant_type import *
from .quantize_jit import *
from .quantize_fx import *
from .quantization_mappings import *
from .fuser_method_mappings import *
from .custom_module_class_mappings import *

def default_eval_fn(model, calib_data):
    r"""Run ``model`` over every calibration sample, discarding the outputs.

    Acts as the default calibration routine for quantization: ``calib_data``
    is an iterable of ``(input, target)`` pairs (e.g. a
    torch.utils.data.Dataset or a list of input Tensors paired with labels).
    Only the input half of each pair is fed to the model; targets and model
    outputs are ignored, since calibration only needs the forward pass to
    populate observers.
    """
    for sample, _unused_target in calib_data:
        model(sample)

# NOTE(review): this list was previously bound to the misspelled name
# `_all__`, which Python treats as an ordinary (dead) variable — so the
# export list had no effect and `from torch.quantization import *` exported
# every public name pulled in by the star-imports above.  Renaming it to
# `__all__` makes the list authoritative: every entry below must name a real
# attribute of this module, or star-import raises AttributeError.  Entries
# such as 'WeightObserver' and 'observer' should be verified against the
# submodules before relying on star-imports — TODO confirm.
__all__ = [
    'QuantWrapper', 'QuantStub', 'DeQuantStub',
    # Top level API for eager mode quantization
    'quantize', 'quantize_dynamic', 'quantize_qat',
    'prepare', 'convert', 'prepare_qat',
    # Top level API for graph mode quantization on TorchScript
    'quantize_jit', 'quantize_dynamic_jit',
    # Top level API for graph mode quantization on GraphModule(torch._fx)
    'fuse_fx', 'quantize_fx',  # TODO: add quantize_dynamic_fx
    'prepare_fx', 'prepare_dynamic_fx', 'convert_fx',
    'QuantType',  # quantization type
    # custom module APIs
    'register_static_quant_module_mapping',
    'get_static_quant_module_mappings', 'get_static_quant_module_class',
    'register_dynamic_quant_module_mapping',
    'get_dynamic_quant_module_mappings',
    'register_qat_module_mapping',
    'get_qat_module_mappings',
    'get_qconfig_propagation_list',
    'get_compare_output_module_list',
    'register_quantized_operator_mapping', 'get_quantized_operator',
    'register_fuser_method', 'get_fuser_method',
    'register_observed_custom_module_mapping',
    'get_observed_custom_module_class',
    # was 'register_quantized_custom_mdoule_mapping' — typo ('mdoule') fixed
    # to match the function name exported by custom_module_class_mappings
    'register_quantized_custom_module_mapping',
    'get_quantized_custom_module_class',
    'is_custom_module_class',
    'is_observed_custom_module',
    # Sub functions for `prepare` and `swap_module`
    'propagate_qconfig_', 'add_quant_dequant', 'add_observer_', 'swap_module',
    'default_eval_fn', 'get_observer_dict',
    'register_activation_post_process_hook',
    # Observers
    'ObserverBase', 'WeightObserver', 'observer', 'default_observer',
    'default_weight_observer',
    # QConfig
    'QConfig', 'default_qconfig', 'default_dynamic_qconfig', 'float16_dynamic_qconfig',
    # QAT utilities ('prepare_qat' and 'quantize_qat' already listed above;
    # duplicates removed)
    'default_qat_qconfig',
    # module transformations
    'fuse_modules',
]