File: qconfig.py

import torch
from torch.ao.quantization.qconfig import QConfig
from torch.ao.quantization import MinMaxObserver
from torch.ao.quantization.fake_quantize import FakeQuantize
from torch.ao.quantization.experimental.fake_quantize import APoTFakeQuantize

"""
Default symmetric fake_quant for activations.
"""
default_symmetric_fake_quant = FakeQuantize.with_args(observer=MinMaxObserver,
                                                      qscheme=torch.per_tensor_symmetric,
                                                      dtype=torch.quint8)

"""
Default symmetric fake_quant for weights.
"""
default_weight_symmetric_fake_quant = FakeQuantize.with_args(observer=MinMaxObserver,
                                                             qscheme=torch.per_tensor_symmetric,
                                                             dtype=torch.qint8)
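
# Note: FakeQuantize.with_args(...) does not construct a module. It returns a
# factory (a partial) that QConfig calls later to build the actual FakeQuantize
# instance, e.g. (illustrative only, not part of the upstream module):
#
#     act_fq = default_symmetric_fake_quant()   # a FakeQuantize module
#     assert isinstance(act_fq, FakeQuantize)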

# uniform 8-bit activation and weight
uniform_qconfig_8bit = QConfig(activation=default_symmetric_fake_quant,
                               weight=default_weight_symmetric_fake_quant)

# uniform 8-bit activation, APoT weight, b=8 k=2
apot_weight_qconfig_8bit = QConfig(activation=default_symmetric_fake_quant,
                                   weight=APoTFakeQuantize.with_args(b=8, k=2, dtype=torch.qint8))

# APoT activation and weight, b=8 k=2
apot_qconfig_8bit = QConfig(activation=APoTFakeQuantize.with_args(b=8, k=2, dtype=torch.quint8),
                            weight=APoTFakeQuantize.with_args(b=8, k=2, dtype=torch.qint8))

# uniform 4-bit activation and weight (quant range restricted to [0, 15])
uniform_qconfig_4bit = QConfig(activation=default_symmetric_fake_quant.with_args(quant_min=0,
                                                                                 quant_max=15),
                               weight=default_weight_symmetric_fake_quant.with_args(quant_min=0,
                                                                                    quant_max=15))

# uniform 4-bit activation, APoT weight, b=4 k=2
apot_weight_qconfig_4bit = QConfig(activation=default_symmetric_fake_quant.with_args(quant_min=0,
                                                                                     quant_max=15),
                                   weight=APoTFakeQuantize.with_args(b=4, k=2, dtype=torch.qint8))

# APoT activation and weight, b=4 k=2
apot_qconfig_4bit = QConfig(activation=APoTFakeQuantize.with_args(b=4, k=2, dtype=torch.quint8),
                            weight=APoTFakeQuantize.with_args(b=4, k=2, dtype=torch.qint8))
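
# ---------------------------------------------------------------------------
# Usage sketch (illustrative; not part of the upstream module): one way these
# qconfigs plug into the eager-mode QAT flow. Attach a qconfig to a model,
# let prepare_qat insert the fake-quant modules, then run a forward pass. The
# toy model below is a hypothetical stand-in.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch.nn as nn
    from torch.ao.quantization import prepare_qat

    # Hypothetical toy model; any float nn.Module works the same way.
    model = nn.Sequential(nn.Linear(16, 8), nn.ReLU())
    model.train()  # prepare_qat requires training mode

    # Attach a qconfig defined above; prepare_qat swaps nn.Linear for its QAT
    # counterpart, which fake-quantizes weights and activations in forward.
    model.qconfig = uniform_qconfig_8bit
    prepared = prepare_qat(model)

    # Fake quantization now runs inside the forward pass during training.
    out = prepared(torch.randn(2, 16))
    print(out.shape)  # torch.Size([2, 8])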