File: qconfig.py

import torch
from torch.ao.quantization import MinMaxObserver
from torch.ao.quantization.experimental.fake_quantize import APoTFakeQuantize
from torch.ao.quantization.fake_quantize import FakeQuantize
from torch.ao.quantization.qconfig import QConfig


"""
Default symmetric fake_quant for activations.
"""
default_symmetric_fake_quant = FakeQuantize.with_args(
    observer=MinMaxObserver, qscheme=torch.per_tensor_symmetric, dtype=torch.quint8
)

"""
Default symmetric fake_quant for weights.
"""
default_weight_symmetric_fake_quant = FakeQuantize.with_args(
    observer=MinMaxObserver, qscheme=torch.per_tensor_symmetric, dtype=torch.qint8
)
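
# Note: `with_args` returns a constructor partial rather than a module instance.
# Calling the partial builds the fake-quantize module, and chaining `.with_args(...)`
# layers on extra keyword arguments (the 4-bit configs below use this to narrow
# quant_min / quant_max).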

# uniform activation and weight, 8-bit
uniform_qconfig_8bit = QConfig(
    activation=default_symmetric_fake_quant,
    weight=default_weight_symmetric_fake_quant,
)

# uniform activation, APoT weight, b=8 k=2
apot_weight_qconfig_8bit = QConfig(
    activation=default_symmetric_fake_quant,
    weight=APoTFakeQuantize.with_args(b=8, k=2, dtype=torch.qint8),
)

# APoT activation and uniform weight, b=8 k=2
apot_qconfig_8bit = QConfig(
    activation=APoTFakeQuantize.with_args(b=8, k=2, dtype=torch.quint8),
    weight=APoTFakeQuantize.with_args(b=8, k=2, dtype=torch.qint8),
)

# uniform activation and weight, 4-bit
uniform_qconfig_4bit = QConfig(
    activation=default_symmetric_fake_quant.with_args(quant_min=0, quant_max=15),
    weight=default_weight_symmetric_fake_quant.with_args(quant_min=0, quant_max=15),
)

# uniform activation, APoT weight, b=4 k=2
apot_weight_qconfig_4bit = QConfig(
    activation=default_symmetric_fake_quant.with_args(quant_min=0, quant_max=15),
    weight=APoTFakeQuantize.with_args(b=4, k=2, dtype=torch.qint8),
)

# APoT activation and uniform weight, b=4 k=2
apot_qconfig_4bit = QConfig(
    activation=APoTFakeQuantize.with_args(b=4, k=2, dtype=torch.quint8),
    weight=APoTFakeQuantize.with_args(b=4, k=2, dtype=torch.qint8),
)
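
# Illustrative usage sketch, assuming the constructor-partial semantics noted
# above; the input tensor is hypothetical and used only for demonstration.
# Each QConfig field holds a constructor partial: calling it instantiates the
# fake-quantize module that a quantization-aware training flow would attach.
if __name__ == "__main__":
    act_fake_quant = uniform_qconfig_8bit.activation()  # FakeQuantize over MinMaxObserver, quint8
    weight_fake_quant = apot_qconfig_8bit.weight()  # APoTFakeQuantize with b=8, k=2, qint8

    example_input = torch.randn(2, 4)
    # Observe the example tensor, then fake-quantize it to simulate 8-bit rounding error.
    simulated = act_fake_quant(example_input)
    print(simulated.shape, type(weight_fake_quant).__name__)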