File: gelu_test.py

"""Microbenchmarks for the GELU operator."""

import operator_benchmark as op_bench
import torch

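# Sweep batch size (N), channels (C), and spatial dims (H, W) on CPU;
# cross_product_configs emits one benchmark case per combination (8 here).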
gelu_configs_long = op_bench.cross_product_configs(
    N=[1, 4],
    C=[3],
    H=[16, 256],
    W=[16, 256],
    device=['cpu'],
    tags=['long']
)


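# One GeluBenchmark instance is built per config. Note that `init` (not
# `__init__`) is the TorchBenchmarkBase hook for setting up inputs.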
class GeluBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, N, C, H, W, device):
        self.inputs = {
            "input": torch.rand(N, C, H, W, device=device)
        }

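    # Only forward() is timed; the tensor allocation in init() stays outside
    # the measured region. GELU(x) = x * Phi(x), with Phi the standard
    # normal CDF.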
    def forward(self, input):
        return torch.nn.functional.gelu(input)


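# Register a PyTorch benchmark case for every entry in gelu_configs_long.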
op_bench.generate_pt_test(gelu_configs_long, GeluBenchmark)


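# Running this file directly hands control to the operator_benchmark runner.
# These configs are tagged 'long', which the runner skips by default, so pass
# its tag filter (e.g. --tag-filter long; flag spelling may vary by version).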
if __name__ == "__main__":
    op_bench.benchmark_runner.main()