File: torch_linalg.mlir

package info (click to toggle)
llvm-toolchain-19 1%3A19.1.7-3
  • links: PTS, VCS
  • area: main
  • in suites: trixie
  • size: 1,998,520 kB
  • sloc: cpp: 6,951,680; ansic: 1,486,157; asm: 913,598; python: 232,024; f90: 80,126; objc: 75,281; lisp: 37,276; pascal: 16,990; sh: 10,009; ml: 5,058; perl: 4,724; awk: 3,523; makefile: 3,167; javascript: 2,504; xml: 892; fortran: 664; cs: 573
file content (63 lines) | stat: -rw-r--r-- 2,611 bytes parent folder | download | duplicates (9)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
// RUN: mlir-opt %s --sparse-assembler                 | FileCheck %s --check-prefix=CHECK-HI
// RUN: mlir-opt %s --sparse-assembler \
// RUN:             --inline                           | FileCheck %s --check-prefix=CHECK-INL
// RUN: mlir-opt %s --sparse-assembler \
// RUN:             --linalg-generalize-named-ops \
// RUN:             --linalg-fuse-elementwise-ops \
// RUN:             --sparsification-and-bufferization | FileCheck %s --check-prefix=CHECK-MID
// RUN: mlir-opt %s --sparse-assembler \
// RUN:             --sparsifier                       | FileCheck %s --check-prefix=CHECK-LOW

//
// An example of a module generated by torch-mlir with a sparse tensor from
// torch.sparse. The MLIR sparsifier should be able to provide the external
// API through a wrapper method (spiface and ciface). Various passes should
// compose without trouble.
//

// CHECK-HI-LABEL: func.func @main
// CHECK-HI:         sparse_tensor.assemble
// CHECK-HI:         call @_internal_main
// CHECK-HI:         return
// CHECK-HI:       func.func private @_internal_main
// CHECK-HI:         linalg.matmul
// CHECK-HI:         return

// CHECK-INL-LABEL: func.func @main
// CHECK-INL:         sparse_tensor.assemble
// CHECK-INL:         linalg.matmul
// CHECK-INL:         return
// CHECK-INL-NOT:   func.func private @_internal_main

// CHECK-MID-LABEL: func.func @main
// CHECK-MID:          memref.load
// CHECK-MID:          call @_internal_main
// CHECK-MID:          return
// CHECK-MID:       func.func private @_internal_main
// CHECK-MID:          scf.for
// CHECK-MID:            scf.for
// CHECK-MID:          return

// CHECK-LOW-LABEL: llvm.func @main
// CHECK-LOW:         llvm.call @_internal_main
// CHECK-LOW:         llvm.return
// CHECK-LOW:       llvm.func @_mlir_ciface_main
// CHECK-LOW:         llvm.call @main
// CHECK-LOW:         llvm.return
// CHECK-LOW:       llvm.func @_internal_main
// CHECK-LOW-SAME:    {sym_visibility = "private"}
// CHECK-LOW:         llvm.return

// CSC-style sparse encoding: outer dimension d0 stored dense, inner
// dimension d1 compressed.
#csc = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
module {
  // Matmul of a sparse (CSC-encoded) 64x64 operand with a dense 64x64
  // operand into a dense 64x64 result, in the shape torch-mlir emits for
  // torch.sparse inputs. The llvm.emit_c_interface attribute requests the
  // additional _mlir_ciface_main wrapper that the lowered-IR run verifies.
  func.func @main(%arg0: tensor<64x64xf32, #csc>,
                  %arg1: tensor<64x64xf32>) -> tensor<64x64xf32> attributes {llvm.emit_c_interface} {
    %cst = arith.constant 0.000000e+00 : f32
    // Zero-fill a fresh 64x64 output tensor so matmul accumulates from 0.
    %0 = tensor.empty() : tensor<64x64xf32>
    %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<64x64xf32>) -> tensor<64x64xf32>
    %2 = linalg.matmul
      ins(%arg0, %arg1 : tensor<64x64xf32, #csc>, tensor<64x64xf32>)
      outs(%1 : tensor<64x64xf32>) -> tensor<64x64xf32>
    return %2 : tensor<64x64xf32>
  }
}