File: semi_ring.mlir

Package: llvm-toolchain-18 1:18.1.8-18

// RUN: mlir-opt %s --sparse-reinterpret-map -sparsification | FileCheck %s
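
// The pipeline first reinterprets the sparse tensor's dimension-to-level map
// (--sparse-reinterpret-map) and then runs sparsification, which lowers the
// linalg.generic below into explicit loops over the stored entries.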

#SM = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
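// The #SM map above is a CSR-style layout: level 0 (rows) is stored densely
// and level 1 (columns) is compressed, so only the nonzero entries of each
// row are materialized in the positions/coordinates/values arrays.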

#trait = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)> // A
  ],
  iterator_types = ["parallel", "parallel"],
  doc = "A(i,j) += 2.0 where A(i,j) != 0"
}
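
// A is the only operand and appears as "outs" in the linalg.generic below,
// so the same tensor is both read and updated, which is what makes an
// in-place rewrite possible.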

module {
  // Example of a semi-ring operation that only adds a
  // constant to the stored values (something that would
  // typically not sparsify, since it would densify the
  // implicit zeros in the normal case). The sparse
  // compiler should see that this is a "simply dynamic"
  // operation, so the values can be changed in-place.
  //
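  // Expected code after sparsification: an outer scf.for over the 8 dense
  // rows, an inner scf.for over the compressed positions pos[i]..pos[i+1],
  // a load/addf/store directly on the values buffer, and finally a
  // sparse_tensor.load that returns the same (updated) tensor.
  //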
  // CHECK-LABEL: func.func @add_only_where_nonzero(
  // CHECK-SAME:    %[[VAL_0:.*]]: tensor<8x8xf64, #sparse{{[0-9]*}}>) -> tensor<8x8xf64, #sparse{{[0-9]*}}> {
  // CHECK-DAG:     %[[VAL_1:.*]] = arith.constant 8 : index
  // CHECK-DAG:     %[[VAL_2:.*]] = arith.constant 0 : index
  // CHECK-DAG:     %[[VAL_3:.*]] = arith.constant 1 : index
  // CHECK-DAG:     %[[VAL_4:.*]] = arith.constant 2.000000e+00 : f64
  // CHECK-DAG:     %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref<?xindex>
  // CHECK-DAG:     %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref<?xf64>
  // CHECK:         scf.for %[[VAL_7:.*]] = %[[VAL_2]] to %[[VAL_1]] step %[[VAL_3]] {
  // CHECK:           %[[VAL_8:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_7]]] : memref<?xindex>
  // CHECK:           %[[VAL_9:.*]] = arith.addi %[[VAL_7]], %[[VAL_3]] : index
  // CHECK:           %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_9]]] : memref<?xindex>
  // CHECK:           scf.for %[[VAL_11:.*]] = %[[VAL_8]] to %[[VAL_10]] step %[[VAL_3]] {
  // CHECK:             %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref<?xf64>
  // CHECK:             %[[VAL_13:.*]] = arith.addf %[[VAL_12]], %[[VAL_4]] : f64
  // CHECK:             memref.store %[[VAL_13]], %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref<?xf64>
  // CHECK:           } {"Emitted from" = "linalg.generic"}
  // CHECK:         } {"Emitted from" = "linalg.generic"}
  // CHECK:         %[[VAL_14:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<8x8xf64, #sparse{{[0-9]*}}>
  // CHECK:         return %[[VAL_14]] : tensor<8x8xf64, #sparse{{[0-9]*}}>
  // CHECK:       }
  func.func @add_only_where_nonzero(%argA: tensor<8x8xf64, #SM>) -> tensor<8x8xf64, #SM> {
    %c = arith.constant 2.0 : f64
    %result = linalg.generic #trait
      outs(%argA: tensor<8x8xf64, #SM>) {
        ^bb(%a: f64):
           %u = sparse_tensor.unary %a : f64 to f64
             present={
                ^bb0(%p: f64):
                  %add = arith.addf %p, %c : f64
                  sparse_tensor.yield %add : f64
             }
             absent={}
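             // The empty absent region means the implicit zeros contribute
             // nothing to the output, so the sparsity pattern is preserved
             // and only the stored values are touched.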
           linalg.yield %u : f64
    } -> tensor<8x8xf64, #SM>
    return %result : tensor<8x8xf64, #SM>
  }
}