File: sparse_vector_concat.mlir

// RUN: mlir-opt %s --sparse-compiler="enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true"

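// Dense-then-compressed levels with the identity dimension-to-level map (CSR-style).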
#MAT_D_C = #sparse_tensor.encoding<{
  lvlTypes = ["dense", "compressed"]
}>

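// Doubly compressed levels with a transposed dimension-to-level map (j outermost).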
#MAT_C_C_P = #sparse_tensor.encoding<{
  lvlTypes = [ "compressed", "compressed" ],
  dimToLvl = affine_map<(i,j) -> (j,i)>
}>

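// Compressed-then-dense levels with a transposed dimension-to-level map (j outermost).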
#MAT_C_D_P = #sparse_tensor.encoding<{
  lvlTypes = [ "compressed", "dense" ],
  dimToLvl = affine_map<(i,j) -> (j,i)>
}>

//
// Ensures that only the last loop is vectorized
// (vectorizing the others would crash).
//
// CHECK-LABEL: llvm.func @foo
// CHECK:       llvm.intr.masked.load
// CHECK:       llvm.intr.masked.scatter
//
func.func @foo(%arg0: tensor<2x4xf64, #MAT_C_C_P>,
               %arg1: tensor<3x4xf64, #MAT_C_D_P>,
               %arg2: tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64> {
  %0 = sparse_tensor.concatenate %arg0, %arg1, %arg2 {dimension = 0 : index}
       : tensor<2x4xf64, #MAT_C_C_P>, tensor<3x4xf64, #MAT_C_D_P>, tensor<4x4xf64, #MAT_D_C> to tensor<9x4xf64>
  return %0 : tensor<9x4xf64>
}