File: reshape_linearization_fusion.mlir

package info (click to toggle)
llvm-toolchain-13 1%3A13.0.1-11
  • links: PTS, VCS
  • area: main
  • in suites: bookworm
  • size: 1,418,840 kB
  • sloc: cpp: 5,290,826; ansic: 996,570; asm: 544,593; python: 188,212; objc: 72,027; lisp: 30,291; f90: 25,395; sh: 24,898; javascript: 9,780; pascal: 9,398; perl: 7,484; ml: 5,432; awk: 3,523; makefile: 2,913; xml: 953; cs: 573; fortran: 539
file content (201 lines) | stat: -rw-r--r-- 8,866 bytes parent folder | download | duplicates (3)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
// RUN: mlir-opt -split-input-file -linalg-fold-reshape-ops-by-linearization %s | FileCheck %s

// Producer fusion: the tensor_expand_shape feeding the generic op is folded
// into the op's *input* indexing map, linearizing the expanded dims back onto
// the source dim as d1 * 4 + d2 (see MAP3 below), so the generic reads the
// un-expanded %arg0 directly. The expand_shape survives only to produce the
// expanded-type out operand. The linalg.index / index_cast checks verify that
// index ops on dim 0 (untouched by the reshape) are preserved by the fusion.
#map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
func @generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x?xi32>)
  -> tensor<?x?x4x?xi32> {
  %0 = linalg.tensor_expand_shape %arg0 [[0], [1, 2], [3]] :
    tensor<?x?x?xi32> into tensor<?x?x4x?xi32>
  %1 = linalg.generic {
    indexing_maps = [#map0, #map0],
    iterator_types = ["parallel", "parallel", "parallel", "parallel"] }
    ins(%0 : tensor<?x?x4x?xi32>)
    outs(%0 : tensor<?x?x4x?xi32>) {
  ^bb0(%arg6: i32, %arg7 : i32):       // no predecessors
    %idx = linalg.index 0 : index
    %2 = index_cast %idx : index to i32
    %3 = addi %arg6, %2 : i32
    linalg.yield %3 : i32
  } -> tensor<?x?x4x?xi32>
  return %1 : tensor<?x?x4x?xi32>
}
//   CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 4 + d2, d3)>
//   CHECK-DAG: #[[MAP4:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
//       CHECK: func @generic_op_reshape_producer_fusion
//  CHECK-SAME:   %[[ARG0:.+]]: tensor<?x?x?xi32>
//       CHECK:   %[[T0:.+]] = linalg.tensor_expand_shape %[[ARG0]]
//  CHECK-SAME:     [0], [1, 2], [3]
//       CHECK:   linalg.generic
//  CHECK-SAME:     indexing_maps = [#[[MAP3]], #[[MAP4]]]
//  CHECK-SAME:     ins(%[[ARG0]] : tensor<?x?x?xi32>)
//  CHECK-SAME:     outs(%[[T0]] : tensor<?x?x4x?xi32>)
//       CHECK:   %[[IDX:.+]] = linalg.index 0 : index
//  CHECK-NEXT:   %[[IDX_CASTED:.+]] = index_cast %[[IDX]] : index to i32

// -----

// Consumer fusion: the tensor_collapse_shape on the generic's result is
// folded into the op's *output* indexing map, linearizing the collapsed group
// [1, 2, 3] (static trailing sizes 4 and 5) as d1 * 20 + d2 * 5 + d3 (see
// MAP3 below). The collapse_shape moves to the out operand and disappears
// from the result (CHECK-NOT). linalg.index on dim 0 must be preserved.
#map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
func @generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?x4x5xi32>)
  -> tensor<?x?xi32> {
  %0 = linalg.generic {
    indexing_maps = [#map0, #map0],
    iterator_types = ["parallel", "parallel", "parallel", "parallel"] }
    ins(%arg0 : tensor<?x?x4x5xi32>) outs(%arg0 : tensor<?x?x4x5xi32>) {
  ^bb0(%arg6: i32, %arg7: i32):       // no predecessors
    %idx = linalg.index 0 : index
    %2 = index_cast %idx : index to i32
    %3 = addi %arg6, %2 : i32
    linalg.yield %3 : i32
  } -> tensor<?x?x4x5xi32>
  %1 = linalg.tensor_collapse_shape %0 [[0], [1, 2, 3]] :
    tensor<?x?x4x5xi32> into tensor<?x?xi32>
  return %1 : tensor<?x?xi32>
}
//   CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
//   CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 20 + d2 * 5 + d3)>
//       CHECK: func @generic_op_reshape_consumer_fusion
//  CHECK-SAME:   %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?x4x5xi32>
//       CHECK:   %[[T0:.+]] = linalg.tensor_collapse_shape %[[ARG0]]
//  CHECK-SAME:     [0], [1, 2, 3]
//       CHECK:   linalg.generic
//  CHECK-SAME:     indexing_maps = [#[[MAP2]], #[[MAP3]]]
//  CHECK-SAME:     outs(%[[T0]] : tensor<?x?xi32>)
//       CHECK:   %[[IDX:.+]] = linalg.index 0 : index
//  CHECK-NEXT:   %[[IDX_CASTED:.+]] = index_cast %[[IDX]] : index to i32
//   CHECK-NOT:   linalg.tensor_collapse_shape

// -----

// (0, 2, 1)-permuted producer fusion: the expand_shape's dims (d1, d2) are
// read permuted by the generic's input map (d0, d2, d1); after folding the
// reshape, the input indexes %arg0 through the linearized map
// (d0, d1 + d2 * 7) (see MAP0 below) and the expand_shape is erased.
// NOTE(review): fixed the typo "permultation" -> "permutation" in the test
// name, consistently in the func definition and its CHECK line.
#map2 = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
#map3 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
func @generic_op_021_permutation_reshape_producer_fusion(%arg0 : tensor<3x35xf32>) -> tensor<3x7x5xf32> {
  %0 = linalg.tensor_expand_shape %arg0 [[0], [1, 2]]
      : tensor<3x35xf32> into tensor<3x5x7xf32>
  %1 = linalg.init_tensor [3, 7, 5] : tensor<3x7x5xf32>
  %2 = linalg.generic
    {indexing_maps = [#map2, #map3],
     iterator_types = ["parallel", "parallel", "parallel"]}
    ins(%0 : tensor<3x5x7xf32>) outs(%1 : tensor<3x7x5xf32>) {
    ^bb0(%arg2: f32, %arg3 : f32):  // no predecessors
      linalg.yield %arg2 : f32
    } -> tensor<3x7x5xf32>
    return %2 : tensor<3x7x5xf32>
}

//   CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2) -> (d0, d1 + d2 * 7)>
//   CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
//       CHECK: func @generic_op_021_permutation_reshape_producer_fusion
//   CHECK-NOT:   linalg.tensor_expand_shape
//       CHECK:   linalg.generic
//  CHECK-SAME:     indexing_maps = [#[[MAP0]], #[[MAP1]]]

// -----

// (1, 2, 0)-permuted producer fusion: the generic reads the expanded tensor
// through the permuted map (d1, d0, d2); folding the expand_shape linearizes
// the expanded dims (d0, d2) onto the 35-sized source dim as d0 * 7 + d2
// (see MAP0 below), and the expand_shape is erased (CHECK-NOT).
#map2 = affine_map<(d0, d1, d2) -> (d1, d0, d2)>
#map3 = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
func @generic_op_120_permutation_reshape_producer_fusion(%arg0 : tensor<3x35xf32>) -> tensor<5x7x3xf32> {
  %0 = linalg.tensor_expand_shape %arg0 [[0], [1, 2]]
      : tensor<3x35xf32> into tensor<3x5x7xf32>
  %1 = linalg.init_tensor [5, 7, 3] : tensor<5x7x3xf32>
  %2 = linalg.generic
    {indexing_maps = [#map2, #map3],
     iterator_types = ["parallel", "parallel", "parallel"]}
    ins(%0 : tensor<3x5x7xf32>) outs(%1 : tensor<5x7x3xf32>) {
    ^bb0(%arg2: f32, %arg3: f32):  // no predecessors
      linalg.yield %arg2 : f32
    } -> tensor<5x7x3xf32>
    return %2 : tensor<5x7x3xf32>
}

//   CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2) -> (d1, d0 * 7 + d2)>
//   CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
//       CHECK: func @generic_op_120_permutation_reshape_producer_fusion
//   CHECK-NOT:   linalg.tensor_expand_shape
//       CHECK:   linalg.generic
//  CHECK-SAME:     indexing_maps = [#[[MAP0]], #[[MAP1]]]

// -----

// (1, 0, 2)-permuted producer fusion: the generic reads the expanded tensor
// through (d1, d0, d2); folding the expand_shape linearizes (d0, d2) onto
// the 35-sized source dim as d0 * 7 + d2 (see MAP0 below), and the
// expand_shape is erased (CHECK-NOT).
// NOTE(review): fixed the typo "permultation" -> "permutation" in the test
// name (func definition + CHECK line) and dropped the #map0/#map1 aliases,
// which were never referenced in this split-input section.
#map2 = affine_map<(d0, d1, d2) -> (d1, d0, d2)>
#map3 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
func @generic_op_102_permutation_reshape_producer_fusion(%arg0 : tensor<3x35xf32>) -> tensor<5x3x7xf32> {
  %0 = linalg.tensor_expand_shape %arg0 [[0], [1, 2]]
      : tensor<3x35xf32> into tensor<3x5x7xf32>
  %1 = linalg.init_tensor [5, 3, 7] : tensor<5x3x7xf32>
  %2 = linalg.generic
    {indexing_maps = [#map2, #map3],
     iterator_types = ["parallel", "parallel", "parallel"]}
    ins(%0 : tensor<3x5x7xf32>) outs(%1 : tensor<5x3x7xf32>) {
    ^bb0(%arg2: f32, %arg3: f32):  // no predecessors
      linalg.yield %arg2 : f32
    } -> tensor<5x3x7xf32>
    return %2 : tensor<5x3x7xf32>
}


//   CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2) -> (d1, d0 * 7 + d2)>
//   CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
//       CHECK: func @generic_op_102_permutation_reshape_producer_fusion
//   CHECK-NOT:   linalg.tensor_expand_shape
//       CHECK:   linalg.generic
//  CHECK-SAME:     indexing_maps = [#[[MAP0]], #[[MAP1]]]

// -----

// (1, 0, 2)-permuted consumer fusion: the collapse_shape on the generic's
// result is folded into the *output* indexing map, composing the permutation
// (d1, d0, d2) with the linearization of the collapsed [1, 2] group (sizes
// 3 and 7) to give (d1, d0 * 7 + d2) (see MAP3 below). The collapse_shape
// moves onto the init_tensor out operand instead.
// NOTE(review): fixed the typo "permultation" -> "permutation" in the test
// name (func definition + CHECK line) and dropped the #map2/#map3 aliases,
// which were never referenced in this split-input section.
#map0 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d1, d0, d2)>
func @generic_op_102_permutation_reshape_consumer_fusion(%arg0 : tensor<3x5x7xf32>) -> tensor<5x21xf32> {
  %0 = linalg.init_tensor [5, 3, 7] : tensor<5x3x7xf32>
  %1 = linalg.generic
    {indexing_maps = [#map0, #map1],
     iterator_types = ["parallel", "parallel", "parallel"]}
    ins(%arg0 : tensor<3x5x7xf32>) outs(%0 : tensor<5x3x7xf32>) {
    ^bb0(%arg2: f32, %arg3 : f32):  // no predecessors
      linalg.yield %arg2 : f32
  } -> tensor<5x3x7xf32>
  %2 = linalg.tensor_collapse_shape %1 [[0], [1, 2]]
      : tensor<5x3x7xf32> into tensor<5x21xf32>
  return %2 : tensor<5x21xf32>
}
//   CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
//   CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2) -> (d1, d0 * 7 + d2)>
//       CHECK: func @generic_op_102_permutation_reshape_consumer_fusion
//  CHECK-SAME:   %[[ARG0:.+]]: tensor<3x5x7xf32>
//       CHECK:   %[[T0:.+]] = linalg.init_tensor [5, 3, 7]
//       CHECK:   %[[T1:.+]] = linalg.tensor_collapse_shape %[[T0]]
//  CHECK-SAME:     [0], [1, 2]
//       CHECK:   linalg.generic
//  CHECK-SAME:     indexing_maps = [#[[MAP2]], #[[MAP3]]]
//  CHECK-SAME:     ins(%[[ARG0]] : tensor<3x5x7xf32>)
//  CHECK-SAME:     outs(%[[T1]] : tensor<5x21xf32>)

// -----

// Negative test: the collapse_shape on the generic's result must NOT be
// folded away. The collapsed group [1, 2, 3] mixes multiple dynamic sizes
// (?x?x5), so — presumably because no static linearized stride expression
// can be built for it (TODO confirm against the pass's legality check) —
// the generic is left unchanged and the collapse_shape remains on its
// result, which is what the CHECK lines pin down.
#map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
func @generic_op_reshape_consumer_nofusion(%arg0 : tensor<?x?x?x5xf32>,
                                           %arg1 : tensor<?x?x?x5xf32>) ->
                                           tensor<?x?xf32>
{
  %0 = linalg.generic {
     indexing_maps = [#map0, #map0, #map0],
     iterator_types = ["parallel", "parallel", "parallel", "parallel"]}
      ins(%arg0, %arg1 : tensor<?x?x?x5xf32>, tensor<?x?x?x5xf32>)
      outs(%arg0 : tensor<?x?x?x5xf32>) {
    ^bb0(%arg3: f32, %arg4: f32, %arg5: f32):       // no predecessors
      %1 = mulf %arg3, %arg4 : f32
      linalg.yield %1 : f32
  } -> tensor<?x?x?x5xf32>
  %1 = linalg.tensor_collapse_shape %0 [[0], [1, 2, 3]] :
    tensor<?x?x?x5xf32> into tensor<?x?xf32>
  return %1 : tensor<?x?xf32>
}
// CHECK-LABEL: func @generic_op_reshape_consumer_nofusion
//  CHECK-SAME:   %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?x?x5xf32>
//  CHECK-SAME:   %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?x?x5xf32>
//       CHECK:   %[[NOFUSE:.+]] = linalg.generic
//  CHECK-SAME:     ins(%[[ARG0]], %[[ARG1]]
//       CHECK:   %[[RESULT:.+]] = linalg.tensor_collapse_shape %[[NOFUSE]]
//       CHECK:   return %[[RESULT]]