File: attribute.mlir — tree-sitter syntax-highlighting test fixture
Source package: swiftlang 6.0.3-2 (Debian: suites forky, sid, trixie; area main)
Original file: 117 lines, 4,299 bytes, mode -rw-r--r--
// Highlight fixture: a 1-D depthwise convolution assembled from
// arith.constant, tensor.empty, linalg.fill and
// linalg.depthwise_conv_1d_nwc_wcm, exercising function/type/keyword/
// attribute captures. The `// <-` and `// ^` comments below are
// column-anchored tree-sitter assertions on the immediately preceding
// code line — do not re-indent, reflow, or rename anything here.
func.func @depthwise_conv_1d_nwc_wcm(%input: tensor<1x12x8xf32>, %filter: tensor<3x8x8xf32>)
// <- function.builtin
//        ^ function
//                                   ^ variable.parameter
//                                           ^ type.builtin
//                                                               ^ variable.parameter
//                                                                        ^ type.builtin
  -> tensor<1x10x8x8xf32> {
// ^ operator
//   ^ type.builtin
  %zero = arith.constant 0.000000e+00 : f32
// ^ variable
//        ^ function.builtin
//                       ^ number
//                                      ^ type.builtin
  %init = tensor.empty() : tensor<1x10x8x8xf32>
// ^ variable
//        ^ function.builtin
//                         ^ type.builtin
  %fill = linalg.fill ins(%zero : f32) outs(%init : tensor<1x10x8x8xf32>) -> tensor<1x10x8x8xf32>
// ^ variable
//        ^ function.builtin
//                    ^ keyword
//                        ^ variable
//                                ^ type.builtin
//                                     ^ keyword
  %0 = linalg.depthwise_conv_1d_nwc_wcm {dilations = dense<1> : tensor<1xi64>,
// ^ variable
//     ^ function.builtin
//                                       ^ attribute
//                                                   ^ constant.builtin
    strides = dense<1> : tensor<1xi64>}
//            ^ constant.builtin
    ins(%input, %filter : tensor<1x12x8xf32>, tensor<3x8x8xf32>)
//      ^ variable.parameter
//              ^ variable.parameter
    outs(%fill : tensor<1x10x8x8xf32>) -> tensor<1x10x8x8xf32>
//       ^ variable
  return %0 : tensor<1x10x8x8xf32>
// ^ function.builtin
//       ^ variable
}

// Highlight fixture: arith ops carrying fastmath<...> flag attributes
// (single flag, `none`, and a full comma-separated flag list), asserting
// that the `fastmath<...>` span captures as `attribute`. Assertion
// comments are column-anchored to the preceding code line — do not
// re-indent or reflow.
func.func @fastmath(%arg0: f32, %arg1: f32) {
// <- function.builtin
//        ^ function
//                  ^ variable.parameter
//                         ^ type.builtin
//                              ^ variable.parameter
//                                     ^ type.builtin
  %5 = arith.negf %arg0 fastmath<fast> : f32
//     ^ function.builtin
//                      ^ attribute
  %6 = arith.addf %arg0, %arg1 fastmath<none> : f32
//     ^ function.builtin
//                             ^ attribute
  %8 = arith.mulf %arg0, %arg1 fastmath<reassoc,nnan,ninf,nsz,arcp,contract,afn> : f32
//     ^ function.builtin
//                             ^ attribute
  return
// ^ function.builtin
}

// Highlight fixture: top-level attribute aliases defining affine maps,
// asserting that both the `#mapN` alias name and the `affine_map`
// keyword capture as `attribute`. Used by @add_broadcast_mul_fusion
// below. Column-anchored assertions — do not reflow.
#map0 = affine_map<(d0, d1) -> (d0, d1)>
// <- attribute
//      ^ attribute
#map1 = affine_map<(d0, d1) -> (d0)>
// <- attribute
//      ^ attribute
#map2 = affine_map<(d0) -> (d0)>
// <- attribute
//      ^ attribute

// Highlight fixture: two linalg.generic ops (elementwise add, then a
// broadcast-and-multiply) over dynamically-shaped tensors, asserting
// captures for `#mapN` alias uses inside `indexing_maps` and for the
// `ins`/`outs` keywords. Assertion comments are column-anchored to the
// preceding code line — do not re-indent or reflow.
func.func @add_broadcast_mul_fusion(%arg0: tensor<?xf32>, %arg1 : tensor<?xf32>,
  %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
{
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %0 = tensor.dim %arg0, %c0 : tensor<?xf32>
  %1 = tensor.empty(%0) : tensor<?xf32>
  %2 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]}
//                                      ^ attribute
//                                             ^ attribute
//                                                    ^ attribute
      ins(%arg0, %arg1 : tensor<?xf32>, tensor<?xf32>)
//    ^ keyword
      outs(%1 : tensor<?xf32>) {
//    ^ keyword
    ^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
      %3 = arith.addf %arg3, %arg4 : f32
      linalg.yield %3 : f32
  } -> tensor<?xf32>
  %3 = tensor.dim %arg2, %c1 : tensor<?x?xf32>
  %4 = tensor.empty(%0, %3) : tensor<?x?xf32>
  %5 = linalg.generic {indexing_maps = [#map1, #map0, #map0], iterator_types = ["parallel", "parallel"]}
//     ^ function.builtin
      ins(%2, %arg2 : tensor<?xf32>, tensor<?x?xf32>)
      outs(%4 : tensor<?x?xf32>){
    ^bb0(%arg5: f32, %arg6: f32, %arg7: f32):
      %6 = arith.mulf %arg5, %arg6 : f32
      linalg.yield %6 : f32
    } -> tensor<?x?xf32>
  return %5 : tensor<?x?xf32>
}

// Highlight fixture: linalg.broadcast with a trailing `dimensions`
// attribute, asserting op-name (`function.builtin`), `ins`/`outs`
// (`keyword`) and `dimensions` (`attribute`) captures. Column-anchored
// assertions — do not re-indent or reflow.
func.func @broadcast(%input: tensor<8x32xf32>,
                     %init: tensor<8x16x32xf32>) -> tensor<8x16x32xf32> {
  %bcast = linalg.broadcast
//         ^ function.builtin
      ins(%input:tensor<8x32xf32>)
//    ^ keyword
      outs(%init:tensor<8x16x32xf32>)
//    ^ keyword
      dimensions = [1]
//    ^ attribute
  func.return %bcast : tensor<8x16x32xf32>
}