File: test-transfer-read.mlir

// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void  \
// RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void  \
// RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

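// Read 13 f32 elements starting at %base. Out-of-bounds positions are
// filled with the padding value (-42 here).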
func.func @transfer_read_1d(%A : memref<?xf32>, %base: index) {
  %fm42 = arith.constant -42.0: f32
  %f = vector.transfer_read %A[%base], %fm42
      {permutation_map = affine_map<(d0) -> (d0)>} :
    memref<?xf32>, vector<13xf32>
  vector.print %f: vector<13xf32>
  return
}

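// Masked read: lanes whose mask bit is 0, as well as out-of-bounds lanes,
// take the padding value.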
func.func @transfer_read_mask_1d(%A : memref<?xf32>, %base: index) {
  %fm42 = arith.constant -42.0: f32
  %m = arith.constant dense<[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]> : vector<13xi1>
  %f = vector.transfer_read %A[%base], %fm42, %m : memref<?xf32>, vector<13xf32>
  vector.print %f: vector<13xf32>
  return
}

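// Read 4 elements with in_bounds = [true]: the lowering may skip the
// out-of-bounds check, so the caller must stay inside the memref.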
func.func @transfer_read_inbounds_4(%A : memref<?xf32>, %base: index) {
  %fm42 = arith.constant -42.0: f32
  %f = vector.transfer_read %A[%base], %fm42
      {permutation_map = affine_map<(d0) -> (d0)>, in_bounds = [true]} :
    memref<?xf32>, vector<4xf32>
  vector.print %f: vector<4xf32>
  return
}

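// Masked in-bounds read: no bounds check is needed, but masked-off lanes
// still take the padding value.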
func.func @transfer_read_mask_inbounds_4(%A : memref<?xf32>, %base: index) {
  %fm42 = arith.constant -42.0: f32
  %m = arith.constant dense<[0, 1, 0, 1]> : vector<4xi1>
  %f = vector.transfer_read %A[%base], %fm42, %m {in_bounds = [true]}
      : memref<?xf32>, vector<4xf32>
  vector.print %f: vector<4xf32>
  return
}

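// Write a 4-element splat of 0.0 starting at %base. Lanes that would fall
// outside the memref are not stored.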
func.func @transfer_write_1d(%A : memref<?xf32>, %base: index) {
  %f0 = arith.constant 0.0 : f32
  %vf0 = vector.splat %f0 : vector<4xf32>
  vector.transfer_write %vf0, %A[%base]
      {permutation_map = affine_map<(d0) -> (d0)>} :
    vector<4xf32>, memref<?xf32>
  return
}

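// Entry point: allocate a 5-element buffer, initialize it with 0..4, and
// exercise the transfer ops above. Expected output is checked by FileCheck.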
func.func @entry() {
  %c0 = arith.constant 0: index
  %c1 = arith.constant 1: index
  %c2 = arith.constant 2: index
  %c3 = arith.constant 3: index
  %c4 = arith.constant 4: index
  %c5 = arith.constant 5: index
  %A = memref.alloc(%c5) : memref<?xf32>
  scf.for %i = %c0 to %c5 step %c1 {
    %i32 = arith.index_cast %i : index to i32
    %fi = arith.sitofp %i32 : i32 to f32
    memref.store %fi, %A[%i] : memref<?xf32>
  }
  // On input, memory contains [[ 0, 1, 2, 3, 4, xxx garbage xxx ]]
  // Read shifted by 2 and pad with -42:
  //   ( 2, 3, 4, -42, ..., -42)
  call @transfer_read_1d(%A, %c2) : (memref<?xf32>, index) -> ()
  // Read with mask and out-of-bounds access; masked lanes and out-of-bounds
  // lanes take the padding value -42:
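  //   ( -42, -42, 4, -42, ..., -42)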
  call @transfer_read_mask_1d(%A, %c2) : (memref<?xf32>, index) -> ()
  // Write into memory shifted by 3
  //   memory contains [[ 0, 1, 2, 0, 0, xxx garbage xxx ]]
  call @transfer_write_1d(%A, %c3) : (memref<?xf32>, index) -> ()
  // Read shifted by 0 and pad with -42:
  //   ( 0, 1, 2, 0, 0, -42, ..., -42)
  call @transfer_read_1d(%A, %c0) : (memref<?xf32>, index) -> ()
  // Read 4 elements in-bounds starting at index 1; guaranteed not to go out
  // of bounds. Exercises proper alignment.
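  //   ( 1, 2, 0, 0)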
  call @transfer_read_inbounds_4(%A, %c1) : (memref<?xf32>, index) -> ()
  // Read 4 elements in-bounds with a mask; masked lanes take -42:
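  //   ( -42, 2, -42, 0)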
  call @transfer_read_mask_inbounds_4(%A, %c1) : (memref<?xf32>, index) -> ()

  memref.dealloc %A : memref<?xf32>

  return
}

// CHECK: ( 2, 3, 4, -42, -42, -42, -42, -42, -42, -42, -42, -42, -42 )
// CHECK: ( -42, -42, 4, -42, -42, -42, -42, -42, -42, -42, -42, -42, -42 )
// CHECK: ( 0, 1, 2, 0, 0, -42, -42, -42, -42, -42, -42, -42, -42 )
// CHECK: ( 1, 2, 0, 0 )
// CHECK: ( -42, 2, -42, 0 )