File: gpu-to-nvvm-32b.mlir

Package: llvm-toolchain-20 1:20.1.8-1~exp1 (suite: experimental, area: main)
File content (79 lines):
// RUN: mlir-opt %s -convert-gpu-to-nvvm='index-bitwidth=32' -split-input-file | FileCheck %s

// RUN: mlir-opt %s -transform-interpreter | FileCheck %s

gpu.module @test_module_0 {
  // CHECK-LABEL: func @gpu_index_ops()
  func.func @gpu_index_ops()
      -> (index, index, index, index, index, index,
          index, index, index, index, index, index,
          index) {
    %tIdX = gpu.thread_id x
    %tIdY = gpu.thread_id y
    %tIdZ = gpu.thread_id z

    %bDimX = gpu.block_dim x
    %bDimY = gpu.block_dim y
    %bDimZ = gpu.block_dim z

    %bIdX = gpu.block_id x
    %bIdY = gpu.block_id y
    %bIdZ = gpu.block_id z

    %gDimX = gpu.grid_dim x
    %gDimY = gpu.grid_dim y
    %gDimZ = gpu.grid_dim z

    // CHECK-NOT: = llvm.sext %{{.*}} : i32 to i64
    %laneId = gpu.lane_id

    func.return %tIdX, %tIdY, %tIdZ, %bDimX, %bDimY, %bDimZ,
               %bIdX, %bIdY, %bIdZ, %gDimX, %gDimY, %gDimZ,
               %laneId
        : index, index, index, index, index, index,
          index, index, index, index, index, index,
          index
  }
}



gpu.module @test_module_1 {
  // CHECK-LABEL: func @gpu_index_comp
  func.func @gpu_index_comp(%idx : index) -> index {
    // CHECK: = llvm.add %{{.*}}, %{{.*}} : i32
    %0 = arith.addi %idx, %idx : index
    // CHECK: llvm.return %{{.*}} : i32
    func.return %0 : index
  }
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%toplevel_module: !transform.any_op {transform.readonly}) {
    %gpu_module = transform.structured.match ops{["gpu.module"]} in %toplevel_module
      : (!transform.any_op) -> !transform.any_op
    transform.apply_conversion_patterns to %gpu_module {
      transform.apply_conversion_patterns.dialect_to_llvm "arith"
      transform.apply_conversion_patterns.dialect_to_llvm "cf"
      transform.apply_conversion_patterns.vector.vector_to_llvm
      transform.apply_conversion_patterns.func.func_to_llvm
      transform.apply_conversion_patterns.dialect_to_llvm "memref"
      transform.apply_conversion_patterns.gpu.gpu_to_nvvm
      transform.apply_conversion_patterns.gpu.gpu_wmma_to_nvvm
      transform.apply_conversion_patterns.gpu.gpu_subgroup_reduce_to_nvvm {has_redux = true}
      transform.apply_conversion_patterns.nvgpu.nvgpu_to_nvvm
    } with type_converter {
      transform.apply_conversion_patterns.memref.memref_to_llvm_type_converter
        {index_bitwidth = 32, use_opaque_pointers = true}
    } {
      legal_dialects = ["llvm", "memref", "nvvm"],
      legal_ops = ["func.func", "gpu.module", "gpu.yield"],
      illegal_dialects = ["gpu"],
      illegal_ops = ["llvm.cos", "llvm.exp", "llvm.exp2", "llvm.fabs", "llvm.fceil",
                    "llvm.ffloor", "llvm.log", "llvm.log10", "llvm.log2", "llvm.pow",
                    "llvm.sin", "llvm.sqrt"],
      partial_conversion
    } : !transform.any_op
    transform.yield
  }
}