; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
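; To regenerate the assertions after changing the IR below, rerun
; utils/update_test_checks.py on this file (use --opt-binary to point it at a
; locally built opt).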
; RUN: opt --codegen-opt-level=2 -mtriple=x86_64 -lower-amx-type %s -S | FileCheck %s
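
; Checks that a tile-to-vector cast whose only user is a plain store is folded
; away: the tilezero result is written to memory directly with tilestored64.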
define void @combine_store(ptr%p) {
; CHECK-LABEL: @combine_store(
; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tilezero.internal(i16 16, i16 64)
; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr [[P:%.*]], i64 64, x86_amx [[T1]])
; CHECK-NEXT:    ret void
;
  %t1 = call x86_amx @llvm.x86.tilezero.internal(i16 16, i16 64)
  %t2 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t1)
  store <256 x i32> %t2, ptr %p, align 64
  ret void
}
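
; The cast result is also returned, so the tile is spilled to a stack slot with
; tilestored64 and reloaded as a vector for the return value, while the
; user-visible store is still performed directly from the tile.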
define <256 x i32> @combine_store_2user(ptr%p) {
; CHECK-LABEL: @combine_store_2user(
; CHECK-NEXT:    [[TMP1:%.*]] = alloca <256 x i32>, align 64
; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tilezero.internal(i16 16, i16 64)
; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr [[TMP1]], i64 64, x86_amx [[T1]])
; CHECK-NEXT:    [[TMP3:%.*]] = load <256 x i32>, ptr [[TMP1]], align 1024
; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr [[P:%.*]], i64 64, x86_amx [[T1]])
; CHECK-NEXT:    ret <256 x i32> [[TMP3]]
;
  %t1 = call x86_amx @llvm.x86.tilezero.internal(i16 16, i16 64)
  %t2 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t1)
  store <256 x i32> %t2, ptr %p, align 64
  ret <256 x i32> %t2
}
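
; Checks the mirror case for loads: a vector load whose only user is a
; vector-to-tile cast becomes a single tileloadd64 and the intermediate
; <256 x i32> value disappears.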
define void @combine_load(ptr%p, ptr%p2) {
; CHECK-LABEL: @combine_load(
; CHECK-NEXT:    [[TMP2:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr [[P:%.*]], i64 64)
; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr [[P2:%.*]], i64 64, x86_amx [[TMP2]])
; CHECK-NEXT:    ret void
;
  %t1 = load <256 x i32>, ptr %p, align 64
  %t2 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t1)
  call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr %p2, i64 64, x86_amx %t2)
  ret void
}
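
; Same load/cast combine, but with a store to the same pointer between the load
; and the cast; the tileloadd64 is still emitted ahead of that store.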
define void @combine_cast_across_store(ptr%p, ptr%p2) {
; CHECK-LABEL: @combine_cast_across_store(
; CHECK-NEXT:    [[TMP2:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr [[P:%.*]], i64 64)
; CHECK-NEXT:    store <256 x i32> zeroinitializer, ptr [[P]], align 64
; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr [[P2:%.*]], i64 64, x86_amx [[TMP2]])
; CHECK-NEXT:    ret void
;
  %t1 = load <256 x i32>, ptr %p, align 64
  store <256 x i32> zeroinitializer, ptr %p, align 64
  %t2 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t1)
  call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr %p2, i64 64, x86_amx %t2)
  ret void
}
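
; The loaded vector has a second user (the return value), so the load is kept;
; its value is stored to a stack slot and read back as a tile with tileloadd64.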
define <256 x i32> @combine_load_2user(ptr%p, ptr%p2) {
; CHECK-LABEL: @combine_load_2user(
; CHECK-NEXT:    [[TMP1:%.*]] = alloca <256 x i32>, align 64
; CHECK-NEXT:    [[T1:%.*]] = load <256 x i32>, ptr [[P:%.*]], align 64
; CHECK-NEXT:    store <256 x i32> [[T1]], ptr [[TMP1]], align 1024
; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr [[TMP1]], i64 64)
; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr [[P2:%.*]], i64 64, x86_amx [[TMP3]])
; CHECK-NEXT:    ret <256 x i32> [[T1]]
;
  %t1 = load <256 x i32>, ptr %p, align 64
  %t2 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t1)
  call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr %p2, i64 64, x86_amx %t2)
  %t3 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t2)
  ret <256 x i32> %t3
}
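
; As above, but the cast additionally feeds a tdpbssd; the reloaded tile is
; reused for both the tilestored64 and the dot-product, and the tile-to-vector
; cast is replaced by the originally loaded vector.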
define <256 x i32> @combine_load_3user(ptr%p, ptr%p2) {
; CHECK-LABEL: @combine_load_3user(
; CHECK-NEXT:    [[TMP1:%.*]] = alloca <256 x i32>, align 64
; CHECK-NEXT:    [[T1:%.*]] = load <256 x i32>, ptr [[P:%.*]], align 64
; CHECK-NEXT:    store <256 x i32> [[T1]], ptr [[TMP1]], align 1024
; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 16, ptr [[TMP1]], i64 16)
; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr [[P2:%.*]], i64 64, x86_amx [[TMP3]])
; CHECK-NEXT:    [[TMP4:%.*]] = call x86_amx @llvm.x86.tdpbssd.internal(i16 16, i16 16, i16 64, x86_amx [[TMP3]], x86_amx [[TMP3]], x86_amx [[TMP3]])
; CHECK-NEXT:    ret <256 x i32> [[T1]]
;
  %t1 = load <256 x i32>, ptr %p, align 64
  %t2 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t1)
  call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr %p2, i64 64, x86_amx %t2)
  %t3 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t2)
  call x86_amx @llvm.x86.tdpbssd.internal(i16 16, i16 16, i16 64, x86_amx %t2, x86_amx %t2, x86_amx %t2)
  ret <256 x i32> %t3
}

declare x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32>)
declare <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx)
declare x86_amx @llvm.x86.tilezero.internal(i16, i16)
declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, ptr, i64)
declare void @llvm.x86.tilestored64.internal(i16, i16, ptr, i64, x86_amx)
declare x86_amx @llvm.x86.tdpbssd.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)