; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc < %s | FileCheck %s
target datalayout = "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:32-n8:16:32-a:0:32-S32"
target triple = "i386-pc-windows-gnu"
; This function uses esi as base pointer; the inline asm clobbers esi, so we
; should save esi using esp before the inline asm, and restore esi after the
; inline asm.
define i32 @clobber_bp() {
; CHECK-LABEL: clobber_bp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %ebp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .cfi_offset %ebp, -8
; CHECK-NEXT: movl %esp, %ebp
; CHECK-NEXT: .cfi_def_cfa_register %ebp
; CHECK-NEXT: pushl %edi
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: andl $-16, %esp
; CHECK-NEXT: subl $16, %esp
; CHECK-NEXT: movl %esp, %esi
; CHECK-NEXT: .cfi_offset %esi, -16
; CHECK-NEXT: .cfi_offset %edi, -12
; CHECK-NEXT: movl $4, 12(%esi)
; CHECK-NEXT: movl 12(%esi), %eax
; CHECK-NEXT: addl $3, %eax
; CHECK-NEXT: andl $-4, %eax
; CHECK-NEXT: calll __alloca
; CHECK-NEXT: movl %esp, %eax
; CHECK-NEXT: andl $-16, %eax
; CHECK-NEXT: movl %eax, %esp
; CHECK-NEXT: movl $1, (%eax)
; CHECK-NEXT: leal 8(%esi), %edi
; CHECK-NEXT: movl $4, %ecx
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: movl %eax, %esi
; CHECK-NEXT: #APP
; CHECK-NEXT: rep movsb (%esi), %es:(%edi)
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: popl %esi
; CHECK-NEXT: movl 8(%esi), %eax
; CHECK-NEXT: leal -8(%ebp), %esp
; CHECK-NEXT: popl %esi
; CHECK-NEXT: popl %edi
; CHECK-NEXT: popl %ebp
; CHECK-NEXT: retl
entry:
  ; Volatile store/load of the size keeps the dynamic alloca size opaque,
  ; forcing a variable-sized stack object (and hence a base pointer in esi).
  %size = alloca i32, align 4
  %g = alloca i32, align 4
  store volatile i32 4, ptr %size, align 4
  %len = load volatile i32, ptr %size, align 4
  ; align 16 on a dynamic alloca requires esp realignment, so esi is used as
  ; the base pointer for accessing the fixed stack slots above.
  %var_array = alloca i8, i32 %len, align 16
  store i32 1, ptr %var_array, align 16
  ; The asm takes si as an input/output, clobbering the base pointer.
  %nil = call { ptr, ptr, i32 } asm "rep movsb", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %g, ptr %var_array, i32 4)
  %retval = load i32, ptr %g, align 4
  ret i32 %retval
}
; Same scenario as above, except the inline asm additionally clobbers the
; frame pointer (ebp), so both esi and ebp are spilled around the asm.
define i32 @clobber_bpfp() {
; CHECK-LABEL: clobber_bpfp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %ebp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .cfi_offset %ebp, -8
; CHECK-NEXT: movl %esp, %ebp
; CHECK-NEXT: .cfi_def_cfa_register %ebp
; CHECK-NEXT: pushl %edi
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: andl $-16, %esp
; CHECK-NEXT: subl $16, %esp
; CHECK-NEXT: movl %esp, %esi
; CHECK-NEXT: .cfi_offset %esi, -16
; CHECK-NEXT: .cfi_offset %edi, -12
; CHECK-NEXT: movl $4, 12(%esi)
; CHECK-NEXT: movl 12(%esi), %eax
; CHECK-NEXT: addl $3, %eax
; CHECK-NEXT: andl $-4, %eax
; CHECK-NEXT: calll __alloca
; CHECK-NEXT: movl %esp, %eax
; CHECK-NEXT: andl $-16, %eax
; CHECK-NEXT: movl %eax, %esp
; CHECK-NEXT: movl $1, (%eax)
; CHECK-NEXT: leal 8(%esi), %edi
; CHECK-NEXT: movl $4, %ecx
; CHECK-NEXT: pushl %ebp
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: .cfi_remember_state
; CHECK-NEXT: .cfi_escape 0x0f, 0x06, 0x74, 0x04, 0x06, 0x11, 0x08, 0x22 #
; CHECK-NEXT: movl %eax, %esi
; CHECK-NEXT: #APP
; CHECK-NEXT: rep movsb (%esi), %es:(%edi)
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: popl %esi
; CHECK-NEXT: popl %ebp
; CHECK-NEXT: .cfi_restore_state
; CHECK-NEXT: movl 8(%esi), %eax
; CHECK-NEXT: leal -8(%ebp), %esp
; CHECK-NEXT: popl %esi
; CHECK-NEXT: popl %edi
; CHECK-NEXT: popl %ebp
; CHECK-NEXT: retl
entry:
  ; Volatile traffic keeps the alloca size dynamic, forcing a base pointer.
  %sz = alloca i32, align 4
  %dst = alloca i32, align 4
  store volatile i32 4, ptr %sz, align 4
  %n = load volatile i32, ptr %sz, align 4
  ; Over-aligned dynamic alloca forces esp realignment; esi becomes the base
  ; pointer for the fixed-offset slots.
  %vla = alloca i8, i32 %n, align 16
  store i32 1, ptr %vla, align 16
  ; si is an input/output and ebp is listed as clobbered, so the frame
  ; pointer must also be preserved across the asm.
  %asmret = call { ptr, ptr, i32 } asm "rep movsb", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags},~{ebp}"(ptr %dst, ptr %vla, i32 4)
  %rv = load i32, ptr %dst, align 4
  ret i32 %rv
}