File: arm64-misaligned-memcpy-inline.ll

; RUN: llc -mtriple=arm64-apple-ios -mattr=+strict-align < %s | FileCheck %s

; Small (16 bytes here) unaligned memcpys should remain memcpy calls when
; strict-alignment is enabled. (A contrasting aligned sketch follows at the
; end of this file.)
define void @t0(i8* %out, i8* %in) {
; CHECK-LABEL: t0:
; CHECK:         orr w2, wzr, #0x10
; CHECK-NEXT:    bl _memcpy
entry:
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %out, i8* %in, i64 16, i32 1, i1 false)
  ret void
}

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1)
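; Illustrative sketch only (not part of the original test, name @t1_aligned_sketch
; is hypothetical): with a known 8-byte alignment the backend is normally free to
; expand the same 16-byte copy inline even under +strict-align, since aligned
; 8-byte loads/stores are legal. The exact expansion is not asserted here, so no
; CHECK lines are attached.
define void @t1_aligned_sketch(i8* %out, i8* %in) {
entry:
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %out, i8* %in, i64 16, i32 8, i1 false)
  ret void
}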