/* Optimized strncmp implementation for PowerPC64/POWER10.
   Copyright (C) 2024-2025 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <sysdep.h>

/* Implements the function

   int [r3] strncmp (const char *s1 [r3], const char *s2 [r4], size_t [r5] n)

   The implementation uses unaligned doubleword access to avoid specialized
   code paths depending on data alignment for the first 32 bytes and uses
   vectorised loops after that.  */
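
/* Rough shape of the code below, as C-like pseudocode (illustrative
   only; names in parentheses are the local labels used here):

     if (n == 0) return 0;
     if (neither s1 nor s2 is within 16B of a page end)
       compare 16B;                        // unguarded head compare
     if ((s1 & 15) == (s2 & 15)) {         // L(same_aligned)
       align both strings, then compare 128B per iteration with paired
       loads once 32B alignment is reached (L(32B_aligned_loop)), or
       64B per iteration otherwise (L(16B_aligned_loop));
     } else {                              // L(nalign1_min)/nalign2 min
       16B-align whichever string is closer to its next 16B boundary;
       compare 64B per iteration (L(compare_64B_unaligned)) and, for
       the one 64B hunk per page that straddles the page end, fall back
       to length-limited loads (L(cross_1)..L(cross_4));
     }
     any NUL or byte difference, and an exhausted limit n, leave through
     L(different) and L(ret0) respectively.  */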
#ifndef STRNCMP
# define STRNCMP strncmp
#endif

/* TODO: Change this to actual instructions when minimum binutils is upgraded
   to 2.27.  Macros are defined below for these newer instructions in order
   to maintain compatibility.  */
#define LXVP(xtp,dq,ra)                 \
        .long(((6)<<(32-6))             \
        | ((((xtp)-32)>>1)<<(32-10))    \
        | ((1)<<(32-11))                \
        | ((ra)<<(32-16))               \
        | dq)
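
/* COMPARE_16 loads 16B from each string at the given offset and compares
   them.  vcmpnezb. flags every byte position where the bytes differ or
   either byte is NUL, and sets CR6 accordingly, so a single branch covers
   both a mismatch and end-of-string.  If nothing was flagged and no more
   than 16 bytes of the limit remain (r5), the compare is done; otherwise
   16 bytes are deducted from the limit.  */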
#define COMPARE_16(vreg1,vreg2,offset)  \
        lxv     vreg1+32,offset(r3);    \
        lxv     vreg2+32,offset(r4);    \
        vcmpnezb.       v7,vreg1,vreg2; \
        bne     cr6,L(different);       \
        cmpldi  cr7,r5,16;              \
        ble     cr7,L(ret0);            \
        addi    r5,r5,-16;
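
/* COMPARE_32 is the 32B analogue built on a paired load.  lxvp fills the
   even/odd VSR pair vreg1/vreg1+1; in little-endian mode the lower 16
   bytes of storage land in the odd (higher-numbered) register, which is
   why vreg1+1 is compared first and routes to the "first 16B" tail.  */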
#define COMPARE_32(vreg1,vreg2,offset,label1,label2)    \
        LXVP(vreg1+32,offset,r3);               \
        LXVP(vreg2+32,offset,r4);               \
        vcmpnezb.       v7,vreg1+1,vreg2+1;     \
        bne     cr6,L(label1);                  \
        vcmpnezb.       v7,vreg1,vreg2;         \
        bne     cr6,L(label2);                  \
        cmpldi  cr7,r5,32;                      \
        ble     cr7,L(ret0);                    \
        addi    r5,r5,-32;
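
/* Compute the return value when the mismatch is in the first 16B of a
   block: vctzlsbb turns the vcmpnezb result into the byte index of the
   first difference, which only counts if it lies inside the remaining
   limit (r5); otherwise return 0.  */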
#define TAIL_FIRST_16B(vreg1,vreg2)     \
        vctzlsbb        r6,v7;          \
        cmpld   cr7,r5,r6;              \
        ble     cr7,L(ret0);            \
        vextubrx        r5,r6,vreg1;    \
        vextubrx        r4,r6,vreg2;    \
        subf    r3,r4,r5;               \
        blr;
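
/* Same as above for the second 16B of a 32B block: the index is biased
   by 16 before the limit check, while the byte extraction still indexes
   within the 16B vector itself.  */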
#define TAIL_SECOND_16B(vreg1,vreg2)    \
        vctzlsbb        r6,v7;          \
        addi    r0,r6,16;               \
        cmpld   cr7,r5,r0;              \
        ble     cr7,L(ret0);            \
        vextubrx        r5,r6,vreg1;    \
        vextubrx        r4,r6,vreg2;    \
        subf    r3,r4,r5;               \
        blr;
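
/* Compare exactly len_reg (at most 16) bytes using length-limited loads;
   lxvl takes the length in the top byte of the GPR, hence the shift by
   56, and never touches memory past it.  A difference or NUL inside the
   window exits through L(different) (which reads v4/v5/v7); exhausting
   the total limit n (r5) first returns 0; otherwise both pointers are
   advanced and the bytes just compared deducted from r5.  */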
#define CHECK_N_BYTES(reg1,reg2,len_reg)        \
        sldi    r6,len_reg,56;          \
        lxvl    32+v4,reg1,r6;          \
        lxvl    32+v5,reg2,r6;          \
        add     reg1,reg1,len_reg;      \
        add     reg2,reg2,len_reg;      \
        vcmpnezb        v7,v4,v5;       \
        vctzlsbb        r6,v7;          \
        cmpld   cr7,r6,len_reg;         \
        blt     cr7,L(different);       \
        cmpld   cr7,r5,len_reg;         \
        ble     cr7,L(ret0);            \
        sub     r5,r5,len_reg;

/* TODO: change this to .machine power10 when the minimum required
   binutils allows it.  */
        .machine power9
ENTRY_TOCLESS (STRNCMP, 4)
        /* Check if size is 0.  */
        cmpdi   cr0,r5,0
        beq     cr0,L(ret0)
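
        /* A 16B load is unsafe only if the pointer sits in the last 16
           bytes of a 4K page.  When both strings are clear of that, take
           a head start with one unguarded 16B compare before the
           alignment bookkeeping below.  */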
        andi.   r7,r3,4095
        andi.   r8,r4,4095
        cmpldi  cr0,r7,4096-16
        cmpldi  cr1,r8,4096-16
        bgt     cr0,L(crosses)
        bgt     cr1,L(crosses)
        COMPARE_16(v4,v5,0)
        addi    r3,r3,16
        addi    r4,r4,16

L(crosses):
        andi.   r7,r3,15
        subfic  r7,r7,16        /* r7(nalign1) = 16 - (str1 & 15).  */
        andi.   r9,r4,15
        subfic  r8,r9,16        /* r8(nalign2) = 16 - (str2 & 15).  */
        cmpld   cr7,r7,r8
        beq     cr7,L(same_aligned)
        blt     cr7,L(nalign1_min)

        /* nalign2 is minimum and s2 pointer is aligned.  */
        CHECK_N_BYTES(r3,r4,r8)
        /* Are we on the 64B hunk which crosses a page?  */
        andi.   r10,r3,63       /* Determine offset into 64B hunk.  */
        andi.   r8,r3,15        /* The offset into the 16B hunk.  */
        neg     r7,r3
        andi.   r9,r7,15        /* Number of bytes after a 16B cross.  */
        rlwinm. r7,r7,26,0x3F   /* ((r3-4096))>>6&63.  */
        beq     L(compare_64_pagecross)
        mtctr   r7
        b       L(compare_64B_unaligned)

        /* nalign1 is minimum and s1 pointer is aligned.  */
L(nalign1_min):
        CHECK_N_BYTES(r3,r4,r7)
        /* Are we on the 64B hunk which crosses a page?  */
        andi.   r10,r4,63       /* Determine offset into 64B hunk.  */
        andi.   r8,r4,15        /* The offset into the 16B hunk.  */
        neg     r7,r4
        andi.   r9,r7,15        /* Number of bytes after a 16B cross.  */
        rlwinm. r7,r7,26,0x3F   /* ((r4-4096))>>6&63.  */
        beq     L(compare_64_pagecross)
        mtctr   r7

        .p2align 5
L(compare_64B_unaligned):
        COMPARE_16(v4,v5,0)
        COMPARE_16(v4,v5,16)
        COMPARE_16(v4,v5,32)
        COMPARE_16(v4,v5,48)
        addi    r3,r3,64
        addi    r4,r4,64
        bdnz    L(compare_64B_unaligned)

        /* Cross the page boundary of the unaligned string carefully.
           Only on the first iteration do we have to compute the count of
           64B blocks to be checked; from the second iteration onward the
           loop counter is always 63.  */
L(compare_64_pagecross):
        li      r11,63
        mtctr   r11
        cmpldi  r10,16
        ble     L(cross_4)
        cmpldi  r10,32
        ble     L(cross_3)
        cmpldi  r10,48
        ble     L(cross_2)
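
        /* Each L(cross_N) finishes the 64B hunk that straddles the page
           end: the full 16B chunks before the boundary are compared as
           usual, the straddling 16B chunk is split at its 16B boundary
           into two length-limited loads (r9 bytes, then r8 bytes) so that
           no load crosses the page, and the remaining chunks complete the
           hunk before rejoining L(compare_64B_unaligned).  */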
L(cross_1):
        CHECK_N_BYTES(r3,r4,r9)
        CHECK_N_BYTES(r3,r4,r8)
        COMPARE_16(v4,v5,0)
        COMPARE_16(v4,v5,16)
        COMPARE_16(v4,v5,32)
        addi    r3,r3,48
        addi    r4,r4,48
        b       L(compare_64B_unaligned)

L(cross_2):
        COMPARE_16(v4,v5,0)
        addi    r3,r3,16
        addi    r4,r4,16
        CHECK_N_BYTES(r3,r4,r9)
        CHECK_N_BYTES(r3,r4,r8)
        COMPARE_16(v4,v5,0)
        COMPARE_16(v4,v5,16)
        addi    r3,r3,32
        addi    r4,r4,32
        b       L(compare_64B_unaligned)

L(cross_3):
        COMPARE_16(v4,v5,0)
        COMPARE_16(v4,v5,16)
        addi    r3,r3,32
        addi    r4,r4,32
        CHECK_N_BYTES(r3,r4,r9)
        CHECK_N_BYTES(r3,r4,r8)
        COMPARE_16(v4,v5,0)
        addi    r3,r3,16
        addi    r4,r4,16
        b       L(compare_64B_unaligned)

L(cross_4):
        COMPARE_16(v4,v5,0)
        COMPARE_16(v4,v5,16)
        COMPARE_16(v4,v5,32)
        addi    r3,r3,48
        addi    r4,r4,48
        CHECK_N_BYTES(r3,r4,r9)
        CHECK_N_BYTES(r3,r4,r8)
        b       L(compare_64B_unaligned)

L(same_aligned):
        CHECK_N_BYTES(r3,r4,r7)
        /* Align s1 to 32B and adjust s2 address.
           Use lxvp only if both s1 and s2 are 32B aligned.  */
        COMPARE_16(v4,v5,0)
        COMPARE_16(v4,v5,16)
        COMPARE_16(v4,v5,32)
        COMPARE_16(v4,v5,48)
        addi    r3,r3,64
        addi    r4,r4,64
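
        /* Compare one more 32B block without moving the pointers, then
           hand those 32 bytes back to the limit (r5 += 32) and step both
           strings to the next 32B boundary of s1.  The r7 = 32 - (r3 & 31)
           bytes skipped over were just compared, so only they are charged
           against r5.  */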
        COMPARE_16(v4,v5,0)
        COMPARE_16(v4,v5,16)

        addi    r5,r5,32
        clrldi  r6,r3,59
        subfic  r7,r6,32
        add     r3,r3,r7
        add     r4,r4,r7
        subf    r5,r7,r5
        andi.   r7,r4,0x1F
        beq     cr0,L(32B_aligned_loop)
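
        /* s2 could not reach 32B alignment together with s1, so stay on
           single 16B vector loads.  The loop has no exit of its own: it
           leaves through L(different) or L(ret0) inside COMPARE_16.  */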
        .p2align 5
L(16B_aligned_loop):
        COMPARE_16(v4,v5,0)
        COMPARE_16(v4,v5,16)
        COMPARE_16(v4,v5,32)
        COMPARE_16(v4,v5,48)
        addi    r3,r3,64
        addi    r4,r4,64
        b       L(16B_aligned_loop)

        /* Calculate and return the difference.  */
L(different):
        TAIL_FIRST_16B(v4,v5)
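
        /* Both strings are 32B aligned: compare 128B per iteration with
           four paired loads.  Each COMPARE_32 uses distinct registers and
           tail labels so the mismatching 16B half is reported directly.  */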
        .p2align 5
L(32B_aligned_loop):
        COMPARE_32(v14,v16,0,tail1,tail2)
        COMPARE_32(v18,v20,32,tail3,tail4)
        COMPARE_32(v22,v24,64,tail5,tail6)
        COMPARE_32(v26,v28,96,tail7,tail8)
        addi    r3,r3,128
        addi    r4,r4,128
        b       L(32B_aligned_loop)

L(tail1):       TAIL_FIRST_16B(v15,v17)
L(tail2):       TAIL_SECOND_16B(v14,v16)
L(tail3):       TAIL_FIRST_16B(v19,v21)
L(tail4):       TAIL_SECOND_16B(v18,v20)
L(tail5):       TAIL_FIRST_16B(v23,v25)
L(tail6):       TAIL_SECOND_16B(v22,v24)
L(tail7):       TAIL_FIRST_16B(v27,v29)
L(tail8):       TAIL_SECOND_16B(v26,v28)

        .p2align 5
L(ret0):
        li      r3,0
        blr
END(STRNCMP)
libc_hidden_builtin_def(strncmp)