/* rawmemchr optimized with 256-bit EVEX instructions.
Copyright (C) 2022-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <isa-level.h>
#include <sysdep.h>
#if ISA_SHOULD_BUILD (4)
# ifndef VEC_SIZE
# include "x86-evex256-vecs.h"
# endif
# ifndef RAWMEMCHR
# define RAWMEMCHR __rawmemchr_evex
# endif
# define PC_SHIFT_GPR rdi
# define REG_WIDTH VEC_SIZE
# define VPTESTN vptestnmb
# define VPBROADCAST vpbroadcastb
# define VPMINU vpminub
# define VPCMP vpcmpb
# define VPCMPEQ vpcmpeqb
# define CHAR_SIZE 1
# include "reg-macros.h"
/* If not in RTM and VEC_SIZE != 64 (VEC_SIZE == 64 has no VEX
encoding), use VEX encoding in the loop so we can use
vpcmpeqb + vpternlogd, which is more efficient than the
EVEX alternative. */
# if defined USE_IN_RTM || VEC_SIZE == 64
# undef COND_VZEROUPPER
# undef VZEROUPPER_RETURN
# undef VZEROUPPER
# define COND_VZEROUPPER
# define VZEROUPPER_RETURN ret
# define VZEROUPPER
# define USE_TERN_IN_LOOP 0
# else
# define USE_TERN_IN_LOOP 1
# undef VZEROUPPER
# define VZEROUPPER vzeroupper
# endif
# define CHAR_PER_VEC VEC_SIZE
# if CHAR_PER_VEC == 64
# define TAIL_RETURN_LBL first_vec_x2
# define TAIL_RETURN_OFFSET (CHAR_PER_VEC * 2)
# define FALLTHROUGH_RETURN_LBL first_vec_x3
# define FALLTHROUGH_RETURN_OFFSET (CHAR_PER_VEC * 3)
# else /* !(CHAR_PER_VEC == 64) */
# define TAIL_RETURN_LBL first_vec_x3
# define TAIL_RETURN_OFFSET (CHAR_PER_VEC * 3)
# define FALLTHROUGH_RETURN_LBL first_vec_x2
# define FALLTHROUGH_RETURN_OFFSET (CHAR_PER_VEC * 2)
# endif /* !(CHAR_PER_VEC == 64) */
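/* TAIL_RETURN_* and FALLTHROUGH_RETURN_* select which label and
byte offset the shared return sequences near the end of the file
serve; the choice depends on whether the last two VEC of the loop
can be combined into a single 64-bit mask (CHAR_PER_VEC <= 32) or
must be handled individually (CHAR_PER_VEC == 64). */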
# define VMATCH VMM(0)
# define VMATCH_LO VMM_lo(0)
# define PAGE_SIZE 4096
.section SECTION(.text), "ax", @progbits
ENTRY_P2ALIGN (RAWMEMCHR, 6)
VPBROADCAST %esi, %VMATCH
/* Check if we may cross page boundary with one vector load. */
movl %edi, %eax
andl $(PAGE_SIZE - 1), %eax
cmpl $(PAGE_SIZE - VEC_SIZE), %eax
ja L(page_cross)
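/* Illustrative C sketch of the check above (s is the input
pointer in rdi): take the slow path iff an unaligned VEC_SIZE
load starting at s would spill into the next page:
    if (((uintptr_t) s & (PAGE_SIZE - 1)) > PAGE_SIZE - VEC_SIZE)
        goto page_cross;  */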
VPCMPEQ (%rdi), %VMATCH, %k0
KMOV %k0, %VRAX
test %VRAX, %VRAX
jz L(aligned_more)
L(first_vec_x0):
bsf %VRAX, %VRAX
addq %rdi, %rax
ret
.p2align 4,, 4
L(first_vec_x4):
bsf %VRAX, %VRAX
leaq (VEC_SIZE * 4)(%rdi, %rax), %rax
ret
/* For VEC_SIZE == 32 this fits in the alignment padding, so we
might as well place it more locally. For VEC_SIZE == 64 we reuse
the return code at the end of the loop. */
# if VEC_SIZE == 32
.p2align 4,, 4
L(FALLTHROUGH_RETURN_LBL):
bsf %VRAX, %VRAX
leaq (FALLTHROUGH_RETURN_OFFSET)(%rdi, %rax), %rax
ret
# endif
.p2align 4,, 6
L(page_cross):
/* eax has lower page-offset bits of rdi so xor will zero them
out. */
xorq %rdi, %rax
VPCMPEQ (PAGE_SIZE - VEC_SIZE)(%rax), %VMATCH, %k0
KMOV %k0, %VRAX
/* Shift out out-of-bounds matches. */
shrx %VRDI, %VRAX, %VRAX
test %VRAX, %VRAX
jnz L(first_vec_x0)
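/* How the page-cross path works: xoring rdi with its low
page-offset bits (saved in rax) gives the start of the page
containing s, so the load above covers the last VEC_SIZE bytes of
that page and cannot touch the next one. The shrx then discards
match bits for bytes that lie before s. Illustrative C sketch;
cmpeq_mask and first_set_bit are hypothetical helpers returning
one mask bit per byte and the lowest set bit index:
    char *page = (char *) ((uintptr_t) s & -PAGE_SIZE);
    mask = cmpeq_mask (page + PAGE_SIZE - VEC_SIZE, c);
    mask >>= (uintptr_t) s % VEC_SIZE;
    if (mask != 0)
        return s + first_set_bit (mask);
The shift count works because PAGE_SIZE - VEC_SIZE is a multiple
of VEC_SIZE, so s's offset within the loaded vector equals
s % VEC_SIZE. */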
.p2align 4,, 10
L(aligned_more):
L(page_cross_continue):
/* Align pointer. */
andq $(VEC_SIZE * -1), %rdi
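/* rdi is rounded down to a VEC_SIZE boundary here, so the checks
below at offsets VEC_SIZE..VEC_SIZE * 4 may re-scan a few bytes
already covered by the first unaligned VEC, but can never skip
any. */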
VPCMPEQ VEC_SIZE(%rdi), %VMATCH, %k0
KMOV %k0, %VRAX
test %VRAX, %VRAX
jnz L(first_vec_x1)
VPCMPEQ (VEC_SIZE * 2)(%rdi), %VMATCH, %k0
KMOV %k0, %VRAX
test %VRAX, %VRAX
jnz L(first_vec_x2)
VPCMPEQ (VEC_SIZE * 3)(%rdi), %VMATCH, %k0
KMOV %k0, %VRAX
test %VRAX, %VRAX
jnz L(first_vec_x3)
VPCMPEQ (VEC_SIZE * 4)(%rdi), %VMATCH, %k0
KMOV %k0, %VRAX
test %VRAX, %VRAX
jnz L(first_vec_x4)
subq $-(VEC_SIZE * 1), %rdi
# if VEC_SIZE == 64
/* Saves code size. No evex512 processor has partial register
stalls. If that changes, this can be replaced with `andq
$-(VEC_SIZE * 4), %rdi`. */
xorb %dil, %dil
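/* Clearing the low byte of rdi rounds it down to a 256-byte
boundary, which for VEC_SIZE == 64 is exactly the required
(VEC_SIZE * 4) alignment. */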
# else
andq $-(VEC_SIZE * 4), %rdi
# endif
# if USE_TERN_IN_LOOP
/* Copy VMATCH to a low ymm (ymm0-15) so we can use VEX-encoded
vpcmpeq, which cannot address the EVEX-only registers. NB: this
is VEC_SIZE == 32 only, as there is no way to encode vpcmpeq
with zmm0-15. */
vmovdqa64 %VMATCH, %VMATCH_LO
# endif
.p2align 4
L(loop_4x_vec):
/* Two versions of the loop: one that avoids ymm0-15 and so does
not require vzeroupper, and one that uses ymm0-15 and therefore
does. ymm0-15 is used at all because there is no EVEX encoding
of vpcmpeq, and with vpcmpeq this loop can be performed more
efficiently. The non-vzeroupper version is safe for RTM, while
the vzeroupper version should be preferred if RTM is not
supported. Which loop version is used is determined by
USE_TERN_IN_LOOP. */
# if USE_TERN_IN_LOOP
/* Since vpternlogd can only combine 3x vectors, it is fastest
to handle 1x vec separately with EVEX vpcmp. */
VPCMPEQ (VEC_SIZE * 4)(%rdi), %VMATCH, %k1
/* Compare 3x with vpcmpeq and OR them all together with
vpternlogd. */
VPCMPEQ (VEC_SIZE * 5)(%rdi), %VMATCH_LO, %VMM_lo(2)
subq $(VEC_SIZE * -4), %rdi
VPCMPEQ (VEC_SIZE * 2)(%rdi), %VMATCH_LO, %VMM_lo(3)
VPCMPEQ (VEC_SIZE * 3)(%rdi), %VMATCH_LO, %VMM_lo(4)
/* 254 is mask for oring VEC_lo(2), VEC_lo(3), VEC_lo(4) into
VEC_lo(4). */
vpternlogd $254, %VMM_lo(2), %VMM_lo(3), %VMM_lo(4)
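/* The imm8 of vpternlogd is a truth table indexed by the three
input bits; 254 == 0b11111110 yields 0 only when all three
inputs are 0, i.e. a three-way bitwise OR. */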
vpmovmskb %VMM_lo(4), %VRCX
KMOV %k1, %eax
/* NB: rax has match from first VEC and rcx has matches from
VEC 2-4. If rax is non-zero we will return that match. If
rax is zero adding won't disturb the bits in rcx. */
add %rax, %rcx
# else
/* Loop version that uses EVEX encoding. */
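/* Rough dataflow of one iteration: k1 gets the NOT-EQUAL mask
for VEC 4, so its zero bits mark matches; the vpxorq results
have a zero byte exactly where VEC 5/6 match; VPMINU with the
{%k1}{z} zero-mask then yields a zero byte wherever VEC 4, 5 or
6 has a match, which VPTESTN turns into mask k2; k3 holds the
matches from VEC 7. KORTEST sets ZF iff (k2 | k3) == 0, i.e. no
match anywhere in the 4x VEC. */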
VPCMP $4, (VEC_SIZE * 4)(%rdi), %VMATCH, %k1
vpxorq (VEC_SIZE * 5)(%rdi), %VMATCH, %VMM(2)
vpxorq (VEC_SIZE * 6)(%rdi), %VMATCH, %VMM(3)
VPCMPEQ (VEC_SIZE * 7)(%rdi), %VMATCH, %k3
VPMINU %VMM(2), %VMM(3), %VMM(3){%k1}{z}
VPTESTN %VMM(3), %VMM(3), %k2
subq $(VEC_SIZE * -4), %rdi
KORTEST %k2, %k3
# endif
jz L(loop_4x_vec)
# if USE_TERN_IN_LOOP
test %VRAX, %VRAX
# else
KMOV %k1, %VRAX
inc %VRAX
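/* k1 holds the NOT-EQUAL mask, so when the first VEC of the
iteration has no match VRAX is all-ones and the increment wraps
it to zero. With a match, the carry stops at the lowest zero bit
and sets it, so the bsf at L(last_vec_x0) still finds the first
match. */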
# endif
jnz L(last_vec_x0)
# if USE_TERN_IN_LOOP
vpmovmskb %VMM_lo(2), %VRAX
# else
VPTESTN %VMM(2), %VMM(2), %k1
KMOV %k1, %VRAX
# endif
test %VRAX, %VRAX
jnz L(last_vec_x1)
# if USE_TERN_IN_LOOP
vpmovmskb %VMM_lo(3), %VRAX
# else
KMOV %k2, %VRAX
# endif
/* No longer need any of the lo vecs (ymm0-15), so vzeroupper
(only if the VEX-encoded loop was used). */
COND_VZEROUPPER
/* Separate logic for VEC_SIZE == 64 and VEC_SIZE == 32 for
returning last 2x VEC. For VEC_SIZE == 64 we test each VEC
individually, for VEC_SIZE == 32 we combine them in a single
64-bit GPR. */
# if CHAR_PER_VEC == 64
# if USE_TERN_IN_LOOP
# error "Unsupported"
# endif
/* If CHAR_PER_VEC == 64 we can't combine the last two VEC. */
test %VRAX, %VRAX
jnz L(first_vec_x2)
KMOV %k3, %VRAX
L(FALLTHROUGH_RETURN_LBL):
# else
/* CHAR_PER_VEC <= 32 so we can combine the results from the
last 2x VEC. */
# if !USE_TERN_IN_LOOP
KMOV %k3, %VRCX
# endif
salq $CHAR_PER_VEC, %rcx
addq %rcx, %rax
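/* rax now holds the matches from VEC 2 (relative to the updated
rdi) in bits 0..CHAR_PER_VEC-1 and the matches from VEC 3 above
them, so one 64-bit bsf finds the first match across both. (In
the VEX loop the upper bits can redundantly repeat VEC 2
matches, which does not change the bsf result.) */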
# endif
bsf %rax, %rax
leaq (FALLTHROUGH_RETURN_OFFSET)(%rdi, %rax), %rax
ret
.p2align 4,, 8
L(TAIL_RETURN_LBL):
bsf %rax, %rax
leaq (TAIL_RETURN_OFFSET)(%rdi, %rax), %rax
ret
.p2align 4,, 8
L(last_vec_x1):
COND_VZEROUPPER
L(first_vec_x1):
bsf %VRAX, %VRAX
leaq (VEC_SIZE * 1)(%rdi, %rax), %rax
ret
.p2align 4,, 8
L(last_vec_x0):
COND_VZEROUPPER
bsf %VRAX, %VRAX
addq %rdi, %rax
ret
END (RAWMEMCHR)
#endif