/* memrchr optimized with AVX2.
   Copyright (C) 2017-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
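
/* memrchr (S, C, N) returns a pointer to the last byte equal to C in
   the N bytes starting at S, or NULL if there is none.  As a reference
   for what the assembly below computes, a minimal portable C sketch
   (illustrative only, not the fallback glibc actually uses):

     void *
     memrchr (const void *s, int c, size_t n)
     {
       const unsigned char *p = (const unsigned char *) s + n;
       while (n--)
         if (*--p == (unsigned char) c)
           return (void *) p;
       return NULL;
     }

   The AVX2 version compares VEC_SIZE (32) bytes at a time with
   vpcmpeqb, turns each compare into a bit mask with vpmovmskb, and
   locates the last matching byte with bsr.  */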
#if IS_IN (libc)

# include <sysdep.h>

# ifndef VZEROUPPER
#  define VZEROUPPER	vzeroupper
# endif

# define VEC_SIZE	32

	.section .text.avx,"ax",@progbits
ENTRY (__memrchr_avx2)
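	/* Inputs (SysV x86-64 ABI): RDI = buffer S, ESI = byte C,
	   RDX = length N; the result is returned in RAX.  RDX_LP and
	   RDI_LP are sysdep.h macros naming the 32-bit or 64-bit
	   register as appropriate, so the same source also builds for
	   the x32 ABI.  */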
	/* Broadcast CHAR to YMM0.  */
	vmovd	%esi, %xmm0
	vpbroadcastb %xmm0, %ymm0

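	/* N -= VEC_SIZE.  If N was at most VEC_SIZE the whole buffer
	   fits in one vector; take the short path.  Otherwise set
	   RDI = S + N - VEC_SIZE, the start of the last VEC_SIZE bytes
	   of the buffer.  */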
	sub	$VEC_SIZE, %RDX_LP
	jbe	L(last_vec_or_less)

	add	%RDX_LP, %RDI_LP

	/* Check the last VEC_SIZE bytes.  */
	vpcmpeqb (%rdi), %ymm0, %ymm1
	vpmovmskb %ymm1, %eax
	testl	%eax, %eax
	jnz	L(last_vec_x0)

	subq	$(VEC_SIZE * 4), %rdi
	movl	%edi, %ecx
	andl	$(VEC_SIZE - 1), %ecx
	jz	L(aligned_more)

	/* Align data for aligned loads in the loop.  */
	addq	$VEC_SIZE, %rdi
	addq	$VEC_SIZE, %rdx
	andq	$-VEC_SIZE, %rdi
	subq	%rcx, %rdx

	.p2align 4
L(aligned_more):
	subq	$(VEC_SIZE * 4), %rdx
	jbe	L(last_4x_vec_or_less)

	/* Check the last 4 * VEC_SIZE.  Only one VEC_SIZE at a time
	   since data is only aligned to VEC_SIZE.  */
	vpcmpeqb (VEC_SIZE * 3)(%rdi), %ymm0, %ymm1
	vpmovmskb %ymm1, %eax
	testl	%eax, %eax
	jnz	L(last_vec_x3)

	vpcmpeqb (VEC_SIZE * 2)(%rdi), %ymm0, %ymm2
	vpmovmskb %ymm2, %eax
	testl	%eax, %eax
	jnz	L(last_vec_x2)

	vpcmpeqb VEC_SIZE(%rdi), %ymm0, %ymm3
	vpmovmskb %ymm3, %eax
	testl	%eax, %eax
	jnz	L(last_vec_x1)

	vpcmpeqb (%rdi), %ymm0, %ymm4
	vpmovmskb %ymm4, %eax
	testl	%eax, %eax
	jnz	L(last_vec_x0)

	/* Align data to 4 * VEC_SIZE for loop with fewer branches.
	   There are some overlaps with above if data isn't aligned
	   to 4 * VEC_SIZE.  */
	movl	%edi, %ecx
	andl	$(VEC_SIZE * 4 - 1), %ecx
	jz	L(loop_4x_vec)

	addq	$(VEC_SIZE * 4), %rdi
	addq	$(VEC_SIZE * 4), %rdx
	andq	$-(VEC_SIZE * 4), %rdi
	subq	%rcx, %rdx

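	/* Main loop: step backwards over 4 * VEC_SIZE-aligned memory,
	   4 * VEC_SIZE bytes per iteration.  ORing the four compare
	   results lets a single vpmovmskb/test per iteration decide
	   whether any of the 128 bytes matched; the individual masks
	   are only examined after a hit.  */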
	.p2align 4
L(loop_4x_vec):
	/* Compare 4 * VEC at a time forward.  */
	subq	$(VEC_SIZE * 4), %rdi
	subq	$(VEC_SIZE * 4), %rdx
	jbe	L(last_4x_vec_or_less)

	vmovdqa	(%rdi), %ymm1
	vmovdqa	VEC_SIZE(%rdi), %ymm2
	vmovdqa	(VEC_SIZE * 2)(%rdi), %ymm3
	vmovdqa	(VEC_SIZE * 3)(%rdi), %ymm4

	vpcmpeqb %ymm1, %ymm0, %ymm1
	vpcmpeqb %ymm2, %ymm0, %ymm2
	vpcmpeqb %ymm3, %ymm0, %ymm3
	vpcmpeqb %ymm4, %ymm0, %ymm4

	vpor	%ymm1, %ymm2, %ymm5
	vpor	%ymm3, %ymm4, %ymm6
	vpor	%ymm5, %ymm6, %ymm5

	vpmovmskb %ymm5, %eax
	testl	%eax, %eax
	jz	L(loop_4x_vec)

	/* There is a match.  Check the vectors from the highest address
	   down, since memrchr must return the last occurrence.  */
	vpmovmskb %ymm4, %eax
	testl	%eax, %eax
	jnz	L(last_vec_x3)

	vpmovmskb %ymm3, %eax
	testl	%eax, %eax
	jnz	L(last_vec_x2)

	vpmovmskb %ymm2, %eax
	testl	%eax, %eax
	jnz	L(last_vec_x1)

	vpmovmskb %ymm1, %eax
	bsrl	%eax, %eax
	addq	%rdi, %rax
	VZEROUPPER
	ret

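	/* At most 4 * VEC_SIZE bytes remain.  RDX went non-positive in
	   the loop, so add 4 * VEC_SIZE back to get the count of
	   remaining bytes, then dispatch on how many vectors are left.  */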
	.p2align 4
L(last_4x_vec_or_less):
	addl	$(VEC_SIZE * 4), %edx
	cmpl	$(VEC_SIZE * 2), %edx
	jbe	L(last_2x_vec)

	vpcmpeqb (VEC_SIZE * 3)(%rdi), %ymm0, %ymm1
	vpmovmskb %ymm1, %eax
	testl	%eax, %eax
	jnz	L(last_vec_x3)

	vpcmpeqb (VEC_SIZE * 2)(%rdi), %ymm0, %ymm2
	vpmovmskb %ymm2, %eax
	testl	%eax, %eax
	jnz	L(last_vec_x2)

	vpcmpeqb VEC_SIZE(%rdi), %ymm0, %ymm3
	vpmovmskb %ymm3, %eax
	testl	%eax, %eax
	jnz	L(last_vec_x1_check)
	cmpl	$(VEC_SIZE * 3), %edx
	jbe	L(zero)

	vpcmpeqb (%rdi), %ymm0, %ymm4
	vpmovmskb %ymm4, %eax
	testl	%eax, %eax
	jz	L(zero)
	bsrl	%eax, %eax
	subq	$(VEC_SIZE * 4), %rdx
	addq	%rax, %rdx
	jl	L(zero)
	addq	%rdi, %rax
	VZEROUPPER
	ret

	.p2align 4
L(last_2x_vec):
	vpcmpeqb (VEC_SIZE * 3)(%rdi), %ymm0, %ymm1
	vpmovmskb %ymm1, %eax
	testl	%eax, %eax
	jnz	L(last_vec_x3_check)
	cmpl	$VEC_SIZE, %edx
	jbe	L(zero)

	vpcmpeqb (VEC_SIZE * 2)(%rdi), %ymm0, %ymm1
	vpmovmskb %ymm1, %eax
	testl	%eax, %eax
	jz	L(zero)
	bsrl	%eax, %eax
	subq	$(VEC_SIZE * 2), %rdx
	addq	%rax, %rdx
	jl	L(zero)
	addl	$(VEC_SIZE * 2), %eax
	addq	%rdi, %rax
	VZEROUPPER
	ret

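	/* Return stubs for a hit in vector N of the current window:
	   bsrl finds the highest set bit of the mask, i.e. the last
	   matching byte within that vector; adding the vector offset
	   and RDI yields the final pointer.  */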
	.p2align 4
L(last_vec_x0):
	bsrl	%eax, %eax
	addq	%rdi, %rax
	VZEROUPPER
	ret

	.p2align 4
L(last_vec_x1):
	bsrl	%eax, %eax
	addl	$VEC_SIZE, %eax
	addq	%rdi, %rax
	VZEROUPPER
	ret

	.p2align 4
L(last_vec_x2):
	bsrl	%eax, %eax
	addl	$(VEC_SIZE * 2), %eax
	addq	%rdi, %rax
	VZEROUPPER
	ret

	.p2align 4
L(last_vec_x3):
	bsrl	%eax, %eax
	addl	$(VEC_SIZE * 3), %eax
	addq	%rdi, %rax
	VZEROUPPER
	ret

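	/* The _check variants are reached when the match might lie
	   before the start of the buffer: the matched offset is checked
	   against the remaining length, and NULL is returned if the
	   match falls outside the caller's N bytes.  */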
	.p2align 4
L(last_vec_x1_check):
	bsrl	%eax, %eax
	subq	$(VEC_SIZE * 3), %rdx
	addq	%rax, %rdx
	jl	L(zero)
	addl	$VEC_SIZE, %eax
	addq	%rdi, %rax
	VZEROUPPER
	ret

	.p2align 4
L(last_vec_x3_check):
	bsrl	%eax, %eax
	subq	$VEC_SIZE, %rdx
	addq	%rax, %rdx
	jl	L(zero)
	addl	$(VEC_SIZE * 3), %eax
	addq	%rdi, %rax
	VZEROUPPER
	ret

	.p2align 4
L(zero):
	VZEROUPPER
L(null):
	xorl	%eax, %eax
	ret

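	/* A short buffer (N <= VEC_SIZE) starting on an aligned address:
	   one compare, then mask off match bits at offsets >= N.  As a C
	   sketch (hypothetical names, 'matches' being the vpmovmskb
	   result for the vector at P):

	     uint32_t m = matches & (uint32_t) (((uint64_t) 1 << n) - 1);
	     if (m == 0)
	       return NULL;
	     return (char *) p + 31 - __builtin_clz (m);

	   31 - clz is exactly what bsrl computes.  */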
	.p2align 4
L(last_vec_or_less_aligned):
	movl	%edx, %ecx

	vpcmpeqb (%rdi), %ymm0, %ymm1

	movl	$1, %edx
	/* Support rdx << 32.  A 64-bit shift is needed so that a length
	   of exactly VEC_SIZE (CL == 32) produces an all-ones mask; a
	   32-bit shift would use CL % 32 == 0.  */
	salq	%cl, %rdx
	subq	$1, %rdx

	vpmovmskb %ymm1, %eax

	/* Remove the trailing bytes.  */
	andl	%edx, %eax
	testl	%eax, %eax
	jz	L(zero)

	bsrl	%eax, %eax
	addq	%rdi, %rax
	VZEROUPPER
	ret

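	/* Unaligned short buffer: align RDI down to VEC_SIZE, keep the
	   misalignment in RCX and R8, and compute in ESI how far the
	   buffer extends past the first aligned vector.  If it spills
	   over, two vectors must be checked.  */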
	.p2align 4
L(last_vec_or_less):
	addl	$VEC_SIZE, %edx

	/* Check for zero length.  */
	testl	%edx, %edx
	jz	L(null)

	movl	%edi, %ecx
	andl	$(VEC_SIZE - 1), %ecx
	jz	L(last_vec_or_less_aligned)

	movl	%ecx, %esi
	movl	%ecx, %r8d
	addl	%edx, %esi
	andq	$-VEC_SIZE, %rdi

	subl	$VEC_SIZE, %esi
	ja	L(last_vec_2x_aligned)

	/* Check the last VEC.  */
	vpcmpeqb (%rdi), %ymm0, %ymm1
	vpmovmskb %ymm1, %eax

	/* Remove the leading and trailing bytes.  */
	sarl	%cl, %eax
	movl	%edx, %ecx

	movl	$1, %edx
	sall	%cl, %edx
	subl	$1, %edx

	andl	%edx, %eax
	testl	%eax, %eax
	jz	L(zero)

	bsrl	%eax, %eax
	addq	%rdi, %rax
	addq	%r8, %rax
	VZEROUPPER
	ret

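	/* The unaligned short buffer spans two aligned vectors.  Check
	   the higher vector first, masking off bytes past the end of
	   the buffer; then the lower one, shifting off bytes before the
	   start (R8 holds the original misalignment).  */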
	.p2align 4
L(last_vec_2x_aligned):
	movl	%esi, %ecx

	/* Check the last VEC.  */
	vpcmpeqb VEC_SIZE(%rdi), %ymm0, %ymm1

	movl	$1, %edx
	sall	%cl, %edx
	subl	$1, %edx

	vpmovmskb %ymm1, %eax

	/* Remove the trailing bytes.  */
	andl	%edx, %eax

	testl	%eax, %eax
	jnz	L(last_vec_x1)

	/* Check the second last VEC.  */
	vpcmpeqb (%rdi), %ymm0, %ymm1

	movl	%r8d, %ecx

	vpmovmskb %ymm1, %eax

	/* Remove the leading bytes.  Must use unsigned right shift for
	   bsrl below.  */
	shrl	%cl, %eax
	testl	%eax, %eax
	jz	L(zero)

	bsrl	%eax, %eax
	addq	%rdi, %rax
	addq	%r8, %rax
	VZEROUPPER
	ret
END (__memrchr_avx2)
#endif