glibc/sysdeps/powerpc/powerpc64/memset.S

/* Optimized memset implementation for PowerPC64.
   Copyright (C) 1997, 1999, 2000, 2002, 2003 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <sysdep.h>
#include <bp-sym.h>
#include <bp-asm.h>
/* Define a global static that can hold the cache line size.  The
   assumption is that startup code will access the "aux vector" to
   obtain the value set by the kernel and store it into this
   variable.  */
.globl __cache_line_size
.lcomm __cache_line_size,4,4
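/* A rough C sketch (hypothetical variable names; the real logic lives
   in the libc/rtld startup paths) of how startup code can fill this in
   from the Linux aux vector's AT_DCACHEBSIZE entry:

     extern int __cache_line_size;
     ElfW(auxv_t) *av;
     for (av = auxvec; av->a_type != AT_NULL; av++)
       if (av->a_type == AT_DCACHEBSIZE)
         __cache_line_size = av->a_un.a_val;
*/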
.section ".toc","aw"
.LC0:
.tc __cache_line_size[TC],__cache_line_size
.section ".text"
.align 2
/* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]);
   Returns 's'.

   The memset is done in three sizes: byte (8 bits), doubleword
   (64 bits), and 32-byte cache-line chunks.  There is a special case
   for setting whole cache lines to 0, to take advantage of the dcbz
   instruction.  */
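/* A rough C sketch of the overall strategy below (an illustration
   under simplifying assumptions, not the generated code; the dcbz of
   a cache line is modeled as an ordinary 32-byte store sequence):

     void *memset_sketch (void *s, int c, size_t n)
     {
       unsigned char *p = s;
       unsigned long long v = (unsigned char) c;
       v |= v << 8;  v |= v << 16;  v |= v << 32;
       while (n > 0 && ((unsigned long) p & 31) != 0)
         { *p++ = (unsigned char) c; n--; }
       while (n >= 32)
         {
           unsigned long long *d = (unsigned long long *) p;
           d[0] = d[1] = d[2] = d[3] = v;
           p += 32; n -= 32;
         }
       while (n > 0)
         { *p++ = (unsigned char) c; n--; }
       return s;
     }
*/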
EALIGN (BP_SYM (memset), 5, 0)
#define rTMP r0
#define rRTN r3 /* Initial value of 1st argument. */
#if __BOUNDED_POINTERS__
# define rMEMP0 r4 /* Original value of 1st arg. */
# define rCHR r5 /* Char to set in each byte. */
# define rLEN r6 /* Length of region to set. */
# define rMEMP r10 /* Address at which we are storing. */
#else
# define rMEMP0 r3 /* Original value of 1st arg. */
# define rCHR r4 /* Char to set in each byte. */
# define rLEN r5 /* Length of region to set. */
# define rMEMP r6 /* Address at which we are storing. */
#endif
#define rALIGN r7 /* Number of bytes we are setting now (when aligning). */
#define rMEMP2 r8
#define rNEG64 r8 /* Constant -64 for clearing with dcbz. */
#define rCLS r8 /* Cache line size obtained from static. */
#define rCLM r9 /* Cache line size mask to check for cache alignment. */
___memset:
#if __BOUNDED_POINTERS__
cmpldi cr1, rRTN, 0
CHECK_BOUNDS_BOTH_WIDE (rMEMP0, rTMP, rTMP2, rLEN)
beq cr1, L(b0)
STORE_RETURN_VALUE (rMEMP0)
STORE_RETURN_BOUNDS (rTMP, rTMP2)
L(b0):
#endif
/* Take care of the case for size <= 8. */
cmpldi cr1, rLEN, 8
andi. rALIGN, rMEMP0, 7
mr rMEMP, rMEMP0
ble- cr1, L(small)
/* Align to doubleword boundary. */
cmpldi cr5, rLEN, 31
rlwimi rCHR, rCHR, 8, 16, 23 /* Replicate byte to halfword. */
beq+ L(aligned2)
mtcrf 0x01, rMEMP0
subfic rALIGN, rALIGN, 8
cror 28,30,31 /* Detect odd word aligned. */
add rMEMP, rMEMP, rALIGN
sub rLEN, rLEN, rALIGN
rlwimi rCHR, rCHR, 16, 0, 15 /* Replicate halfword to word. */
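/* Together with the insrdi below, the two rlwimi steps compute, in C:
     v = c & 0xff;  v |= v << 8;  v |= v << 16;  v |= v << 32;
   so every byte of rCHR holds the fill character.  */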
bt 29, L(g4)
/* Process the even word of the doubleword. */
bf+ 31, L(g2)
stb rCHR, 0(rMEMP0)
bt 30, L(g4x)
L(g2):
sth rCHR, -6(rMEMP)
L(g4x):
stw rCHR, -4(rMEMP)
b L(aligned)
/* Process the odd word of the doubleword. */
L(g4):
bf 28, L(g4x) /* If false, word aligned on odd word. */
bf+ 31, L(g0)
stb rCHR, 0(rMEMP0)
bt 30, L(aligned)
L(g0):
sth rCHR, -2(rMEMP)
/* Handle the case of size <= 31. */
L(aligned2):
rlwimi rCHR, rCHR, 16, 0, 15 /* Replicate halfword to word. */
L(aligned):
mtcrf 0x01, rLEN
ble cr5, L(medium)
/* Align to 32-byte boundary. */
andi. rALIGN, rMEMP, 0x18
subfic rALIGN, rALIGN, 0x20
insrdi rCHR,rCHR,32,0 /* Replicate word to double word. */
beq L(caligned)
mtcrf 0x01, rALIGN
add rMEMP, rMEMP, rALIGN
sub rLEN, rLEN, rALIGN
cmplwi cr1, rALIGN, 0x10
mr rMEMP2, rMEMP
bf 28, L(a1)
stdu rCHR, -8(rMEMP2)
L(a1): blt cr1, L(a2)
std rCHR, -8(rMEMP2)
stdu rCHR, -16(rMEMP2)
L(a2):
/* Now aligned to a 32-byte boundary. */
L(caligned):
cmpldi cr1, rCHR, 0
clrrdi. rALIGN, rLEN, 5
mtcrf 0x01, rLEN
beq cr1, L(zloopstart) /* Special case for clearing memory using dcbz. */
L(nondcbz):
srdi rTMP, rALIGN, 5
mtctr rTMP
beq L(medium) /* We may not actually get to do a full line. */
clrldi. rLEN, rLEN, 59
add rMEMP, rMEMP, rALIGN
li rNEG64, -0x40
bdz L(cloopdone)
L(c3): dcbtst rNEG64, rMEMP
std rCHR, -8(rMEMP)
std rCHR, -16(rMEMP)
std rCHR, -24(rMEMP)
stdu rCHR, -32(rMEMP)
bdnz L(c3)
L(cloopdone):
std rCHR, -8(rMEMP)
std rCHR, -16(rMEMP)
cmpldi cr1, rLEN, 16
std rCHR, -24(rMEMP)
stdu rCHR, -32(rMEMP)
beqlr
add rMEMP, rMEMP, rALIGN
b L(medium_tail2)
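/* C sketch of the store loop above; the asm walks downward from the
   top of the region, but the effect per 32-byte chunk is:

     for (size_t i = align / 32; i > 0; i--)
       {
         uint64_t *d = (uint64_t *) p;
         d[0] = d[1] = d[2] = d[3] = v;
         p += 32;
       }
*/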
.align 5
/* Clear lines of memory in cache-line-sized chunks using dcbz. */
L(zloopstart):
/* If the remaining length is less than 32 bytes, don't bother getting
   the cache line size.  */
beq L(medium)
ld rCLS,.LC0@toc(r2)
lwz rCLS,0(rCLS)
/* If the cache line size was not set, just branch to L(nondcbz),
   which is safe for any cache line size.  */
cmpldi cr1,rCLS,0
beq cr1,L(nondcbz)
/* Now we know the cache line size (which need not be 32 bytes), but
   we may not yet be aligned to it.  There may be a partial line to
   fill, so touch it first.  */
dcbt 0,rMEMP
addi rCLM,rCLS,-1
L(getCacheAligned):
cmpldi cr1,rLEN,32
and. rTMP,rCLM,rMEMP
blt cr1,L(handletail32)
beq L(cacheAligned)
addi rMEMP,rMEMP,32
addi rLEN,rLEN,-32
std rCHR,-32(rMEMP)
std rCHR,-24(rMEMP)
std rCHR,-16(rMEMP)
std rCHR,-8(rMEMP)
b L(getCacheAligned)
/* Now we are aligned to the cache line and can use dcbz. */
L(cacheAligned):
cmpld cr1,rLEN,rCLS
blt cr1,L(handletail32)
dcbz 0,rMEMP
subf rLEN,rCLS,rLEN
add rMEMP,rMEMP,rCLS
b L(cacheAligned)
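/* C sketch of the two loops above (cls = __cache_line_size; the dcbz
   of one line is modeled here as a plain 0-fill, since this path is
   only taken when the fill character is 0):

     while (n >= 32 && ((uintptr_t) p & (cls - 1)) != 0)
       {
         uint64_t *d = (uint64_t *) p;
         d[0] = d[1] = d[2] = d[3] = 0;
         p += 32;  n -= 32;
       }
     while (n >= (size_t) cls)
       {
         memset (p, 0, cls);
         p += cls;  n -= cls;
       }
*/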
/* We get here when the cache line size was set and the remainder
   (rLEN) is less than the actual cache line size.  Set up the
   preconditions for L(nondcbz) and go there.  */
L(handletail32):
clrrwi. rALIGN, rLEN, 5
b L(nondcbz)
.align 5
L(small):
/* Memset of 8 bytes or less. */
cmpldi cr6, rLEN, 4
cmpldi cr5, rLEN, 1
ble cr6,L(le4)
subi rLEN, rLEN, 4
stb rCHR,0(rMEMP)
stb rCHR,1(rMEMP)
stb rCHR,2(rMEMP)
stb rCHR,3(rMEMP)
addi rMEMP,rMEMP, 4
cmpldi cr5, rLEN, 1
L(le4):
cmpldi cr1, rLEN, 3
bltlr cr5
stb rCHR, 0(rMEMP)
beqlr cr5
stb rCHR, 1(rMEMP)
bltlr cr1
stb rCHR, 2(rMEMP)
beqlr cr1
stb rCHR, 3(rMEMP)
blr
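/* C sketch of this 0..8-byte path:
     if (n > 4) { p[0] = p[1] = p[2] = p[3] = c; p += 4; n -= 4; }
     if (n >= 1) p[0] = c;
     if (n >= 2) p[1] = c;
     if (n >= 3) p[2] = c;
     if (n == 4) p[3] = c;
*/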
/* Memset of 0-31 bytes. */
.align 5
L(medium):
insrdi rCHR,rCHR,32,0 /* Replicate word to double word. */
cmpldi cr1, rLEN, 16
L(medium_tail2):
add rMEMP, rMEMP, rLEN
L(medium_tail):
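/* CR7 holds the low bits of rLEN (bit 31 = len&1, bit 30 = len&2,
   bit 29 = len&4, bit 28 = len&8) and cr1 the len-vs-16 compare;
   the stores below walk backward from the end of the region.  */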
bt- 31, L(medium_31t)
bt- 30, L(medium_30t)
L(medium_30f):
bt- 29, L(medium_29t)
L(medium_29f):
bge- cr1, L(medium_27t)
bflr- 28
std rCHR, -8(rMEMP)
blr
L(medium_31t):
stbu rCHR, -1(rMEMP)
bf- 30, L(medium_30f)
L(medium_30t):
sthu rCHR, -2(rMEMP)
bf- 29, L(medium_29f)
L(medium_29t):
stwu rCHR, -4(rMEMP)
blt- cr1, L(medium_27f)
L(medium_27t):
std rCHR, -8(rMEMP)
stdu rCHR, -16(rMEMP)
L(medium_27f):
bflr- 28
L(medium_28t):
std rCHR, -8(rMEMP)
blr
END_GEN_TB (BP_SYM (memset),TB_TOCLESS)
/* Copied from bzero.S to prevent the linker from inserting a stub
between bzero and memset. */
ENTRY (BP_SYM (__bzero))
#if __BOUNDED_POINTERS__
mr r6,r4
li r5,0
mr r4,r3
/* Tell memset that we don't want a return value. */
li r3,0
b ___memset
#else
mr r5,r4
li r4,0
b ___memset
#endif
END_GEN_TB (BP_SYM (__bzero),TB_TOCLESS)
weak_alias (BP_SYM (__bzero), BP_SYM (bzero))
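/* In C terms the wrapper above is simply:
     void __bzero (void *s, size_t n) { memset (s, 0, n); }
   implemented as a register shuffle and a direct branch so that no
   linker stub can land between bzero and memset.  */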