Fix whitespace issues.

commit d6ac9329b3
parent 057edf90e0
Author: Ulrich Drepper
Date:   2010-01-18 12:43:47 -08:00
2 changed files with 24 additions and 24 deletions

First changed file: the 32-bit PowerPC memcpy (word compares with cmplwi, data moved through floating-point registers with lfd/stfd).

@@ -43,16 +43,16 @@
        .align 7
EALIGN (BP_SYM (memcpy), 5, 0)
        CALL_MCOUNT
        dcbt    0,r4            /* Prefetch ONE SRC cacheline */
        cmplwi  cr1,r5,16       /* is size < 16 ? */
        mr      r6,r3
        blt+    cr1,.Lshortcopy
.Lbigcopy:
        neg     r8,r3           /* LS 3 bits = # bytes to 8-byte dest bdry */
        clrlwi  r8,r8,32-4      /* align to 16-byte boundary */
        sub     r7,r4,r3
        cmplwi  cr0,r8,0
        beq+    .Ldst_aligned
@@ -112,8 +112,8 @@ EALIGN (BP_SYM (memcpy), 5, 0)
.LprefetchSRC:
        dcbt    r12,r4
        addi    r12,r12,128
        bdnz    .LprefetchSRC
.Lnocacheprefetch:
        mtctr   r7
@@ -122,7 +122,7 @@ EALIGN (BP_SYM (memcpy), 5, 0)
        beq     cr6,.Lcachelinealigned
.Laligntocacheline:
        lfd     fp9,0x08(r4)
        lfdu    fp10,0x10(r4)
        stfd    fp9,0x08(r6)
        stfdu   fp10,0x10(r6)
@@ -131,10 +131,10 @@ EALIGN (BP_SYM (memcpy), 5, 0)
.Lcachelinealigned:             /* copy whole cache lines */
        blt-    cr1,.Llessthancacheline /* size < 128 */
.Louterloop:
        cmpwi   r11,0
        mtctr   r11
        beq-    .Lendloop
@@ -142,7 +142,7 @@ EALIGN (BP_SYM (memcpy), 5, 0)
        .align 4
        /* Copy whole cachelines, optimized by prefetching SRC cacheline */
.Lloop:                         /* Copy aligned body */
        dcbt    r12,r4          /* PREFETCH SOURCE some cache lines ahead */
        lfd     fp9, 0x08(r4)
        dcbz    r11,r6
@@ -186,7 +186,7 @@ EALIGN (BP_SYM (memcpy), 5, 0)
        beq-    .Lendloop2
        mtctr   r10
.Lloop2:                        /* Copy aligned body */
        lfd     fp9, 0x08(r4)
        lfd     fp10, 0x10(r4)
        lfd     fp11, 0x18(r4)
@@ -206,7 +206,7 @@ EALIGN (BP_SYM (memcpy), 5, 0)
        mtctr   r7
.Lcopy_remaining:
        lfd     fp9,0x08(r4)
        lfdu    fp10,0x10(r4)
        stfd    fp9,0x08(r6)
        stfdu   fp10,0x10(r6)
@@ -214,7 +214,7 @@ EALIGN (BP_SYM (memcpy), 5, 0)
.Ldo_lt16:                      /* less than 16 ? */
        cmplwi  cr0,r5,0        /* copy remaining bytes (0-15) */
        beqlr+                  /* no rest to copy */
        addi    r4,r4,8
        addi    r6,r6,8

Second changed file: the 64-bit PowerPC memcpy (doubleword compares with cmpldi, data moved through general-purpose registers with ld/std).

@@ -43,16 +43,16 @@
        .align 7
EALIGN (BP_SYM (memcpy), 5, 0)
        CALL_MCOUNT 3
        dcbt    0,r4            /* Prefetch ONE SRC cacheline */
        cmpldi  cr1,r5,16       /* is size < 16 ? */
        mr      r6,r3
        blt+    cr1,.Lshortcopy
.Lbigcopy:
        neg     r8,r3           /* LS 3 bits = # bytes to 8-byte dest bdry */
        clrldi  r8,r8,64-4      /* align to 16-byte boundary */
        sub     r7,r4,r3
        cmpldi  cr0,r8,0
        beq+    .Ldst_aligned
@@ -112,8 +112,8 @@ EALIGN (BP_SYM (memcpy), 5, 0)
.LprefetchSRC:
        dcbt    r12,r4
        addi    r12,r12,128
        bdnz    .LprefetchSRC
.Lnocacheprefetch:
        mtctr   r7
@@ -122,7 +122,7 @@ EALIGN (BP_SYM (memcpy), 5, 0)
        beq     cr6,.Lcachelinealigned
.Laligntocacheline:
        ld      r9,0x08(r4)
        ldu     r7,0x10(r4)
        std     r9,0x08(r6)
        stdu    r7,0x10(r6)
@@ -131,10 +131,10 @@ EALIGN (BP_SYM (memcpy), 5, 0)
.Lcachelinealigned:             /* copy whole cache lines */
        blt-    cr1,.Llessthancacheline /* size < 128 */
.Louterloop:
        cmpdi   r11,0
        mtctr   r11
        beq-    .Lendloop
@@ -142,7 +142,7 @@ EALIGN (BP_SYM (memcpy), 5, 0)
        .align 4
        /* Copy whole cachelines, optimized by prefetching SRC cacheline */
.Lloop:                         /* Copy aligned body */
        dcbt    r12,r4          /* PREFETCH SOURCE some cache lines ahead */
        ld      r9, 0x08(r4)
        dcbz    r11,r6
@@ -186,7 +186,7 @@ EALIGN (BP_SYM (memcpy), 5, 0)
        beq-    .Lendloop2
        mtctr   r10
.Lloop2:                        /* Copy aligned body */
        ld      r9, 0x08(r4)
        ld      r7, 0x10(r4)
        ld      r8, 0x18(r4)
@@ -206,7 +206,7 @@ EALIGN (BP_SYM (memcpy), 5, 0)
        mtctr   r7
.Lcopy_remaining:
        ld      r8,0x08(r4)
        ldu     r7,0x10(r4)
        std     r8,0x08(r6)
        stdu    r7,0x10(r6)
@@ -214,7 +214,7 @@ EALIGN (BP_SYM (memcpy), 5, 0)
.Ldo_lt16:                      /* less than 16 ? */
        cmpldi  cr0,r5,0        /* copy remaining bytes (0-15) */
        beqlr+                  /* no rest to copy */
        addi    r4,r4,8
        addi    r6,r6,8