author     Jeff Johnston <jjohnstn@redhat.com>    2002-04-20 00:29:51 +0000
committer  Jeff Johnston <jjohnstn@redhat.com>    2002-04-20 00:29:51 +0000
commit     59be22909b8ed45624a24fe9952d69a1280bd858 (patch)
tree       ae61de2700d322ec1d33d8d19b3ad8c67973122a /newlib/libc/machine/xscale/memset.c
parent     2dd4c4dc18707380351de8dd7eeef630bf6a60ab (diff)
2002-04-19 Bill Siegmund
* libc/machine/xscale/memchr.c: Don't use multi-line strings.
* libc/machine/xscale/memcmp.c: Ditto.
* libc/machine/xscale/memcpy.c: Ditto.
* libc/machine/xscale/memmove.c: Ditto.
* libc/machine/xscale/memset.c: Ditto.
* libc/machine/xscale/strchr.c: Ditto.
* libc/machine/xscale/strcmp.c: Ditto.
* libc/machine/xscale/strcpy.c: Ditto.
* libc/machine/xscale/strlen.c: Ditto.
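
The fix is purely lexical, as the diff below shows: older GCC accepted a string literal spanning physical lines as an extension, but newer compilers reject it, so every line of the inline assembly now ends in `\n` (a real newline for the assembler) followed by a backslash that continues the C string literal. A minimal before/after sketch of the pattern, assuming an ARM target (the `inc' helper is illustrative, not code from this commit):

/* Before: a multi-line string literal.  Old GCC accepted this as an
   extension; newer compilers reject it outright.

       asm ("mov %0, %1
             add %0, %0, #1" : "=r" (r) : "r" (x));
*/

/* After: each assembler line ends in \n (a newline for gas) plus a
   backslash (C line continuation), forming one legal string literal.  */
static int
inc (int x)
{
  int r;
  asm ("mov %0, %1\n\
        add %0, %0, #1"
       : "=r" (r)
       : "r" (x));
  return r;
}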
Diffstat (limited to 'newlib/libc/machine/xscale/memset.c')
-rw-r--r--  newlib/libc/machine/xscale/memset.c | 122
1 file changed, 61 insertions(+), 61 deletions(-)
diff --git a/newlib/libc/machine/xscale/memset.c b/newlib/libc/machine/xscale/memset.c
index ad1fc74bc..4ff8d01e0 100644
--- a/newlib/libc/machine/xscale/memset.c
+++ b/newlib/libc/machine/xscale/memset.c
@@ -14,69 +14,69 @@ memset (void *dst, int c, size_t len)
   asm volatile ("tst %0, #0x3"
 #ifndef __OPTIMIZE_SIZE__
-"
-        beq     1f
-        b       2f
-0:
-        strb    %1, [%0], #1
-        tst     %0, #0x3
-        beq     1f
-2:
-        movs    r3, %2
-        sub     %2, %2, #1
-        bne     0b
-# At this point we know that %2 == len == -1 (since the SUB has already taken
-# place).  If we fall through to the 1: label (as the code used to do), the
-# CMP will detect this negative value and branch to the 2: label.  This will
-# test %2 again, but this time against 0.  The test will fail and the loop
-# at 2: will go on for (almost) ever.  Hence the explicit branch to the end
-# of the hand written assembly code.
-        b       4f
-1:
-        cmp     %2, #0x3
-        bls     2f
-        and     %1, %1, #0xff
-        orr     lr, %1, %1, asl #8
-        cmp     %2, #0xf
-        orr     lr, lr, lr, asl #16
-        bls     1f
-        mov     r3, lr
-        mov     r4, lr
-        mov     r5, lr
-0:
-        sub     %2, %2, #16
-        stmia   %0!, { r3, r4, r5, lr }
-        cmp     %2, #0xf
-        bhi     0b
-1:
-        cmp     %2, #0x7
-        bls     1f
-        mov     r3, lr
-0:
-        sub     %2, %2, #8
-        stmia   %0!, { r3, lr }
-        cmp     %2, #0x7
-        bhi     0b
-1:
-        cmp     %2, #0x3
-        bls     2f
-0:
-        sub     %2, %2, #4
-        str     lr, [%0], #4
-        cmp     %2, #0x3
-        bhi     0b
+"\n\
+        beq     1f\n\
+        b       2f\n\
+0:\n\
+        strb    %1, [%0], #1\n\
+        tst     %0, #0x3\n\
+        beq     1f\n\
+2:\n\
+        movs    r3, %2\n\
+        sub     %2, %2, #1\n\
+        bne     0b\n\
+# At this point we know that %2 == len == -1 (since the SUB has already taken\n\
+# place).  If we fall through to the 1: label (as the code used to do), the\n\
+# CMP will detect this negative value and branch to the 2: label.  This will\n\
+# test %2 again, but this time against 0.  The test will fail and the loop\n\
+# at 2: will go on for (almost) ever.  Hence the explicit branch to the end\n\
+# of the hand written assembly code.\n\
+        b       4f\n\
+1:\n\
+        cmp     %2, #0x3\n\
+        bls     2f\n\
+        and     %1, %1, #0xff\n\
+        orr     lr, %1, %1, asl #8\n\
+        cmp     %2, #0xf\n\
+        orr     lr, lr, lr, asl #16\n\
+        bls     1f\n\
+        mov     r3, lr\n\
+        mov     r4, lr\n\
+        mov     r5, lr\n\
+0:\n\
+        sub     %2, %2, #16\n\
+        stmia   %0!, { r3, r4, r5, lr }\n\
+        cmp     %2, #0xf\n\
+        bhi     0b\n\
+1:\n\
+        cmp     %2, #0x7\n\
+        bls     1f\n\
+        mov     r3, lr\n\
+0:\n\
+        sub     %2, %2, #8\n\
+        stmia   %0!, { r3, lr }\n\
+        cmp     %2, #0x7\n\
+        bhi     0b\n\
+1:\n\
+        cmp     %2, #0x3\n\
+        bls     2f\n\
+0:\n\
+        sub     %2, %2, #4\n\
+        str     lr, [%0], #4\n\
+        cmp     %2, #0x3\n\
+        bhi     0b\n\
 "
 #endif /* !__OPTIMIZE_SIZE__ */
-"
-2:
-        movs    r3, %2
-        sub     %2, %2, #1
-        beq     4f
-0:
-        movs    r3, %2
-        sub     %2, %2, #1
-        strb    %1, [%0], #1
-        bne     0b
+"\n\
+2:\n\
+        movs    r3, %2\n\
+        sub     %2, %2, #1\n\
+        beq     4f\n\
+0:\n\
+        movs    r3, %2\n\
+        sub     %2, %2, #1\n\
+        strb    %1, [%0], #1\n\
+        bne     0b\n\
 4:"
 : "=&r" (dummy), "=&r" (c), "=&r" (len)
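
For readers unfamiliar with the constraint string visible at the end of the hunk: `=&r' marks each output as an early-clobber register, telling GCC the asm writes it before it has finished with the inputs, so no input may share its register. A minimal sketch of the same idiom, again assuming an ARM target (`fill_bytes' and its operand names are illustrative, not newlib code):

#include <stddef.h>

/* Byte-fill loop in the style of the patched memset: all three operands
   are consumed destructively, so each is an early-clobber output ("=&r")
   tied to its initial value by a matching input constraint ("0"-"2").  */
static void
fill_bytes (void *dst, int c, size_t len)
{
  asm volatile ("cmp     %2, #0\n\
        beq     1f\n\
0:\n\
        strb    %1, [%0], #1\n\
        subs    %2, %2, #1\n\
        bne     0b\n\
1:"
        : "=&r" (dst), "=&r" (c), "=&r" (len)
        : "0" (dst), "1" (c), "2" (len)
        : "cc", "memory");
}

The `cc' and `memory' clobbers matter here as well: the loop alters the condition flags and stores through a pointer GCC cannot see, so both must be declared or the compiler may reorder or cache memory accesses across the asm.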