/*
 * This file is part of the coreboot project.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Based on linux/arch/arm/lib/memset.S
 *
 * ASM optimised string functions
 */

#include <arch/asm.h>
#include "asmlib.h"

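/*
 * Calling convention (standard AAPCS memset(void *s, int c, size_t n)):
 *   r0 = destination pointer (returned unchanged)
 *   r1 = fill value, expected in the low 8 bits (it is not masked here)
 *   r2 = number of bytes to store
 * ip is used as the running write pointer; r3, r8/lr (and r4-r7 in the
 * cache-aligning variant) hold extra copies of the replicated pattern.
 */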
ENTRY(memset)
	ands	r3, r0, #3		@ 1 unaligned?
	mov	ip, r0			@ preserve r0 as return value
	bne	6f			@ 1
/*
 * we know that the pointer in ip is aligned to a word boundary.
 */
1:	orr	r1, r1, r1, lsl #8
	orr	r1, r1, r1, lsl #16
	mov	r3, r1
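	@ The fill byte is now replicated into all four byte lanes of r1 and
	@ copied to r3, so the code below can store it a whole word at a time.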
	cmp	r2, #16
	blt	4f

#if ! CALGN(1)+0

/*
 * We need 2 extra registers for this loop - use r8 and the LR
 */
	stmfd	sp!, {r8, lr}
	mov	r8, r1
	mov	lr, r1
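	@ r1, r3, r8 and lr all hold the pattern, so each stm below stores
	@ 16 bytes and one pass of the loop at 2: stores 64 bytes.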

2:	subs	r2, r2, #64
	stmgeia	ip!, {r1, r3, r8, lr}	@ 64 bytes at a time.
	stmgeia	ip!, {r1, r3, r8, lr}
	stmgeia	ip!, {r1, r3, r8, lr}
	stmgeia	ip!, {r1, r3, r8, lr}
	bgt	2b
	ldmeqfd	sp!, {r8, pc}		@ Now <64 bytes to go.
/*
 * No need to correct the count; we're only testing bits from now on
 */
	tst	r2, #32
	stmneia	ip!, {r1, r3, r8, lr}
	stmneia	ip!, {r1, r3, r8, lr}
	tst	r2, #16
	stmneia	ip!, {r1, r3, r8, lr}
	ldmfd	sp!, {r8, lr}

#else

/*
 * This version aligns the destination pointer in order to write
 * whole cache lines at once.
 */

	stmfd	sp!, {r4-r8, lr}
	mov	r4, r1
	mov	r5, r1
	mov	r6, r1
	mov	r7, r1
	mov	r8, r1
	mov	lr, r1
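	@ Eight registers (r1, r3-r8, lr) now hold the pattern, so each stm
	@ in the loop at 3: stores 32 bytes - the alignment unit used below.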

	cmp	r2, #96
	tstgt	ip, #31
	ble	3f

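	@ More than 96 bytes remain and ip is not yet 32-byte aligned:
	@ r8 = bytes up to the next 32-byte boundary.  Shifting it left by
	@ 28 puts bits 4, 3 and 2 into C, N and bit 30, so the conditional
	@ stores below write 16, 8 and 4 bytes as required (ip is already
	@ word aligned, so no smaller step is possible).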
	and	r8, ip, #31
	rsb	r8, r8, #32
	sub	r2, r2, r8
	movs	r8, r8, lsl #(32 - 4)
	stmcsia	ip!, {r4, r5, r6, r7}
	stmmiia	ip!, {r4, r5}
	tst	r8, #(1 << 30)
	mov	r8, r1
	strne	r1, [ip], #4

3:	subs	r2, r2, #64
	stmgeia	ip!, {r1, r3-r8, lr}
	stmgeia	ip!, {r1, r3-r8, lr}
	bgt	3b
	ldmeqfd	sp!, {r4-r8, pc}

	tst	r2, #32
	stmneia	ip!, {r1, r3-r8, lr}
	tst	r2, #16
	stmneia	ip!, {r4-r7}
	ldmfd	sp!, {r4-r8, lr}

#endif

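	@ Fewer than 16 bytes remain here (either the request was short or
	@ this is the tail of one of the loops above).  Store 8 and then 4
	@ bytes as the low bits of the count require, then handle the rest
	@ byte by byte.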
4:	tst	r2, #8
	stmneia	ip!, {r1, r3}
	tst	r2, #4
	strne	r1, [ip], #4
/*
 * When we get here, we've got less than 4 bytes to set.  We
 * may have an unaligned pointer as well.
 */
5:	tst	r2, #2
	strneb	r1, [ip], #1
	strneb	r1, [ip], #1
	tst	r2, #1
	strneb	r1, [ip], #1
	mov	pc, lr

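	@ Unaligned destination: r3 = r0 & 3, so 4 - r3 bytes get us to a
	@ word boundary.  If fewer than 4 bytes were requested, branch to
	@ the byte tail at 5: (only the low two bits of the count matter
	@ there).  Otherwise the cmp/strltb/strleb/strb sequence stores
	@ exactly 4 - r3 bytes, the count is fixed up and we rejoin the
	@ word-aligned path at 1:.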
6:	subs	r2, r2, #4		@ 1 do we have enough
	blt	5b			@ 1 bytes to align with?
	cmp	r3, #2			@ 1
	strltb	r1, [ip], #1		@ 1
	strleb	r1, [ip], #1		@ 1
	strb	r1, [ip], #1		@ 1
	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
	b	1b
ENDPROC(memset)