/* SPDX-License-Identifier: GPL-2.0-only */
/* Early initialization code for aarch64 (a.k.a. armv8) */

#include <arch/asm.h>
#include <soc/addressmap.h>

// based on arm64_init_cpu
/*
 * secondary_init — entry point for secondary (non-boot) CPUs.
 *
 * Modeled on arm64_init_cpu. Brings a core from reset state to a
 * C-callable environment: normalizes PSTATE, reconfigures SCTLR_EL3,
 * invalidates caches/TLB, carves out a per-core stack, then calls
 * secondary_cpu_init(coreid).
 *
 * Register roles:
 *   x22 - SCTLR_EL3 working copy; later the saved core id
 *   x23 - saved link register (x30)
 *   x0  - core id / scratch
 *   x1  - stack offset, then stack top
 *   x2  - scratch (stack size, sentinel)
 */
ENTRY(secondary_init)
	/* Normalize PSTATE: use SP_EL0, unmask all DAIF exceptions. */
	msr	SPSel, #0
	msr	DAIFClr, #0xf

	/* TODO: This is where we'd put non-boot CPUs into WFI if needed. */

	/* Keep the return address across the bl below (x23 is callee-saved
	 * by the subroutine). NOTE(review): x23 is never read again within
	 * this routine — presumably kept for debugging; confirm. */
	mov	x23, x30
	/* TODO: Assert that we always start running at EL3 */
	mrs	x22, sctlr_el3

	/* Enable the I-cache (SCTLR_EL3 bit 12) right away so the cache
	 * flush below runs at full speed. isb makes the change visible
	 * to subsequent instruction fetches. */
	orr	x22, x22, #(1 << 12)
	msr	sctlr_el3, x22
	isb

	/* The D-cache may hold stale lines; invalidate it all. */
	bl	dcache_invalidate_all

	/* Disable MMU (bit 0), alignment check (bit 1), D-cache (bit 2). */
	and	x22, x22, # ~(1 << 0) & ~(1 << 1) & ~(1 << 2)
	/* Enable SP alignment checking (bit 3) because why not. */
	orr	x22, x22, #(1 << 3)
	/* Run little-endian at EL3: clear EE (bit 25). */
	and	x22, x22, # ~(1 << 25)
	/* Clear WXN (bit 19) so writable regions stay executable. */
	and	x22, x22, # ~(1 << 19)
	msr	sctlr_el3, x22

	/* Flush I-cache and EL3 TLB for good measure, then synchronize. */
	ic	iallu
	tlbi	alle3
	dsb	sy
	isb

	/* Build a linear core id from MPIDR_EL1:
	 * id = (Aff1 << 4) | (Aff0 & 0xf)
	 * (assumes at most 16 cores per cluster — TODO confirm for this SoC) */
	mrs	x0, MPIDR_EL1
	and	x1, x0, # 0xf
	lsr	x0, x0, 4
	and	x0, x0, # 0xff0
	orr	x0, x0, x1

	/* Each core owns CONFIG_STACK_SIZE bytes starting at _stack_sec. */
	mov	x2, # CONFIG_STACK_SIZE
	mul	x1, x0, x2		// x1 = coreid * CONFIG_STACK_SIZE
	mov	x22, x0			// stash core id across the fill loop
	ldr	x0, =_stack_sec
	add	x0, x1, x0		// x0 = stack bottom for this core
	add	x1, x0, # CONFIG_STACK_SIZE	// x1 = stack top (exclusive)

	/* Paint the whole stack with a sentinel so overflow is detectable. */
	ldr	x2, =0xdeadbeefdeadbeef

.Lfill_stack:
	stp	x2, x2, [x0], #16
	cmp	x0, x1
	b.ne	.Lfill_stack

	/* Leave a line of beef dead for easier visibility in stack dumps. */
	sub	sp, x0, #16

	/* arg0 = core id */
	mov	x0, x22

	/* Enter C. NOTE(review): no hang loop follows — presumably
	 * secondary_cpu_init never returns; confirm. */
	bl	secondary_cpu_init

ENDPROC(secondary_init)