blob: 7aeabe6f2aaffe28e6a9cf1f56fa49264b5f1b90 [file] [log] [blame]
/*
 * This file is part of the coreboot project.
 *
 * Copyright 2014 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
15
/*
 * ======================== stage_entry.S =====================================
 * This file acts as an entry point to the different stages of arm64. They
 * share the same process of setting up stacks and jumping to C code. It is
 * important to save x25 from corruption as it contains the argument for
 * rmodules.
 * =============================================================================
 */
Aaron Durbin0df877a2014-07-10 12:40:30 -050023
24#include <arch/asm.h>
Furquan Shaikh136657c2014-09-09 09:43:08 -070025#define __ASSEMBLY__
26#include <arch/lib_helpers.h>
Aaron Durbin1c651292014-08-27 12:50:26 -050027
Aaron Durbina5c7f662014-08-27 14:45:59 -050028#define STACK_SZ CONFIG_STACK_SIZE
29#define EXCEPTION_STACK_SZ CONFIG_STACK_SIZE
30
/*
 * Stack for the armv8 CPU grows down from _estack. Additionally, provide an
 * exception stack for the CPU.
 */
.section .bss, "aw", @nobits

/*
 * Main stack: occupies [_stack, _estack) and grows down from _estack.
 * Alignment to STACK_SZ keeps the whole region naturally aligned, which
 * also guarantees the 16-byte SP alignment AArch64 requires.
 */
.global _stack
.global _estack
.balign STACK_SZ
_stack:
.space STACK_SZ
_estack:

/*
 * Separate exception stack: [_stack_exceptions, _estack_exceptions),
 * used via SP_ELx while normal C code runs on SP_EL0 (see
 * arm64_c_environment below).
 */
.global _stack_exceptions
.global _estack_exceptions
.balign EXCEPTION_STACK_SZ
_stack_exceptions:
.space EXCEPTION_STACK_SZ
_estack_exceptions:
50
/*
 * cpu_get_stack: return the top of the main stack.
 * Out:     x0 = _estack (stack grows down from here)
 * Clobbers: x0 only.
 * The address is fetched from an inline 8-byte literal (the "1:" pool
 * entry below), keeping the load PC-relative.
 */
ENTRY(cpu_get_stack)
	ldr	x0, 1f			/* x0 = *1f = _estack */
	ret
.align 3				/* literal must be 8-byte aligned for ldr */
1:
	.quad _estack
ENDPROC(cpu_get_stack)
58
/*
 * cpu_get_exception_stack: return the top of the exception stack.
 * Out:     x0 = _estack_exceptions
 * Clobbers: x0 only.
 * Same PC-relative literal-pool pattern as cpu_get_stack.
 */
ENTRY(cpu_get_exception_stack)
	ldr	x0, 1f			/* x0 = *1f = _estack_exceptions */
	ret
.align 3				/* literal must be 8-byte aligned for ldr */
1:
	.quad _estack_exceptions
ENDPROC(cpu_get_exception_stack)
66
Aaron Durbin1c651292014-08-27 12:50:26 -050067/*
68 * Boot strap the processor into a C environemnt. That consists of providing
69 * 16-byte aligned stack. The programming enviroment uses SP_EL0 as its main
70 * stack while keeping SP_ELx reserved for exception entry.
71 */
Furquan Shaikhab020f32014-09-10 12:19:38 -070072/*
73 * IMPORTANT: Ensure x25 is not corrupted because it saves the argument to
Aaron Durbin8c8e2b72015-10-14 10:08:10 -050074 * any rmodules.
Furquan Shaikhab020f32014-09-10 12:19:38 -070075 */
Aaron Durbin1c651292014-08-27 12:50:26 -050076ENTRY(arm64_c_environment)
Elyes HAOUAS2078e752016-08-21 10:41:44 +020077 /* Set the exception stack for the CPU. */
Aaron Durbina5c7f662014-08-27 14:45:59 -050078 bl cpu_get_exception_stack
Furquan Shaikh1af7b5d2014-08-21 12:52:06 -070079 msr SPSel, #1
80 isb
Furquan Shaikh1af7b5d2014-08-21 12:52:06 -070081 mov sp, x0
82
Aaron Durbin0df877a2014-07-10 12:40:30 -050083 /* Have stack pointer use SP_EL0. */
84 msr SPSel, #0
85 isb
86
Elyes HAOUAS2078e752016-08-21 10:41:44 +020087 /* Set the non-exception stack for the CPU. */
Aaron Durbina5c7f662014-08-27 14:45:59 -050088 bl cpu_get_stack
Aaron Durbin0df877a2014-07-10 12:40:30 -050089 mov sp, x0
Aaron Durbin0df877a2014-07-10 12:40:30 -050090
Aaron Durbin3a0013d2014-08-27 15:52:01 -050091 /* Get entry point by dereferencing c_entry. */
Furquan Shaikhab020f32014-09-10 12:19:38 -070092 ldr x1, 1f
Furquan Shaikhab020f32014-09-10 12:19:38 -070093 /* Move back the arguments from x25 to x0 */
94 mov x0, x25
95 br x1
Aaron Durbin3a0013d2014-08-27 15:52:01 -050096.align 3
97 1:
98 .quad c_entry
Aaron Durbin1c651292014-08-27 12:50:26 -050099ENDPROC(arm64_c_environment)
100
Aaron Durbindde76292015-09-05 12:59:26 -0500101ENTRY(_start)
Aaron Durbin8c8e2b72015-10-14 10:08:10 -0500102 /* Save any arguments to current rmodule in x25 */
Aaron Durbindee19962014-09-18 13:48:49 -0500103 mov x25, x0
104 b arm64_c_environment
Aaron Durbindde76292015-09-05 12:59:26 -0500105ENDPROC(_start)
Furquan Shaikhab020f32014-09-10 12:19:38 -0700106
Furquan Shaikh94824982014-11-21 15:42:40 -0800107/*
108 * Setup SCTLR so that:
109 * Little endian mode is setup, XN is not enforced, MMU and caches are disabled.
110 * Alignment and stack alignment checks are disabled.
111 */
112.macro setup_sctlr
Aaron Durbindee19962014-09-18 13:48:49 -0500113 read_current x0, sctlr
Aaron Durbin1c651292014-08-27 12:50:26 -0500114 bic x0, x0, #(1 << 25) /* Little Endian */
115 bic x0, x0, #(1 << 19) /* XN not enforced */
116 bic x0, x0, #(1 << 12) /* Disable Instruction Cache */
Furquan Shaikh94824982014-11-21 15:42:40 -0800117 bic x0, x0, #0xf /* Clear SA, C, A and M */
Furquan Shaikh136657c2014-09-09 09:43:08 -0700118 write_current sctlr, x0, x1
Furquan Shaikh94824982014-11-21 15:42:40 -0800119.endm
120
Aaron Durbindee19962014-09-18 13:48:49 -0500121CPU_RESET_ENTRY(arm64_cpu_startup)
Furquan Shaikh36d35862015-03-27 22:55:59 -0700122 bl arm64_cpu_early_setup
Furquan Shaikh94824982014-11-21 15:42:40 -0800123 setup_sctlr
124 b arm64_c_environment
Aaron Durbin1c651292014-08-27 12:50:26 -0500125ENDPROC(arm64_cpu_startup)
Aaron Durbin0df877a2014-07-10 12:40:30 -0500126
Furquan Shaikh6e1dc0c2015-07-10 15:25:26 -0700127/*
128 * stage_entry is defined as a weak symbol to allow SoCs/CPUs to define a custom
129 * entry point to perform any fixups that need to be done immediately after
130 * power on reset. In case SoC/CPU does not need any custom-defined entrypoint,
131 * this weak symbol can be used to jump directly to arm64_cpu_startup.
132 */
133ENTRY_WEAK(stage_entry)
Furquan Shaikh94824982014-11-21 15:42:40 -0800134 b arm64_cpu_startup
Aaron Durbin0df877a2014-07-10 12:40:30 -0500135ENDPROC(stage_entry)