/*
 * This file is part of the coreboot project.
 *
 * Copyright 2014 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <arch/cache.h>
#include <arch/lib_helpers.h>
#include <arch/transition.h>
#include <assert.h>
#include <compiler.h>
#include <console/console.h>

/* Little-endian, no forced XN, instruction cache disabled,
 * stack alignment check disabled, data and unified caches
 * disabled, alignment check disabled, MMU disabled
 */
#define SCTLR_MASK (SCTLR_MMU_DISABLE | SCTLR_ACE_DISABLE | \
		    SCTLR_CACHE_DISABLE | SCTLR_SAE_DISABLE | SCTLR_RES1 | \
		    SCTLR_ICE_DISABLE | SCTLR_WXN_DISABLE | SCTLR_LITTLE_END)

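/*
 * Illustrative effect of the mask (a sketch mirroring the code in
 * transition() below, not additional logic): whatever state SCTLR_ELx was
 * in, the target EL is left little-endian with MMU, caches, WXN and
 * alignment checks disabled, and only the reserved-1 bits still set:
 *
 *	sctlr = raw_read_sctlr(elx_el);
 *	sctlr &= SCTLR_MASK;
 *	raw_write_sctlr(sctlr, elx_el);
 */
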
void __weak exc_dispatch(struct exc_state *exc_state, uint64_t id)
{
	/* Default weak implementation does nothing. */
}

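/*
 * Example of a strong override (an illustrative sketch, not part of this
 * file): an exception handler elsewhere in the build can replace the weak
 * stub above and inspect the state captured by exc_entry() below. The
 * message format and the decision to die() are placeholders.
 *
 *	void exc_dispatch(struct exc_state *exc_state, uint64_t id)
 *	{
 *		printk(BIOS_ERR, "exception %u at %#llx\n",
 *		       (unsigned int)id,
 *		       (unsigned long long)exc_state->elx.elr);
 *		die("Unhandled exception\n");
 *	}
 */
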
void exc_entry(struct exc_state *exc_state, uint64_t id)
{
	struct elx_state *elx = &exc_state->elx;
	struct regs *regs = &exc_state->regs;
	uint8_t elx_mode, elx_el;

	elx->spsr = raw_read_spsr_current();
	elx_mode = get_mode_from_spsr(elx->spsr);
	elx_el = get_el_from_spsr(elx->spsr);

	/* Recover the interrupted context's SP based on the stack it used. */
	if (elx_mode == SPSR_USE_H) {
		/*
		 * If the exception was taken from the current EL on SP_ELx,
		 * the interrupted SP is just above the state saved on this
		 * stack; otherwise SP_ELx of the originating EL still holds it.
		 */
		if (elx_el == get_current_el())
			regs->sp = (uint64_t)&exc_state[1];
		else
			regs->sp = raw_read_sp_elx(elx_el);
	} else {
		regs->sp = raw_read_sp_el0();
	}

	elx->elr = raw_read_elr_current();

	exc_dispatch(exc_state, id);
}

void transition_with_entry(void *entry, void *arg, struct exc_state *exc_state)
{
	/* Argument to entry point goes into X0 */
	exc_state->regs.x[X0_INDEX] = (uint64_t)arg;
	/* Entry point goes into ELR */
	exc_state->elx.elr = (uint64_t)entry;

	transition(exc_state);
}

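/*
 * Typical use (an illustrative sketch, not the exact caller, which lives in
 * the arm64 boot path): running at EL3, drop into an AArch64 EL2 image and
 * hand it one argument in x0. get_eret_el() and SPSR_USE_L are assumed to
 * come from arch/transition.h, memset() needs <string.h>, and
 * entry_point/entry_arg are placeholders.
 *
 *	struct exc_state exc_state;
 *
 *	memset(&exc_state, 0, sizeof(exc_state));
 *	exc_state.elx.spsr = get_eret_el(EL2, SPSR_USE_L);
 *	transition_with_entry(entry_point, entry_arg, &exc_state);
 */
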
void transition(struct exc_state *exc_state)
{
	uint64_t sctlr;
	uint32_t current_el = get_current_el();

	struct elx_state *elx = &exc_state->elx;
	struct regs *regs = &exc_state->regs;

	uint8_t elx_el = get_el_from_spsr(elx->spsr);

	/*
	 * Policies enforced:
	 * 1. We support only ELx --> EL(x-1) transitions
	 * 2. We support transitions to AArch64 mode only
	 *
	 * If either of these conditions does not hold, SCR/HCR would need to
	 * be set up properly before the checks below could be removed.
	 */
	if ((current_el - elx_el) != 1)
		die("ARM64 Error: unsupported transition\n");

	if (elx->spsr & SPSR_ERET_32)
		die("ARM64 Error: eret to AArch32 is not supported\n");

	/* Most parts of coreboot currently don't support EL2 anyway. */
	assert(current_el == EL3);

	/* Initialize SCR with defaults for running without secure monitor. */
	raw_write_scr_el3(SCR_TWE_DISABLE |	/* don't trap WFE */
			  SCR_TWI_DISABLE |	/* don't trap WFI */
			  SCR_ST_ENABLE |	/* allow secure timer access */
			  SCR_LOWER_AARCH64 |	/* lower level is AArch64 */
			  SCR_SIF_DISABLE |	/* disable secure instr. fetch */
			  SCR_HVC_ENABLE |	/* allow HVC instruction */
			  SCR_SMD_ENABLE |	/* disable SMC instruction */
			  SCR_RES1 |		/* reserved-1 bits */
			  SCR_EA_DISABLE |	/* disable ext. abort trap */
			  SCR_FIQ_DISABLE |	/* disable FIQ trap to EL3 */
			  SCR_IRQ_DISABLE |	/* disable IRQ trap to EL3 */
			  SCR_NS_ENABLE);	/* lower level is non-secure */

	/* Initialize CPTR to not trap anything to EL3. */
	raw_write_cptr_el3(CPTR_EL3_TCPAC_DISABLE | CPTR_EL3_TTA_DISABLE |
			   CPTR_EL3_TFP_DISABLE);

	/* ELR/SPSR: Write entry point and processor state of program */
	raw_write_elr_current(elx->elr);
	raw_write_spsr_current(elx->spsr);

	/* SCTLR: Initialize EL with selected properties */
	sctlr = raw_read_sctlr(elx_el);
	sctlr &= SCTLR_MASK;
	raw_write_sctlr(sctlr, elx_el);

	/* SP_ELx: Initialize stack pointer */
	raw_write_sp_elx(elx->sp_elx, elx_el);
	isb();

	/* Eret to the entry point */
	trans_switch(regs);
}
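
/*
 * transition() can also be called directly when the caller wants to hand
 * the lower EL a pre-built register file and stack (an illustrative sketch;
 * get_eret_el(), SPSR_USE_H and EL2 are assumed from the arch headers, and
 * entry_point/entry_arg/stack_top are placeholders):
 *
 *	struct exc_state exc_state;
 *
 *	memset(&exc_state, 0, sizeof(exc_state));
 *	exc_state.elx.spsr = get_eret_el(EL2, SPSR_USE_H);
 *	exc_state.elx.elr = (uint64_t)entry_point;
 *	exc_state.elx.sp_elx = (uint64_t)stack_top;
 *	exc_state.regs.x[X0_INDEX] = (uint64_t)entry_arg;
 *	transition(&exc_state);
 */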