/* SPDX-License-Identifier: BSD-3-Clause */

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <symbols.h>

#include <console/console.h>
#include <arch/mmu.h>
#include <arch/lib_helpers.h>
#include <arch/cache.h>

/* This just caches the next free table slot (okay to do since they fill up from
 * bottom to top and can never be freed up again). It will reset to its initial
 * value on stage transition, so we still need to check it for UNUSED_DESC. */
static uint64_t *next_free_table = (void *)_ttb;

static void print_tag(int level, uint64_t tag)
{
	printk(level, tag & MA_MEM_NC ? "non-cacheable | " :
					"    cacheable | ");
	printk(level, tag & MA_RO ?	"read-only  | " :
					"read-write | ");
	printk(level, tag & MA_NS ?	"non-secure | " :
					"    secure | ");
	printk(level, tag & MA_MEM ?	"normal\n" :
					"device\n");
}

/* Func : get_block_attr
 * Desc : Get block descriptor attributes based on the value of tag in memrange
 * region
 */
static uint64_t get_block_attr(unsigned long tag)
{
	uint64_t attr;

	attr = (tag & MA_NS) ? BLOCK_NS : 0;
	attr |= (tag & MA_RO) ? BLOCK_AP_RO : BLOCK_AP_RW;
	attr |= BLOCK_ACCESS;

	if (tag & MA_MEM) {
		attr |= BLOCK_SH_INNER_SHAREABLE;
		if (tag & MA_MEM_NC)
			attr |= BLOCK_INDEX_MEM_NORMAL_NC << BLOCK_INDEX_SHIFT;
		else
			attr |= BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT;
	} else {
		attr |= BLOCK_INDEX_MEM_DEV_NGNRNE << BLOCK_INDEX_SHIFT;
		attr |= BLOCK_XN;
	}

	return attr;
}
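
/*
 * Example of how a tag composes into block attributes (illustrative only;
 * MA_DEV, MA_S and MA_RW are assumed to be the zero-valued counterparts of
 * MA_MEM, MA_NS and MA_RO in <arch/mmu.h>): a normal, non-secure, read-write
 * cacheable mapping (MA_MEM | MA_NS | MA_RW) yields
 *
 *	attr = BLOCK_NS | BLOCK_AP_RW | BLOCK_ACCESS |
 *	       BLOCK_SH_INNER_SHAREABLE |
 *	       (BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT);
 *
 * while a secure device mapping (MA_DEV | MA_S | MA_RW) yields
 *
 *	attr = BLOCK_AP_RW | BLOCK_ACCESS | BLOCK_XN |
 *	       (BLOCK_INDEX_MEM_DEV_NGNRNE << BLOCK_INDEX_SHIFT);
 */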

/* Func : setup_new_table
 * Desc : Get next free table from TTB and set it up to match old parent entry.
 */
static uint64_t *setup_new_table(uint64_t desc, size_t xlat_size)
{
	while (next_free_table[0] != UNUSED_DESC) {
		next_free_table += GRANULE_SIZE/sizeof(*next_free_table);
		if (_ettb - (u8 *)next_free_table <= 0)
			die("Ran out of page table space!");
	}

	void *frame_base = (void *)(desc & XLAT_ADDR_MASK);
	printk(BIOS_DEBUG, "Backing address range [%p:%p) with new page"
	       " table @%p\n", frame_base, frame_base +
	       (xlat_size << BITS_RESOLVED_PER_LVL), next_free_table);

	if (!desc) {
		memset(next_free_table, 0, GRANULE_SIZE);
	} else {
		/* Can reuse old parent entry, but may need to adjust type. */
		if (xlat_size == L3_XLAT_SIZE)
			desc |= PAGE_DESC;

		int i = 0;
		for (; i < GRANULE_SIZE/sizeof(*next_free_table); i++) {
			next_free_table[i] = desc;
			desc += xlat_size;
		}
	}

	return next_free_table;
}
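
/*
 * Sketch of the reuse path above (assuming a 4KiB granule, so each table has
 * GRANULE_SIZE/8 == 512 slots): when a 1GiB L1 block entry is split so that a
 * smaller sub-range can be remapped, the fresh L2 table must initially
 * describe the same 1GiB. Copying the parent descriptor into every slot while
 * advancing the output address by xlat_size (L2_XLAT_SIZE == 2MiB here)
 * reproduces the old mapping exactly:
 *
 *	next_free_table[0] = old_desc;			// [base, base+2MiB)
 *	next_free_table[1] = old_desc + L2_XLAT_SIZE;	// [base+2MiB, ...)
 *	...
 */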

/* Func : get_next_level_table
 * Desc : Check if the table entry is a valid table descriptor. If not, set up
 * a new table, update the entry and return the new table's address. If valid,
 * return the address the descriptor points to.
 */
static uint64_t *get_next_level_table(uint64_t *ptr, size_t xlat_size)
{
	uint64_t desc = *ptr;

	if ((desc & DESC_MASK) != TABLE_DESC) {
		uint64_t *new_table = setup_new_table(desc, xlat_size);
		desc = ((uint64_t)new_table) | TABLE_DESC;
		*ptr = desc;
	}
	return (uint64_t *)(desc & XLAT_ADDR_MASK);
}

/* Func : init_xlat_table
 * Desc : Given a base address and size, identify the indices within the
 * different level XLAT tables which map the given base addr. Similar to a
 * table walk, except that all invalid entries encountered during the walk are
 * initialized accordingly. On success, return the size of the block/page
 * addressed by the final table.
 */
static uint64_t init_xlat_table(uint64_t base_addr,
				uint64_t size,
				uint64_t tag)
{
	uint64_t l0_index = (base_addr & L0_ADDR_MASK) >> L0_ADDR_SHIFT;
	uint64_t l1_index = (base_addr & L1_ADDR_MASK) >> L1_ADDR_SHIFT;
	uint64_t l2_index = (base_addr & L2_ADDR_MASK) >> L2_ADDR_SHIFT;
	uint64_t l3_index = (base_addr & L3_ADDR_MASK) >> L3_ADDR_SHIFT;
	uint64_t *table = (uint64_t *)_ttb;
	uint64_t desc;
	uint64_t attr = get_block_attr(tag);

	/* L0 entry stores a table descriptor (doesn't support blocks) */
	table = get_next_level_table(&table[l0_index], L1_XLAT_SIZE);

	/* L1 table lookup */
	if ((size >= L1_XLAT_SIZE) &&
	    IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
		/* If the block address is aligned and the size is greater
		 * than or equal to the size addressed by each L1 entry, we
		 * can directly store a block desc */
		desc = base_addr | BLOCK_DESC | attr;
		table[l1_index] = desc;
		/* L2 lookup is not required */
		return L1_XLAT_SIZE;
	}

	/* L1 entry stores a table descriptor */
	table = get_next_level_table(&table[l1_index], L2_XLAT_SIZE);

	/* L2 table lookup */
	if ((size >= L2_XLAT_SIZE) &&
	    IS_ALIGNED(base_addr, (1UL << L2_ADDR_SHIFT))) {
		/* If the block address is aligned and the size is greater
		 * than or equal to the size addressed by each L2 entry, we
		 * can directly store a block desc */
		desc = base_addr | BLOCK_DESC | attr;
		table[l2_index] = desc;
		/* L3 lookup is not required */
		return L2_XLAT_SIZE;
	}

	/* L2 entry stores a table descriptor */
	table = get_next_level_table(&table[l2_index], L3_XLAT_SIZE);

	/* L3 table lookup */
	desc = base_addr | PAGE_DESC | attr;
	table[l3_index] = desc;
	return L3_XLAT_SIZE;
}
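
/*
 * Worked example (hypothetical addresses; 4KiB granule, so L1/L2/L3 entries
 * cover 1GiB/2MiB/4KiB respectively): mapping base 0x80000000 with size
 * 0x40200000 (1GiB + 2MiB) takes two passes through the caller's loop:
 *
 *	init_xlat_table(0x80000000, 0x40200000, tag);
 *		// base is 1GiB-aligned and size >= L1_XLAT_SIZE
 *		// -> one L1 block, returns L1_XLAT_SIZE (1GiB)
 *	init_xlat_table(0xC0000000, 0x00200000, tag);
 *		// 2MiB-aligned -> one L2 block, returns L2_XLAT_SIZE (2MiB)
 *
 * Any leftover that is unaligned or smaller than 2MiB falls through to
 * individual 4KiB L3 page entries.
 */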

/* Func : sanity_check
 * Desc : Check address/size alignment of a table or page.
 */
static void sanity_check(uint64_t addr, uint64_t size)
{
	assert(!(addr & GRANULE_SIZE_MASK) &&
	       !(size & GRANULE_SIZE_MASK) &&
	       (addr + size < (1UL << BITS_PER_VA)) &&
	       size >= GRANULE_SIZE);
}

/* Func : get_pte
 * Desc : Returns the page table entry governing a specific address. */
static uint64_t get_pte(void *addr)
{
	int shift = L0_ADDR_SHIFT;
	uint64_t *pte = (uint64_t *)_ttb;

	while (1) {
		int index = ((uintptr_t)addr >> shift) &
			    ((1UL << BITS_RESOLVED_PER_LVL) - 1);

		if ((pte[index] & DESC_MASK) != TABLE_DESC ||
		    shift <= GRANULE_SIZE_SHIFT)
			return pte[index];

		pte = (uint64_t *)(pte[index] & XLAT_ADDR_MASK);
		shift -= BITS_RESOLVED_PER_LVL;
	}
}
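
/*
 * Usage sketch (hypothetical): the walk above can be used to inspect how an
 * address is currently mapped, e.g. to verify a buffer is non-cacheable:
 *
 *	uint64_t pte = get_pte(buf);
 *	int idx = (pte >> BLOCK_INDEX_SHIFT) & BLOCK_INDEX_MASK;
 *	if (idx != BLOCK_INDEX_MEM_NORMAL_NC)
 *		printk(BIOS_ERR, "buf is not mapped non-cacheable!\n");
 *
 * This is essentially what assert_correct_ttb_mapping() below does for the
 * page tables themselves.
 */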

/* Func : assert_correct_ttb_mapping
 * Desc : Asserts that mapping for addr matches the access type used by the
 * page table walk (i.e. addr is correctly mapped to be part of the TTB). */
static void assert_correct_ttb_mapping(void *addr)
{
	uint64_t pte = get_pte(addr);
	assert(((pte >> BLOCK_INDEX_SHIFT) & BLOCK_INDEX_MASK)
	       == BLOCK_INDEX_MEM_NORMAL && !(pte & BLOCK_NS));
}

/* Func : mmu_config_range
 * Desc : Map a memory range with the given tag by repeatedly calling
 * init_xlat_table. Based on the size returned from init_xlat_table, base_addr
 * is advanced and subsequent calls are made until the whole region is mapped.
 */
void mmu_config_range(void *start, size_t size, uint64_t tag)
{
	uint64_t base_addr = (uintptr_t)start;
	uint64_t temp_size = size;

	printk(BIOS_INFO, "Mapping address range [%p:%p) as ",
	       start, start + size);
	print_tag(BIOS_INFO, tag);

	sanity_check(base_addr, temp_size);

	while (temp_size)
		temp_size -= init_xlat_table(base_addr + (size - temp_size),
					     temp_size, tag);

	/* ARMv8 MMUs snoop L1 data cache, no need to flush it. */
	dsb();
	tlbiall_el3();
	dsb();
	isb();
}
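
/*
 * Note that already-mapped ranges may be overwritten: a later call covering
 * part of an earlier block mapping forces that block to be split through the
 * setup_new_table() reuse path. Illustrative sketch (addresses hypothetical;
 * tags assume the MA_* encodings noted after get_block_attr()):
 *
 *	mmu_config_range((void *)0x80000000, 0x40000000,
 *			 MA_MEM | MA_NS | MA_RW);	// one 1GiB L1 block
 *	mmu_config_range((void *)0x80000000, 0x00100000,
 *			 MA_MEM | MA_NS | MA_RW |	// splits that block
 *			 MA_MEM_NC);			// into L2+L3 tables
 */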

/* Func : mmu_init
 * Desc : Initialize MMU registers and page table memory region. This must be
 * called exactly ONCE PER BOOT before trying to configure any mappings.
 */
void mmu_init(void)
{
	/* Initially mark all table slots unused (first PTE == UNUSED_DESC). */
	uint64_t *table = (uint64_t *)_ttb;
	for (; _ettb - (u8 *)table > 0; table += GRANULE_SIZE/sizeof(*table))
		table[0] = UNUSED_DESC;

	/* Initialize the root table (L0) to be completely unmapped. */
	uint64_t *root = setup_new_table(INVALID_DESC, L0_XLAT_SIZE);
	assert((u8 *)root == _ttb);

	/* Initialize TTBR */
	raw_write_ttbr0_el3((uintptr_t)root);

	/* Initialize MAIR indices */
	raw_write_mair_el3(MAIR_ATTRIBUTES);

	/* Initialize TCR flags */
	raw_write_tcr_el3(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
			  TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_256TB |
			  TCR_TBI_USED);
}
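
/*
 * A minimal bring-up sequence under these constraints (hypothetical platform
 * addresses; real SoC code derives its ranges from the hardware memory map):
 *
 *	mmu_init();				// once per boot
 *	mmu_config_range((void *)0x80000000,	// DRAM: normal memory
 *			 0x80000000, MA_MEM | MA_NS | MA_RW);
 *	mmu_config_range((void *)0x00000000,	// MMIO: device memory
 *			 0x10000000, MA_DEV | MA_S | MA_RW);
 *	mmu_enable();				// MMU + caches on
 */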

/* Func : mmu_save_context
 * Desc : Save the MMU register context (MAIR attributes and TCR flags). The
 * TTBR always points to _ttb, so it is re-derived on restore rather than
 * saved here.
 */
void mmu_save_context(struct mmu_context *mmu_context)
{
	assert(mmu_context);

	/* Back up MAIR_ATTRIBUTES */
	mmu_context->mair = raw_read_mair_el3();

	/* Back up TCR value */
	mmu_context->tcr = raw_read_tcr_el3();
}

/* Func : mmu_restore_context
 * Desc : Restore the MMU context from a previously saved backup.
 */
void mmu_restore_context(const struct mmu_context *mmu_context)
{
	assert(mmu_context);

	/* Restore TTBR */
	raw_write_ttbr0_el3((uintptr_t)_ttb);

	/* Restore MAIR indices */
	raw_write_mair_el3(mmu_context->mair);

	/* Restore TCR flags */
	raw_write_tcr_el3(mmu_context->tcr);

	/* Invalidate TLBs since the TTBR was updated. */
	tlb_invalidate_all();
}
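
/*
 * Sketch of the intended pairing (hypothetical call site, e.g. around a
 * firmware call that reprograms the MMU registers):
 *
 *	struct mmu_context ctx;
 *	mmu_save_context(&ctx);
 *	...			// code that may change MAIR/TCR
 *	mmu_restore_context(&ctx);
 */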

void mmu_enable(void)
{
	assert_correct_ttb_mapping(_ttb);
	assert_correct_ttb_mapping((void *)((uintptr_t)_ettb - 1));

	uint32_t sctlr = raw_read_sctlr_el3();
	sctlr |= SCTLR_C | SCTLR_M | SCTLR_I;
	raw_write_sctlr_el3(sctlr);
	isb();
}