blob: a24e7c6fdd3043223243d1dbe04d914bff0b66ef [file] [log] [blame]
Furquan Shaikh24869572014-07-17 11:36:08 -07001/*
2 * This file is part of the coreboot project.
3 *
4 * Copyright 2014 Google Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
Julius Werner62336812015-05-18 13:11:12 -070030#include <assert.h>
Furquan Shaikh24869572014-07-17 11:36:08 -070031#include <stdlib.h>
32#include <stdint.h>
33#include <string.h>
Julius Wernerfe4cbf12015-10-07 18:38:24 -070034#include <symbols.h>
Furquan Shaikh24869572014-07-17 11:36:08 -070035
Julius Werner62336812015-05-18 13:11:12 -070036#include <console/console.h>
Furquan Shaikh24869572014-07-17 11:36:08 -070037#include <arch/mmu.h>
38#include <arch/lib_helpers.h>
39#include <arch/cache.h>
40
/* This just caches the next free table slot (okay to do since they fill up from
 * bottom to top and can never be freed up again). It will reset to its initial
 * value on stage transition, so we still need to check it for UNUSED_DESC. */
static uint64_t *next_free_table = (void *)_ttb;
Furquan Shaikh24869572014-07-17 11:36:08 -070045
Julius Werner62336812015-05-18 13:11:12 -070046static void print_tag(int level, uint64_t tag)
47{
Jimmy Huang2e01e8d2015-05-20 15:57:06 +080048 printk(level, tag & MA_MEM_NC ? "non-cacheable | " :
49 " cacheable | ");
Julius Werner62336812015-05-18 13:11:12 -070050 printk(level, tag & MA_RO ? "read-only | " :
51 "read-write | ");
52 printk(level, tag & MA_NS ? "non-secure | " :
53 " secure | ");
54 printk(level, tag & MA_MEM ? "normal\n" :
55 "device\n");
56}
Furquan Shaikh24869572014-07-17 11:36:08 -070057
58/* Func : get_block_attr
59 * Desc : Get block descriptor attributes based on the value of tag in memrange
60 * region
61 */
62static uint64_t get_block_attr(unsigned long tag)
63{
64 uint64_t attr;
65
66 attr = (tag & MA_NS)? BLOCK_NS : 0;
67 attr |= (tag & MA_RO)? BLOCK_AP_RO : BLOCK_AP_RW;
68 attr |= BLOCK_ACCESS;
Aaron Durbin4633dc12014-08-12 17:40:38 -050069
70 if (tag & MA_MEM) {
Furquan Shaikh55aa17b2015-03-27 22:52:18 -070071 attr |= BLOCK_SH_INNER_SHAREABLE;
Aaron Durbin4633dc12014-08-12 17:40:38 -050072 if (tag & MA_MEM_NC)
73 attr |= BLOCK_INDEX_MEM_NORMAL_NC << BLOCK_INDEX_SHIFT;
74 else
75 attr |= BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT;
76 } else {
77 attr |= BLOCK_INDEX_MEM_DEV_NGNRNE << BLOCK_INDEX_SHIFT;
Jimmy Huangc159a0e2015-09-15 15:29:10 +080078 attr |= BLOCK_XN;
Aaron Durbin4633dc12014-08-12 17:40:38 -050079 }
80
Furquan Shaikh24869572014-07-17 11:36:08 -070081 return attr;
82}
83
Julius Werner62336812015-05-18 13:11:12 -070084/* Func : setup_new_table
85 * Desc : Get next free table from TTB and set it up to match old parent entry.
Furquan Shaikh24869572014-07-17 11:36:08 -070086 */
Julius Werner62336812015-05-18 13:11:12 -070087static uint64_t *setup_new_table(uint64_t desc, size_t xlat_size)
Furquan Shaikh24869572014-07-17 11:36:08 -070088{
Julius Wernerfe4cbf12015-10-07 18:38:24 -070089 while (next_free_table[0] != UNUSED_DESC) {
90 next_free_table += GRANULE_SIZE/sizeof(*next_free_table);
91 if (_ettb - (u8 *)next_free_table <= 0)
92 die("Ran out of page table space!");
93 }
Furquan Shaikh24869572014-07-17 11:36:08 -070094
Julius Wernerfe4cbf12015-10-07 18:38:24 -070095 void *frame_base = (void *)(desc & XLAT_ADDR_MASK);
96 printk(BIOS_DEBUG, "Backing address range [%p:%p) with new page"
97 " table @%p\n", frame_base, frame_base +
98 (xlat_size << BITS_RESOLVED_PER_LVL), next_free_table);
Furquan Shaikh24869572014-07-17 11:36:08 -070099
Julius Werner62336812015-05-18 13:11:12 -0700100 if (!desc) {
Julius Wernerfe4cbf12015-10-07 18:38:24 -0700101 memset(next_free_table, 0, GRANULE_SIZE);
Julius Werner62336812015-05-18 13:11:12 -0700102 } else {
103 /* Can reuse old parent entry, but may need to adjust type. */
104 if (xlat_size == L3_XLAT_SIZE)
105 desc |= PAGE_DESC;
106
Julius Wernerfe4cbf12015-10-07 18:38:24 -0700107 int i = 0;
108 for (; i < GRANULE_SIZE/sizeof(*next_free_table); i++) {
109 next_free_table[i] = desc;
110 desc += xlat_size;
111 }
Julius Werner62336812015-05-18 13:11:12 -0700112 }
Furquan Shaikh24869572014-07-17 11:36:08 -0700113
Julius Wernerfe4cbf12015-10-07 18:38:24 -0700114 return next_free_table;
Furquan Shaikh24869572014-07-17 11:36:08 -0700115}
116
117/* Func: get_next_level_table
Julius Werner62336812015-05-18 13:11:12 -0700118 * Desc: Check if the table entry is a valid descriptor. If not, initialize new
Furquan Shaikh24869572014-07-17 11:36:08 -0700119 * table, update the entry and return the table addr. If valid, return the addr
120 */
Julius Werner62336812015-05-18 13:11:12 -0700121static uint64_t *get_next_level_table(uint64_t *ptr, size_t xlat_size)
Furquan Shaikh24869572014-07-17 11:36:08 -0700122{
123 uint64_t desc = *ptr;
124
Julius Wernerfe4cbf12015-10-07 18:38:24 -0700125 if ((desc & DESC_MASK) != TABLE_DESC) {
Julius Werner62336812015-05-18 13:11:12 -0700126 uint64_t *new_table = setup_new_table(desc, xlat_size);
Furquan Shaikh24869572014-07-17 11:36:08 -0700127 desc = ((uint64_t)new_table) | TABLE_DESC;
128 *ptr = desc;
129 }
Julius Wernerfe4cbf12015-10-07 18:38:24 -0700130 return (uint64_t *)(desc & XLAT_ADDR_MASK);
Furquan Shaikh24869572014-07-17 11:36:08 -0700131}
132
133/* Func : init_xlat_table
134 * Desc : Given a base address and size, it identifies the indices within
135 * different level XLAT tables which map the given base addr. Similar to table
136 * walk, except that all invalid entries during the walk are updated
137 * accordingly. On success, it returns the size of the block/page addressed by
Julius Werner62336812015-05-18 13:11:12 -0700138 * the final table.
Furquan Shaikh24869572014-07-17 11:36:08 -0700139 */
140static uint64_t init_xlat_table(uint64_t base_addr,
141 uint64_t size,
142 uint64_t tag)
143{
Patrick Rudolph57afc5e2018-03-05 09:53:47 +0100144 uint64_t l0_index = (base_addr & L0_ADDR_MASK) >> L0_ADDR_SHIFT;
Julius Werner62336812015-05-18 13:11:12 -0700145 uint64_t l1_index = (base_addr & L1_ADDR_MASK) >> L1_ADDR_SHIFT;
146 uint64_t l2_index = (base_addr & L2_ADDR_MASK) >> L2_ADDR_SHIFT;
147 uint64_t l3_index = (base_addr & L3_ADDR_MASK) >> L3_ADDR_SHIFT;
Julius Wernerfe4cbf12015-10-07 18:38:24 -0700148 uint64_t *table = (uint64_t *)_ttb;
Furquan Shaikh24869572014-07-17 11:36:08 -0700149 uint64_t desc;
150 uint64_t attr = get_block_attr(tag);
151
Patrick Rudolph57afc5e2018-03-05 09:53:47 +0100152 /* L0 entry stores a table descriptor (doesn't support blocks) */
153 table = get_next_level_table(&table[l0_index], L1_XLAT_SIZE);
154
155 /* L1 table lookup */
156 if ((size >= L1_XLAT_SIZE) &&
157 IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
Jimmy Huangdea45972015-04-13 20:28:38 +0800158 /* If block address is aligned and size is greater than
159 * or equal to size addressed by each L1 entry, we can
160 * directly store a block desc */
161 desc = base_addr | BLOCK_DESC | attr;
162 table[l1_index] = desc;
163 /* L2 lookup is not required */
164 return L1_XLAT_SIZE;
Furquan Shaikh24869572014-07-17 11:36:08 -0700165 }
166
Patrick Rudolph57afc5e2018-03-05 09:53:47 +0100167 /* L1 entry stores a table descriptor */
168 table = get_next_level_table(&table[l1_index], L2_XLAT_SIZE);
169
170 /* L2 table lookup */
Jimmy Huangdea45972015-04-13 20:28:38 +0800171 if ((size >= L2_XLAT_SIZE) &&
172 IS_ALIGNED(base_addr, (1UL << L2_ADDR_SHIFT))) {
173 /* If block address is aligned and size is greater than
174 * or equal to size addressed by each L2 entry, we can
175 * directly store a block desc */
Furquan Shaikh24869572014-07-17 11:36:08 -0700176 desc = base_addr | BLOCK_DESC | attr;
177 table[l2_index] = desc;
178 /* L3 lookup is not required */
179 return L2_XLAT_SIZE;
Furquan Shaikh24869572014-07-17 11:36:08 -0700180 }
181
Julius Werner62336812015-05-18 13:11:12 -0700182 /* L2 entry stores a table descriptor */
183 table = get_next_level_table(&table[l2_index], L3_XLAT_SIZE);
184
Furquan Shaikh24869572014-07-17 11:36:08 -0700185 /* L3 table lookup */
186 desc = base_addr | PAGE_DESC | attr;
187 table[l3_index] = desc;
188 return L3_XLAT_SIZE;
189}
190
191/* Func : sanity_check
Julius Werner62336812015-05-18 13:11:12 -0700192 * Desc : Check address/size alignment of a table or page.
Furquan Shaikh24869572014-07-17 11:36:08 -0700193 */
Julius Werner62336812015-05-18 13:11:12 -0700194static void sanity_check(uint64_t addr, uint64_t size)
Furquan Shaikh24869572014-07-17 11:36:08 -0700195{
Julius Werner62336812015-05-18 13:11:12 -0700196 assert(!(addr & GRANULE_SIZE_MASK) &&
197 !(size & GRANULE_SIZE_MASK) &&
Patrick Rudolph57afc5e2018-03-05 09:53:47 +0100198 (addr + size < (1UL << BITS_PER_VA)) &&
Julius Werner62336812015-05-18 13:11:12 -0700199 size >= GRANULE_SIZE);
Furquan Shaikh24869572014-07-17 11:36:08 -0700200}
201
Julius Werner372d0ff2016-01-26 19:17:53 -0800202/* Func : get_pte
203 * Desc : Returns the page table entry governing a specific address. */
204static uint64_t get_pte(void *addr)
205{
Patrick Rudolph57afc5e2018-03-05 09:53:47 +0100206 int shift = L0_ADDR_SHIFT;
Julius Werner372d0ff2016-01-26 19:17:53 -0800207 uint64_t *pte = (uint64_t *)_ttb;
208
209 while (1) {
210 int index = ((uintptr_t)addr >> shift) &
211 ((1UL << BITS_RESOLVED_PER_LVL) - 1);
212
213 if ((pte[index] & DESC_MASK) != TABLE_DESC ||
214 shift <= GRANULE_SIZE_SHIFT)
215 return pte[index];
216
217 pte = (uint64_t *)(pte[index] & XLAT_ADDR_MASK);
218 shift -= BITS_RESOLVED_PER_LVL;
219 }
220}
221
Julius Werner62336812015-05-18 13:11:12 -0700222/* Func : mmu_config_range
223 * Desc : This function repeatedly calls init_xlat_table with the base
Furquan Shaikh24869572014-07-17 11:36:08 -0700224 * address. Based on size returned from init_xlat_table, base_addr is updated
225 * and subsequent calls are made for initializing the xlat table until the whole
226 * region is initialized.
227 */
Julius Werner62336812015-05-18 13:11:12 -0700228void mmu_config_range(void *start, size_t size, uint64_t tag)
Furquan Shaikh24869572014-07-17 11:36:08 -0700229{
Julius Werner62336812015-05-18 13:11:12 -0700230 uint64_t base_addr = (uintptr_t)start;
Furquan Shaikh24869572014-07-17 11:36:08 -0700231 uint64_t temp_size = size;
232
Julius Wernerfe4cbf12015-10-07 18:38:24 -0700233 printk(BIOS_INFO, "Mapping address range [%p:%p) as ",
234 start, start + size);
235 print_tag(BIOS_INFO, tag);
Furquan Shaikh35531192015-05-20 17:10:55 -0700236
Julius Werner62336812015-05-18 13:11:12 -0700237 sanity_check(base_addr, temp_size);
Furquan Shaikh24869572014-07-17 11:36:08 -0700238
Julius Werner62336812015-05-18 13:11:12 -0700239 while (temp_size)
240 temp_size -= init_xlat_table(base_addr + (size - temp_size),
241 temp_size, tag);
Furquan Shaikh24869572014-07-17 11:36:08 -0700242
Julius Werner62336812015-05-18 13:11:12 -0700243 /* ARMv8 MMUs snoop L1 data cache, no need to flush it. */
244 dsb();
245 tlbiall_current();
246 dsb();
247 isb();
Furquan Shaikh24869572014-07-17 11:36:08 -0700248}
249
250/* Func : mmu_init
Julius Wernerfe4cbf12015-10-07 18:38:24 -0700251 * Desc : Initialize MMU registers and page table memory region. This must be
252 * called exactly ONCE PER BOOT before trying to configure any mappings.
Furquan Shaikh24869572014-07-17 11:36:08 -0700253 */
Julius Wernerfe4cbf12015-10-07 18:38:24 -0700254void mmu_init(void)
Furquan Shaikh24869572014-07-17 11:36:08 -0700255{
Julius Wernerfe4cbf12015-10-07 18:38:24 -0700256 /* Initially mark all table slots unused (first PTE == UNUSED_DESC). */
257 uint64_t *table = (uint64_t *)_ttb;
258 for (; _ettb - (u8 *)table > 0; table += GRANULE_SIZE/sizeof(*table))
259 table[0] = UNUSED_DESC;
Furquan Shaikh24869572014-07-17 11:36:08 -0700260
Patrick Rudolph57afc5e2018-03-05 09:53:47 +0100261 /* Initialize the root table (L0) to be completely unmapped. */
262 uint64_t *root = setup_new_table(INVALID_DESC, L0_XLAT_SIZE);
Julius Wernerfe4cbf12015-10-07 18:38:24 -0700263 assert((u8 *)root == _ttb);
Furquan Shaikh24869572014-07-17 11:36:08 -0700264
Julius Wernerfe4cbf12015-10-07 18:38:24 -0700265 /* Initialize TTBR */
266 raw_write_ttbr0_el3((uintptr_t)root);
Furquan Shaikh24869572014-07-17 11:36:08 -0700267
268 /* Initialize MAIR indices */
269 raw_write_mair_el3(MAIR_ATTRIBUTES);
270
Furquan Shaikh24869572014-07-17 11:36:08 -0700271 /* Initialize TCR flags */
272 raw_write_tcr_el3(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
Patrick Rudolph57afc5e2018-03-05 09:53:47 +0100273 TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_256TB |
Furquan Shaikh24869572014-07-17 11:36:08 -0700274 TCR_TBI_USED);
Julius Wernerfe4cbf12015-10-07 18:38:24 -0700275}
Furquan Shaikh24869572014-07-17 11:36:08 -0700276
Julius Wernerfe4cbf12015-10-07 18:38:24 -0700277void mmu_enable(void)
278{
Julius Werner372d0ff2016-01-26 19:17:53 -0800279 if (((get_pte(_ttb) >> BLOCK_INDEX_SHIFT) & BLOCK_INDEX_MASK)
280 != BLOCK_INDEX_MEM_NORMAL ||
281 ((get_pte(_ettb - 1) >> BLOCK_INDEX_SHIFT) & BLOCK_INDEX_MASK)
282 != BLOCK_INDEX_MEM_NORMAL)
283 die("TTB memory type must match TCR (normal, cacheable)!");
284
Julius Wernerfe4cbf12015-10-07 18:38:24 -0700285 uint32_t sctlr = raw_read_sctlr_el3();
Furquan Shaikh24869572014-07-17 11:36:08 -0700286 sctlr |= SCTLR_C | SCTLR_M | SCTLR_I;
287 raw_write_sctlr_el3(sctlr);
Julius Wernerfe4cbf12015-10-07 18:38:24 -0700288 isb();
289}
Furquan Shaikh24869572014-07-17 11:36:08 -0700290
Julius Wernerfe4cbf12015-10-07 18:38:24 -0700291/*
292 * CAUTION: This implementation assumes that coreboot never uses non-identity
293 * page tables for pages containing executed code. If you ever want to violate
294 * this assumption, have fun figuring out the associated problems on your own.
295 */
296void mmu_disable(void)
297{
Julius Wernerbaa3e702015-04-21 14:32:36 -0700298 dcache_clean_invalidate_all();
Julius Wernerfe4cbf12015-10-07 18:38:24 -0700299 uint32_t sctlr = raw_read_sctlr_el3();
300 sctlr &= ~(SCTLR_C | SCTLR_M);
301 raw_write_sctlr_el3(sctlr);
Furquan Shaikh24869572014-07-17 11:36:08 -0700302 isb();
Furquan Shaikh24869572014-07-17 11:36:08 -0700303}