blob: a3e8d7758a2329a0c81cd8c239f477c9504176b1 [file] [log] [blame]
Furquan Shaikh24869572014-07-17 11:36:08 -07001/*
2 * This file is part of the coreboot project.
3 *
4 * Copyright 2014 Google Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
Julius Werner62336812015-05-18 13:11:12 -070030#include <assert.h>
Furquan Shaikh24869572014-07-17 11:36:08 -070031#include <stdlib.h>
32#include <stdint.h>
33#include <string.h>
34
Julius Werner62336812015-05-18 13:11:12 -070035#include <console/console.h>
Furquan Shaikh24869572014-07-17 11:36:08 -070036#include <memrange.h>
37#include <arch/mmu.h>
38#include <arch/lib_helpers.h>
39#include <arch/cache.h>
40
/* Maximum number of XLAT Tables available based on ttb buffer size */
static unsigned int max_tables;
/* Address of ttb buffer */
static uint64_t *xlat_addr;
/* Index of the next unused table in the ttb buffer; slot 0 is the root
 * table reserved by mmu_init(). Unsigned to match max_tables, avoiding a
 * signed/unsigned mismatch in the allocation assert. */
static unsigned int free_idx;
Furquan Shaikh24869572014-07-17 11:36:08 -070046
Julius Werner62336812015-05-18 13:11:12 -070047static void print_tag(int level, uint64_t tag)
48{
Jimmy Huang2e01e8d2015-05-20 15:57:06 +080049 printk(level, tag & MA_MEM_NC ? "non-cacheable | " :
50 " cacheable | ");
Julius Werner62336812015-05-18 13:11:12 -070051 printk(level, tag & MA_RO ? "read-only | " :
52 "read-write | ");
53 printk(level, tag & MA_NS ? "non-secure | " :
54 " secure | ");
55 printk(level, tag & MA_MEM ? "normal\n" :
56 "device\n");
57}
Furquan Shaikh24869572014-07-17 11:36:08 -070058
59/* Func : get_block_attr
60 * Desc : Get block descriptor attributes based on the value of tag in memrange
61 * region
62 */
63static uint64_t get_block_attr(unsigned long tag)
64{
65 uint64_t attr;
66
67 attr = (tag & MA_NS)? BLOCK_NS : 0;
68 attr |= (tag & MA_RO)? BLOCK_AP_RO : BLOCK_AP_RW;
69 attr |= BLOCK_ACCESS;
Aaron Durbin4633dc12014-08-12 17:40:38 -050070
71 if (tag & MA_MEM) {
Furquan Shaikh55aa17b2015-03-27 22:52:18 -070072 attr |= BLOCK_SH_INNER_SHAREABLE;
Aaron Durbin4633dc12014-08-12 17:40:38 -050073 if (tag & MA_MEM_NC)
74 attr |= BLOCK_INDEX_MEM_NORMAL_NC << BLOCK_INDEX_SHIFT;
75 else
76 attr |= BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT;
77 } else {
78 attr |= BLOCK_INDEX_MEM_DEV_NGNRNE << BLOCK_INDEX_SHIFT;
79 }
80
Furquan Shaikh24869572014-07-17 11:36:08 -070081 return attr;
82}
83
Furquan Shaikh24869572014-07-17 11:36:08 -070084/* Func : table_desc_valid
85 * Desc : Check if a table entry contains valid desc
86 */
87static uint64_t table_desc_valid(uint64_t desc)
88{
89 return((desc & TABLE_DESC) == TABLE_DESC);
90}
91
Julius Werner62336812015-05-18 13:11:12 -070092/* Func : setup_new_table
93 * Desc : Get next free table from TTB and set it up to match old parent entry.
Furquan Shaikh24869572014-07-17 11:36:08 -070094 */
Julius Werner62336812015-05-18 13:11:12 -070095static uint64_t *setup_new_table(uint64_t desc, size_t xlat_size)
Furquan Shaikh24869572014-07-17 11:36:08 -070096{
Julius Werner62336812015-05-18 13:11:12 -070097 uint64_t *new, *entry;
Furquan Shaikh24869572014-07-17 11:36:08 -070098
Julius Werner62336812015-05-18 13:11:12 -070099 assert(free_idx < max_tables);
Furquan Shaikh24869572014-07-17 11:36:08 -0700100
101 new = (uint64_t*)((unsigned char *)xlat_addr + free_idx * GRANULE_SIZE);
102 free_idx++;
103
Julius Werner62336812015-05-18 13:11:12 -0700104 if (!desc) {
105 memset(new, 0, GRANULE_SIZE);
106 } else {
107 /* Can reuse old parent entry, but may need to adjust type. */
108 if (xlat_size == L3_XLAT_SIZE)
109 desc |= PAGE_DESC;
110
111 for (entry = new; (u8 *)entry < (u8 *)new + GRANULE_SIZE;
112 entry++, desc += xlat_size)
113 *entry = desc;
114 }
Furquan Shaikh24869572014-07-17 11:36:08 -0700115
116 return new;
117}
118
119/* Func : get_table_from_desc
120 * Desc : Get next level table address from table descriptor
121 */
122static uint64_t *get_table_from_desc(uint64_t desc)
123{
124 uint64_t *ptr = (uint64_t*)(desc & XLAT_TABLE_MASK);
125 return ptr;
126}
127
128/* Func: get_next_level_table
Julius Werner62336812015-05-18 13:11:12 -0700129 * Desc: Check if the table entry is a valid descriptor. If not, initialize new
Furquan Shaikh24869572014-07-17 11:36:08 -0700130 * table, update the entry and return the table addr. If valid, return the addr
131 */
Julius Werner62336812015-05-18 13:11:12 -0700132static uint64_t *get_next_level_table(uint64_t *ptr, size_t xlat_size)
Furquan Shaikh24869572014-07-17 11:36:08 -0700133{
134 uint64_t desc = *ptr;
135
136 if (!table_desc_valid(desc)) {
Julius Werner62336812015-05-18 13:11:12 -0700137 uint64_t *new_table = setup_new_table(desc, xlat_size);
Furquan Shaikh24869572014-07-17 11:36:08 -0700138 desc = ((uint64_t)new_table) | TABLE_DESC;
139 *ptr = desc;
140 }
141 return get_table_from_desc(desc);
142}
143
144/* Func : init_xlat_table
145 * Desc : Given a base address and size, it identifies the indices within
146 * different level XLAT tables which map the given base addr. Similar to table
147 * walk, except that all invalid entries during the walk are updated
148 * accordingly. On success, it returns the size of the block/page addressed by
Julius Werner62336812015-05-18 13:11:12 -0700149 * the final table.
Furquan Shaikh24869572014-07-17 11:36:08 -0700150 */
151static uint64_t init_xlat_table(uint64_t base_addr,
152 uint64_t size,
153 uint64_t tag)
154{
Julius Werner62336812015-05-18 13:11:12 -0700155 uint64_t l1_index = (base_addr & L1_ADDR_MASK) >> L1_ADDR_SHIFT;
156 uint64_t l2_index = (base_addr & L2_ADDR_MASK) >> L2_ADDR_SHIFT;
157 uint64_t l3_index = (base_addr & L3_ADDR_MASK) >> L3_ADDR_SHIFT;
Furquan Shaikh24869572014-07-17 11:36:08 -0700158 uint64_t *table = xlat_addr;
159 uint64_t desc;
160 uint64_t attr = get_block_attr(tag);
161
Julius Werner62336812015-05-18 13:11:12 -0700162 /* L1 table lookup
163 * If VA has bits more than L2 can resolve, lookup starts at L1
164 * Assumption: we don't need L0 table in coreboot */
Jimmy Huangdea45972015-04-13 20:28:38 +0800165 if (BITS_PER_VA > L1_ADDR_SHIFT) {
166 if ((size >= L1_XLAT_SIZE) &&
167 IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
168 /* If block address is aligned and size is greater than
169 * or equal to size addressed by each L1 entry, we can
170 * directly store a block desc */
171 desc = base_addr | BLOCK_DESC | attr;
172 table[l1_index] = desc;
173 /* L2 lookup is not required */
174 return L1_XLAT_SIZE;
Jimmy Huangdea45972015-04-13 20:28:38 +0800175 }
Julius Werner62336812015-05-18 13:11:12 -0700176 table = get_next_level_table(&table[l1_index], L2_XLAT_SIZE);
Furquan Shaikh24869572014-07-17 11:36:08 -0700177 }
178
Julius Werner62336812015-05-18 13:11:12 -0700179 /* L2 table lookup
180 * If lookup was performed at L1, L2 table addr is obtained from L1 desc
181 * else, lookup starts at ttbr address */
Jimmy Huangdea45972015-04-13 20:28:38 +0800182 if ((size >= L2_XLAT_SIZE) &&
183 IS_ALIGNED(base_addr, (1UL << L2_ADDR_SHIFT))) {
184 /* If block address is aligned and size is greater than
185 * or equal to size addressed by each L2 entry, we can
186 * directly store a block desc */
Furquan Shaikh24869572014-07-17 11:36:08 -0700187 desc = base_addr | BLOCK_DESC | attr;
188 table[l2_index] = desc;
189 /* L3 lookup is not required */
190 return L2_XLAT_SIZE;
Furquan Shaikh24869572014-07-17 11:36:08 -0700191 }
192
Julius Werner62336812015-05-18 13:11:12 -0700193 /* L2 entry stores a table descriptor */
194 table = get_next_level_table(&table[l2_index], L3_XLAT_SIZE);
195
Furquan Shaikh24869572014-07-17 11:36:08 -0700196 /* L3 table lookup */
197 desc = base_addr | PAGE_DESC | attr;
198 table[l3_index] = desc;
199 return L3_XLAT_SIZE;
200}
201
202/* Func : sanity_check
Julius Werner62336812015-05-18 13:11:12 -0700203 * Desc : Check address/size alignment of a table or page.
Furquan Shaikh24869572014-07-17 11:36:08 -0700204 */
Julius Werner62336812015-05-18 13:11:12 -0700205static void sanity_check(uint64_t addr, uint64_t size)
Furquan Shaikh24869572014-07-17 11:36:08 -0700206{
Julius Werner62336812015-05-18 13:11:12 -0700207 assert(!(addr & GRANULE_SIZE_MASK) &&
208 !(size & GRANULE_SIZE_MASK) &&
209 size >= GRANULE_SIZE);
Furquan Shaikh24869572014-07-17 11:36:08 -0700210}
211
Julius Werner62336812015-05-18 13:11:12 -0700212/* Func : mmu_config_range
213 * Desc : This function repeatedly calls init_xlat_table with the base
Furquan Shaikh24869572014-07-17 11:36:08 -0700214 * address. Based on size returned from init_xlat_table, base_addr is updated
215 * and subsequent calls are made for initializing the xlat table until the whole
216 * region is initialized.
217 */
Julius Werner62336812015-05-18 13:11:12 -0700218void mmu_config_range(void *start, size_t size, uint64_t tag)
Furquan Shaikh24869572014-07-17 11:36:08 -0700219{
Julius Werner62336812015-05-18 13:11:12 -0700220 uint64_t base_addr = (uintptr_t)start;
Furquan Shaikh24869572014-07-17 11:36:08 -0700221 uint64_t temp_size = size;
222
Furquan Shaikh35531192015-05-20 17:10:55 -0700223 if (!IS_ENABLED(CONFIG_SMP)) {
224 printk(BIOS_INFO, "Mapping address range [%p:%p) as ",
225 start, start + size);
226 print_tag(BIOS_INFO, tag);
227 }
228
Julius Werner62336812015-05-18 13:11:12 -0700229 sanity_check(base_addr, temp_size);
Furquan Shaikh24869572014-07-17 11:36:08 -0700230
Julius Werner62336812015-05-18 13:11:12 -0700231 while (temp_size)
232 temp_size -= init_xlat_table(base_addr + (size - temp_size),
233 temp_size, tag);
Furquan Shaikh24869572014-07-17 11:36:08 -0700234
Julius Werner62336812015-05-18 13:11:12 -0700235 /* ARMv8 MMUs snoop L1 data cache, no need to flush it. */
236 dsb();
237 tlbiall_current();
238 dsb();
239 isb();
Furquan Shaikh24869572014-07-17 11:36:08 -0700240}
241
242/* Func : mmu_init
243 * Desc : Initialize mmu based on the mmap_ranges passed. ttb_buffer is used as
244 * the base address for xlat tables. ttb_size defines the max number of tables
245 * that can be used
246 */
247void mmu_init(struct memranges *mmap_ranges,
248 uint64_t *ttb_buffer,
249 uint64_t ttb_size)
250{
251 struct range_entry *mmap_entry;
252
Julius Werner62336812015-05-18 13:11:12 -0700253 sanity_check((uint64_t)ttb_buffer, ttb_size);
Furquan Shaikh24869572014-07-17 11:36:08 -0700254
255 memset((void*)ttb_buffer, 0, GRANULE_SIZE);
256 max_tables = (ttb_size >> GRANULE_SIZE_SHIFT);
257 xlat_addr = ttb_buffer;
Furquan Shaikhc4fb6132014-09-06 18:47:33 -0700258 free_idx = 1;
Furquan Shaikh24869572014-07-17 11:36:08 -0700259
Julius Werner62336812015-05-18 13:11:12 -0700260 if (mmap_ranges)
261 memranges_each_entry(mmap_entry, mmap_ranges) {
262 mmu_config_range((void *)range_entry_base(mmap_entry),
263 range_entry_size(mmap_entry),
264 range_entry_tag(mmap_entry));
265 }
Furquan Shaikh24869572014-07-17 11:36:08 -0700266}
267
Aaron Durbin339f8b32014-08-27 14:58:43 -0500268void mmu_enable(void)
Furquan Shaikh24869572014-07-17 11:36:08 -0700269{
270 uint32_t sctlr;
271
272 /* Initialize MAIR indices */
273 raw_write_mair_el3(MAIR_ATTRIBUTES);
274
275 /* Invalidate TLBs */
276 tlbiall_el3();
277
278 /* Initialize TCR flags */
279 raw_write_tcr_el3(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
Jimmy Huangdea45972015-04-13 20:28:38 +0800280 TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_64GB |
Furquan Shaikh24869572014-07-17 11:36:08 -0700281 TCR_TBI_USED);
282
283 /* Initialize TTBR */
Aaron Durbin339f8b32014-08-27 14:58:43 -0500284 raw_write_ttbr0_el3((uintptr_t)xlat_addr);
Furquan Shaikh24869572014-07-17 11:36:08 -0700285
Julius Werner62336812015-05-18 13:11:12 -0700286 /* Ensure system register writes are committed before enabling MMU */
Furquan Shaikh24869572014-07-17 11:36:08 -0700287 isb();
288
289 /* Enable MMU */
290 sctlr = raw_read_sctlr_el3();
291 sctlr |= SCTLR_C | SCTLR_M | SCTLR_I;
292 raw_write_sctlr_el3(sctlr);
293
294 isb();
Furquan Shaikh24869572014-07-17 11:36:08 -0700295}