/*
 * This file is part of the coreboot project.
 *
 * Copyright 2014 Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <stdlib.h>
#include <stdint.h>
#include <string.h>

#include <memrange.h>
#include <arch/mmu.h>
#include <arch/lib_helpers.h>
#include <arch/cache.h>

/* Maximum number of XLAT Tables available based on ttb buffer size */
static unsigned int max_tables;
/* Address of ttb buffer */
static uint64_t *xlat_addr;
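/* Index of the next unused table in the ttb buffer; entry 0 is the root
 * table, so allocation starts at index 1 (see mmu_init) */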
static int free_idx;

static const uint64_t level_to_addr_mask[] = {
	L1_ADDR_MASK,
	L2_ADDR_MASK,
	L3_ADDR_MASK,
};

static const uint64_t level_to_addr_shift[] = {
	L1_ADDR_SHIFT,
	L2_ADDR_SHIFT,
	L3_ADDR_SHIFT,
};

/* Func : get_block_attr
 * Desc : Get block descriptor attributes based on the value of tag in memrange
 * region
 */
static uint64_t get_block_attr(unsigned long tag)
{
	uint64_t attr;

	attr = (tag & MA_NS) ? BLOCK_NS : 0;
	attr |= (tag & MA_RO) ? BLOCK_AP_RO : BLOCK_AP_RW;
	attr |= BLOCK_ACCESS;

	if (tag & MA_MEM) {
		attr |= BLOCK_SH_INNER_SHAREABLE;
		if (tag & MA_MEM_NC)
			attr |= BLOCK_INDEX_MEM_NORMAL_NC << BLOCK_INDEX_SHIFT;
		else
			attr |= BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT;
	} else {
		attr |= BLOCK_INDEX_MEM_DEV_NGNRNE << BLOCK_INDEX_SHIFT;
	}

	return attr;
}

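/* For illustration: a secure, cacheable, read-write memory tag (MA_MEM set;
 * MA_NS, MA_RO and MA_MEM_NC clear) yields
 *	BLOCK_AP_RW | BLOCK_ACCESS | BLOCK_SH_INNER_SHAREABLE |
 *	(BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT)
 */
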
/* Func : get_index_from_addr
 * Desc : Get index into table at a given level using appropriate bits from the
 * base address
 */
static uint64_t get_index_from_addr(uint64_t addr, uint8_t level)
{
	uint64_t mask = level_to_addr_mask[level - 1];
	uint8_t shift = level_to_addr_shift[level - 1];

	return (addr & mask) >> shift;
}

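/* Worked example, assuming the usual 4 KiB-granule shifts (L1 = 30, L2 = 21,
 * L3 = 12, matching TCR_TG0_4KB set in mmu_enable): for addr = 0x40200000,
 * the L1 index is 1 (bit 30 set) and the L2 index is 1 (one 2 MiB entry past
 * the 1 GiB boundary). */
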
/* Func : table_desc_valid
 * Desc : Check if a table entry holds a valid table descriptor
 */
static uint64_t table_desc_valid(uint64_t desc)
{
	return (desc & TABLE_DESC) == TABLE_DESC;
}

/* Func : get_new_table
 * Desc : Return the next free XLAT table from ttb buffer
 */
static uint64_t *get_new_table(void)
{
	uint64_t *new;

	if (free_idx >= max_tables)
		return NULL;

	new = (uint64_t *)((unsigned char *)xlat_addr + free_idx * GRANULE_SIZE);
	free_idx++;

	memset(new, 0, GRANULE_SIZE);

	return new;
}

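/* Note: tables are never freed; free_idx only grows, so the ttb_size passed
 * to mmu_init caps the total number of tables (max_tables). */
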
/* Func : get_table_from_desc
 * Desc : Get next level table address from table descriptor
 */
static uint64_t *get_table_from_desc(uint64_t desc)
{
	return (uint64_t *)(desc & XLAT_TABLE_MASK);
}

/* Func : get_next_level_table
 * Desc : Check if the table entry holds a valid descriptor. If not, allocate a
 * new table, update the entry and return the table address. If valid, return
 * the address from the descriptor
 */
static uint64_t *get_next_level_table(uint64_t *ptr)
{
	uint64_t desc = *ptr;

	if (!table_desc_valid(desc)) {
		uint64_t *new_table = get_new_table();

		if (new_table == NULL)
			return NULL;
		desc = ((uint64_t)new_table) | TABLE_DESC;
		*ptr = desc;
	}

	return get_table_from_desc(desc);
}

/* Func : init_xlat_table
 * Desc : Given a base address and size, identify the indices within the
 * different level XLAT tables which map the given base addr. Similar to a
 * table walk, except that any invalid entry encountered during the walk is
 * initialized accordingly. On success, return the size of the block/page
 * addressed by the final table
 */
static uint64_t init_xlat_table(uint64_t base_addr,
				uint64_t size,
				uint64_t tag)
{
	uint64_t l1_index = get_index_from_addr(base_addr, 1);
	uint64_t l2_index = get_index_from_addr(base_addr, 2);
	uint64_t l3_index = get_index_from_addr(base_addr, 3);
	uint64_t *table = xlat_addr;
	uint64_t desc;
	uint64_t attr = get_block_attr(tag);

	/* L1 table lookup
	 * If the VA has more bits than L2 can resolve, the lookup starts at L1.
	 * Assumption: we don't need an L0 table in coreboot */
	if (BITS_PER_VA > L1_ADDR_SHIFT) {
		if ((size >= L1_XLAT_SIZE) &&
		    IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
			/* If the block address is aligned and the size is
			 * greater than or equal to the size addressed by each
			 * L1 entry, we can directly store a block desc */
			desc = base_addr | BLOCK_DESC | attr;
			table[l1_index] = desc;
			/* L2 lookup is not required */
			return L1_XLAT_SIZE;
		} else {
			table = get_next_level_table(&table[l1_index]);
			if (!table)
				return 0;
		}
	}

	/* L2 table lookup
	 * If the lookup was performed at L1, the L2 table address comes from
	 * the L1 desc; else, the lookup starts at the ttbr address */
	if ((size >= L2_XLAT_SIZE) &&
	    IS_ALIGNED(base_addr, (1UL << L2_ADDR_SHIFT))) {
		/* If the block address is aligned and the size is greater than
		 * or equal to the size addressed by each L2 entry, we can
		 * directly store a block desc */
		desc = base_addr | BLOCK_DESC | attr;
		table[l2_index] = desc;
		/* L3 lookup is not required */
		return L2_XLAT_SIZE;
	} else {
		/* L2 entry stores a table descriptor */
		table = get_next_level_table(&table[l2_index]);
		if (!table)
			return 0;
	}

	/* L3 table lookup */
	desc = base_addr | PAGE_DESC | attr;
	table[l3_index] = desc;
	return L3_XLAT_SIZE;
}

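/* For illustration, assuming the 4 KiB-granule sizes noted earlier (L1/L2/L3
 * entries cover 1 GiB / 2 MiB / 4 KiB): a 3 MiB region starting on a 2 MiB
 * boundary is mapped as one 2 MiB L2 block, then 256 4 KiB L3 pages across
 * the subsequent calls made from init_mmap_entry. */
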
/* Func : sanity_check
 * Desc : Check that the address is granule-aligned and the size is at least
 * one granule
 */
static uint64_t sanity_check(uint64_t addr,
			     uint64_t size)
{
	/* Address should be aligned to the granule size */
	if (addr & GRANULE_SIZE_MASK)
		return 1;

	/* Size should be at least the granule size */
	if (size < GRANULE_SIZE)
		return 1;

	return 0;
}

/* Func : init_mmap_entry
 * Desc : For each mmap entry, call init_xlat_table with the base address.
 * Based on the size returned from init_xlat_table, advance the base address
 * and keep calling init_xlat_table until the whole region is mapped.
 */
static void init_mmap_entry(struct range_entry *r)
{
	uint64_t base_addr = range_entry_base(r);
	uint64_t size = range_entry_size(r);
	uint64_t tag = range_entry_tag(r);
	uint64_t temp_size = size;

	while (temp_size) {
		uint64_t ret;

		if (sanity_check(base_addr, temp_size))
			return;

		ret = init_xlat_table(base_addr + (size - temp_size),
				      temp_size, tag);

		if (ret == 0)
			return;

		temp_size -= ret;
	}
}

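/* Note: if sanity_check fails or the table pool is exhausted, the region is
 * left only partially mapped; callers receive no error indication. */
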
/* Func : mmu_init
 * Desc : Initialize the MMU with the mmap_ranges passed in. ttb_buffer is used
 * as the base address for the xlat tables. ttb_size defines the max number of
 * tables that can be used
 */
void mmu_init(struct memranges *mmap_ranges,
	      uint64_t *ttb_buffer,
	      uint64_t ttb_size)
{
	struct range_entry *mmap_entry;

	if (sanity_check((uint64_t)ttb_buffer, ttb_size))
		return;

	memset(ttb_buffer, 0, GRANULE_SIZE);
	max_tables = (ttb_size >> GRANULE_SIZE_SHIFT);
	xlat_addr = ttb_buffer;
	free_idx = 1;

	memranges_each_entry(mmap_entry, mmap_ranges) {
		init_mmap_entry(mmap_entry);
	}
}

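/* Usage sketch; the buffer size and range population are illustrative, and
 * the caller is assumed to build the ranges with the <memrange.h> helpers
 * (e.g. memranges_insert(&mmap, base, size, tag)):
 *
 *	static uint8_t ttb[16 * GRANULE_SIZE]
 *		__attribute__((aligned(GRANULE_SIZE)));
 *	struct memranges mmap;
 *
 *	... populate mmap ...
 *	mmu_init(&mmap, (uint64_t *)ttb, sizeof(ttb));
 *	mmu_enable();
 */
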
void mmu_enable(void)
{
	uint32_t sctlr;

	/* Initialize MAIR indices */
	raw_write_mair_el3(MAIR_ATTRIBUTES);

	/* Invalidate TLBs */
	tlbiall_el3();

	/* Initialize TCR flags */
	raw_write_tcr_el3(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
			  TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_64GB |
			  TCR_TBI_USED);

	/* Initialize TTBR */
	raw_write_ttbr0_el3((uintptr_t)xlat_addr);

	/* Ensure all translation table writes are committed before enabling
	 * the MMU */
	dsb();
	isb();

	/* Enable MMU */
	sctlr = raw_read_sctlr_el3();
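	/* M enables the MMU, C the data cache, I the instruction cache */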
	sctlr |= SCTLR_C | SCTLR_M | SCTLR_I;
	raw_write_sctlr_el3(sctlr);

	isb();
}