/*
 *
 * Copyright 2014 Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>

#include <arch/mmu.h>
#include <arch/lib_helpers.h>
#include <arch/cache.h>

/* Maximum number of XLAT Tables available based on ttb buffer size */
static unsigned int max_tables;
/* Address of ttb buffer */
static uint64_t *xlat_addr;

static int free_idx;
static uint8_t ttb_buffer[TTB_DEFAULT_SIZE] __aligned(GRANULE_SIZE)
	__attribute__((__section__(".ttb_buffer")));

static const char * const tag_to_string[] = {
	[TYPE_NORMAL_MEM] = "normal",
	[TYPE_DEV_MEM] = "device",
	[TYPE_DMA_MEM] = "uncached",
};

/*
 * The usedmem_ranges structure describes all the memory ranges that are
 * actually used by the payload, i.e. _start -> _end in the linker script and
 * the coreboot tables. This is required for two purposes:
 * 1) During the pre_sysinfo_scan_mmu_setup, these are the only ranges
 * initialized in the page table as we do not know the entire memory map.
 * 2) During the post_sysinfo_scan_mmu_setup, these ranges are used to check
 * that the DMA buffer is placed in a sane location and does not overlap any
 * of the used mem ranges.
 */
static struct mmu_ranges usedmem_ranges;

static void __attribute__((noreturn)) mmu_error(void)
{
	halt();
}

/* Func : get_block_attr
 * Desc : Get block descriptor attributes based on the value of tag in memrange
 * region
 */
static uint64_t get_block_attr(unsigned long tag)
{
	uint64_t attr;

	/* We should be in EL2 (which is non-secure only) or EL1 (non-secure) */
	attr = BLOCK_NS;

	/* Assuming whole memory is read-write */
	attr |= BLOCK_AP_RW;

	attr |= BLOCK_ACCESS;

	switch (tag) {

	case TYPE_NORMAL_MEM:
		attr |= BLOCK_SH_INNER_SHAREABLE;
		attr |= (BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT);
		break;
	case TYPE_DEV_MEM:
		attr |= BLOCK_INDEX_MEM_DEV_NGNRNE << BLOCK_INDEX_SHIFT;
		attr |= BLOCK_XN;
		break;
	case TYPE_DMA_MEM:
		attr |= BLOCK_INDEX_MEM_NORMAL_NC << BLOCK_INDEX_SHIFT;
		break;
	}

	return attr;
}
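
/*
 * Illustration (not part of the original code): for TYPE_NORMAL_MEM the
 * resulting attribute bits are roughly
 *	BLOCK_NS | BLOCK_AP_RW | BLOCK_ACCESS | BLOCK_SH_INNER_SHAREABLE |
 *	(BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT)
 * i.e. non-secure, read-write, inner-shareable normal memory. The exact bit
 * layout depends on the definitions in arch/mmu.h.
 */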

/* Func : table_desc_valid
 * Desc : Check if a table entry contains valid desc
 */
static uint64_t table_desc_valid(uint64_t desc)
{
	return ((desc & TABLE_DESC) == TABLE_DESC);
}

/* Func : setup_new_table
 * Desc : Get next free table from TTB and set it up to match old parent entry.
 */
static uint64_t *setup_new_table(uint64_t desc, size_t xlat_size)
{
	uint64_t *new, *entry;

	assert(free_idx < max_tables);

	new = (uint64_t *)((unsigned char *)xlat_addr + free_idx * GRANULE_SIZE);
	free_idx++;

	if (!desc) {
		memset(new, 0, GRANULE_SIZE);
	} else {
		/* Can reuse old parent entry, but may need to adjust type. */
		if (xlat_size == L3_XLAT_SIZE)
			desc |= PAGE_DESC;

		for (entry = new; (u8 *)entry < (u8 *)new + GRANULE_SIZE;
		     entry++, desc += xlat_size)
			*entry = desc;
	}

	return new;
}

/* Func : get_table_from_desc
 * Desc : Get next level table address from table descriptor
 */
static uint64_t *get_table_from_desc(uint64_t desc)
{
	uint64_t *ptr = (uint64_t *)(desc & XLAT_TABLE_MASK);
	return ptr;
}

/* Func: get_next_level_table
 * Desc: Check if the table entry is a valid descriptor. If not, initialize new
 * table, update the entry and return the table addr. If valid, return the addr.
 */
static uint64_t *get_next_level_table(uint64_t *ptr, size_t xlat_size)
{
	uint64_t desc = *ptr;

	if (!table_desc_valid(desc)) {
		uint64_t *new_table = setup_new_table(desc, xlat_size);
		desc = ((uint64_t)new_table) | TABLE_DESC;
		*ptr = desc;
	}
	return get_table_from_desc(desc);
}

/* Func : init_xlat_table
 * Desc : Given a base address and size, it identifies the indices within
 * different level XLAT tables which map the given base addr. Similar to table
 * walk, except that all invalid entries during the walk are updated
 * accordingly. On success, it returns the size of the block/page addressed by
 * the final table.
 */
static uint64_t init_xlat_table(uint64_t base_addr,
				uint64_t size,
				uint64_t tag)
{
	uint64_t l0_index = (base_addr & L0_ADDR_MASK) >> L0_ADDR_SHIFT;
	uint64_t l1_index = (base_addr & L1_ADDR_MASK) >> L1_ADDR_SHIFT;
	uint64_t l2_index = (base_addr & L2_ADDR_MASK) >> L2_ADDR_SHIFT;
	uint64_t l3_index = (base_addr & L3_ADDR_MASK) >> L3_ADDR_SHIFT;
	uint64_t *table = xlat_addr;
	uint64_t desc;
	uint64_t attr = get_block_attr(tag);

	/* L0 entry stores a table descriptor (doesn't support blocks) */
	table = get_next_level_table(&table[l0_index], L1_XLAT_SIZE);

	/* L1 table lookup */
	if ((size >= L1_XLAT_SIZE) &&
	    IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
		/* If block address is aligned and size is greater than
		 * or equal to size addressed by each L1 entry, we can
		 * directly store a block desc */
		desc = base_addr | BLOCK_DESC | attr;
		table[l1_index] = desc;
		/* L2 lookup is not required */
		return L1_XLAT_SIZE;
	}

	/* L1 entry stores a table descriptor */
	table = get_next_level_table(&table[l1_index], L2_XLAT_SIZE);

	/* L2 table lookup */
	if ((size >= L2_XLAT_SIZE) &&
	    IS_ALIGNED(base_addr, (1UL << L2_ADDR_SHIFT))) {
		/* If block address is aligned and size is greater than
		 * or equal to size addressed by each L2 entry, we can
		 * directly store a block desc */
		desc = base_addr | BLOCK_DESC | attr;
		table[l2_index] = desc;
		/* L3 lookup is not required */
		return L2_XLAT_SIZE;
	}

	/* L2 entry stores a table descriptor */
	table = get_next_level_table(&table[l2_index], L3_XLAT_SIZE);

	/* L3 table lookup */
	desc = base_addr | PAGE_DESC | attr;
	table[l3_index] = desc;
	return L3_XLAT_SIZE;
}
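
/*
 * Worked example (illustrative, assuming a 4KB granule so that L2 blocks map
 * 2MiB and L3 pages map 4KiB): a request for base 0x80000000 with size
 * 0x200000 is 2MiB-aligned, so it is satisfied with a single L2 block
 * descriptor and returns L2_XLAT_SIZE; a 4KiB-aligned leftover such as base
 * 0x80201000 falls through to an L3 page descriptor and returns L3_XLAT_SIZE.
 * The caller keeps advancing by the returned size until the whole range is
 * mapped.
 */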

/* Func : sanity_check
 * Desc : Check address/size alignment of a table or page.
 */
static void sanity_check(uint64_t addr, uint64_t size)
{
	assert(!(addr & GRANULE_SIZE_MASK) &&
	       !(size & GRANULE_SIZE_MASK) &&
	       (addr + size < (1UL << BITS_PER_VA)) &&
	       size >= GRANULE_SIZE);
}

/* Func : mmu_config_range
 * Desc : This function repeatedly calls init_xlat_table with the base
 * address. Based on size returned from init_xlat_table, base_addr is updated
 * and subsequent calls are made for initializing the xlat table until the whole
 * region is initialized.
 */
void mmu_config_range(void *start, size_t size, uint64_t tag)
{
	uint64_t base_addr = (uintptr_t)start;
	uint64_t temp_size = size;

	assert(tag < ARRAY_SIZE(tag_to_string));
	printf("Libpayload: ARM64 MMU: Mapping address range [%p:%p) as %s\n",
	       start, start + size, tag_to_string[tag]);
	sanity_check(base_addr, temp_size);

	while (temp_size)
		temp_size -= init_xlat_table(base_addr + (size - temp_size),
					     temp_size, tag);

	/* ARMv8 MMUs snoop L1 data cache, no need to flush it. */
	dsb();
	tlbiall_el2();
	dsb();
	isb();
}
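
/*
 * Usage sketch (illustrative only; the address, size and tag below are made
 * up): a payload that wants to mark a DRAM window as cacheable after the
 * initial device-memory mapping could call
 *
 *	mmu_config_range((void *)0x80000000, 0x40000000, TYPE_NORMAL_MEM);
 *
 * Both address and size must be GRANULE_SIZE-aligned, otherwise
 * sanity_check() will assert.
 */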

/* Func : mmu_init
 * Desc : Initialize mmu based on the mmu_memrange passed. ttb_buffer is used as
 * the base address for xlat tables. TTB_DEFAULT_SIZE defines the max number of
 * tables that can be used.
 * Assumes that memory 0-4GiB is device memory.
 */
uint64_t mmu_init(struct mmu_ranges *mmu_ranges)
{
	int i = 0;

	xlat_addr = (uint64_t *)&ttb_buffer;

	memset((void *)xlat_addr, 0, GRANULE_SIZE);
	max_tables = (TTB_DEFAULT_SIZE >> GRANULE_SIZE_SHIFT);
	free_idx = 1;

	printf("Libpayload ARM64: TTB_BUFFER: %p Max Tables: %d\n",
	       (void *)xlat_addr, max_tables);

	/*
	 * To keep things simple we start with mapping the entire base 4GiB as
	 * device memory. This accommodates various architectures' default
	 * settings (for instance rk3399 mmio starts at 0xf8000000); it is
	 * fine-tuned (e.g. mapping DRAM areas as write-back) later in the
	 * boot process.
	 */
	mmu_config_range(NULL, 0x100000000, TYPE_DEV_MEM);

	for (; i < mmu_ranges->used; i++)
		mmu_config_range((void *)mmu_ranges->entries[i].base,
				 mmu_ranges->entries[i].size,
				 mmu_ranges->entries[i].type);

	printf("Libpayload ARM64: MMU init done\n");
	return 0;
}

static uint32_t is_mmu_enabled(void)
{
	uint32_t sctlr;

	sctlr = raw_read_sctlr_el2();

	return (sctlr & SCTLR_M);
}

/*
 * Func: mmu_enable
 * Desc: Initialize MAIR, TCR, TTBR and enable MMU by setting appropriate bits
 * in SCTLR
 */
void mmu_enable(void)
{
	uint32_t sctlr;

	/* Initialize MAIR indices */
	raw_write_mair_el2(MAIR_ATTRIBUTES);

	/* Invalidate TLBs */
	tlbiall_el2();

	/* Initialize TCR flags */
	raw_write_tcr_el2(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
			  TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_256TB |
			  TCR_TBI_USED);

	/* Initialize TTBR */
	raw_write_ttbr0_el2((uintptr_t)xlat_addr);

	/* Ensure system register writes are committed before enabling MMU */
	isb();

	/* Enable MMU */
	sctlr = raw_read_sctlr_el2();
	sctlr |= SCTLR_C | SCTLR_M | SCTLR_I;
	raw_write_sctlr_el2(sctlr);

	isb();

	if (is_mmu_enabled())
		printf("ARM64: MMU enable done\n");
	else
		printf("ARM64: MMU enable failed\n");
}

/*
 * Func: mmu_add_memrange
 * Desc: Adds a new memory range
 */
static struct mmu_memrange *mmu_add_memrange(struct mmu_ranges *r,
					     uint64_t base, uint64_t size,
					     uint64_t type)
{
	struct mmu_memrange *curr = NULL;
	int i = r->used;

	if (i < ARRAY_SIZE(r->entries)) {
		curr = &r->entries[i];
		curr->base = base;
		curr->size = size;
		curr->type = type;

		r->used = i + 1;
	}

	return curr;
}

/* Structure to define properties of new memrange request */
struct mmu_new_range_prop {
	/* Type of memrange */
	uint64_t type;
	/* Size of the range */
	uint64_t size;
	/*
	 * If any restrictions on the max addr limit (this addr is exclusive
	 * for the range), else 0
	 */
	uint64_t lim_excl;
	/* If any restrictions on alignment of the range base, else 0 */
	uint64_t align;
	/*
	 * Function to test whether selected range is fine.
	 * NULL=any range is fine
	 * Return value 1=valid range, 0=otherwise
	 */
	int (*is_valid_range)(uint64_t, uint64_t);
	/* From what type of source range should this range be extracted */
	uint64_t src_type;
};
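
/*
 * Illustration (hypothetical values, loosely mirroring what
 * mmu_add_dma_range() does below): a caller requesting a granule-aligned
 * chunk of normal memory below 4GiB could fill the structure roughly as
 *
 *	struct mmu_new_range_prop prop = {
 *		.type = TYPE_DMA_MEM,
 *		.size = DMA_DEFAULT_SIZE,
 *		.lim_excl = 0x100000000ULL,
 *		.align = GRANULE_SIZE,
 *		.is_valid_range = NULL,
 *		.src_type = TYPE_NORMAL_MEM,
 *	};
 *
 * and pass it to mmu_alloc_range().
 */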

/*
 * Func: mmu_is_range_free
 * Desc: We need to ensure that the new range being allocated doesn't overlap
 * with any used memory range. Basically:
 * 1. Memory ranges used by the payload (usedmem_ranges)
 * 2. Any area that falls below the _end symbol in the linker script (the
 * kernel needs to be loaded in lower areas of memory, so the payload linker
 * script can have kernel memory below _start and _end). Thus, we want to make
 * sure we do not step into those areas either.
 * Returns: 1 on success, 0 on error
 * ASSUMPTION: All the memory used by the payload resides below the program
 * proper. If there is any memory used above the _end symbol, then it should be
 * marked as used memory in usedmem_ranges during the presysinfo_scan.
 */
static int mmu_is_range_free(uint64_t r_base,
			     uint64_t r_end)
{
	uint64_t payload_end = (uint64_t)&_end;
	uint64_t i;
	struct mmu_memrange *r = &usedmem_ranges.entries[0];

	/* Allocate memranges only above payload */
	if ((r_base <= payload_end) || (r_end <= payload_end))
		return 0;

	for (i = 0; i < usedmem_ranges.used; i++) {
		uint64_t start = r[i].base;
		uint64_t end = start + r[i].size;

		if ((start < r_end) && (end > r_base))
			return 0;
	}

	return 1;
}

/*
 * Func: mmu_get_new_range
 * Desc: Add a requested new memrange. We take as input the set of all
 * memranges and a structure defining the new memrange properties, i.e. its
 * type, size, the max addr it can grow up to, alignment restrictions, the
 * source type to take the range from, and finally a function pointer to check
 * if the chosen range is valid.
 */
static struct mmu_memrange *mmu_get_new_range(struct mmu_ranges *mmu_ranges,
					      struct mmu_new_range_prop *new)
{
	int i = 0;
	struct mmu_memrange *r = &mmu_ranges->entries[0];

	if (new->size == 0) {
		printf("MMU Error: Invalid range size\n");
		return NULL;
	}

	for (; i < mmu_ranges->used; i++) {

		if ((r[i].type != new->src_type) ||
		    (r[i].size < new->size) ||
		    (new->lim_excl && (r[i].base >= new->lim_excl)))
			continue;

		uint64_t base_addr;
		uint64_t range_end_addr = r[i].base + r[i].size;
		uint64_t end_addr = range_end_addr;

		/* Make sure we do not go above max if it is non-zero */
		if (new->lim_excl && (end_addr >= new->lim_excl))
			end_addr = new->lim_excl;

		while (1) {
			/*
			 * In case of an alignment requirement,
			 * if end_addr is aligned, then base_addr will be too.
			 */
			if (new->align)
				end_addr = ALIGN_DOWN(end_addr, new->align);

			base_addr = end_addr - new->size;

			if (base_addr < r[i].base)
				break;

			/*
			 * If the selected range is not used and valid for the
			 * user, move ahead with it
			 */
			if (mmu_is_range_free(base_addr, end_addr) &&
			    ((new->is_valid_range == NULL) ||
			     new->is_valid_range(base_addr, end_addr)))
				break;

			/* Drop to the next address. */
			end_addr -= 1;
		}

		if (base_addr < r[i].base)
			continue;

		if (end_addr != range_end_addr) {
			/* Add a new memrange since we split up one
			 * range crossing the 4GiB boundary or doing an
			 * ALIGN_DOWN on end_addr.
			 */
			r[i].size -= (range_end_addr - end_addr);
			if (mmu_add_memrange(mmu_ranges, end_addr,
					     range_end_addr - end_addr,
					     r[i].type) == NULL)
				mmu_error();
		}

		if (r[i].size == new->size) {
			r[i].type = new->type;
			return &r[i];
		}

		r[i].size -= new->size;

		r = mmu_add_memrange(mmu_ranges, base_addr, new->size,
				     new->type);

		if (r == NULL)
			mmu_error();

		return r;
	}

	/* Should never reach here if everything went fine */
	printf("ARM64 ERROR: No region allocated\n");
	return NULL;
}
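
/*
 * Worked example (hypothetical numbers, assuming the chosen sub-range is
 * free): given a 256MiB TYPE_NORMAL_MEM source range [0x80000000, 0x90000000)
 * and a request for 4MiB of TYPE_DMA_MEM below 4GiB aligned to GRANULE_SIZE,
 * the search starts at end_addr = 0x90000000, picks base_addr = 0x8FC00000,
 * shrinks the source range to [0x80000000, 0x8FC00000) and appends a new
 * TYPE_DMA_MEM memrange [0x8FC00000, 0x90000000), which is returned.
 */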

/*
 * Func: mmu_alloc_range
 * Desc: Call mmu_get_new_range to get a new memrange which is unused and mark
 * it as used to avoid the same range being allocated for different purposes.
 */
static struct mmu_memrange *mmu_alloc_range(struct mmu_ranges *mmu_ranges,
					    struct mmu_new_range_prop *p)
{
	struct mmu_memrange *r = mmu_get_new_range(mmu_ranges, p);

	if (r == NULL)
		return NULL;

	/*
	 * Mark this memrange as used memory. Important since this function
	 * can be called multiple times and we do not want to reuse a
	 * range that is already allocated.
	 */
	if (mmu_add_memrange(&usedmem_ranges, r->base, r->size, r->type)
	    == NULL)
		mmu_error();

	return r;
}

/*
 * Func: mmu_add_dma_range
 * Desc: Add a memrange for dma operations. This is special because we want to
 * initialize this memory as non-cacheable. We have a constraint that the DMA
 * buffer should be below 4GiB (32-bit only). So, we look for a TYPE_NORMAL_MEM
 * range at the lowest available addresses and align it to the granule size.
 */
static struct mmu_memrange *mmu_add_dma_range(struct mmu_ranges *mmu_ranges)
{
	struct mmu_new_range_prop prop;

	prop.type = TYPE_DMA_MEM;
	/* DMA_DEFAULT_SIZE is a multiple of GRANULE_SIZE */
	assert((DMA_DEFAULT_SIZE % GRANULE_SIZE) == 0);
	prop.size = DMA_DEFAULT_SIZE;
	prop.lim_excl = (uint64_t)CONFIG_LP_DMA_LIM_EXCL * MiB;
	prop.align = GRANULE_SIZE;
	prop.is_valid_range = NULL;
	prop.src_type = TYPE_NORMAL_MEM;

	return mmu_alloc_range(mmu_ranges, &prop);
}

static struct mmu_memrange *_mmu_add_fb_range(
		uint32_t size,
		struct mmu_ranges *mmu_ranges)
{
	struct mmu_new_range_prop prop;

	prop.type = TYPE_DMA_MEM;

	/* Make sure to allocate a size that is a multiple of GRANULE_SIZE */
	size = ALIGN_UP(size, GRANULE_SIZE);
	prop.size = size;
	prop.lim_excl = MIN_64_BIT_ADDR;
	prop.align = MB_SIZE;
	prop.is_valid_range = NULL;
	prop.src_type = TYPE_NORMAL_MEM;

	return mmu_alloc_range(mmu_ranges, &prop);
}

/*
 * Func: mmu_extract_ranges
 * Desc: Assumption is that coreboot tables have memranges in sorted
 * order. So, if there is an opportunity to combine ranges, we do that as
 * well. Memranges are initialized for both CB_MEM_RAM and CB_MEM_TABLE as
 * TYPE_NORMAL_MEM.
 */
static void mmu_extract_ranges(struct memrange *cb_ranges,
			       uint64_t ncb,
			       struct mmu_ranges *mmu_ranges)
{
	int i = 0;
	struct mmu_memrange *prev_range = NULL;

	/* Extract memory ranges to be mapped */
	for (; i < ncb; i++) {
		switch (cb_ranges[i].type) {
		case CB_MEM_RAM:
		case CB_MEM_TABLE:
			if (prev_range && (prev_range->base + prev_range->size
					   == cb_ranges[i].base)) {
				prev_range->size += cb_ranges[i].size;
			} else {
				prev_range = mmu_add_memrange(mmu_ranges,
							      cb_ranges[i].base,
							      cb_ranges[i].size,
							      TYPE_NORMAL_MEM);
				if (prev_range == NULL)
					mmu_error();
			}
			break;
		default:
			break;
		}
	}
}
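
/*
 * Example of the coalescing above (made-up entries): adjacent coreboot
 * entries [0x80000000, +0x10000000) of CB_MEM_RAM and [0x90000000,
 * +0x08000000) of CB_MEM_TABLE are merged into a single TYPE_NORMAL_MEM
 * memrange [0x80000000, +0x18000000), since the second entry starts exactly
 * where the first one ends.
 */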
625
Jimmy Zhangbe1b4f12014-10-09 18:42:00 -0700626static void mmu_add_fb_range(struct mmu_ranges *mmu_ranges)
627{
628 struct mmu_memrange *fb_range;
Nico Huber5e0db582020-07-18 15:20:00 +0200629 struct cb_framebuffer *framebuffer = &lib_sysinfo.framebuffer;
Jimmy Zhangbe1b4f12014-10-09 18:42:00 -0700630 uint32_t fb_size;
631
Patrick Georgi5dc87fe2016-04-28 06:03:57 +0200632 /* Check whether framebuffer is needed */
Jimmy Zhangbe1b4f12014-10-09 18:42:00 -0700633 fb_size = framebuffer->bytes_per_line * framebuffer->y_resolution;
634 if (!fb_size)
635 return;
636
Patrick Georgi5dc87fe2016-04-28 06:03:57 +0200637 /* framebuffer address has been set already, so just add it as DMA */
638 if (framebuffer->physical_address) {
639 if (mmu_add_memrange(mmu_ranges,
640 framebuffer->physical_address,
641 fb_size,
642 TYPE_DMA_MEM) == NULL)
643 mmu_error();
644 return;
645 }
646
Jimmy Zhangbe1b4f12014-10-09 18:42:00 -0700647 /* Allocate framebuffer */
648 fb_range = _mmu_add_fb_range(fb_size, mmu_ranges);
649 if (fb_range == NULL)
650 mmu_error();
651
Nico Huber5e0db582020-07-18 15:20:00 +0200652 framebuffer->physical_address = fb_range->base;
Jimmy Zhangbe1b4f12014-10-09 18:42:00 -0700653}

/*
 * Func: mmu_init_ranges_from_sysinfo
 * Desc: Initialize mmu_memranges based on the memranges obtained from coreboot
 * tables. Also, initialize dma memrange and xlat_addr for ttb buffer.
 */
struct mmu_memrange *mmu_init_ranges_from_sysinfo(struct memrange *cb_ranges,
						  uint64_t ncb,
						  struct mmu_ranges *mmu_ranges)
{
	struct mmu_memrange *dma_range;

	/* Initialize mmu_ranges to contain no entries. */
	mmu_ranges->used = 0;

	/* Extract ranges from memrange in lib_sysinfo */
	mmu_extract_ranges(cb_ranges, ncb, mmu_ranges);

	/* Get a range for dma */
	dma_range = mmu_add_dma_range(mmu_ranges);

	/* Get a range for framebuffer */
	mmu_add_fb_range(mmu_ranges);

	if (dma_range == NULL)
		mmu_error();

	return dma_range;
}
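
/*
 * Typical post-sysinfo usage sketch (illustrative only; variable names are
 * made up and error handling is omitted):
 *
 *	struct mmu_ranges ranges;
 *	struct mmu_memrange *dma;
 *
 *	dma = mmu_init_ranges_from_sysinfo(lib_sysinfo.memrange,
 *					   lib_sysinfo.n_memranges, &ranges);
 *	mmu_init(&ranges);
 *	mmu_enable();
 *
 * The returned dma memrange can then be handed to the DMA allocator.
 */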

/*
 * Func: mmu_presysinfo_memory_used
 * Desc: Records the memory used for pre-sysinfo page table initialization and
 * enabling of the MMU. All these ranges are stored in usedmem_ranges.
 * usedmem_ranges plays an important role in selecting the dma buffer as well,
 * since we check the dma buffer range against the used memory ranges to
 * prevent any overstepping.
 */
void mmu_presysinfo_memory_used(uint64_t base, uint64_t size)
{
	uint64_t range_base;

	range_base = ALIGN_DOWN(base, GRANULE_SIZE);

	size += (base - range_base);
	size = ALIGN_UP(size, GRANULE_SIZE);

	mmu_add_memrange(&usedmem_ranges, range_base, size, TYPE_NORMAL_MEM);
}

void mmu_presysinfo_enable(void)
{
	mmu_init(&usedmem_ranges);
	mmu_enable();
}
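
/*
 * Pre-sysinfo usage sketch (illustrative; the symbols are assumptions based
 * on the linker-script symbols referenced above): before the coreboot tables
 * are parsed, the payload marks its own image as used and enables the MMU
 * with only those ranges mapped:
 *
 *	mmu_presysinfo_memory_used((uintptr_t)&_start,
 *				   (uintptr_t)&_end - (uintptr_t)&_start);
 *	mmu_presysinfo_enable();
 */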

const struct mmu_ranges *mmu_get_used_ranges(void)
{
	return &usedmem_ranges;
}