/*
 *
 * Copyright 2014 Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>

#include <arch/mmu.h>
#include <arch/lib_helpers.h>
#include <arch/cache.h>

/* Maximum number of XLAT Tables available based on ttb buffer size */
static unsigned int max_tables;
/* Address of ttb buffer */
static uint64_t *xlat_addr;

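/*
 * Index of the next unused translation table (GRANULE_SIZE each) within
 * ttb_buffer; index 0 holds the root table that is cleared in mmu_init().
 */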
static int free_idx;
static uint8_t ttb_buffer[TTB_DEFAULT_SIZE] __aligned(GRANULE_SIZE)
	__attribute__((__section__(".ttb_buffer")));

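/* Human-readable names for the memory type tags, used when logging mappings. */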
static const char * const tag_to_string[] = {
	[TYPE_NORMAL_MEM] = "normal",
	[TYPE_DEV_MEM] = "device",
	[TYPE_DMA_MEM] = "uncached",
};

/*
 * usedmem_ranges describes all the memory ranges that are actually used by the
 * payload, i.e. _start -> _end in the linker script and the coreboot tables.
 * This is required for two purposes:
 * 1) During the pre_sysinfo_scan_mmu_setup, these are the only ranges
 * initialized in the page table as we do not know the entire memory map.
 * 2) During the post_sysinfo_scan_mmu_setup, these ranges are used to check if
 * the DMA buffer is placed in a sane location and does not overlap any of the
 * used mem ranges.
 */
static struct mmu_ranges usedmem_ranges;

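/*
 * Typical call sequence (sketch): the payload registers its own ranges with
 * mmu_presysinfo_memory_used() and calls mmu_presysinfo_enable(); once the
 * coreboot tables have been parsed, the full map is rebuilt via
 * mmu_init_ranges_from_sysinfo(), mmu_init() and mmu_enable().
 */
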
static void __attribute__((noreturn)) mmu_error(void)
{
	halt();
}

/* Func : get_block_attr
 * Desc : Get block descriptor attributes based on the value of tag in memrange
 * region
 */
static uint64_t get_block_attr(unsigned long tag)
{
	uint64_t attr;

	/* We should be in EL2 (which is non-secure only) or EL1 (non-secure) */
	attr = BLOCK_NS;

	/* Assuming whole memory is read-write */
	attr |= BLOCK_AP_RW;

	attr |= BLOCK_ACCESS;

	switch (tag) {

	case TYPE_NORMAL_MEM:
		attr |= BLOCK_SH_INNER_SHAREABLE;
		attr |= (BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT);
		break;
	case TYPE_DEV_MEM:
		attr |= BLOCK_INDEX_MEM_DEV_NGNRNE << BLOCK_INDEX_SHIFT;
		attr |= BLOCK_XN;
		break;
	case TYPE_DMA_MEM:
		attr |= BLOCK_INDEX_MEM_NORMAL_NC << BLOCK_INDEX_SHIFT;
		break;
	}

	return attr;
}

/* Func : table_desc_valid
 * Desc : Check if a table entry contains valid desc
 */
static uint64_t table_desc_valid(uint64_t desc)
{
	return ((desc & TABLE_DESC) == TABLE_DESC);
}

/* Func : setup_new_table
 * Desc : Get next free table from TTB and set it up to match old parent entry.
 */
static uint64_t *setup_new_table(uint64_t desc, size_t xlat_size)
{
	uint64_t *new, *entry;

	assert(free_idx < max_tables);

	new = (uint64_t *)((unsigned char *)xlat_addr + free_idx * GRANULE_SIZE);
	free_idx++;

	if (!desc) {
		memset(new, 0, GRANULE_SIZE);
	} else {
		/* Can reuse old parent entry, but may need to adjust type. */
		if (xlat_size == L3_XLAT_SIZE)
			desc |= PAGE_DESC;

		for (entry = new; (u8 *)entry < (u8 *)new + GRANULE_SIZE;
		     entry++, desc += xlat_size)
			*entry = desc;
	}

	return new;
}

/* Func : get_table_from_desc
 * Desc : Get next level table address from table descriptor
 */
static uint64_t *get_table_from_desc(uint64_t desc)
{
	uint64_t *ptr = (uint64_t *)(desc & XLAT_TABLE_MASK);
	return ptr;
}

/* Func: get_next_level_table
 * Desc: Check if the table entry is a valid descriptor. If not, initialize new
 * table, update the entry and return the table addr. If valid, return the addr.
 */
static uint64_t *get_next_level_table(uint64_t *ptr, size_t xlat_size)
{
	uint64_t desc = *ptr;

	if (!table_desc_valid(desc)) {
		uint64_t *new_table = setup_new_table(desc, xlat_size);
		desc = ((uint64_t)new_table) | TABLE_DESC;
		*ptr = desc;
	}
	return get_table_from_desc(desc);
}

/* Func : init_xlat_table
 * Desc : Given a base address and size, it identifies the indices within
 * different level XLAT tables which map the given base addr. Similar to table
 * walk, except that all invalid entries during the walk are updated
 * accordingly. On success, it returns the size of the block/page addressed by
 * the final table.
 */
static uint64_t init_xlat_table(uint64_t base_addr,
				uint64_t size,
				uint64_t tag)
{
	uint64_t l0_index = (base_addr & L0_ADDR_MASK) >> L0_ADDR_SHIFT;
	uint64_t l1_index = (base_addr & L1_ADDR_MASK) >> L1_ADDR_SHIFT;
	uint64_t l2_index = (base_addr & L2_ADDR_MASK) >> L2_ADDR_SHIFT;
	uint64_t l3_index = (base_addr & L3_ADDR_MASK) >> L3_ADDR_SHIFT;
	uint64_t *table = xlat_addr;
	uint64_t desc;
	uint64_t attr = get_block_attr(tag);

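	/*
	 * Note: with the 4KiB granule configured below (TCR_TG0_4KB), an L1
	 * block maps 1GiB, an L2 block maps 2MiB and an L3 page maps 4KiB, so
	 * suitably aligned regions terminate the walk early.
	 */
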
	/* L0 entry stores a table descriptor (doesn't support blocks) */
	table = get_next_level_table(&table[l0_index], L1_XLAT_SIZE);

	/* L1 table lookup */
	if ((size >= L1_XLAT_SIZE) &&
	    IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
		/* If block address is aligned and size is greater than
		 * or equal to size addressed by each L1 entry, we can
		 * directly store a block desc */
		desc = base_addr | BLOCK_DESC | attr;
		table[l1_index] = desc;
		/* L2 lookup is not required */
		return L1_XLAT_SIZE;
	}

	/* L1 entry stores a table descriptor */
	table = get_next_level_table(&table[l1_index], L2_XLAT_SIZE);

	/* L2 table lookup */
	if ((size >= L2_XLAT_SIZE) &&
	    IS_ALIGNED(base_addr, (1UL << L2_ADDR_SHIFT))) {
		/* If block address is aligned and size is greater than
		 * or equal to size addressed by each L2 entry, we can
		 * directly store a block desc */
		desc = base_addr | BLOCK_DESC | attr;
		table[l2_index] = desc;
		/* L3 lookup is not required */
		return L2_XLAT_SIZE;
	}

	/* L2 entry stores a table descriptor */
	table = get_next_level_table(&table[l2_index], L3_XLAT_SIZE);

	/* L3 table lookup */
	desc = base_addr | PAGE_DESC | attr;
	table[l3_index] = desc;
	return L3_XLAT_SIZE;
}

/* Func : sanity_check
 * Desc : Check address/size alignment of a table or page.
 */
static void sanity_check(uint64_t addr, uint64_t size)
{
	assert(!(addr & GRANULE_SIZE_MASK) &&
	       !(size & GRANULE_SIZE_MASK) &&
	       (addr + size < (1UL << BITS_PER_VA)) &&
	       size >= GRANULE_SIZE);
}

/* Func : mmu_config_range
 * Desc : This function repeatedly calls init_xlat_table with the base
 * address. Based on size returned from init_xlat_table, base_addr is updated
 * and subsequent calls are made for initializing the xlat table until the whole
 * region is initialized.
 */
void mmu_config_range(void *start, size_t size, uint64_t tag)
{
	uint64_t base_addr = (uintptr_t)start;
	uint64_t temp_size = size;

	assert(tag < ARRAY_SIZE(tag_to_string));
	printf("Libpayload: ARM64 MMU: Mapping address range [%p:%p) as %s\n",
	       start, start + size, tag_to_string[tag]);
	sanity_check(base_addr, temp_size);

	while (temp_size)
		temp_size -= init_xlat_table(base_addr + (size - temp_size),
					     temp_size, tag);

	/* ARMv8 MMUs snoop L1 data cache, no need to flush it. */
	dsb();
	tlbiall_el2();
	dsb();
	isb();
}
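
/*
 * Example (hypothetical DRAM base): remap 16MiB as cacheable normal memory
 * after the initial device-memory mapping:
 *
 *	mmu_config_range((void *)0x80000000, 16 * MiB, TYPE_NORMAL_MEM);
 */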

/* Func : mmu_init
 * Desc : Initialize the MMU based on the mmu_ranges passed in. ttb_buffer is
 * used as the base address for the xlat tables. TTB_DEFAULT_SIZE defines the
 * max number of tables that can be used.
 * Assumes that memory 0-4GiB is device memory.
 */
uint64_t mmu_init(struct mmu_ranges *mmu_ranges)
{
	int i = 0;

	xlat_addr = (uint64_t *)&ttb_buffer;

	memset((void *)xlat_addr, 0, GRANULE_SIZE);
	max_tables = (TTB_DEFAULT_SIZE >> GRANULE_SIZE_SHIFT);
	free_idx = 1;

	printf("Libpayload ARM64: TTB_BUFFER: %p Max Tables: %d\n",
	       (void *)xlat_addr, max_tables);

	/*
	 * To keep things simple we start with mapping the entire base 4GB as
	 * device memory. This accommodates various architectures' default
	 * settings (for instance rk3399 mmio starts at 0xf8000000); it is
	 * fine tuned (e.g. mapping DRAM areas as write-back) later in the
	 * boot process.
	 */
	mmu_config_range(NULL, 0x100000000, TYPE_DEV_MEM);

	for (; i < mmu_ranges->used; i++)
		mmu_config_range((void *)mmu_ranges->entries[i].base,
				 mmu_ranges->entries[i].size,
				 mmu_ranges->entries[i].type);

	printf("Libpayload ARM64: MMU init done\n");
	return 0;
}

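/* Func : is_mmu_enabled
 * Desc : Report whether the MMU enable bit (SCTLR_EL2.M) is currently set.
 */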
static uint32_t is_mmu_enabled(void)
{
	uint32_t sctlr;

	sctlr = raw_read_sctlr_el2();

	return (sctlr & SCTLR_M);
}

/*
 * Func: mmu_enable
 * Desc: Initialize MAIR, TCR, TTBR and enable the MMU by setting the
 * appropriate bits in SCTLR
 */
void mmu_enable(void)
{
	uint32_t sctlr;

	/* Initialize MAIR indices */
	raw_write_mair_el2(MAIR_ATTRIBUTES);

	/* Invalidate TLBs */
	tlbiall_el2();

	/* Initialize TCR flags */
	raw_write_tcr_el2(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
			  TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_256TB |
			  TCR_TBI_USED);

	/* Initialize TTBR */
	raw_write_ttbr0_el2((uintptr_t)xlat_addr);

	/* Ensure system register writes are committed before enabling MMU */
	isb();

	/* Enable MMU */
	sctlr = raw_read_sctlr_el2();
	sctlr |= SCTLR_C | SCTLR_M | SCTLR_I;
	raw_write_sctlr_el2(sctlr);

	isb();

	if (is_mmu_enabled())
		printf("ARM64: MMU enable done\n");
	else
		printf("ARM64: MMU enable failed\n");
}

/*
 * Func: mmu_add_memrange
 * Desc: Adds a new memory range
 */
static struct mmu_memrange *mmu_add_memrange(struct mmu_ranges *r,
					     uint64_t base, uint64_t size,
					     uint64_t type)
{
	struct mmu_memrange *curr = NULL;
	int i = r->used;

	if (i < ARRAY_SIZE(r->entries)) {
		curr = &r->entries[i];
		curr->base = base;
		curr->size = size;
		curr->type = type;

		r->used = i + 1;
	}

	return curr;
}

/* Structure to define properties of a new memrange request */
struct mmu_new_range_prop {
	/* Type of memrange */
	uint64_t type;
	/* Size of the range */
	uint64_t size;
	/*
	 * If there are any restrictions on the maximum address limit (this
	 * address is exclusive for the range), else 0
	 */
	uint64_t lim_excl;
	/* If there are any restrictions on the alignment of the range base, else 0 */
	uint64_t align;
	/*
	 * Function to test whether the selected range is fine.
	 * NULL = any range is fine
	 * Return value 1 = valid range, 0 = otherwise
	 */
	int (*is_valid_range)(uint64_t, uint64_t);
	/* From what type of source range should this range be extracted */
	uint64_t src_type;
};
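
/* See mmu_add_dma_range() below for a typical way this structure is filled in. */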

/*
 * Func: mmu_is_range_free
 * Desc: We need to ensure that the new range being allocated doesn't overlap
 * with any used memory range. Basically:
 * 1. Memory ranges used by the payload (usedmem_ranges)
 * 2. Any area that falls below the _end symbol in the linker script (the
 * kernel needs to be loaded in lower areas of memory, so the payload linker
 * script can have kernel memory below _start and _end; thus, we want to make
 * sure we do not step into those areas either).
 * Returns: 1 if the range [r_base, r_end) is free, 0 otherwise
 * ASSUMPTION: All the memory used by the payload resides below the program
 * proper. If there is any memory used above the _end symbol, then it should be
 * marked as used memory in usedmem_ranges during the presysinfo_scan.
 */
static int mmu_is_range_free(uint64_t r_base,
			     uint64_t r_end)
{
	uint64_t payload_end = (uint64_t)&_end;
	uint64_t i;
	struct mmu_memrange *r = &usedmem_ranges.entries[0];

	/* Allocate memranges only above the payload */
	if ((r_base <= payload_end) || (r_end <= payload_end))
		return 0;

	for (i = 0; i < usedmem_ranges.used; i++) {
		uint64_t start = r[i].base;
		uint64_t end = start + r[i].size;

		if ((start < r_end) && (end > r_base))
			return 0;
	}

	return 1;
}

/*
 * Func: mmu_get_new_range
 * Desc: Add a requested new memrange. We take as input the set of all memranges
 * and a structure defining the new memrange properties, i.e. its type, size,
 * max_addr it can grow up to, alignment restrictions, source type to take the
 * range from and, finally, a function pointer to check if the chosen range is
 * valid.
 */
static struct mmu_memrange *mmu_get_new_range(struct mmu_ranges *mmu_ranges,
					      struct mmu_new_range_prop *new)
{
	int i = 0;
	struct mmu_memrange *r = &mmu_ranges->entries[0];

	if (new->size == 0) {
		printf("MMU Error: Invalid range size\n");
		return NULL;
	}

	for (; i < mmu_ranges->used; i++) {

		if ((r[i].type != new->src_type) ||
		    (r[i].size < new->size) ||
		    (new->lim_excl && (r[i].base >= new->lim_excl)))
			continue;

		uint64_t base_addr;
		uint64_t range_end_addr = r[i].base + r[i].size;
		uint64_t end_addr = range_end_addr;

		/* Make sure we do not go above max if it is non-zero */
		if (new->lim_excl && (end_addr >= new->lim_excl))
			end_addr = new->lim_excl;

		while (1) {
			/*
			 * In case of alignment requirement,
			 * if end_addr is aligned, then base_addr will be too.
			 */
			if (new->align)
				end_addr = ALIGN_DOWN(end_addr, new->align);

			base_addr = end_addr - new->size;

			if (base_addr < r[i].base)
				break;

			/*
			 * If the selected range is not used and valid for the
			 * user, move ahead with it
			 */
			if (mmu_is_range_free(base_addr, end_addr) &&
			    ((new->is_valid_range == NULL) ||
			     new->is_valid_range(base_addr, end_addr)))
				break;

			/* Drop to the next address. */
			end_addr -= 1;
		}

		if (base_addr < r[i].base)
			continue;

		if (end_addr != range_end_addr) {
			/* Add a new memrange since we split up one
			 * range, either at lim_excl or by doing an
			 * ALIGN_DOWN on end_addr.
			 */
			r[i].size -= (range_end_addr - end_addr);
			if (mmu_add_memrange(mmu_ranges, end_addr,
					     range_end_addr - end_addr,
					     r[i].type) == NULL)
				mmu_error();
		}

		if (r[i].size == new->size) {
			r[i].type = new->type;
			return &r[i];
		}

		r[i].size -= new->size;

		r = mmu_add_memrange(mmu_ranges, base_addr, new->size,
				     new->type);

		if (r == NULL)
			mmu_error();

		return r;
	}

	/* Should never reach here if everything went fine */
	printf("ARM64 ERROR: No region allocated\n");
	return NULL;
}

/*
 * Func: mmu_alloc_range
 * Desc: Call mmu_get_new_range to get a new memrange which is unused and mark
 * it as used to avoid the same range being allocated for different purposes.
 */
static struct mmu_memrange *mmu_alloc_range(struct mmu_ranges *mmu_ranges,
					    struct mmu_new_range_prop *p)
{
	struct mmu_memrange *r = mmu_get_new_range(mmu_ranges, p);

	if (r == NULL)
		return NULL;

	/*
	 * Mark this memrange as used memory. Important since this function
	 * can be called multiple times and we do not want to reuse some
	 * range already allocated.
	 */
	if (mmu_add_memrange(&usedmem_ranges, r->base, r->size, r->type)
	    == NULL)
		mmu_error();

	return r;
}

/*
 * Func: mmu_add_dma_range
 * Desc: Add a memrange for DMA operations. This is special because we want to
 * initialize this memory as non-cacheable. We have a constraint that the DMA
 * buffer should stay below CONFIG_LP_DMA_LIM_EXCL (so that 32-bit-only devices
 * can address it). So, we look up a TYPE_NORMAL_MEM region below that limit
 * and align it to GRANULE_SIZE.
 */
static struct mmu_memrange *mmu_add_dma_range(struct mmu_ranges *mmu_ranges)
{
	struct mmu_new_range_prop prop;

	prop.type = TYPE_DMA_MEM;
	/* DMA_DEFAULT_SIZE is a multiple of GRANULE_SIZE */
	assert((DMA_DEFAULT_SIZE % GRANULE_SIZE) == 0);
	prop.size = DMA_DEFAULT_SIZE;
	prop.lim_excl = (uint64_t)CONFIG_LP_DMA_LIM_EXCL * MiB;
	prop.align = GRANULE_SIZE;
	prop.is_valid_range = NULL;
	prop.src_type = TYPE_NORMAL_MEM;

	return mmu_alloc_range(mmu_ranges, &prop);
}

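/*
 * Func: _mmu_add_fb_range
 * Desc: Allocate an uncached (TYPE_DMA_MEM) region for the framebuffer from
 * normal memory below MIN_64_BIT_ADDR, aligned to MB_SIZE.
 */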
static struct mmu_memrange *_mmu_add_fb_range(
		uint32_t size,
		struct mmu_ranges *mmu_ranges)
{
	struct mmu_new_range_prop prop;

	prop.type = TYPE_DMA_MEM;

	/* make sure to allocate a size of multiple of GRANULE_SIZE */
	size = ALIGN_UP(size, GRANULE_SIZE);
	prop.size = size;
	prop.lim_excl = MIN_64_BIT_ADDR;
	prop.align = MB_SIZE;
	prop.is_valid_range = NULL;
	prop.src_type = TYPE_NORMAL_MEM;

	return mmu_alloc_range(mmu_ranges, &prop);
}

/*
 * Func: mmu_extract_ranges
 * Desc: Assumption is that coreboot tables have memranges in sorted
 * order. So, if there is an opportunity to combine ranges, we do that as
 * well. Memranges are initialized for both CB_MEM_RAM and CB_MEM_TABLE as
 * TYPE_NORMAL_MEM.
 */
static void mmu_extract_ranges(struct memrange *cb_ranges,
			       uint64_t ncb,
			       struct mmu_ranges *mmu_ranges)
{
	int i = 0;
	struct mmu_memrange *prev_range = NULL;

	/* Extract memory ranges to be mapped */
	for (; i < ncb; i++) {
		switch (cb_ranges[i].type) {
		case CB_MEM_RAM:
		case CB_MEM_TABLE:
			if (prev_range && (prev_range->base + prev_range->size
					   == cb_ranges[i].base)) {
				prev_range->size += cb_ranges[i].size;
			} else {
				prev_range = mmu_add_memrange(mmu_ranges,
							      cb_ranges[i].base,
							      cb_ranges[i].size,
							      TYPE_NORMAL_MEM);
				if (prev_range == NULL)
					mmu_error();
			}
			break;
		default:
			break;
		}
	}
}

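/*
 * Func: mmu_add_fb_range
 * Desc: Map the coreboot framebuffer as uncached memory. If coreboot already
 * assigned a physical address, just record that range; otherwise allocate one
 * and report it back through lib_sysinfo.
 */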
static void mmu_add_fb_range(struct mmu_ranges *mmu_ranges)
{
	struct mmu_memrange *fb_range;
	struct cb_framebuffer *framebuffer = &lib_sysinfo.framebuffer;
	uint32_t fb_size;

	/* Check whether a framebuffer is needed */
	fb_size = framebuffer->bytes_per_line * framebuffer->y_resolution;
	if (!fb_size)
		return;

	/* If the framebuffer address has been set already, just add it as DMA */
	if (framebuffer->physical_address) {
		if (mmu_add_memrange(mmu_ranges,
				     framebuffer->physical_address,
				     fb_size,
				     TYPE_DMA_MEM) == NULL)
			mmu_error();
		return;
	}

	/* Allocate framebuffer */
	fb_range = _mmu_add_fb_range(fb_size, mmu_ranges);
	if (fb_range == NULL)
		mmu_error();

	framebuffer->physical_address = fb_range->base;
}

/*
 * Func: mmu_init_ranges_from_sysinfo
 * Desc: Initialize mmu_ranges based on the memranges obtained from the coreboot
 * tables. Also allocate the DMA and framebuffer memranges.
 */
struct mmu_memrange *mmu_init_ranges_from_sysinfo(struct memrange *cb_ranges,
						  uint64_t ncb,
						  struct mmu_ranges *mmu_ranges)
{
	struct mmu_memrange *dma_range;

	/* Initialize mmu_ranges to contain no entries. */
	mmu_ranges->used = 0;

	/* Extract ranges from memrange in lib_sysinfo */
	mmu_extract_ranges(cb_ranges, ncb, mmu_ranges);

	/* Get a range for dma */
	dma_range = mmu_add_dma_range(mmu_ranges);

	/* Get a range for the framebuffer */
	mmu_add_fb_range(mmu_ranges);

	if (dma_range == NULL)
		mmu_error();

	return dma_range;
}

/*
 * Func: mmu_presysinfo_memory_used
 * Desc: Registers a memory range as used before sysinfo is scanned, so that it
 * gets mapped by the presysinfo page tables when the MMU is first enabled. All
 * these ranges are stored in usedmem_ranges. usedmem_ranges also plays an
 * important role in selecting the DMA buffer, since we check the DMA buffer
 * range against the used memory ranges to prevent any overlap.
 */
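/*
 * Example (hypothetical values, assuming a 4KiB granule): base=0x80000800,
 * size=0x1000 is expanded to range_base=0x80000000, size=0x2000.
 */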
void mmu_presysinfo_memory_used(uint64_t base, uint64_t size)
{
	uint64_t range_base;

	range_base = ALIGN_DOWN(base, GRANULE_SIZE);

	size += (base - range_base);
	size = ALIGN_UP(size, GRANULE_SIZE);

	mmu_add_memrange(&usedmem_ranges, range_base, size, TYPE_NORMAL_MEM);
}

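/*
 * Func: mmu_presysinfo_enable
 * Desc: Build page tables for the ranges registered so far and turn the MMU on.
 */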
void mmu_presysinfo_enable(void)
{
	mmu_init(&usedmem_ranges);
	mmu_enable();
}

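/*
 * Func: mmu_get_used_ranges
 * Desc: Expose the ranges that the payload has marked as used.
 */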
const struct mmu_ranges *mmu_get_used_ranges(void)
{
	return &usedmem_ranges;
}