David Hendricks | f9be756 | 2013-03-21 21:58:50 -0700 | [diff] [blame] | 1 | /* |
| 2 | * This file is part of the coreboot project. |
| 3 | * |
Deepa Dinamani | e197748 | 2015-01-28 14:15:56 -0800 | [diff] [blame] | 4 | * Copyright (c) 2015, The Linux Foundation. All rights reserved. |
David Hendricks | f9be756 | 2013-03-21 21:58:50 -0700 | [diff] [blame] | 5 | * Copyright 2013 Google Inc. |
| 6 | * |
| 7 | * Redistribution and use in source and binary forms, with or without |
| 8 | * modification, are permitted provided that the following conditions |
| 9 | * are met: |
| 10 | * 1. Redistributions of source code must retain the above copyright |
| 11 | * notice, this list of conditions and the following disclaimer. |
| 12 | * 2. Redistributions in binary form must reproduce the above copyright |
| 13 | * notice, this list of conditions and the following disclaimer in the |
| 14 | * documentation and/or other materials provided with the distribution. |
| 15 | * 3. The name of the author may not be used to endorse or promote products |
| 16 | * derived from this software without specific prior written permission. |
| 17 | * |
| 18 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
| 19 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 20 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 21 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
| 22 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 23 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| 24 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 25 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 26 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 27 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 28 | * SUCH DAMAGE. |
| 29 | */ |
| 30 | |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 31 | #include <assert.h> |
Gabe Black | ee4bfbf | 2013-08-13 21:05:43 -0700 | [diff] [blame] | 32 | #include <config.h> |
David Hendricks | f9be756 | 2013-03-21 21:58:50 -0700 | [diff] [blame] | 33 | #include <stdlib.h> |
David Hendricks | fa244a6 | 2013-03-28 18:07:30 -0700 | [diff] [blame] | 34 | #include <stdint.h> |
Julius Werner | ec5e5e0 | 2014-08-20 15:29:56 -0700 | [diff] [blame] | 35 | #include <symbols.h> |
David Hendricks | f9be756 | 2013-03-21 21:58:50 -0700 | [diff] [blame] | 36 | |
| 37 | #include <cbmem.h> |
| 38 | #include <console/console.h> |
| 39 | |
| 40 | #include <arch/cache.h> |
Gabe Black | 800790d | 2013-05-18 22:45:54 -0700 | [diff] [blame] | 41 | #include <arch/io.h> |
David Hendricks | f9be756 | 2013-03-21 21:58:50 -0700 | [diff] [blame] | 42 | |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 43 | #if CONFIG_ARM_LPAE |
| 44 | /* See B3.6.2 of ARMv7 Architecture Reference Manual */ |
| 45 | /* TODO: Utilize the contiguous hint flag */ |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 46 | #define ATTR_BLOCK (\ |
Jimmy Zhang | c1f7cbe | 2014-06-06 17:00:10 -0700 | [diff] [blame] | 47 | 0ULL << 54 | /* XN. 0:Not restricted */ \ |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 48 | 0ULL << 53 | /* PXN. 0:Not restricted */ \ |
| 49 | 1 << 10 | /* AF. 1:Accessed. This is to prevent access \ |
| 50 | * fault when accessed for the first time */ \ |
| 51 | 0 << 6 | /* AP[2:1]. 0b00:full access from PL1 */ \ |
| 52 | 0 << 5 | /* NS. 0:Output address is in Secure space */ \ |
| 53 | 0 << 1 | /* block/table. 0:block entry */ \ |
| 54 | 1 << 0 /* validity. 1:valid */ \ |
| 55 | ) |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 56 | #define ATTR_PAGE (ATTR_BLOCK | 1 << 1) |
| 57 | #define ATTR_NEXTLEVEL (0x3) |
| 58 | #define ATTR_NC ((MAIR_INDX_NC << 2) | (1ULL << 53) | (1ULL << 54)) |
| 59 | #define ATTR_WT (MAIR_INDX_WT << 2) |
| 60 | #define ATTR_WB (MAIR_INDX_WB << 2) |
David Hendricks | f9be756 | 2013-03-21 21:58:50 -0700 | [diff] [blame] | 61 | |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 62 | #define PAGE_MASK 0x000ffffffffff000ULL |
| 63 | #define BLOCK_MASK 0x000fffffffe00000ULL |
| 64 | #define NEXTLEVEL_MASK PAGE_MASK |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 65 | #define BLOCK_SHIFT 21 |
Gabe Black | 800790d | 2013-05-18 22:45:54 -0700 | [diff] [blame] | 66 | |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 67 | typedef uint64_t pte_t; |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 68 | #else /* CONFIG_ARM_LPAE */ |
David Hendricks | f9be756 | 2013-03-21 21:58:50 -0700 | [diff] [blame] | 69 | /* |
| 70 | * Section entry bits: |
| 71 | * 31:20 - section base address |
| 72 | * 18 - 0 to indicate normal section (versus supersection) |
| 73 | * 17 - nG, 0 to indicate page is global |
| 74 | * 16 - S, 0 for non-shareable (?) |
| 75 | * 15 - APX, 0 for full access |
| 76 | * 14:12 - TEX, 0b000 for outer and inner write-back |
| 77 | * 11:10 - AP, 0b11 for full access |
| 78 | * 9 - P, ? (FIXME: not described or possibly obsolete?) |
| 79 | * 8: 5 - Domain |
| 80 | * 4 - XN, 1 to set execute-never (and also avoid prefetches) |
| 81 | * 3 - C, 1 for cacheable |
| 82 | * 2 - B, 1 for bufferable |
| 83 | * 1: 0 - 0b10 to indicate section entry |
| 84 | */ |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 85 | #define ATTR_BLOCK ((3 << 10) | 0x2) |
| 86 | #define ATTR_PAGE ((3 << 4) | 0x2) |
| 87 | #define ATTR_NEXTLEVEL (0x1) |
| 88 | #define ATTR_NC (1 << 4) |
| 89 | #define ATTR_WT (1 << 3) |
| 90 | #define ATTR_WB ((1 << 3) | (1 << 2)) |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 91 | |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 92 | #define PAGE_MASK 0xfffff000UL |
| 93 | #define BLOCK_MASK 0xfff00000UL |
| 94 | #define NEXTLEVEL_MASK 0xfffffc00UL |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 95 | #define BLOCK_SHIFT 20 |
| 96 | |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 97 | typedef uint32_t pte_t; |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 98 | #endif /* CONFIG_ARM_LPAE */ |
| 99 | |
Julius Werner | 03a0a65 | 2015-09-29 17:28:15 -0700 | [diff] [blame] | 100 | /* We set the first PTE to a sentinel value that cannot occur naturally (has |
| 101 | * attributes set but bits [1:0] are 0 -> unmapped) to mark unused subtables. */ |
| 102 | #define ATTR_UNUSED 0xBADbA6E0 |
| 103 | #define SUBTABLE_PTES (1 << (BLOCK_SHIFT - PAGE_SHIFT)) |
| 104 | |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 105 | /* |
| 106 | * mask/shift/size for pages and blocks |
| 107 | */ |
| 108 | #define PAGE_SHIFT 12 |
| 109 | #define PAGE_SIZE (1UL << PAGE_SHIFT) |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 110 | #define BLOCK_SIZE (1UL << BLOCK_SHIFT) |
| 111 | |
| 112 | /* |
| 113 | * MAIR Index |
| 114 | */ |
| 115 | #define MAIR_INDX_NC 0 |
| 116 | #define MAIR_INDX_WT 1 |
| 117 | #define MAIR_INDX_WB 2 |
| 118 | |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 119 | static pte_t *const ttb_buff = (void *)_ttb; |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 120 | |
Julius Werner | 8c09377 | 2016-02-09 16:09:15 -0800 | [diff] [blame] | 121 | /* Not all boards want to use subtables and declare them in memlayout.ld. */ |
| 122 | DECLARE_OPTIONAL_REGION(ttb_subtables); |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 123 | |
| 124 | static struct { |
| 125 | pte_t value; |
| 126 | const char *name; |
| 127 | } attrs[] = { |
| 128 | [DCACHE_OFF] = {.value = ATTR_NC, .name = "uncached"}, |
| 129 | [DCACHE_WRITEBACK] = {.value = ATTR_WB, .name = "writeback"}, |
| 130 | [DCACHE_WRITETHROUGH] = {.value = ATTR_WT, .name = "writethrough"}, |
| 131 | }; |
| 132 | |
| 133 | /* Fills page table entries in |table| from |start_idx| to |end_idx| with |attr| |
| 134 | * and performs necessary invalidations. |offset| is the start address of the |
| 135 | * area described by |table|, and |shift| is the size-shift of each frame. */ |
| 136 | static void mmu_fill_table(pte_t *table, u32 start_idx, u32 end_idx, |
| 137 | uintptr_t offset, u32 shift, pte_t attr) |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 138 | { |
| 139 | int i; |
| 140 | |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 141 | /* Write out page table entries. */ |
| 142 | for (i = start_idx; i < end_idx; i++) |
| 143 | table[i] = (offset + (i << shift)) | attr; |
| 144 | |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 145 | /* Flush the page table entries from the dcache. */ |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 146 | for (i = start_idx; i < end_idx; i++) |
| 147 | dccmvac((uintptr_t)&table[i]); |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 148 | dsb(); |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 149 | |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 150 | /* Invalidate the TLB entries. */ |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 151 | for (i = start_idx; i < end_idx; i++) |
| 152 | tlbimvaa(offset + (i << shift)); |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 153 | dsb(); |
| 154 | isb(); |
| 155 | } |
| 156 | |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 157 | static pte_t *mmu_create_subtable(pte_t *pgd_entry) |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 158 | { |
Julius Werner | 03a0a65 | 2015-09-29 17:28:15 -0700 | [diff] [blame] | 159 | pte_t *table = (pte_t *)_ttb_subtables; |
| 160 | |
| 161 | /* Find unused subtable (first PTE == ATTR_UNUSED). */ |
| 162 | while (table[0] != ATTR_UNUSED) { |
| 163 | table += SUBTABLE_PTES; |
| 164 | if ((pte_t *)_ettb_subtables - table <= 0) |
| 165 | die("Not enough room for another sub-pagetable!"); |
| 166 | } |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 167 | |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 168 | /* We assume that *pgd_entry must already be a valid block mapping. */ |
| 169 | uintptr_t start_addr = (uintptr_t)(*pgd_entry & BLOCK_MASK); |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 170 | printk(BIOS_DEBUG, "Creating new subtable @%p for [%#.8x:%#.8lx)\n", |
| 171 | table, start_addr, start_addr + BLOCK_SIZE); |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 172 | |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 173 | /* Initialize the new subtable with entries of the same attributes |
| 174 | * (XN bit moves from 4 to 0, set PAGE unless block was unmapped). */ |
| 175 | pte_t attr = *pgd_entry & ~(BLOCK_MASK); |
| 176 | if (!IS_ENABLED(CONFIG_ARM_LPAE) && (attr & (1 << 4))) |
| 177 | attr = ((attr & ~(1 << 4)) | (1 << 0)); |
| 178 | if (attr & ATTR_BLOCK) |
| 179 | attr = (attr & ~ATTR_BLOCK) | ATTR_PAGE; |
Julius Werner | 03a0a65 | 2015-09-29 17:28:15 -0700 | [diff] [blame] | 180 | mmu_fill_table(table, 0, SUBTABLE_PTES, start_addr, PAGE_SHIFT, attr); |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 181 | |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 182 | /* Replace old entry in upper level table to point at subtable. */ |
| 183 | *pgd_entry = (pte_t)(uintptr_t)table | ATTR_NEXTLEVEL; |
| 184 | dccmvac((uintptr_t)pgd_entry); |
| 185 | dsb(); |
| 186 | tlbimvaa(start_addr); |
| 187 | dsb(); |
| 188 | isb(); |
| 189 | |
| 190 | return table; |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 191 | } |
| 192 | |
Deepa Dinamani | e197748 | 2015-01-28 14:15:56 -0800 | [diff] [blame] | 193 | static pte_t *mmu_validate_create_sub_table(u32 start_kb, u32 size_kb) |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 194 | { |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 195 | pte_t *pgd_entry = &ttb_buff[start_kb / (BLOCK_SIZE/KiB)]; |
| 196 | pte_t *table = (void *)(uintptr_t)(*pgd_entry & NEXTLEVEL_MASK); |
David Hendricks | f9be756 | 2013-03-21 21:58:50 -0700 | [diff] [blame] | 197 | |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 198 | /* Make sure the range is contained within a single superpage. */ |
| 199 | assert(((start_kb + size_kb - 1) & (BLOCK_MASK/KiB)) |
| 200 | == (start_kb & (BLOCK_MASK/KiB)) && start_kb < 4 * (GiB/KiB)); |
David Hendricks | f9be756 | 2013-03-21 21:58:50 -0700 | [diff] [blame] | 201 | |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 202 | if ((*pgd_entry & ~NEXTLEVEL_MASK) != ATTR_NEXTLEVEL) |
| 203 | table = mmu_create_subtable(pgd_entry); |
David Hendricks | f9be756 | 2013-03-21 21:58:50 -0700 | [diff] [blame] | 204 | |
Deepa Dinamani | e197748 | 2015-01-28 14:15:56 -0800 | [diff] [blame] | 205 | return table; |
| 206 | } |
| 207 | |
/* Maps [start_kb * KiB, (start_kb + size_kb) * KiB) with the cache |policy|
 * at page granularity. The range must lie within one superpage; the end is
 * rounded up to the next page boundary. */
void mmu_config_range_kb(u32 start_kb, u32 size_kb, enum dcache_policy policy)
{
	pte_t *table = mmu_validate_create_sub_table(start_kb, size_kb);

	/* Always _one_ _damn_ bit that won't fit... (XN moves from 4 to 0) */
	pte_t attr = attrs[policy].value;
	if (!IS_ENABLED(CONFIG_ARM_LPAE) && (attr & (1 << 4)))
		attr = ((attr & ~(1 << 4)) | (1 << 0));

	/* Mask away high address bits that are handled by upper level table. */
	u32 mask = BLOCK_SIZE/KiB - 1;
	printk(BIOS_DEBUG, "Mapping address range [%#.8x:%#.8x) as %s\n",
	       start_kb * KiB, (start_kb + size_kb) * KiB, attrs[policy].name);

	/* Exclusive end offset within this superpage, rounded up to a whole
	 * page, relative to the superpage base (start_kb & ~mask). */
	u32 end_kb = ALIGN_UP((start_kb + size_kb), PAGE_SIZE/KiB) -
		     (start_kb & ~mask);

	assert(end_kb <= BLOCK_SIZE/KiB);

	mmu_fill_table(table, (start_kb & mask) / (PAGE_SIZE/KiB),
		       end_kb / (PAGE_SIZE/KiB),
		       (start_kb & ~mask) * KiB, PAGE_SHIFT, ATTR_PAGE | attr);
}
| 231 | |
Deepa Dinamani | e197748 | 2015-01-28 14:15:56 -0800 | [diff] [blame] | 232 | void mmu_disable_range_kb(u32 start_kb, u32 size_kb) |
| 233 | { |
| 234 | pte_t *table = mmu_validate_create_sub_table(start_kb, size_kb); |
| 235 | |
| 236 | /* Mask away high address bits that are handled by upper level table. */ |
| 237 | u32 mask = BLOCK_SIZE/KiB - 1; |
| 238 | printk(BIOS_DEBUG, "Setting address range [%#.8x:%#.8x) as unmapped\n", |
| 239 | start_kb * KiB, (start_kb + size_kb) * KiB); |
| 240 | mmu_fill_table(table, (start_kb & mask) / (PAGE_SIZE/KiB), |
| 241 | div_round_up((start_kb + size_kb) & mask, PAGE_SIZE/KiB), |
| 242 | (start_kb & ~mask) * KiB, PAGE_SHIFT, 0); |
| 243 | } |
| 244 | |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 245 | void mmu_disable_range(u32 start_mb, u32 size_mb) |
| 246 | { |
| 247 | printk(BIOS_DEBUG, "Setting address range [%#.8x:%#.8x) as unmapped\n", |
| 248 | start_mb * MiB, (start_mb + size_mb) * MiB); |
| 249 | assert(start_mb + size_mb <= 4 * (GiB/MiB)); |
| 250 | mmu_fill_table(ttb_buff, start_mb / (BLOCK_SIZE/MiB), |
| 251 | div_round_up(start_mb + size_mb, BLOCK_SIZE/MiB), |
| 252 | 0, BLOCK_SHIFT, 0); |
| 253 | } |
| 254 | |
| 255 | void mmu_config_range(u32 start_mb, u32 size_mb, enum dcache_policy policy) |
| 256 | { |
| 257 | printk(BIOS_DEBUG, "Mapping address range [%#.8x:%#.8x) as %s\n", |
| 258 | start_mb * MiB, (start_mb + size_mb) * MiB, attrs[policy].name); |
| 259 | assert(start_mb + size_mb <= 4 * (GiB/MiB)); |
| 260 | mmu_fill_table(ttb_buff, start_mb / (BLOCK_SIZE/MiB), |
| 261 | div_round_up(start_mb + size_mb, BLOCK_SIZE/MiB), |
| 262 | 0, BLOCK_SHIFT, ATTR_BLOCK | attrs[policy].value); |
David Hendricks | f9be756 | 2013-03-21 21:58:50 -0700 | [diff] [blame] | 263 | } |
| 264 | |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 265 | /* |
| 266 | * For coreboot's purposes, we will create a simple identity map. |
| 267 | * |
| 268 | * If LPAE is disabled, we will create a L1 page |
| 269 | * table in RAM with 1MB section translation entries over the 4GB address space. |
| 270 | * (ref: section 10.2 and example 15-4 in Cortex-A series programmer's guide) |
| 271 | * |
| 272 | * If LPAE is enabled, we do two level translation with one L1 table with 4 |
| 273 | * entries, each covering a 1GB space, and four L2 tables with 512 entries, each |
| 274 | * covering a 2MB space. |
| 275 | */ |
David Hendricks | f9be756 | 2013-03-21 21:58:50 -0700 | [diff] [blame] | 276 | void mmu_init(void) |
| 277 | { |
Julius Werner | 03a0a65 | 2015-09-29 17:28:15 -0700 | [diff] [blame] | 278 | /* Initially mark all subtables as unused (first PTE == ATTR_UNUSED). */ |
| 279 | pte_t *table = (pte_t *)_ttb_subtables; |
| 280 | for (; (pte_t *)_ettb_subtables - table > 0; table += SUBTABLE_PTES) |
| 281 | table[0] = ATTR_UNUSED; |
| 282 | |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 283 | if (CONFIG_ARM_LPAE) { |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 284 | pte_t *const pgd_buff = (pte_t*)(_ttb + 16*KiB); |
| 285 | pte_t *pmd = ttb_buff; |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 286 | int i; |
| 287 | |
| 288 | printk(BIOS_DEBUG, "LPAE Translation tables are @ %p\n", |
| 289 | ttb_buff); |
| 290 | ASSERT((read_mmfr0() & 0xf) >= 5); |
| 291 | |
| 292 | /* |
| 293 | * Set MAIR |
| 294 | * See B4.1.104 of ARMv7 Architecture Reference Manual |
| 295 | */ |
| 296 | write_mair0( |
| 297 | 0x00 << (MAIR_INDX_NC*8) | /* Strongly-ordered, |
| 298 | * Non-Cacheable */ |
| 299 | 0xaa << (MAIR_INDX_WT*8) | /* Write-Thru, |
| 300 | * Read-Allocate */ |
| 301 | 0xff << (MAIR_INDX_WB*8) /* Write-Back, |
| 302 | * Read/Write-Allocate */ |
| 303 | ); |
| 304 | |
| 305 | /* |
| 306 | * Set up L1 table |
| 307 | * Once set here, L1 table won't be modified by coreboot. |
| 308 | * See B3.6.1 of ARMv7 Architecture Reference Manual |
| 309 | */ |
| 310 | for (i = 0; i < 4; i++) { |
Julius Werner | 108548a | 2014-10-09 17:31:45 -0700 | [diff] [blame] | 311 | pgd_buff[i] = ((uint32_t)pmd & NEXTLEVEL_MASK) | |
| 312 | ATTR_NEXTLEVEL; |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 313 | pmd += BLOCK_SIZE / PAGE_SIZE; |
| 314 | } |
| 315 | |
| 316 | /* |
| 317 | * Set TTBR0 |
| 318 | */ |
| 319 | write_ttbr0((uintptr_t)pgd_buff); |
| 320 | } else { |
| 321 | printk(BIOS_DEBUG, "Translation table is @ %p\n", ttb_buff); |
| 322 | |
| 323 | /* |
| 324 | * Translation table base 0 address is in bits 31:14-N, where N |
| 325 | * is given by bits 2:0 in TTBCR (which we set to 0). All lower |
| 326 | * bits in this register should be zero for coreboot. |
| 327 | */ |
| 328 | write_ttbr0((uintptr_t)ttb_buff); |
| 329 | } |
David Hendricks | f9be756 | 2013-03-21 21:58:50 -0700 | [diff] [blame] | 330 | |
| 331 | /* |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 332 | * Set TTBCR |
| 333 | * See B4.1.153 of ARMv7 Architecture Reference Manual |
| 334 | * See B3.5.4 and B3.6.4 for how TTBR0 or TTBR1 is selected. |
David Hendricks | f9be756 | 2013-03-21 21:58:50 -0700 | [diff] [blame] | 335 | */ |
Daisuke Nojiri | f574a32 | 2014-02-27 14:56:39 -0800 | [diff] [blame] | 336 | write_ttbcr( |
| 337 | CONFIG_ARM_LPAE << 31 | /* EAE. 1:Enable LPAE */ |
| 338 | 0 << 16 | 0 << 0 /* Use TTBR0 for all addresses */ |
| 339 | ); |
David Hendricks | f9be756 | 2013-03-21 21:58:50 -0700 | [diff] [blame] | 340 | |
Julius Werner | ba11d6f | 2014-10-16 09:56:27 -0700 | [diff] [blame] | 341 | /* Set domain 0 to Client so XN bit works (to prevent prefetches) */ |
| 342 | write_dacr(0x5); |
David Hendricks | f9be756 | 2013-03-21 21:58:50 -0700 | [diff] [blame] | 343 | } |