/*
 * mtrr.c: setting MTRR to decent values for cache initialization on P6
 *
 * Derived from intel_set_mtrr in intel_subr.c and mtrr.c in linux kernel
 *
 * Copyright 2000 Silicon Integrated System Corporation
 * Copyright 2013 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * Reference: Intel Architecture Software Developer's Manual, Volume 3: System Programming
 */

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <console/console.h>
#include <device/device.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/lapic.h>
#include <arch/cpu.h>
#include <arch/acpi.h>
#include <memrange.h>
#if CONFIG_X86_AMD_FIXED_MTRRS
#include <cpu/amd/mtrr.h>
#define MTRR_FIXED_WRBACK_BITS (MTRR_READ_MEM | MTRR_WRITE_MEM)
#else
#define MTRR_FIXED_WRBACK_BITS 0
#endif

/* 2 MTRRs are reserved for the operating system */
#define BIOS_MTRRS 6
#define OS_MTRRS 2
#define MTRRS (BIOS_MTRRS + OS_MTRRS)

static int total_mtrrs = MTRRS;
static int bios_mtrrs = BIOS_MTRRS;

static void detect_var_mtrrs(void)
{
	msr_t msr;

	msr = rdmsr(MTRRcap_MSR);

	total_mtrrs = msr.lo & 0xff;
	bios_mtrrs = total_mtrrs - OS_MTRRS;
}

void enable_fixed_mtrr(void)
{
	msr_t msr;

	msr = rdmsr(MTRRdefType_MSR);
	msr.lo |= MTRRdefTypeEn | MTRRdefTypeFixEn;
	wrmsr(MTRRdefType_MSR, msr);
}

static void enable_var_mtrr(unsigned char deftype)
{
	msr_t msr;

	msr = rdmsr(MTRRdefType_MSR);
	msr.lo &= ~0xff;
	msr.lo |= MTRRdefTypeEn | deftype;
	wrmsr(MTRRdefType_MSR, msr);
}

/* fms: find most significant bit set, stolen from Linux Kernel Source. */
static inline unsigned int fms(unsigned int x)
{
	int r;

	__asm__("bsrl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $0,%0\n"
		"1:" : "=r" (r) : "g" (x));
	return r;
}

/* fls: find least significant bit set */
static inline unsigned int fls(unsigned int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $32,%0\n"
		"1:" : "=r" (r) : "g" (x));
	return r;
}
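
/* Worked example (illustrative values, not from the original source): for
 * x = 0x00c00000 (bits 22 and 23 set), fms(x) returns 23 and fls(x)
 * returns 22. Note the differing fallbacks for x == 0: fms(0) yields 0,
 * while fls(0) yields 32. */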

#define MTRR_VERBOSE_LEVEL BIOS_NEVER

/* MTRRs are handled at a 4KiB granularity. Therefore all address
 * calculations can be done with 32-bit numbers. This allows the MTRR
 * code to handle up to 2^44 bytes (16 TiB) of address space. */
#define RANGE_SHIFT 12
#define ADDR_SHIFT_TO_RANGE_SHIFT(x) \
	(((x) > RANGE_SHIFT) ? ((x) - RANGE_SHIFT) : RANGE_SHIFT)
#define PHYS_TO_RANGE_ADDR(x) ((x) >> RANGE_SHIFT)
#define RANGE_TO_PHYS_ADDR(x) (((resource_t)(x)) << RANGE_SHIFT)
#define NUM_FIXED_MTRRS (NUM_FIXED_RANGES / RANGES_PER_FIXED_MTRR)

/* The minimum alignment while handling variable MTRR ranges is 64MiB. */
#define MTRR_MIN_ALIGN PHYS_TO_RANGE_ADDR(64 << 20)
/* Helpful constants. */
#define RANGE_1MB PHYS_TO_RANGE_ADDR(1 << 20)
#define RANGE_4GB (1 << (ADDR_SHIFT_TO_RANGE_SHIFT(32)))
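
/* Worked example (illustrative): PHYS_TO_RANGE_ADDR(0x100000000ULL) is
 * 0x100000, which equals RANGE_4GB (1 << (32 - RANGE_SHIFT)), and
 * RANGE_TO_PHYS_ADDR(RANGE_1MB) recovers 0x100000 bytes. A 32-bit range
 * address therefore covers 2^(32 + 12) = 2^44 bytes. */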

/*
 * The default MTRR type selection uses 3 approaches for selecting the
 * optimal number of variable MTRRs. For each range do 3 calculations:
 * 1. UC as default type with no holes at top of range.
 * 2. UC as default using holes at top of range.
 * 3. WB as default.
 * If using holes is optimal for a range when UC is the default type, the
 * tag is updated to direct the commit routine to use a hole at the top
 * of a range.
 */
#define MTRR_ALGO_SHIFT (8)
#define MTRR_TAG_MASK ((1 << MTRR_ALGO_SHIFT) - 1)
/* If the default type is UC use the hole carving algorithm for a range. */
#define MTRR_RANGE_UC_USE_HOLE (1 << MTRR_ALGO_SHIFT)

static inline uint32_t range_entry_base_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_base(r));
}

static inline uint32_t range_entry_end_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_end(r));
}

static inline int range_entry_mtrr_type(struct range_entry *r)
{
	return range_entry_tag(r) & MTRR_TAG_MASK;
}

static struct memranges *get_physical_address_space(void)
{
	static struct memranges *addr_space;
	static struct memranges addr_space_storage;

	/* In order to handle some chipsets not being able to pre-determine
	 * uncacheable ranges, such as graphics memory, at resource insertion
	 * time, remove uncacheable regions from the cacheable ones. */
	if (addr_space == NULL) {
		struct range_entry *r;
		unsigned long mask;
		unsigned long match;

		addr_space = &addr_space_storage;

		mask = IORESOURCE_CACHEABLE;
		/* Collect cacheable and uncacheable address ranges. The
		 * uncacheable regions take precedence over the cacheable
		 * regions. */
		memranges_init(addr_space, mask, mask, MTRR_TYPE_WRBACK);
		memranges_add_resources(addr_space, mask, 0,
					MTRR_TYPE_UNCACHEABLE);

		/* Handle any write combining resources. Only prefetchable
		 * resources with the IORESOURCE_WRCOMB flag are appropriate
		 * for this MTRR type. */
		match = IORESOURCE_PREFETCH | IORESOURCE_WRCOMB;
		mask |= match;
		memranges_add_resources(addr_space, mask, match,
					MTRR_TYPE_WRCOMB);

#if CONFIG_CACHE_ROM
		/* Add a write-protect region covering the ROM size
		 * when CONFIG_CACHE_ROM is enabled. The ROM is assumed
		 * to be located at 4GiB - rom size. */
		resource_t rom_base = RANGE_TO_PHYS_ADDR(
			RANGE_4GB - PHYS_TO_RANGE_ADDR(CONFIG_ROM_SIZE));
		memranges_insert(addr_space, rom_base, CONFIG_ROM_SIZE,
				 MTRR_TYPE_WRPROT);
#endif

		/* The address space below 4GiB is special. It needs to be
		 * covered entirely by range entries so that MTRR calculations
		 * can be properly done for the full 32-bit address space.
		 * Therefore, ensure holes are filled up to 4GiB as
		 * uncacheable. */
		memranges_fill_holes_up_to(addr_space,
					   RANGE_TO_PHYS_ADDR(RANGE_4GB),
					   MTRR_TYPE_UNCACHEABLE);

		printk(BIOS_DEBUG, "MTRR: Physical address space:\n");
		memranges_each_entry(r, addr_space)
			printk(BIOS_DEBUG,
			       "0x%016llx - 0x%016llx size 0x%08llx type %ld\n",
			       range_entry_base(r), range_entry_end(r),
			       range_entry_size(r), range_entry_tag(r));
	}

	return addr_space;
}

/* Fixed MTRR descriptor. This structure defines the step size and the
 * begin and end (exclusive) addresses covered by a set of fixed MTRR MSRs.
 * It also describes the index at which the calculated MTRR types for the
 * set are stored in the fixed_mtrr_types array. */
struct fixed_mtrr_desc {
	uint32_t begin;
	uint32_t end;
	uint32_t step;
	int range_index;
	int msr_index_base;
};

/* Shared MTRR calculations. Can be reused by APs. */
static uint8_t fixed_mtrr_types[NUM_FIXED_RANGES];

/* Fixed MTRR descriptors. */
static const struct fixed_mtrr_desc fixed_mtrr_desc[] = {
	{ PHYS_TO_RANGE_ADDR(0x000000), PHYS_TO_RANGE_ADDR(0x080000),
	  PHYS_TO_RANGE_ADDR(64 * 1024), 0, MTRRfix64K_00000_MSR },
	{ PHYS_TO_RANGE_ADDR(0x080000), PHYS_TO_RANGE_ADDR(0x0C0000),
	  PHYS_TO_RANGE_ADDR(16 * 1024), 8, MTRRfix16K_80000_MSR },
	{ PHYS_TO_RANGE_ADDR(0x0C0000), PHYS_TO_RANGE_ADDR(0x100000),
	  PHYS_TO_RANGE_ADDR(4 * 1024), 24, MTRRfix4K_C0000_MSR },
};
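
/* For instance (illustrative): physical address 0xC8000 falls in the third
 * descriptor above, so its type lands at
 * type_index = 24 + (0xC8000 - 0xC0000) / 4KiB = 32 in fixed_mtrr_types[],
 * i.e. the first type byte packed into the second fixed 4K MSR
 * (MTRRfix4K_C8000_MSR). */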

static void calc_fixed_mtrrs(void)
{
	static int fixed_mtrr_types_initialized;
	struct memranges *phys_addr_space;
	struct range_entry *r;
	const struct fixed_mtrr_desc *desc;
	const struct fixed_mtrr_desc *last_desc;
	uint32_t begin;
	uint32_t end;
	int type_index;

	if (fixed_mtrr_types_initialized)
		return;

	phys_addr_space = get_physical_address_space();

	/* Set all fixed ranges to uncacheable first. */
	memset(&fixed_mtrr_types[0], MTRR_TYPE_UNCACHEABLE, NUM_FIXED_RANGES);

	desc = &fixed_mtrr_desc[0];
	last_desc = &fixed_mtrr_desc[ARRAY_SIZE(fixed_mtrr_desc) - 1];
	type_index = desc->range_index;

	memranges_each_entry(r, phys_addr_space) {
		begin = range_entry_base_mtrr_addr(r);
		end = range_entry_end_mtrr_addr(r);

		if (begin >= last_desc->end)
			break;

		if (end > last_desc->end)
			end = last_desc->end;

		/* Get to the correct fixed mtrr descriptor. */
		while (begin >= desc->end)
			desc++;

		type_index = desc->range_index;
		type_index += (begin - desc->begin) / desc->step;

		while (begin != end) {
			unsigned char type;

			type = range_entry_tag(r);
			printk(MTRR_VERBOSE_LEVEL,
			       "MTRR addr 0x%x-0x%x set to %d type @ %d\n",
			       begin, begin + desc->step, type, type_index);
			if (type == MTRR_TYPE_WRBACK)
				type |= MTRR_FIXED_WRBACK_BITS;
			fixed_mtrr_types[type_index] = type;
			type_index++;
			begin += desc->step;
			if (begin == desc->end)
				desc++;
		}
	}
	fixed_mtrr_types_initialized = 1;
}

static void commit_fixed_mtrrs(void)
{
	int i;
	int j;
	int msr_num;
	int type_index;
	/* 8 ranges per msr. */
	msr_t fixed_msrs[NUM_FIXED_MTRRS];
	unsigned long msr_index[NUM_FIXED_MTRRS];

	memset(&fixed_msrs, 0, sizeof(fixed_msrs));

	disable_cache();

	msr_num = 0;
	type_index = 0;
	for (i = 0; i < ARRAY_SIZE(fixed_mtrr_desc); i++) {
		const struct fixed_mtrr_desc *desc;
		int num_ranges;

		desc = &fixed_mtrr_desc[i];
		num_ranges = (desc->end - desc->begin) / desc->step;
		for (j = 0; j < num_ranges; j += RANGES_PER_FIXED_MTRR) {
			msr_index[msr_num] = desc->msr_index_base +
				(j / RANGES_PER_FIXED_MTRR);
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 0;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 8;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 16;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 24;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 0;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 8;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 16;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 24;
			msr_num++;
		}
	}

	for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++) {
		printk(BIOS_DEBUG, "MTRR: Fixed MSR 0x%lx 0x%08x%08x\n",
		       msr_index[i], fixed_msrs[i].hi, fixed_msrs[i].lo);
		wrmsr(msr_index[i], fixed_msrs[i]);
	}

	enable_cache();
}

void x86_setup_fixed_mtrrs_no_enable(void)
{
	calc_fixed_mtrrs();
	commit_fixed_mtrrs();
}

void x86_setup_fixed_mtrrs(void)
{
	x86_setup_fixed_mtrrs_no_enable();

	printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
	enable_fixed_mtrr();
}

/* Keep track of the MTRR that covers the ROM for caching purposes. */
#if CONFIG_CACHE_ROM
static long rom_cache_mtrr = -1;

long x86_mtrr_rom_cache_var_index(void)
{
	return rom_cache_mtrr;
}

void x86_mtrr_enable_rom_caching(void)
{
	msr_t msr_val;
	unsigned long index;

	if (rom_cache_mtrr < 0)
		return;

	index = rom_cache_mtrr;
	disable_cache();
	msr_val = rdmsr(MTRRphysBase_MSR(index));
	msr_val.lo &= ~0xff;
	msr_val.lo |= MTRR_TYPE_WRPROT;
	wrmsr(MTRRphysBase_MSR(index), msr_val);
	enable_cache();
}

void x86_mtrr_disable_rom_caching(void)
{
	msr_t msr_val;
	unsigned long index;

	if (rom_cache_mtrr < 0)
		return;

	index = rom_cache_mtrr;
	disable_cache();
	msr_val = rdmsr(MTRRphysBase_MSR(index));
	msr_val.lo &= ~0xff;
	wrmsr(MTRRphysBase_MSR(index), msr_val);
	enable_cache();
}
#endif

struct var_mtrr_state {
	struct memranges *addr_space;
	int above4gb;
	int address_bits;
	int commit_mtrrs;
	int mtrr_index;
	int def_mtrr_type;
};

static void clear_var_mtrr(int index)
{
	msr_t msr_val;

	msr_val = rdmsr(MTRRphysMask_MSR(index));
	msr_val.lo &= ~MTRRphysMaskValid;
	wrmsr(MTRRphysMask_MSR(index), msr_val);
}

static void write_var_mtrr(struct var_mtrr_state *var_state,
                           uint32_t base, uint32_t size, int mtrr_type)
{
	msr_t msr_val;
	unsigned long msr_index;
	resource_t rbase;
	resource_t rsize;
	resource_t mask;

	/* Some variable MTRRs are kept in reserve for OS use. However,
	 * it's more important to map the full address space properly. */
	if (var_state->mtrr_index >= bios_mtrrs)
		printk(BIOS_WARNING, "Taking a reserved OS MTRR.\n");
	if (var_state->mtrr_index >= total_mtrrs) {
		printk(BIOS_ERR, "ERROR: Not enough MTRRs available!\n");
		return;
	}

	rbase = base;
	rsize = size;

	rbase = RANGE_TO_PHYS_ADDR(rbase);
	rsize = RANGE_TO_PHYS_ADDR(rsize);
	rsize = -rsize;

	mask = (1ULL << var_state->address_bits) - 1;
	rsize = rsize & mask;

#if CONFIG_CACHE_ROM
	/* CONFIG_CACHE_ROM allocates an MTRR specifically for allowing
	 * one to turn on caching for faster ROM access. However, it is
	 * left to the MTRR callers to enable it. */
	if (mtrr_type == MTRR_TYPE_WRPROT) {
		mtrr_type = MTRR_TYPE_UNCACHEABLE;
		if (rom_cache_mtrr < 0)
			rom_cache_mtrr = var_state->mtrr_index;
	}
#endif

	printk(BIOS_DEBUG, "MTRR: %d base 0x%016llx mask 0x%016llx type %d\n",
	       var_state->mtrr_index, rbase, rsize, mtrr_type);

	msr_val.lo = rbase;
	msr_val.lo |= mtrr_type;

	msr_val.hi = rbase >> 32;
	msr_index = MTRRphysBase_MSR(var_state->mtrr_index);
	wrmsr(msr_index, msr_val);

	msr_val.lo = rsize;
	msr_val.lo |= MTRRphysMaskValid;
	msr_val.hi = rsize >> 32;
	msr_index = MTRRphysMask_MSR(var_state->mtrr_index);
	wrmsr(msr_index, msr_val);
}
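
/* Encoding sketch (illustrative, assuming 36 physical address bits): a
 * 256MiB WB range at 1GiB gives rbase = 0x40000000 and rsize = 0x10000000,
 * so PhysBase = 0x40000000 | 6 (WB) and PhysMask = (-0x10000000ULL &
 * ((1ULL << 36) - 1)) | MTRRphysMaskValid = 0xff0000800. An address is
 * covered by the MTRR when it matches the base in all mask bits. */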

static void calc_var_mtrr_range(struct var_mtrr_state *var_state,
                                uint32_t base, uint32_t size, int mtrr_type)
{
	while (size != 0) {
		uint32_t addr_lsb;
		uint32_t size_msb;
		uint32_t mtrr_size;

		addr_lsb = fls(base);
		size_msb = fms(size);

		/* All MTRR entries need to have their base aligned to the
		 * mask size. The maximum size is therefore a function of the
		 * lowest bit set in the base and the highest bit set in the
		 * size. */
		if (addr_lsb > size_msb)
			mtrr_size = 1 << size_msb;
		else
			mtrr_size = 1 << addr_lsb;

		if (var_state->commit_mtrrs)
			write_var_mtrr(var_state, base, mtrr_size, mtrr_type);

		size -= mtrr_size;
		base += mtrr_size;
		var_state->mtrr_index++;
	}
}
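
/* For example (illustrative, stated in physical terms): a 192MiB region
 * based at 128MiB is emitted as a 128MiB MTRR at 128MiB followed by a
 * 64MiB MTRR at 256MiB -- each chunk is bounded both by the remaining
 * size and by the alignment of the current base. */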

static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
                                     struct range_entry *r)
{
	uint32_t a1, a2, b1, b2;
	int mtrr_type;
	struct range_entry *next;

	/*
	 * Determine MTRRs based on the following algorithm for the given
	 * entry:
	 * +------------------+ b2 = ALIGN_UP(end)
	 * | 0 or more bytes  | <-- hole is carved out between b1 and b2
	 * +------------------+ a2 = b1 = end
	 * |                  |
	 * +------------------+ a1 = begin
	 *
	 * Thus, there are 2 sub-ranges to configure variable MTRRs for.
	 */
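
	/* Example (illustrative, physical terms): with UC default, a WB
	 * range [0, 0x97000000) needs 5 MTRRs on its own (one per set bit),
	 * but mapping WB over [0, ALIGN_UP(0x97000000, 64MiB) = 0x98000000)
	 * and carving a UC hole over [0x97000000, 0x98000000) needs only 4,
	 * because UC overrides WB where variable MTRRs overlap. */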
	mtrr_type = range_entry_mtrr_type(r);

	a1 = range_entry_base_mtrr_addr(r);
	a2 = range_entry_end_mtrr_addr(r);

	/* The end address is under 1MiB. The fixed MTRRs take
	 * precedence over the variable ones. Therefore this range
	 * can be ignored. */
	if (a2 < RANGE_1MB)
		return;

	/* Again, the fixed MTRRs take precedence so the beginning
	 * of the range can be set to 0 if it starts below 1MiB. */
	if (a1 < RANGE_1MB)
		a1 = 0;

	/* If the range starts above 4GiB the processing is done. */
	if (!var_state->above4gb && a1 >= RANGE_4GB)
		return;

	/* Clip the upper address to 4GiB if addresses above 4GiB
	 * are not being processed. */
	if (!var_state->above4gb && a2 > RANGE_4GB)
		a2 = RANGE_4GB;

	next = memranges_next_entry(var_state->addr_space, r);

	b1 = a2;

	/* First check if a1 is >= 4GiB and the current entry is the last
	 * entry. If so, perform an optimization of covering a larger range
	 * defined by the base address' alignment. */
	if (a1 >= RANGE_4GB && next == NULL) {
		uint32_t addr_lsb;

		addr_lsb = fls(a1);
		b2 = (1 << addr_lsb) + a1;
		if (b2 >= a2) {
			calc_var_mtrr_range(var_state, a1, b2 - a1, mtrr_type);
			return;
		}
	}

	/* Handle the min alignment roundup case. */
	b2 = ALIGN_UP(a2, MTRR_MIN_ALIGN);

	/* Check against the next range. If the current range_entry is the
	 * last entry then carving a hole is no problem. If the current entry
	 * isn't the last entry then check that the next entry covers the
	 * entire hole range with the default mtrr type. */
	if (next != NULL &&
	    (range_entry_mtrr_type(next) != var_state->def_mtrr_type ||
	     range_entry_end_mtrr_addr(next) < b2)) {
		calc_var_mtrr_range(var_state, a1, a2 - a1, mtrr_type);
		return;
	}

	calc_var_mtrr_range(var_state, a1, b2 - a1, mtrr_type);
	calc_var_mtrr_range(var_state, b1, b2 - b1, var_state->def_mtrr_type);
}

static void calc_var_mtrrs_without_hole(struct var_mtrr_state *var_state,
                                        struct range_entry *r)
{
	uint32_t a1, a2, b1, b2, c1, c2;
	int mtrr_type;

	/*
	 * For each range whose type differs from the default, process it in
	 * the following manner:
	 * +------------------+ c2 = end
	 * | 0 or more bytes  |
	 * +------------------+ b2 = c1 = ALIGN_DOWN(end)
	 * |                  |
	 * +------------------+ b1 = a2 = ALIGN_UP(begin)
	 * | 0 or more bytes  |
	 * +------------------+ a1 = begin
	 *
	 * Thus, there are 3 sub-ranges to configure variable MTRRs for.
	 */
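
	/* Example (illustrative, physical terms): a WB range
	 * [0x4000000, 0x97000000) is already 64MiB-aligned at its base, so
	 * a1 = a2 = b1 = 0x4000000, b2 = c1 = ALIGN_DOWN(0x97000000, 64MiB)
	 * = 0x94000000, leaving [a1, a2) empty, a large aligned middle, and
	 * a 0x3000000 tail at [c1, c2). */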
	mtrr_type = range_entry_mtrr_type(r);

	a1 = range_entry_base_mtrr_addr(r);
	c2 = range_entry_end_mtrr_addr(r);

	/* The end address is under 1MiB. The fixed MTRRs take
	 * precedence over the variable ones. Therefore this range
	 * can be ignored. */
	if (c2 < RANGE_1MB)
		return;

	/* Again, the fixed MTRRs take precedence so the beginning
	 * of the range can be set to 0 if it starts below 1MiB. */
	if (a1 < RANGE_1MB)
		a1 = 0;

	/* If the range starts above 4GiB the processing is done. */
	if (!var_state->above4gb && a1 >= RANGE_4GB)
		return;

	/* Clip the upper address to 4GiB if addresses above 4GiB
	 * are not being processed. */
	if (!var_state->above4gb && c2 > RANGE_4GB)
		c2 = RANGE_4GB;

	/* Don't align up or down on the range if it is smaller
	 * than the minimum granularity. */
	if ((c2 - a1) < MTRR_MIN_ALIGN) {
		calc_var_mtrr_range(var_state, a1, c2 - a1, mtrr_type);
		return;
	}

	b1 = a2 = ALIGN_UP(a1, MTRR_MIN_ALIGN);
	b2 = c1 = ALIGN_DOWN(c2, MTRR_MIN_ALIGN);

	calc_var_mtrr_range(var_state, a1, a2 - a1, mtrr_type);
	calc_var_mtrr_range(var_state, b1, b2 - b1, mtrr_type);
	calc_var_mtrr_range(var_state, c1, c2 - c1, mtrr_type);
}

static int calc_var_mtrrs(struct memranges *addr_space,
                          int above4gb, int address_bits)
{
	int wb_deftype_count;
	int uc_deftype_count;
	struct range_entry *r;
	struct var_mtrr_state var_state;

	/* The default MTRR cacheability type is determined by calculating
	 * the number of MTRRs required for each MTRR type as if it were the
	 * default. */
	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	var_state.commit_mtrrs = 0;

	wb_deftype_count = 0;
	uc_deftype_count = 0;

	/*
	 * For each range do 3 calculations:
	 * 1. UC as default type with no holes at top of range.
	 * 2. UC as default using holes at top of range.
	 * 3. WB as default.
	 * The lowest count is then used as default after totalling all
	 * MTRRs. Note that the optimal algorithm for UC default is marked
	 * in the tag of each range regardless of the final decision. UC
	 * takes precedence in the MTRR architecture. Therefore, only holes
	 * can be used when the type of the region is MTRR_TYPE_WRBACK with
	 * MTRR_TYPE_UNCACHEABLE as the default type.
	 */
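
	/* Illustrative count: if the space is a single WB range [0, 2GiB)
	 * with everything else UC, a UC default needs just one 2GiB WB
	 * MTRR, whereas a WB default must blanket the remaining address
	 * space with UC entries; the smaller total below wins. */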
	memranges_each_entry(r, var_state.addr_space) {
		int mtrr_type;

		mtrr_type = range_entry_mtrr_type(r);

		if (mtrr_type != MTRR_TYPE_UNCACHEABLE) {
			int uc_hole_count;
			int uc_no_hole_count;

			var_state.def_mtrr_type = MTRR_TYPE_UNCACHEABLE;
			var_state.mtrr_index = 0;

			/* No hole calculation. */
			calc_var_mtrrs_without_hole(&var_state, r);
			uc_no_hole_count = var_state.mtrr_index;

			/* Hole calculation only if type is WB. The value 64
			 * is an unachievably large count; it serves as the
			 * default when the hole calculation is not done. */
			uc_hole_count = 64;
			if (mtrr_type == MTRR_TYPE_WRBACK) {
				var_state.mtrr_index = 0;
				calc_var_mtrrs_with_hole(&var_state, r);
				uc_hole_count = var_state.mtrr_index;
			}

			/* Mark the entry with the optimal algorithm. */
			if (uc_no_hole_count < uc_hole_count) {
				uc_deftype_count += uc_no_hole_count;
			} else {
				unsigned long new_tag;

				new_tag = mtrr_type | MTRR_RANGE_UC_USE_HOLE;
				range_entry_update_tag(r, new_tag);
				uc_deftype_count += uc_hole_count;
			}
		}

		if (mtrr_type != MTRR_TYPE_WRBACK) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_WRBACK;
			calc_var_mtrrs_without_hole(&var_state, r);
			wb_deftype_count += var_state.mtrr_index;
		}
	}

	printk(BIOS_DEBUG, "MTRR: default type WB/UC MTRR counts: %d/%d.\n",
	       wb_deftype_count, uc_deftype_count);

	if (wb_deftype_count < uc_deftype_count) {
		printk(BIOS_DEBUG, "MTRR: WB selected as default type.\n");
		return MTRR_TYPE_WRBACK;
	}
	printk(BIOS_DEBUG, "MTRR: UC selected as default type.\n");
	return MTRR_TYPE_UNCACHEABLE;
}

static void commit_var_mtrrs(struct memranges *addr_space, int def_type,
                             int above4gb, int address_bits)
{
	struct range_entry *r;
	struct var_mtrr_state var_state;
	int i;

	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	/* Write the MSRs. */
	var_state.commit_mtrrs = 1;
	var_state.mtrr_index = 0;
	var_state.def_mtrr_type = def_type;

	memranges_each_entry(r, var_state.addr_space) {
		if (range_entry_mtrr_type(r) == def_type)
			continue;

		if (def_type == MTRR_TYPE_UNCACHEABLE &&
		    (range_entry_tag(r) & MTRR_RANGE_UC_USE_HOLE))
			calc_var_mtrrs_with_hole(&var_state, r);
		else
			calc_var_mtrrs_without_hole(&var_state, r);
	}

	/* Clear all remaining variable MTRRs. */
	for (i = var_state.mtrr_index; i < total_mtrrs; i++)
		clear_var_mtrr(i);
}

void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
{
	static int mtrr_default_type = -1;
	struct memranges *addr_space;

	addr_space = get_physical_address_space();

	if (mtrr_default_type == -1) {
		if (above4gb == 2)
			detect_var_mtrrs();
		mtrr_default_type =
			calc_var_mtrrs(addr_space, !!above4gb, address_bits);
	}

	disable_cache();
	commit_var_mtrrs(addr_space, mtrr_default_type, !!above4gb,
			 address_bits);
	enable_var_mtrr(mtrr_default_type);
	enable_cache();
}

void x86_setup_mtrrs(void)
{
	int address_size;

	x86_setup_fixed_mtrrs();
	address_size = cpu_phys_address_size();
	printk(BIOS_DEBUG, "CPU physical address size: %d bits\n",
	       address_size);
	x86_setup_var_mtrrs(address_size, 1);
}

int x86_mtrr_check(void)
{
	/* Only Pentium Pro and later have MTRR */
	msr_t msr;

	printk(BIOS_DEBUG, "\nMTRR check\n");

	msr = rdmsr(0x2ff);
	msr.lo >>= 10;

	printk(BIOS_DEBUG, "Fixed MTRRs   : ");
	if (msr.lo & 0x01)
		printk(BIOS_DEBUG, "Enabled\n");
	else
		printk(BIOS_DEBUG, "Disabled\n");

	printk(BIOS_DEBUG, "Variable MTRRs: ");
	if (msr.lo & 0x02)
		printk(BIOS_DEBUG, "Enabled\n");
	else
		printk(BIOS_DEBUG, "Disabled\n");

	printk(BIOS_DEBUG, "\n");

	post_code(0x93);
	return ((int) msr.lo);
}