/* This file is part of the coreboot project. */
/* SPDX-License-Identifier: GPL-2.0-or-later */

/*
 * mtrr.c: setting MTRR to decent values for cache initialization on P6
 * Derived from intel_set_mtrr in intel_subr.c and mtrr.c in linux kernel
 *
 * Reference: Intel Architecture Software Developer's Manual, Volume 3: System
 * Programming
 */

#include <stddef.h>
#include <string.h>
#include <bootstate.h>
#include <commonlib/helpers.h>
#include <console/console.h>
#include <device/device.h>
#include <device/pci_ids.h>
#include <cpu/cpu.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/lapic.h>
#include <memrange.h>
#include <cpu/amd/mtrr.h>
#include <assert.h>

#if CONFIG(X86_AMD_FIXED_MTRRS)
#define MTRR_FIXED_WRBACK_BITS (MTRR_READ_MEM | MTRR_WRITE_MEM)
#else
#define MTRR_FIXED_WRBACK_BITS 0
#endif
/* 2 MTRRs are reserved for the operating system */
#define BIOS_MTRRS 6
#define OS_MTRRS 2
#define MTRRS (BIOS_MTRRS + OS_MTRRS)
/*
 * Static storage size for variable MTRRs. It's sized sufficiently large to
 * handle different types of CPUs. Empirically, a CPU with more than 16
 * variable MTRRs has not yet been observed.
 */
#define NUM_MTRR_STATIC_STORAGE 16

static int total_mtrrs = MTRRS;
static int bios_mtrrs = BIOS_MTRRS;

static void detect_var_mtrrs(void)
{
	msr_t msr;

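	/* IA32_MTRRCAP[7:0] (VCNT) reports the number of variable MTRRs
	 * the CPU implements. */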
	msr = rdmsr(MTRR_CAP_MSR);

	total_mtrrs = msr.lo & 0xff;

	if (total_mtrrs > NUM_MTRR_STATIC_STORAGE) {
		printk(BIOS_WARNING,
			"MTRRs detected (%d) > NUM_MTRR_STATIC_STORAGE (%d)\n",
			total_mtrrs, NUM_MTRR_STATIC_STORAGE);
		total_mtrrs = NUM_MTRR_STATIC_STORAGE;
	}
	bios_mtrrs = total_mtrrs - OS_MTRRS;
}

void enable_fixed_mtrr(void)
{
	msr_t msr;

	msr = rdmsr(MTRR_DEF_TYPE_MSR);
	msr.lo |= MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN;
	wrmsr(MTRR_DEF_TYPE_MSR, msr);
}

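/* AMD fixed MTRRs carry extra RdDram/WrDram attribute bits. Setting
 * SYSCFG.MtrrFixDramModEn makes those bits readable and writable so that
 * MTRR_FIXED_WRBACK_BITS can be programmed; the bit is cleared again once
 * the fixed MTRRs have been committed. */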
void fixed_mtrrs_expose_amd_rwdram(void)
{
	msr_t syscfg;

	if (!CONFIG(X86_AMD_FIXED_MTRRS))
		return;

	syscfg = rdmsr(SYSCFG_MSR);
	syscfg.lo |= SYSCFG_MSR_MtrrFixDramModEn;
	wrmsr(SYSCFG_MSR, syscfg);
}

void fixed_mtrrs_hide_amd_rwdram(void)
{
	msr_t syscfg;

	if (!CONFIG(X86_AMD_FIXED_MTRRS))
		return;

	syscfg = rdmsr(SYSCFG_MSR);
	syscfg.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
	wrmsr(SYSCFG_MSR, syscfg);
}

static void enable_var_mtrr(unsigned char deftype)
{
	msr_t msr;

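	/* IA32_MTRR_DEF_TYPE[7:0] holds the default memory type; bit 11 (E)
	 * enables the MTRRs. Replace the type field and set the enable. */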
	msr = rdmsr(MTRR_DEF_TYPE_MSR);
	msr.lo &= ~0xff;
	msr.lo |= MTRR_DEF_TYPE_EN | deftype;
	wrmsr(MTRR_DEF_TYPE_MSR, msr);
}

#define MTRR_VERBOSE_LEVEL BIOS_NEVER

/* MTRRs are at a 4KiB granularity. Therefore all address calculations can
 * be done with 32-bit numbers. This allows for the MTRR code to handle
 * up to 2^44 bytes (16 TiB) of address space. */
#define RANGE_SHIFT 12
#define ADDR_SHIFT_TO_RANGE_SHIFT(x) \
	(((x) > RANGE_SHIFT) ? ((x) - RANGE_SHIFT) : RANGE_SHIFT)
#define PHYS_TO_RANGE_ADDR(x) ((x) >> RANGE_SHIFT)
#define RANGE_TO_PHYS_ADDR(x) (((resource_t)(x)) << RANGE_SHIFT)
#define NUM_FIXED_MTRRS (NUM_FIXED_RANGES / RANGES_PER_FIXED_MTRR)

/* Helpful constants. */
#define RANGE_1MB PHYS_TO_RANGE_ADDR(1 << 20)
#define RANGE_4GB (1 << (ADDR_SHIFT_TO_RANGE_SHIFT(32)))
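/* E.g. with RANGE_SHIFT of 12, PHYS_TO_RANGE_ADDR(0x100000) is 0x100 and
 * RANGE_4GB is 1 << 20, so 4GiB fits comfortably in a 32-bit range address. */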

#define MTRR_ALGO_SHIFT (8)
#define MTRR_TAG_MASK ((1 << MTRR_ALGO_SHIFT) - 1)

static inline uint32_t range_entry_base_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_base(r));
}

static inline uint32_t range_entry_end_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_end(r));
}

static inline int range_entry_mtrr_type(struct range_entry *r)
{
	return range_entry_tag(r) & MTRR_TAG_MASK;
}

static int filter_vga_wrcomb(struct device *dev, struct resource *res)
{
	/* Only handle PCI devices. */
	if (dev->path.type != DEVICE_PATH_PCI)
		return 0;

	/* Only handle VGA class devices. */
	if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
		return 0;

	/* Add resource as write-combining in the address space. */
	return 1;
}

static void print_physical_address_space(const struct memranges *addr_space,
					 const char *identifier)
{
	const struct range_entry *r;

	if (identifier)
		printk(BIOS_DEBUG, "MTRR: %s Physical address space:\n",
		       identifier);
	else
		printk(BIOS_DEBUG, "MTRR: Physical address space:\n");

	memranges_each_entry(r, addr_space)
		printk(BIOS_DEBUG,
		       "0x%016llx - 0x%016llx size 0x%08llx type %ld\n",
		       range_entry_base(r), range_entry_end(r),
		       range_entry_size(r), range_entry_tag(r));
}

static struct memranges *get_physical_address_space(void)
{
	static struct memranges *addr_space;
	static struct memranges addr_space_storage;

	/* In order to handle some chipsets not being able to pre-determine
	 * uncacheable ranges, such as graphics memory, at resource insertion
	 * time, remove the uncacheable regions from the cacheable ones. */
	if (addr_space == NULL) {
		unsigned long mask;
		unsigned long match;

		addr_space = &addr_space_storage;

		mask = IORESOURCE_CACHEABLE;
		/* Collect cacheable and uncacheable address ranges. The
		 * uncacheable regions take precedence over the cacheable
		 * regions. */
		memranges_init(addr_space, mask, mask, MTRR_TYPE_WRBACK);
		memranges_add_resources(addr_space, mask, 0,
					MTRR_TYPE_UNCACHEABLE);

		/* Handle any write combining resources. Only prefetchable
		 * resources are appropriate for this MTRR type. */
		match = IORESOURCE_PREFETCH;
		mask |= match;
		memranges_add_resources_filter(addr_space, mask, match,
				MTRR_TYPE_WRCOMB, filter_vga_wrcomb);

		/* The address space below 4GiB is special. It needs to be
		 * covered entirely by range entries so that MTRR calculations
		 * can be properly done for the full 32-bit address space.
		 * Therefore, ensure holes are filled up to 4GiB as
		 * uncacheable. */
		memranges_fill_holes_up_to(addr_space,
					   RANGE_TO_PHYS_ADDR(RANGE_4GB),
					   MTRR_TYPE_UNCACHEABLE);

		print_physical_address_space(addr_space, NULL);
	}

	return addr_space;
}

/* Fixed MTRR descriptor. This structure defines the step size and the begin
 * and end (exclusive) addresses covered by a set of fixed MTRR MSRs.
 * It also describes the index at which the calculated MTRR types are stored
 * in an array. */
struct fixed_mtrr_desc {
	uint32_t begin;
	uint32_t end;
	uint32_t step;
	int range_index;
	int msr_index_base;
};

/* Shared MTRR calculations. Can be reused by APs. */
static uint8_t fixed_mtrr_types[NUM_FIXED_RANGES];

/* Fixed MTRR descriptors. */
static const struct fixed_mtrr_desc fixed_mtrr_desc[] = {
	{ PHYS_TO_RANGE_ADDR(0x000000), PHYS_TO_RANGE_ADDR(0x080000),
	  PHYS_TO_RANGE_ADDR(64 * 1024), 0, MTRR_FIX_64K_00000 },
	{ PHYS_TO_RANGE_ADDR(0x080000), PHYS_TO_RANGE_ADDR(0x0C0000),
	  PHYS_TO_RANGE_ADDR(16 * 1024), 8, MTRR_FIX_16K_80000 },
	{ PHYS_TO_RANGE_ADDR(0x0C0000), PHYS_TO_RANGE_ADDR(0x100000),
	  PHYS_TO_RANGE_ADDR(4 * 1024), 24, MTRR_FIX_4K_C0000 },
};
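/* E.g. the first descriptor covers 0x00000-0x80000 in 64KiB steps: 8 ranges
 * starting at range_index 0, all packed into the single MTRR_FIX_64K_00000
 * MSR. The three descriptors together cover 88 ranges in 11 MSRs. */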

static void calc_fixed_mtrrs(void)
{
	static int fixed_mtrr_types_initialized;
	struct memranges *phys_addr_space;
	struct range_entry *r;
	const struct fixed_mtrr_desc *desc;
	const struct fixed_mtrr_desc *last_desc;
	uint32_t begin;
	uint32_t end;
	int type_index;

	if (fixed_mtrr_types_initialized)
		return;

	phys_addr_space = get_physical_address_space();

	/* Set all fixed ranges to uncacheable first. */
	memset(&fixed_mtrr_types[0], MTRR_TYPE_UNCACHEABLE, NUM_FIXED_RANGES);

	desc = &fixed_mtrr_desc[0];
	last_desc = &fixed_mtrr_desc[ARRAY_SIZE(fixed_mtrr_desc) - 1];

	memranges_each_entry(r, phys_addr_space) {
		begin = range_entry_base_mtrr_addr(r);
		end = range_entry_end_mtrr_addr(r);

		if (begin >= last_desc->end)
			break;

		if (end > last_desc->end)
			end = last_desc->end;

		/* Get to the correct fixed mtrr descriptor. */
		while (begin >= desc->end)
			desc++;

		type_index = desc->range_index;
		type_index += (begin - desc->begin) / desc->step;

		while (begin != end) {
			unsigned char type;

			type = range_entry_tag(r);
			printk(MTRR_VERBOSE_LEVEL,
				"MTRR addr 0x%x-0x%x set to %d type @ %d\n",
				begin, begin + desc->step, type, type_index);
			if (type == MTRR_TYPE_WRBACK)
				type |= MTRR_FIXED_WRBACK_BITS;
			fixed_mtrr_types[type_index] = type;
			type_index++;
			begin += desc->step;
			if (begin == desc->end)
				desc++;
		}
	}
	fixed_mtrr_types_initialized = 1;
}

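/* Each fixed MTRR MSR packs the types of eight consecutive ranges, one per
 * byte: four in the low dword and four in the high dword, with the lowest
 * address in the least significant byte. */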
static void commit_fixed_mtrrs(void)
{
	int i;
	int j;
	int msr_num;
	int type_index;
	/* 8 ranges per msr. */
	msr_t fixed_msrs[NUM_FIXED_MTRRS];
	unsigned long msr_index[NUM_FIXED_MTRRS];

	fixed_mtrrs_expose_amd_rwdram();

	memset(&fixed_msrs, 0, sizeof(fixed_msrs));

	msr_num = 0;
	type_index = 0;
	for (i = 0; i < ARRAY_SIZE(fixed_mtrr_desc); i++) {
		const struct fixed_mtrr_desc *desc;
		int num_ranges;

		desc = &fixed_mtrr_desc[i];
		num_ranges = (desc->end - desc->begin) / desc->step;
		for (j = 0; j < num_ranges; j += RANGES_PER_FIXED_MTRR) {
			msr_index[msr_num] = desc->msr_index_base +
				(j / RANGES_PER_FIXED_MTRR);
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 0;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 8;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 16;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 24;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 0;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 8;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 16;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 24;
			msr_num++;
		}
	}

	/* Ensure that both arrays were fully initialized */
	ASSERT(msr_num == NUM_FIXED_MTRRS);

	for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
		printk(BIOS_DEBUG, "MTRR: Fixed MSR 0x%lx 0x%08x%08x\n",
		       msr_index[i], fixed_msrs[i].hi, fixed_msrs[i].lo);

	disable_cache();
	for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
		wrmsr(msr_index[i], fixed_msrs[i]);
	enable_cache();
	fixed_mtrrs_hide_amd_rwdram();
}

void x86_setup_fixed_mtrrs_no_enable(void)
{
	calc_fixed_mtrrs();
	commit_fixed_mtrrs();
}

void x86_setup_fixed_mtrrs(void)
{
	x86_setup_fixed_mtrrs_no_enable();

	printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
	enable_fixed_mtrr();
}

struct var_mtrr_regs {
	msr_t base;
	msr_t mask;
};

struct var_mtrr_solution {
	int mtrr_default_type;
	int num_used;
	struct var_mtrr_regs regs[NUM_MTRR_STATIC_STORAGE];
};

/* Global storage for variable MTRR solution. */
static struct var_mtrr_solution mtrr_global_solution;

struct var_mtrr_state {
	struct memranges *addr_space;
	int above4gb;
	int address_bits;
	int prepare_msrs;
	int mtrr_index;
	int def_mtrr_type;
	struct var_mtrr_regs *regs;
};

static void clear_var_mtrr(int index)
{
	msr_t msr = { .lo = 0, .hi = 0 };

	wrmsr(MTRR_PHYS_BASE(index), msr);
	wrmsr(MTRR_PHYS_MASK(index), msr);
}

static void prep_var_mtrr(struct var_mtrr_state *var_state,
			  uint32_t base, uint32_t size, int mtrr_type)
{
	struct var_mtrr_regs *regs;
	resource_t rbase;
	resource_t rsize;
	resource_t mask;

	/* An attempt is made to save some variable MTRRs for OS use.
	 * However, it's more important to try to map the full address space
	 * properly. */
	if (var_state->mtrr_index >= bios_mtrrs)
		printk(BIOS_WARNING, "Taking a reserved OS MTRR.\n");
	if (var_state->mtrr_index >= total_mtrrs) {
		printk(BIOS_ERR, "ERROR: Not enough MTRRs available! MTRR index is %d with %d MTRRs in total.\n",
		       var_state->mtrr_index, total_mtrrs);
		return;
	}

	rbase = base;
	rsize = size;

	rbase = RANGE_TO_PHYS_ADDR(rbase);
	rsize = RANGE_TO_PHYS_ADDR(rsize);
	rsize = -rsize;

	mask = (1ULL << var_state->address_bits) - 1;
	rsize = rsize & mask;
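	/* E.g. a 256MiB range at physical base 0xd0000000 with 36 address
	 * bits yields a mask of 0xff0000000; an access then matches this
	 * MTRR when (address & mask) == (rbase & mask). */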

	printk(BIOS_DEBUG, "MTRR: %d base 0x%016llx mask 0x%016llx type %d\n",
	       var_state->mtrr_index, rbase, rsize, mtrr_type);

	regs = &var_state->regs[var_state->mtrr_index];

	regs->base.lo = rbase;
	regs->base.lo |= mtrr_type;
	regs->base.hi = rbase >> 32;

	regs->mask.lo = rsize;
	regs->mask.lo |= MTRR_PHYS_MASK_VALID;
	regs->mask.hi = rsize >> 32;
}

static void calc_var_mtrr_range(struct var_mtrr_state *var_state,
				uint32_t base, uint32_t size, int mtrr_type)
{
	while (size != 0) {
		uint32_t addr_lsb;
		uint32_t size_msb;
		uint32_t mtrr_size;

		addr_lsb = fls(base);
		size_msb = fms(size);

		/* All MTRR entries need to have their base aligned to the mask
		 * size. The maximum size is calculated by a function of the
		 * min base bit set and maximum size bit set. */
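		/* E.g. base 0x1000 with size 0x1800: fls(base) and fms(size)
		 * are both 12, so a 0x1000 chunk is carved off first and the
		 * 0x800 remainder is handled on the next iteration. */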
		if (addr_lsb > size_msb)
			mtrr_size = 1 << size_msb;
		else
			mtrr_size = 1 << addr_lsb;

		if (var_state->prepare_msrs)
			prep_var_mtrr(var_state, base, mtrr_size, mtrr_type);

		size -= mtrr_size;
		base += mtrr_size;
		var_state->mtrr_index++;
	}
}

static uint32_t optimize_var_mtrr_hole(const uint32_t base,
				       const uint32_t hole,
				       const uint64_t limit,
				       const int carve_hole)
{
	/*
	 * With default type UC, we can potentially optimize a WB
	 * range with unaligned upper end, by aligning it up and
	 * carving the added "hole" out again.
	 *
	 * To optimize the upper end of the hole, we will test
	 * how many MTRRs calc_var_mtrr_range() will spend for any
	 * alignment of the hole's upper end.
	 *
	 * We take four parameters, the lower end of the WB range
	 * `base`, upper end of the WB range as start of the `hole`,
	 * a `limit` how far we may align the upper end of the hole
	 * up and a flag `carve_hole` whether we should count MTRRs
	 * for carving the hole out. We return the optimal upper end
	 * for the hole (which may be the same as the end of the WB
	 * range in case we don't gain anything by aligning up).
	 */
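	/*
	 * E.g. (in physical terms) a WB range 0x0-0xd8000000 with UC default
	 * costs four MTRRs (2G + 1G + 256M + 128M). Aligned up to 4GiB it
	 * costs three: one 4GiB WB MTRR plus two UC MTRRs (128M + 512M)
	 * carving the 0xd8000000-0x100000000 hole back out.
	 */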

	const int dont_care = 0;
	struct var_mtrr_state var_state = { 0, };

	unsigned int align, best_count;
	uint32_t best_end = hole;

	/* calculate MTRR count for the WB range alone (w/o a hole) */
	calc_var_mtrr_range(&var_state, base, hole - base, dont_care);
	best_count = var_state.mtrr_index;
	var_state.mtrr_index = 0;

	for (align = fls(hole) + 1; align <= fms(hole); ++align) {
		const uint64_t hole_end = ALIGN_UP((uint64_t)hole, 1 << align);
		if (hole_end > limit)
			break;

		/* calculate MTRR count for this alignment */
		calc_var_mtrr_range(
			&var_state, base, hole_end - base, dont_care);
		if (carve_hole)
			calc_var_mtrr_range(
				&var_state, hole, hole_end - hole, dont_care);

		if (var_state.mtrr_index < best_count) {
			best_count = var_state.mtrr_index;
			best_end = hole_end;
		}
		var_state.mtrr_index = 0;
	}

	return best_end;
}

static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
				     struct range_entry *r)
{
	uint32_t a1, a2, b1, b2;
	int mtrr_type, carve_hole;

	/*
	 * Determine MTRRs based on the following algorithm for the given entry:
	 * +------------------+ b2 = ALIGN_UP(end)
	 * | 0 or more bytes  | <-- hole is carved out between b1 and b2
	 * +------------------+ a2 = b1 = original end
	 * |                  |
	 * +------------------+ a1 = begin
	 *
	 * Thus, there are up to 2 sub-ranges to configure variable MTRRs for.
	 */
	mtrr_type = range_entry_mtrr_type(r);

	a1 = range_entry_base_mtrr_addr(r);
	a2 = range_entry_end_mtrr_addr(r);

	/* The end address is within the first 1MiB. The fixed MTRRs take
	 * precedence over the variable ones. Therefore this range
	 * can be ignored. */
	if (a2 <= RANGE_1MB)
		return;

	/* Again, the fixed MTRRs take precedence so the beginning
	 * of the range can be set to 0 if it starts at or below 1MiB. */
	if (a1 <= RANGE_1MB)
		a1 = 0;

	/* If the range starts above 4GiB the processing is done. */
	if (!var_state->above4gb && a1 >= RANGE_4GB)
		return;

	/* Clip the upper address to 4GiB if addresses above 4GiB
	 * are not being processed. */
	if (!var_state->above4gb && a2 > RANGE_4GB)
		a2 = RANGE_4GB;

	b1 = a2;
	b2 = a2;
	carve_hole = 0;

	/* We only consider WB type ranges for hole-carving. */
	if (mtrr_type == MTRR_TYPE_WRBACK) {
		struct range_entry *next;
		uint64_t b2_limit;
		/*
		 * Depending on the type of the next range, there are three
		 * different situations to handle:
		 *
		 * 1. WB range is last in address space:
		 *    Aligning up, up to the next power of 2, may gain us
		 *    something.
		 *
		 * 2. The next range is of type UC:
		 *    We may align up, up to the _end_ of the next range. If
		 *    there is a gap between the current and the next range,
		 *    it would have been covered by the default type UC anyway.
		 *
		 * 3. The next range is not of type UC:
		 *    We may align up, up to the _base_ of the next range. This
		 *    may either be the end of the current range (if the next
		 *    range follows immediately) or the end of the gap between
		 *    the ranges.
		 */
		next = memranges_next_entry(var_state->addr_space, r);
		if (next == NULL) {
			b2_limit = ALIGN_UP((uint64_t)b1, 1 << fms(b1));
			/* If it's the last range above 4GiB, we won't carve
			   the hole out. If an OS wanted to move MMIO there,
			   it would have to override the MTRR setting using
			   PAT just like it would with WB as default type. */
			carve_hole = a1 < RANGE_4GB;
		} else if (range_entry_mtrr_type(next)
				== MTRR_TYPE_UNCACHEABLE) {
			b2_limit = range_entry_end_mtrr_addr(next);
			carve_hole = 1;
		} else {
			b2_limit = range_entry_base_mtrr_addr(next);
			carve_hole = 1;
		}
		b2 = optimize_var_mtrr_hole(a1, b1, b2_limit, carve_hole);
	}

	calc_var_mtrr_range(var_state, a1, b2 - a1, mtrr_type);
	if (carve_hole && b2 != b1) {
		calc_var_mtrr_range(var_state, b1, b2 - b1,
				    MTRR_TYPE_UNCACHEABLE);
	}
}

static void __calc_var_mtrrs(struct memranges *addr_space,
			     int above4gb, int address_bits,
			     int *num_def_wb_mtrrs, int *num_def_uc_mtrrs)
{
	int wb_deftype_count;
	int uc_deftype_count;
	struct range_entry *r;
	struct var_mtrr_state var_state;

	/* The default MTRR cacheability type is determined by calculating
	 * the number of MTRRs required for each MTRR type as if it were the
	 * default. */
	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	var_state.prepare_msrs = 0;

	wb_deftype_count = 0;
	uc_deftype_count = 0;

	/*
	 * For each range do 2 calculations:
	 *   1. UC as default type with possible holes at top of range.
	 *   2. WB as default.
	 * The lowest count is then used as default after totaling all
	 * MTRRs. UC takes precedence in the MTRR architecture. There-
	 * fore, only holes can be used when the type of the region is
	 * MTRR_TYPE_WRBACK with MTRR_TYPE_UNCACHEABLE as the default
	 * type.
	 */
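	/* E.g. a machine that is mostly WB DRAM with a single PCI hole below
	 * 4GiB typically favors WB as default: only the hole then needs UC
	 * MTRRs, instead of one MTRR per WB block. */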
	memranges_each_entry(r, var_state.addr_space) {
		int mtrr_type;

		mtrr_type = range_entry_mtrr_type(r);

		if (mtrr_type != MTRR_TYPE_UNCACHEABLE) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_UNCACHEABLE;
			calc_var_mtrrs_with_hole(&var_state, r);
			uc_deftype_count += var_state.mtrr_index;
		}

		if (mtrr_type != MTRR_TYPE_WRBACK) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_WRBACK;
			calc_var_mtrrs_with_hole(&var_state, r);
			wb_deftype_count += var_state.mtrr_index;
		}
	}
	*num_def_wb_mtrrs = wb_deftype_count;
	*num_def_uc_mtrrs = uc_deftype_count;
}

static int calc_var_mtrrs(struct memranges *addr_space,
			  int above4gb, int address_bits)
{
	int wb_deftype_count = 0;
	int uc_deftype_count = 0;

	__calc_var_mtrrs(addr_space, above4gb, address_bits, &wb_deftype_count,
			 &uc_deftype_count);

	if (wb_deftype_count > bios_mtrrs && uc_deftype_count > bios_mtrrs) {
		printk(BIOS_DEBUG, "MTRR: Removing WRCOMB type. "
		       "WB/UC MTRR counts: %d/%d > %d.\n",
		       wb_deftype_count, uc_deftype_count, bios_mtrrs);
		memranges_update_tag(addr_space, MTRR_TYPE_WRCOMB,
				     MTRR_TYPE_UNCACHEABLE);
		__calc_var_mtrrs(addr_space, above4gb, address_bits,
				 &wb_deftype_count, &uc_deftype_count);
	}

	printk(BIOS_DEBUG, "MTRR: default type WB/UC MTRR counts: %d/%d.\n",
	       wb_deftype_count, uc_deftype_count);

	if (wb_deftype_count < uc_deftype_count) {
		printk(BIOS_DEBUG, "MTRR: WB selected as default type.\n");
		return MTRR_TYPE_WRBACK;
	}
	printk(BIOS_DEBUG, "MTRR: UC selected as default type.\n");
	return MTRR_TYPE_UNCACHEABLE;
}

static void prepare_var_mtrrs(struct memranges *addr_space, int def_type,
			      int above4gb, int address_bits,
			      struct var_mtrr_solution *sol)
{
	struct range_entry *r;
	struct var_mtrr_state var_state;

	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	/* Prepare the MSRs. */
	var_state.prepare_msrs = 1;
	var_state.mtrr_index = 0;
	var_state.def_mtrr_type = def_type;
	var_state.regs = &sol->regs[0];

	memranges_each_entry(r, var_state.addr_space) {
		if (range_entry_mtrr_type(r) == def_type)
			continue;
		calc_var_mtrrs_with_hole(&var_state, r);
	}

	/* Update the solution. */
	sol->num_used = var_state.mtrr_index;
}

static int commit_var_mtrrs(const struct var_mtrr_solution *sol)
{
	int i;

	if (sol->num_used > total_mtrrs) {
		printk(BIOS_WARNING, "Not enough MTRRs: %d vs %d\n",
		       sol->num_used, total_mtrrs);
		return -1;
	}

	/* Write out the variable MTRRs. */
	disable_cache();
	for (i = 0; i < sol->num_used; i++) {
		wrmsr(MTRR_PHYS_BASE(i), sol->regs[i].base);
		wrmsr(MTRR_PHYS_MASK(i), sol->regs[i].mask);
	}
	/* Clear the ones that are unused. */
	for (; i < total_mtrrs; i++)
		clear_var_mtrr(i);
	enable_var_mtrr(sol->mtrr_default_type);
	enable_cache();

	return 0;
}

void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
{
	static struct var_mtrr_solution *sol = NULL;
	struct memranges *addr_space;

	addr_space = get_physical_address_space();

	if (sol == NULL) {
		sol = &mtrr_global_solution;
		sol->mtrr_default_type =
			calc_var_mtrrs(addr_space, !!above4gb, address_bits);
		prepare_var_mtrrs(addr_space, sol->mtrr_default_type,
				  !!above4gb, address_bits, sol);
	}

	commit_var_mtrrs(sol);
}

void x86_setup_mtrrs(void)
{
	int address_size;

	x86_setup_fixed_mtrrs();
	address_size = cpu_phys_address_size();
	printk(BIOS_DEBUG, "CPU physical address size: %d bits\n",
	       address_size);
	/* Always handle addresses above 4GiB. */
	x86_setup_var_mtrrs(address_size, 1);
}

void x86_setup_mtrrs_with_detect(void)
{
	detect_var_mtrrs();
	x86_setup_mtrrs();
}

void x86_mtrr_check(void)
{
	/* Only Pentium Pro and later have MTRR */
	msr_t msr;
	printk(BIOS_DEBUG, "\nMTRR check\n");

	msr = rdmsr(MTRR_DEF_TYPE_MSR);

	printk(BIOS_DEBUG, "Fixed MTRRs : ");
	if (msr.lo & MTRR_DEF_TYPE_FIX_EN)
		printk(BIOS_DEBUG, "Enabled\n");
	else
		printk(BIOS_DEBUG, "Disabled\n");

	printk(BIOS_DEBUG, "Variable MTRRs: ");
	if (msr.lo & MTRR_DEF_TYPE_EN)
		printk(BIOS_DEBUG, "Enabled\n");
	else
		printk(BIOS_DEBUG, "Disabled\n");

	printk(BIOS_DEBUG, "\n");

	post_code(0x93);
}

static bool put_back_original_solution;

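/* Install a temporary MTRR solution that additionally covers the given range
 * with the given type. On success, the original solution is put back by the
 * remove_temp_solution() boot state hooks registered at the bottom of this
 * file. */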
void mtrr_use_temp_range(uintptr_t begin, size_t size, int type)
{
	const struct range_entry *r;
	const struct memranges *orig;
	struct var_mtrr_solution sol;
	struct memranges addr_space;
	const int above4gb = 1; /* Cover above 4GiB by default. */
	int address_bits;

	/* Make a copy of the original address space and tweak it with the
	 * provided range. */
	memranges_init_empty(&addr_space, NULL, 0);
	orig = get_physical_address_space();
	memranges_each_entry(r, orig) {
		unsigned long tag = range_entry_tag(r);

		/* Remove any write combining MTRRs from the temporary
		 * solution as it just fragments the address space. */
		if (tag == MTRR_TYPE_WRCOMB)
			tag = MTRR_TYPE_UNCACHEABLE;

		memranges_insert(&addr_space, range_entry_base(r),
				 range_entry_size(r), tag);
	}

	/* Place new range into the address space. */
	memranges_insert(&addr_space, begin, size, type);

	print_physical_address_space(&addr_space, "TEMPORARY");

	/* Calculate a new solution with the updated address space. */
	address_bits = cpu_phys_address_size();
	memset(&sol, 0, sizeof(sol));
	sol.mtrr_default_type =
		calc_var_mtrrs(&addr_space, above4gb, address_bits);
	prepare_var_mtrrs(&addr_space, sol.mtrr_default_type,
			  above4gb, address_bits, &sol);

	if (commit_var_mtrrs(&sol) < 0)
		printk(BIOS_WARNING, "Unable to insert temporary MTRR range: 0x%016llx - 0x%016llx size 0x%08llx type %d\n",
		       (long long)begin, (long long)begin + size,
		       (long long)size, type);
	else
		put_back_original_solution = true;

	memranges_teardown(&addr_space);
}

static void remove_temp_solution(void *unused)
{
	if (put_back_original_solution)
		commit_var_mtrrs(&mtrr_global_solution);
}

BOOT_STATE_INIT_ENTRY(BS_OS_RESUME, BS_ON_ENTRY, remove_temp_solution, NULL);
BOOT_STATE_INIT_ENTRY(BS_PAYLOAD_LOAD, BS_ON_EXIT, remove_temp_solution, NULL);