/* SPDX-License-Identifier: GPL-2.0-or-later */

/*
 * mtrr.c: setting MTRR to decent values for cache initialization on P6
 * Derived from intel_set_mtrr in intel_subr.c and mtrr.c in linux kernel
 *
 * Reference: Intel Architecture Software Developer's Manual, Volume 3: System
 * Programming
 */
Yinghai Lu13f1c2a2005-07-08 02:49:49 +000010
Eric Biedermanf8a2ddd2004-10-30 08:05:41 +000011#include <stddef.h>
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050012#include <string.h>
Aaron Durbinbebf6692013-04-24 20:59:43 -050013#include <bootstate.h>
Elyes HAOUASd26844c2019-06-21 07:31:40 +020014#include <commonlib/helpers.h>
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000015#include <console/console.h>
16#include <device/device.h>
Aaron Durbinca4f4b82014-02-08 15:41:52 -060017#include <device/pci_ids.h>
Aaron Durbinebf142a2013-03-29 16:23:23 -050018#include <cpu/cpu.h>
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000019#include <cpu/x86/msr.h>
20#include <cpu/x86/mtrr.h>
21#include <cpu/x86/cache.h>
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050022#include <memrange.h>
Aaron Durbin57686f82013-03-20 15:50:59 -050023#include <cpu/amd/mtrr.h>
Richard Spiegelb28025a2019-02-20 11:00:19 -070024#include <assert.h>
/* On AMD-style fixed MTRRs, a write-back fixed range must also assert the
 * RdMem/WrMem bits so accesses are directed to DRAM; Intel has no such bits. */
#if CONFIG(X86_AMD_FIXED_MTRRS)
#define MTRR_FIXED_WRBACK_BITS (MTRR_READ_MEM | MTRR_WRITE_MEM)
#else
#define MTRR_FIXED_WRBACK_BITS 0
#endif
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000030
/* 2 MTRRS are reserved for the operating system */
#define BIOS_MTRRS 6
#define OS_MTRRS 2
#define MTRRS (BIOS_MTRRS + OS_MTRRS)
/*
 * Static storage size for variable MTRRs. It's sized sufficiently large to
 * handle different types of CPUs. Empirically, 16 variable MTRRs has not
 * yet been observed.
 */
#define NUM_MTRR_STATIC_STORAGE 16

/* Defaults; updated from MTRRcap by detect_var_mtrrs(). */
static int total_mtrrs = MTRRS;
static int bios_mtrrs = BIOS_MTRRS;
44
45static void detect_var_mtrrs(void)
46{
47 msr_t msr;
48
Alexandru Gagniuc86091f92015-09-30 20:23:09 -070049 msr = rdmsr(MTRR_CAP_MSR);
Stefan Reinauerc00dfbc2012-04-03 16:24:37 -070050
51 total_mtrrs = msr.lo & 0xff;
Gabe Black7756fe72014-02-25 01:40:34 -080052
53 if (total_mtrrs > NUM_MTRR_STATIC_STORAGE) {
54 printk(BIOS_WARNING,
55 "MTRRs detected (%d) > NUM_MTRR_STATIC_STORAGE (%d)\n",
56 total_mtrrs, NUM_MTRR_STATIC_STORAGE);
57 total_mtrrs = NUM_MTRR_STATIC_STORAGE;
58 }
Stefan Reinauerc00dfbc2012-04-03 16:24:37 -070059 bios_mtrrs = total_mtrrs - OS_MTRRS;
60}
61
Yinghai Lu13f1c2a2005-07-08 02:49:49 +000062void enable_fixed_mtrr(void)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000063{
64 msr_t msr;
65
Alexandru Gagniuc86091f92015-09-30 20:23:09 -070066 msr = rdmsr(MTRR_DEF_TYPE_MSR);
67 msr.lo |= MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN;
68 wrmsr(MTRR_DEF_TYPE_MSR, msr);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000069}
70
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -060071void fixed_mtrrs_expose_amd_rwdram(void)
72{
73 msr_t syscfg;
74
Julius Wernercd49cce2019-03-05 16:53:33 -080075 if (!CONFIG(X86_AMD_FIXED_MTRRS))
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -060076 return;
77
78 syscfg = rdmsr(SYSCFG_MSR);
79 syscfg.lo |= SYSCFG_MSR_MtrrFixDramModEn;
80 wrmsr(SYSCFG_MSR, syscfg);
81}
82
83void fixed_mtrrs_hide_amd_rwdram(void)
84{
85 msr_t syscfg;
86
Julius Wernercd49cce2019-03-05 16:53:33 -080087 if (!CONFIG(X86_AMD_FIXED_MTRRS))
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -060088 return;
89
90 syscfg = rdmsr(SYSCFG_MSR);
91 syscfg.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
92 wrmsr(SYSCFG_MSR, syscfg);
93}
94
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050095static void enable_var_mtrr(unsigned char deftype)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000096{
97 msr_t msr;
98
Alexandru Gagniuc86091f92015-09-30 20:23:09 -070099 msr = rdmsr(MTRR_DEF_TYPE_MSR);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500100 msr.lo &= ~0xff;
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700101 msr.lo |= MTRR_DEF_TYPE_EN | deftype;
102 wrmsr(MTRR_DEF_TYPE_MSR, msr);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000103}
104
/* Log level for per-range MTRR tracing; BIOS_NEVER compiles the spew out. */
#define MTRR_VERBOSE_LEVEL BIOS_NEVER

/* MTRRs are at a 4KiB granularity. Therefore all address calculations can
 * be done with 32-bit numbers. This allows for the MTRR code to handle
 * up to 2^44 bytes (16 TiB) of address space. */
#define RANGE_SHIFT 12
#define ADDR_SHIFT_TO_RANGE_SHIFT(x) \
	(((x) > RANGE_SHIFT) ? ((x) - RANGE_SHIFT) : RANGE_SHIFT)
#define PHYS_TO_RANGE_ADDR(x) ((x) >> RANGE_SHIFT)
#define RANGE_TO_PHYS_ADDR(x) (((resource_t)(x)) << RANGE_SHIFT)
#define NUM_FIXED_MTRRS (NUM_FIXED_RANGES / RANGES_PER_FIXED_MTRR)

/* Helpful constants. */
#define RANGE_1MB PHYS_TO_RANGE_ADDR(1 << 20)
#define RANGE_4GB (1 << (ADDR_SHIFT_TO_RANGE_SHIFT(32)))

/* The memrange tag stores the MTRR type in its low byte. */
#define MTRR_ALGO_SHIFT (8)
#define MTRR_TAG_MASK ((1 << MTRR_ALGO_SHIFT) - 1)
Aaron Durbine3834422013-03-28 20:48:51 -0500123
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500124static inline uint32_t range_entry_base_mtrr_addr(struct range_entry *r)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000125{
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500126 return PHYS_TO_RANGE_ADDR(range_entry_base(r));
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000127}
128
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500129static inline uint32_t range_entry_end_mtrr_addr(struct range_entry *r)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000130{
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500131 return PHYS_TO_RANGE_ADDR(range_entry_end(r));
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000132}
133
Aaron Durbine3834422013-03-28 20:48:51 -0500134static inline int range_entry_mtrr_type(struct range_entry *r)
135{
136 return range_entry_tag(r) & MTRR_TAG_MASK;
137}
138
Aaron Durbinca4f4b82014-02-08 15:41:52 -0600139static int filter_vga_wrcomb(struct device *dev, struct resource *res)
140{
141 /* Only handle PCI devices. */
142 if (dev->path.type != DEVICE_PATH_PCI)
143 return 0;
144
145 /* Only handle VGA class devices. */
146 if (((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA))
147 return 0;
148
149 /* Add resource as write-combining in the address space. */
150 return 1;
151}
152
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600153static void print_physical_address_space(const struct memranges *addr_space,
154 const char *identifier)
155{
156 const struct range_entry *r;
157
158 if (identifier)
159 printk(BIOS_DEBUG, "MTRR: %s Physical address space:\n",
160 identifier);
161 else
162 printk(BIOS_DEBUG, "MTRR: Physical address space:\n");
163
164 memranges_each_entry(r, addr_space)
165 printk(BIOS_DEBUG,
166 "0x%016llx - 0x%016llx size 0x%08llx type %ld\n",
167 range_entry_base(r), range_entry_end(r),
168 range_entry_size(r), range_entry_tag(r));
169}
170
/* Build (once) and return the tagged physical address space used for all
 * MTRR calculations. Ranges are tagged with their MTRR type; insertion
 * order matters: UC overrides cacheable, WRCOMB overrides both. */
static struct memranges *get_physical_address_space(void)
{
	static struct memranges *addr_space;
	static struct memranges addr_space_storage;

	/* In order to handle some chipsets not being able to pre-determine
	 * uncacheable ranges, such as graphics memory, at resource insertion
	 * time remove uncacheable regions from the cacheable ones. */
	if (addr_space == NULL) {
		unsigned long mask;
		unsigned long match;

		addr_space = &addr_space_storage;

		mask = IORESOURCE_CACHEABLE;
		/* Collect cacheable and uncacheable address ranges. The
		 * uncacheable regions take precedence over the cacheable
		 * regions. */
		memranges_init(addr_space, mask, mask, MTRR_TYPE_WRBACK);
		memranges_add_resources(addr_space, mask, 0,
					MTRR_TYPE_UNCACHEABLE);

		/* Handle any write combining resources. Only prefetchable
		 * resources are appropriate for this MTRR type. */
		match = IORESOURCE_PREFETCH;
		mask |= match;
		memranges_add_resources_filter(addr_space, mask, match,
				MTRR_TYPE_WRCOMB, filter_vga_wrcomb);

		/* The address space below 4GiB is special. It needs to be
		 * covered entirely by range entries so that MTRR calculations
		 * can be properly done for the full 32-bit address space.
		 * Therefore, ensure holes are filled up to 4GiB as
		 * uncacheable */
		memranges_fill_holes_up_to(addr_space,
				RANGE_TO_PHYS_ADDR(RANGE_4GB),
				MTRR_TYPE_UNCACHEABLE);

		print_physical_address_space(addr_space, NULL);
	}

	return addr_space;
}
214
/* Fixed MTRR descriptor. This structure defines the step size and begin
 * and end (exclusive) address covered by a set of fixed MTRR MSRs.
 * It also describes the offset in byte intervals to store the calculated MTRR
 * type in an array. */
struct fixed_mtrr_desc {
	uint32_t begin;		/* first range address covered (range units) */
	uint32_t end;		/* exclusive end (range units) */
	uint32_t step;		/* granularity of one sub-range (range units) */
	int range_index;	/* starting index into fixed_mtrr_types[] */
	int msr_index_base;	/* first MSR number of this group */
};

/* Shared MTRR calculations. Can be reused by APs. */
static uint8_t fixed_mtrr_types[NUM_FIXED_RANGES];

/* Fixed MTRR descriptors: 64KiB ranges below 512KiB, 16KiB ranges up to
 * 768KiB, 4KiB ranges up to 1MiB — per the x86 fixed MTRR architecture. */
static const struct fixed_mtrr_desc fixed_mtrr_desc[] = {
	{ PHYS_TO_RANGE_ADDR(0x000000), PHYS_TO_RANGE_ADDR(0x080000),
	  PHYS_TO_RANGE_ADDR(64 * 1024), 0, MTRR_FIX_64K_00000 },
	{ PHYS_TO_RANGE_ADDR(0x080000), PHYS_TO_RANGE_ADDR(0x0C0000),
	  PHYS_TO_RANGE_ADDR(16 * 1024), 8, MTRR_FIX_16K_80000 },
	{ PHYS_TO_RANGE_ADDR(0x0C0000), PHYS_TO_RANGE_ADDR(0x100000),
	  PHYS_TO_RANGE_ADDR(4 * 1024), 24, MTRR_FIX_4K_C0000 },
};
239
/* Fill fixed_mtrr_types[] from the physical address space: walk the tagged
 * ranges below 1MiB and record each sub-range's MTRR type. Runs once; the
 * result is shared (e.g. reusable by APs). */
static void calc_fixed_mtrrs(void)
{
	static int fixed_mtrr_types_initialized;
	struct memranges *phys_addr_space;
	struct range_entry *r;
	const struct fixed_mtrr_desc *desc;
	const struct fixed_mtrr_desc *last_desc;
	uint32_t begin;
	uint32_t end;
	int type_index;

	if (fixed_mtrr_types_initialized)
		return;

	phys_addr_space = get_physical_address_space();

	/* Set all fixed ranges to uncacheable first. */
	memset(&fixed_mtrr_types[0], MTRR_TYPE_UNCACHEABLE, NUM_FIXED_RANGES);

	desc = &fixed_mtrr_desc[0];
	last_desc = &fixed_mtrr_desc[ARRAY_SIZE(fixed_mtrr_desc) - 1];

	memranges_each_entry(r, phys_addr_space) {
		begin = range_entry_base_mtrr_addr(r);
		end = range_entry_end_mtrr_addr(r);

		/* Ranges are sorted; past 1MiB nothing else is fixed. */
		if (begin >= last_desc->end)
			break;

		if (end > last_desc->end)
			end = last_desc->end;

		/* Get to the correct fixed mtrr descriptor. */
		while (begin >= desc->end)
			desc++;

		type_index = desc->range_index;
		type_index += (begin - desc->begin) / desc->step;

		/* Stamp the range's type into each covered sub-range,
		 * advancing to the next descriptor at group boundaries. */
		while (begin != end) {
			unsigned char type;

			type = range_entry_tag(r);
			printk(MTRR_VERBOSE_LEVEL,
				"MTRR addr 0x%x-0x%x set to %d type @ %d\n",
				begin, begin + desc->step, type, type_index);
			if (type == MTRR_TYPE_WRBACK)
				type |= MTRR_FIXED_WRBACK_BITS;
			fixed_mtrr_types[type_index] = type;
			type_index++;
			begin += desc->step;
			if (begin == desc->end)
				desc++;
		}
	}
	fixed_mtrr_types_initialized = 1;
}
297
298static void commit_fixed_mtrrs(void)
299{
300 int i;
301 int j;
302 int msr_num;
303 int type_index;
304 /* 8 ranges per msr. */
305 msr_t fixed_msrs[NUM_FIXED_MTRRS];
306 unsigned long msr_index[NUM_FIXED_MTRRS];
307
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -0600308 fixed_mtrrs_expose_amd_rwdram();
309
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500310 memset(&fixed_msrs, 0, sizeof(fixed_msrs));
311
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500312 msr_num = 0;
313 type_index = 0;
314 for (i = 0; i < ARRAY_SIZE(fixed_mtrr_desc); i++) {
315 const struct fixed_mtrr_desc *desc;
316 int num_ranges;
317
318 desc = &fixed_mtrr_desc[i];
319 num_ranges = (desc->end - desc->begin) / desc->step;
320 for (j = 0; j < num_ranges; j += RANGES_PER_FIXED_MTRR) {
321 msr_index[msr_num] = desc->msr_index_base +
322 (j / RANGES_PER_FIXED_MTRR);
323 fixed_msrs[msr_num].lo |=
324 fixed_mtrr_types[type_index++] << 0;
325 fixed_msrs[msr_num].lo |=
326 fixed_mtrr_types[type_index++] << 8;
327 fixed_msrs[msr_num].lo |=
328 fixed_mtrr_types[type_index++] << 16;
329 fixed_msrs[msr_num].lo |=
330 fixed_mtrr_types[type_index++] << 24;
331 fixed_msrs[msr_num].hi |=
332 fixed_mtrr_types[type_index++] << 0;
333 fixed_msrs[msr_num].hi |=
334 fixed_mtrr_types[type_index++] << 8;
335 fixed_msrs[msr_num].hi |=
336 fixed_mtrr_types[type_index++] << 16;
337 fixed_msrs[msr_num].hi |=
338 fixed_mtrr_types[type_index++] << 24;
339 msr_num++;
340 }
341 }
342
Jacob Garber5b922722019-05-28 11:47:49 -0600343 /* Ensure that both arrays were fully initialized */
344 ASSERT(msr_num == NUM_FIXED_MTRRS)
345
Gabe Black7756fe72014-02-25 01:40:34 -0800346 for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500347 printk(BIOS_DEBUG, "MTRR: Fixed MSR 0x%lx 0x%08x%08x\n",
348 msr_index[i], fixed_msrs[i].hi, fixed_msrs[i].lo);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500349
Gabe Black7756fe72014-02-25 01:40:34 -0800350 disable_cache();
351 for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
352 wrmsr(msr_index[i], fixed_msrs[i]);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500353 enable_cache();
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -0600354 fixed_mtrrs_hide_amd_rwdram();
355
Eric Biedermanf8a2ddd2004-10-30 08:05:41 +0000356}
357
/* Compute and write the fixed MTRRs without touching the global/fixed
 * MTRR enable bits; callers enable them separately if desired. */
void x86_setup_fixed_mtrrs_no_enable(void)
{
	calc_fixed_mtrrs();
	commit_fixed_mtrrs();
}
Stefan Reinauer7f86ed12009-02-12 16:02:16 +0000363
Aaron Durbin57686f82013-03-20 15:50:59 -0500364void x86_setup_fixed_mtrrs(void)
365{
366 x86_setup_fixed_mtrrs_no_enable();
367
368 printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
369 enable_fixed_mtrr();
370}
371
/* MSR image pair for one variable MTRR (PHYS_BASE/PHYS_MASK). */
struct var_mtrr_regs {
	msr_t base;
	msr_t mask;
};

/* A complete variable-MTRR programming: default type plus the prepared
 * register images actually used. */
struct var_mtrr_solution {
	int mtrr_default_type;
	int num_used;
	struct var_mtrr_regs regs[NUM_MTRR_STATIC_STORAGE];
};

/* Global storage for variable MTRR solution. */
static struct var_mtrr_solution mtrr_global_solution;

/* Working state threaded through the variable MTRR calculations. */
struct var_mtrr_state {
	struct memranges *addr_space;	/* tagged physical address space */
	int above4gb;			/* process ranges above 4GiB? */
	int address_bits;		/* physical address width */
	int prepare_msrs;		/* 0 = count only, 1 = fill regs[] */
	int mtrr_index;			/* next MTRR slot / running count */
	int def_mtrr_type;		/* assumed default memory type */
	struct var_mtrr_regs *regs;	/* output array when preparing */
};
Aaron Durbin57686f82013-03-20 15:50:59 -0500395
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500396static void clear_var_mtrr(int index)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000397{
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600398 msr_t msr = { .lo = 0, .hi = 0 };
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500399
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600400 wrmsr(MTRR_PHYS_BASE(index), msr);
401 wrmsr(MTRR_PHYS_MASK(index), msr);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500402}
403
/* Prepare the PHYS_BASE/PHYS_MASK register images for one variable MTRR
 * covering [base, base+size) (in 4KiB range units) with the given type.
 * Does not touch hardware; only fills var_state->regs. */
static void prep_var_mtrr(struct var_mtrr_state *var_state,
			  uint32_t base, uint32_t size, int mtrr_type)
{
	struct var_mtrr_regs *regs;
	resource_t rbase;
	resource_t rsize;
	resource_t mask;

	/* Some variable MTRRs are attempted to be saved for the OS use.
	 * However, it's more important to try to map the full address space
	 * properly. */
	if (var_state->mtrr_index >= bios_mtrrs)
		printk(BIOS_WARNING, "Taking a reserved OS MTRR.\n");
	if (var_state->mtrr_index >= total_mtrrs) {
		printk(BIOS_ERR, "ERROR: Not enough MTRRs available! MTRR index is %d with %d MTRRs in total.\n",
			var_state->mtrr_index, total_mtrrs);
		return;
	}

	rbase = base;
	rsize = size;

	/* Scale back from 4KiB range units to physical addresses. */
	rbase = RANGE_TO_PHYS_ADDR(rbase);
	rsize = RANGE_TO_PHYS_ADDR(rsize);
	/* The MTRR mask is -size truncated to the physical address width:
	 * for a power-of-2 size this yields the architectural mask value. */
	rsize = -rsize;

	mask = (1ULL << var_state->address_bits) - 1;
	rsize = rsize & mask;

	printk(BIOS_DEBUG, "MTRR: %d base 0x%016llx mask 0x%016llx type %d\n",
		var_state->mtrr_index, rbase, rsize, mtrr_type);

	regs = &var_state->regs[var_state->mtrr_index];

	/* Memory type lives in the low byte of PHYS_BASE. */
	regs->base.lo = rbase;
	regs->base.lo |= mtrr_type;
	regs->base.hi = rbase >> 32;

	regs->mask.lo = rsize;
	regs->mask.lo |= MTRR_PHYS_MASK_VALID;
	regs->mask.hi = rsize >> 32;
}
446
447static void calc_var_mtrr_range(struct var_mtrr_state *var_state,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700448 uint32_t base, uint32_t size, int mtrr_type)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500449{
450 while (size != 0) {
451 uint32_t addr_lsb;
452 uint32_t size_msb;
453 uint32_t mtrr_size;
454
455 addr_lsb = fls(base);
456 size_msb = fms(size);
457
458 /* All MTRR entries need to have their base aligned to the mask
459 * size. The maximum size is calculated by a function of the
460 * min base bit set and maximum size bit set. */
461 if (addr_lsb > size_msb)
462 mtrr_size = 1 << size_msb;
463 else
464 mtrr_size = 1 << addr_lsb;
465
Gabe Black7756fe72014-02-25 01:40:34 -0800466 if (var_state->prepare_msrs)
467 prep_var_mtrr(var_state, base, mtrr_size, mtrr_type);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500468
469 size -= mtrr_size;
470 base += mtrr_size;
471 var_state->mtrr_index++;
472 }
473}
474
/* Search for the alignment of the WB range's upper end that minimizes the
 * total MTRR count; returns the best end for the carved hole (possibly the
 * original end if aligning up gains nothing). */
static uint32_t optimize_var_mtrr_hole(const uint32_t base,
				       const uint32_t hole,
				       const uint64_t limit,
				       const int carve_hole)
{
	/*
	 * With default type UC, we can potentially optimize a WB
	 * range with unaligned upper end, by aligning it up and
	 * carving the added "hole" out again.
	 *
	 * To optimize the upper end of the hole, we will test
	 * how many MTRRs calc_var_mtrr_range() will spend for any
	 * alignment of the hole's upper end.
	 *
	 * We take four parameters, the lower end of the WB range
	 * `base`, upper end of the WB range as start of the `hole`,
	 * a `limit` how far we may align the upper end of the hole
	 * up and a flag `carve_hole` whether we should count MTRRs
	 * for carving the hole out. We return the optimal upper end
	 * for the hole (which may be the same as the end of the WB
	 * range in case we don't gain anything by aligning up).
	 */

	const int dont_care = 0;	/* type is irrelevant for counting */
	struct var_mtrr_state var_state = { 0, };

	unsigned int align, best_count;
	uint32_t best_end = hole;

	/* calculate MTRR count for the WB range alone (w/o a hole) */
	calc_var_mtrr_range(&var_state, base, hole - base, dont_care);
	best_count = var_state.mtrr_index;
	var_state.mtrr_index = 0;

	/* Try every coarser alignment of the hole's end up to `limit`. */
	for (align = fls(hole) + 1; align <= fms(hole); ++align) {
		const uint64_t hole_end = ALIGN_UP((uint64_t)hole, 1 << align);
		if (hole_end > limit)
			break;

		/* calculate MTRR count for this alignment */
		calc_var_mtrr_range(
			&var_state, base, hole_end - base, dont_care);
		if (carve_hole)
			calc_var_mtrr_range(
				&var_state, hole, hole_end - hole, dont_care);

		if (var_state.mtrr_index < best_count) {
			best_count = var_state.mtrr_index;
			best_end = hole_end;
		}
		var_state.mtrr_index = 0;
	}

	return best_end;
}
530
/* Account (and optionally prepare) variable MTRRs for one range entry,
 * possibly extending a WB range upward and carving the added span back
 * out as a UC "hole" when that lowers the total MTRR count. */
static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
				struct range_entry *r)
{
	uint32_t a1, a2, b1, b2;
	int mtrr_type, carve_hole;

	/*
	 * Determine MTRRs based on the following algorithm for the given entry:
	 * +------------------+ b2 = ALIGN_UP(end)
	 * | 0 or more bytes  | <-- hole is carved out between b1 and b2
	 * +------------------+ a2 = b1 = original end
	 * |                  |
	 * +------------------+ a1 = begin
	 *
	 * Thus, there are up to 2 sub-ranges to configure variable MTRRs for.
	 */
	mtrr_type = range_entry_mtrr_type(r);

	a1 = range_entry_base_mtrr_addr(r);
	a2 = range_entry_end_mtrr_addr(r);

	/* The end address is within the first 1MiB. The fixed MTRRs take
	 * precedence over the variable ones. Therefore this range
	 * can be ignored. */
	if (a2 <= RANGE_1MB)
		return;

	/* Again, the fixed MTRRs take precedence so the beginning
	 * of the range can be set to 0 if it starts at or below 1MiB. */
	if (a1 <= RANGE_1MB)
		a1 = 0;

	/* If the range starts above 4GiB the processing is done. */
	if (!var_state->above4gb && a1 >= RANGE_4GB)
		return;

	/* Clip the upper address to 4GiB if addresses above 4GiB
	 * are not being processed. */
	if (!var_state->above4gb && a2 > RANGE_4GB)
		a2 = RANGE_4GB;

	b1 = a2;
	b2 = a2;
	carve_hole = 0;

	/* We only consider WB type ranges for hole-carving. */
	if (mtrr_type == MTRR_TYPE_WRBACK) {
		struct range_entry *next;
		uint64_t b2_limit;
		/*
		 * Depending on the type of the next range, there are three
		 * different situations to handle:
		 *
		 * 1. WB range is last in address space:
		 *    Aligning up, up to the next power of 2, may gain us
		 *    something.
		 *
		 * 2. The next range is of type UC:
		 *    We may align up, up to the _end_ of the next range. If
		 *    there is a gap between the current and the next range,
		 *    it would have been covered by the default type UC anyway.
		 *
		 * 3. The next range is not of type UC:
		 *    We may align up, up to the _base_ of the next range. This
		 *    may either be the end of the current range (if the next
		 *    range follows immediately) or the end of the gap between
		 *    the ranges.
		 */
		next = memranges_next_entry(var_state->addr_space, r);
		if (next == NULL) {
			b2_limit = ALIGN_UP((uint64_t)b1, 1 << fms(b1));
			/* If it's the last range above 4GiB, we won't carve
			   the hole out. If an OS wanted to move MMIO there,
			   it would have to override the MTRR setting using
			   PAT just like it would with WB as default type. */
			carve_hole = a1 < RANGE_4GB;
		} else if (range_entry_mtrr_type(next)
				== MTRR_TYPE_UNCACHEABLE) {
			b2_limit = range_entry_end_mtrr_addr(next);
			carve_hole = 1;
		} else {
			b2_limit = range_entry_base_mtrr_addr(next);
			carve_hole = 1;
		}
		b2 = optimize_var_mtrr_hole(a1, b1, b2_limit, carve_hole);
	}

	calc_var_mtrr_range(var_state, a1, b2 - a1, mtrr_type);
	if (carve_hole && b2 != b1) {
		calc_var_mtrr_range(var_state, b1, b2 - b1,
				MTRR_TYPE_UNCACHEABLE);
	}
}
624
/* Count how many variable MTRRs would be needed with WB as the default
 * type versus UC as the default type; results are returned through the
 * two out-parameters so the cheaper default can be chosen. */
static void __calc_var_mtrrs(struct memranges *addr_space,
				int above4gb, int address_bits,
				int *num_def_wb_mtrrs, int *num_def_uc_mtrrs)
{
	int wb_deftype_count;
	int uc_deftype_count;
	struct range_entry *r;
	struct var_mtrr_state var_state;

	/* The default MTRR cacheability type is determined by calculating
	 * the number of MTRRs required for each MTRR type as if it was the
	 * default. */
	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	var_state.prepare_msrs = 0;	/* counting only, no MSR images */

	wb_deftype_count = 0;
	uc_deftype_count = 0;

	/*
	 * For each range do 2 calculations:
	 *   1. UC as default type with possible holes at top of range.
	 *   2. WB as default.
	 * The lowest count is then used as default after totaling all
	 * MTRRs. UC takes precedence in the MTRR architecture. There-
	 * fore, only holes can be used when the type of the region is
	 * MTRR_TYPE_WRBACK with MTRR_TYPE_UNCACHEABLE as the default
	 * type.
	 */
	memranges_each_entry(r, var_state.addr_space) {
		int mtrr_type;

		mtrr_type = range_entry_mtrr_type(r);

		/* A range matching the default type costs no MTRR. */
		if (mtrr_type != MTRR_TYPE_UNCACHEABLE) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_UNCACHEABLE;
			calc_var_mtrrs_with_hole(&var_state, r);
			uc_deftype_count += var_state.mtrr_index;
		}

		if (mtrr_type != MTRR_TYPE_WRBACK) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_WRBACK;
			calc_var_mtrrs_with_hole(&var_state, r);
			wb_deftype_count += var_state.mtrr_index;
		}
	}
	*num_def_wb_mtrrs = wb_deftype_count;
	*num_def_uc_mtrrs = uc_deftype_count;
}
677
678static int calc_var_mtrrs(struct memranges *addr_space,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700679 int above4gb, int address_bits)
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600680{
681 int wb_deftype_count = 0;
682 int uc_deftype_count = 0;
683
684 __calc_var_mtrrs(addr_space, above4gb, address_bits, &wb_deftype_count,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700685 &uc_deftype_count);
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600686
687 if (wb_deftype_count > bios_mtrrs && uc_deftype_count > bios_mtrrs) {
688 printk(BIOS_DEBUG, "MTRR: Removing WRCOMB type. "
689 "WB/UC MTRR counts: %d/%d > %d.\n",
690 wb_deftype_count, uc_deftype_count, bios_mtrrs);
691 memranges_update_tag(addr_space, MTRR_TYPE_WRCOMB,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700692 MTRR_TYPE_UNCACHEABLE);
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600693 __calc_var_mtrrs(addr_space, above4gb, address_bits,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700694 &wb_deftype_count, &uc_deftype_count);
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600695 }
Scott Duplichanf3cce2f2010-11-13 19:07:59 +0000696
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500697 printk(BIOS_DEBUG, "MTRR: default type WB/UC MTRR counts: %d/%d.\n",
698 wb_deftype_count, uc_deftype_count);
Kyösti Mälkkiffc1fb32012-07-11 14:40:19 +0300699
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500700 if (wb_deftype_count < uc_deftype_count) {
701 printk(BIOS_DEBUG, "MTRR: WB selected as default type.\n");
702 return MTRR_TYPE_WRBACK;
703 }
704 printk(BIOS_DEBUG, "MTRR: UC selected as default type.\n");
705 return MTRR_TYPE_UNCACHEABLE;
706}
Kyösti Mälkkiffc1fb32012-07-11 14:40:19 +0300707
Gabe Black7756fe72014-02-25 01:40:34 -0800708static void prepare_var_mtrrs(struct memranges *addr_space, int def_type,
709 int above4gb, int address_bits,
710 struct var_mtrr_solution *sol)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500711{
Aaron Durbine3834422013-03-28 20:48:51 -0500712 struct range_entry *r;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500713 struct var_mtrr_state var_state;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500714
715 var_state.addr_space = addr_space;
716 var_state.above4gb = above4gb;
717 var_state.address_bits = address_bits;
Gabe Black7756fe72014-02-25 01:40:34 -0800718 /* Prepare the MSRs. */
719 var_state.prepare_msrs = 1;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500720 var_state.mtrr_index = 0;
721 var_state.def_mtrr_type = def_type;
Gabe Black7756fe72014-02-25 01:40:34 -0800722 var_state.regs = &sol->regs[0];
Aaron Durbine3834422013-03-28 20:48:51 -0500723
724 memranges_each_entry(r, var_state.addr_space) {
725 if (range_entry_mtrr_type(r) == def_type)
726 continue;
Nico Huber64f0bcb2017-10-07 16:37:04 +0200727 calc_var_mtrrs_with_hole(&var_state, r);
Aaron Durbine3834422013-03-28 20:48:51 -0500728 }
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500729
Gabe Black7756fe72014-02-25 01:40:34 -0800730 /* Update the solution. */
731 sol->num_used = var_state.mtrr_index;
732}
733
Aaron Durbind9762f72017-06-12 12:48:38 -0500734static int commit_var_mtrrs(const struct var_mtrr_solution *sol)
Gabe Black7756fe72014-02-25 01:40:34 -0800735{
736 int i;
737
Aaron Durbind9762f72017-06-12 12:48:38 -0500738 if (sol->num_used > total_mtrrs) {
739 printk(BIOS_WARNING, "Not enough MTRRs: %d vs %d\n",
740 sol->num_used, total_mtrrs);
741 return -1;
742 }
743
Isaac Christensen81f90c52014-09-24 14:59:32 -0600744 /* Write out the variable MTRRs. */
Gabe Black7756fe72014-02-25 01:40:34 -0800745 disable_cache();
746 for (i = 0; i < sol->num_used; i++) {
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700747 wrmsr(MTRR_PHYS_BASE(i), sol->regs[i].base);
748 wrmsr(MTRR_PHYS_MASK(i), sol->regs[i].mask);
Gabe Black7756fe72014-02-25 01:40:34 -0800749 }
750 /* Clear the ones that are unused. */
751 for (; i < total_mtrrs; i++)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500752 clear_var_mtrr(i);
Isaac Christensen81f90c52014-09-24 14:59:32 -0600753 enable_var_mtrr(sol->mtrr_default_type);
Gabe Black7756fe72014-02-25 01:40:34 -0800754 enable_cache();
755
Aaron Durbind9762f72017-06-12 12:48:38 -0500756 return 0;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500757}
758
759void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
760{
Gabe Black7756fe72014-02-25 01:40:34 -0800761 static struct var_mtrr_solution *sol = NULL;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500762 struct memranges *addr_space;
763
764 addr_space = get_physical_address_space();
765
Gabe Black7756fe72014-02-25 01:40:34 -0800766 if (sol == NULL) {
Gabe Black7756fe72014-02-25 01:40:34 -0800767 sol = &mtrr_global_solution;
768 sol->mtrr_default_type =
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500769 calc_var_mtrrs(addr_space, !!above4gb, address_bits);
Gabe Black7756fe72014-02-25 01:40:34 -0800770 prepare_var_mtrrs(addr_space, sol->mtrr_default_type,
771 !!above4gb, address_bits, sol);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000772 }
Stefan Reinauer00093a82011-11-02 16:12:34 -0700773
Gabe Black7756fe72014-02-25 01:40:34 -0800774 commit_var_mtrrs(sol);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000775}
776
Aaron Durbin1ebbb162020-05-28 10:17:34 -0600777static void _x86_setup_mtrrs(unsigned int above4gb)
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000778{
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100779 int address_size;
Aaron Durbine63be892016-03-07 16:05:36 -0600780
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000781 x86_setup_fixed_mtrrs();
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100782 address_size = cpu_phys_address_size();
Aaron Durbine63be892016-03-07 16:05:36 -0600783 printk(BIOS_DEBUG, "CPU physical address size: %d bits\n",
784 address_size);
Aaron Durbin1ebbb162020-05-28 10:17:34 -0600785 x86_setup_var_mtrrs(address_size, above4gb);
786}
787
void x86_setup_mtrrs(void)
{
	/* Cover the entire address space, including above 4GiB. */
	_x86_setup_mtrrs(1);
}
793
Aaron Durbine63be892016-03-07 16:05:36 -0600794void x86_setup_mtrrs_with_detect(void)
795{
796 detect_var_mtrrs();
Aaron Durbin1ebbb162020-05-28 10:17:34 -0600797 /* Always handle addresses above 4GiB. */
798 _x86_setup_mtrrs(1);
799}
800
void x86_setup_mtrrs_with_detect_no_above_4gb(void)
{
	detect_var_mtrrs();
	/* Skip the region above 4GiB. */
	_x86_setup_mtrrs(0);
}
806
Kyösti Mälkki38a8fb02014-06-30 13:48:18 +0300807void x86_mtrr_check(void)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000808{
809 /* Only Pentium Pro and later have MTRR */
810 msr_t msr;
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000811 printk(BIOS_DEBUG, "\nMTRR check\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000812
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700813 msr = rdmsr(MTRR_DEF_TYPE_MSR);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000814
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000815 printk(BIOS_DEBUG, "Fixed MTRRs : ");
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700816 if (msr.lo & MTRR_DEF_TYPE_FIX_EN)
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000817 printk(BIOS_DEBUG, "Enabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000818 else
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000819 printk(BIOS_DEBUG, "Disabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000820
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000821 printk(BIOS_DEBUG, "Variable MTRRs: ");
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700822 if (msr.lo & MTRR_DEF_TYPE_EN)
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000823 printk(BIOS_DEBUG, "Enabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000824 else
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000825 printk(BIOS_DEBUG, "Disabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000826
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000827 printk(BIOS_DEBUG, "\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000828
829 post_code(0x93);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000830}
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600831
832static bool put_back_original_solution;
833
834void mtrr_use_temp_range(uintptr_t begin, size_t size, int type)
835{
836 const struct range_entry *r;
837 const struct memranges *orig;
838 struct var_mtrr_solution sol;
839 struct memranges addr_space;
840 const int above4gb = 1; /* Cover above 4GiB by default. */
841 int address_bits;
842
843 /* Make a copy of the original address space and tweak it with the
844 * provided range. */
845 memranges_init_empty(&addr_space, NULL, 0);
846 orig = get_physical_address_space();
847 memranges_each_entry(r, orig) {
848 unsigned long tag = range_entry_tag(r);
849
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600850 /* Remove any write combining MTRRs from the temporary
851 * solution as it just fragments the address space. */
852 if (tag == MTRR_TYPE_WRCOMB)
853 tag = MTRR_TYPE_UNCACHEABLE;
854
855 memranges_insert(&addr_space, range_entry_base(r),
856 range_entry_size(r), tag);
857 }
858
859 /* Place new range into the address space. */
860 memranges_insert(&addr_space, begin, size, type);
861
862 print_physical_address_space(&addr_space, "TEMPORARY");
863
864 /* Calculate a new solution with the updated address space. */
865 address_bits = cpu_phys_address_size();
866 memset(&sol, 0, sizeof(sol));
867 sol.mtrr_default_type =
868 calc_var_mtrrs(&addr_space, above4gb, address_bits);
869 prepare_var_mtrrs(&addr_space, sol.mtrr_default_type,
870 above4gb, address_bits, &sol);
Aaron Durbind9762f72017-06-12 12:48:38 -0500871
872 if (commit_var_mtrrs(&sol) < 0)
873 printk(BIOS_WARNING, "Unable to insert temporary MTRR range: 0x%016llx - 0x%016llx size 0x%08llx type %d\n",
874 (long long)begin, (long long)begin + size,
875 (long long)size, type);
876 else
877 put_back_original_solution = true;
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600878
879 memranges_teardown(&addr_space);
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600880}
881
882static void remove_temp_solution(void *unused)
883{
884 if (put_back_original_solution)
885 commit_var_mtrrs(&mtrr_global_solution);
886}
887
888BOOT_STATE_INIT_ENTRY(BS_OS_RESUME, BS_ON_ENTRY, remove_temp_solution, NULL);
889BOOT_STATE_INIT_ENTRY(BS_PAYLOAD_LOAD, BS_ON_EXIT, remove_temp_solution, NULL);