blob: 185014e7166a67c95b2b13a43a03f0d9d1617337 [file] [log] [blame]
Elyes HAOUAS3a7346c2020-05-07 07:46:17 +02001/* SPDX-License-Identifier: GPL-2.0-or-later */
2
Eric Biedermanfcd5ace2004-10-14 19:29:29 +00003/*
Martin Rothd57ace22019-08-31 10:48:37 -06004 * mtrr.c: setting MTRR to decent values for cache initialization on P6
5 * Derived from intel_set_mtrr in intel_subr.c and mtrr.c in linux kernel
Eric Biedermanfcd5ace2004-10-14 19:29:29 +00006 *
Lee Leahyc5917072017-03-15 16:38:51 -07007 * Reference: Intel Architecture Software Developer's Manual, Volume 3: System
8 * Programming
Eric Biedermanfcd5ace2004-10-14 19:29:29 +00009 */
Yinghai Lu13f1c2a2005-07-08 02:49:49 +000010
Eric Biedermanf8a2ddd2004-10-30 08:05:41 +000011#include <stddef.h>
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050012#include <string.h>
Aaron Durbinbebf6692013-04-24 20:59:43 -050013#include <bootstate.h>
Elyes HAOUASd26844c2019-06-21 07:31:40 +020014#include <commonlib/helpers.h>
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000015#include <console/console.h>
16#include <device/device.h>
Aaron Durbinca4f4b82014-02-08 15:41:52 -060017#include <device/pci_ids.h>
Aaron Durbinebf142a2013-03-29 16:23:23 -050018#include <cpu/cpu.h>
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000019#include <cpu/x86/msr.h>
20#include <cpu/x86/mtrr.h>
21#include <cpu/x86/cache.h>
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050022#include <memrange.h>
Aaron Durbin57686f82013-03-20 15:50:59 -050023#include <cpu/amd/mtrr.h>
Richard Spiegelb28025a2019-02-20 11:00:19 -070024#include <assert.h>
Julius Wernercd49cce2019-03-05 16:53:33 -080025#if CONFIG(X86_AMD_FIXED_MTRRS)
Aaron Durbin57686f82013-03-20 15:50:59 -050026#define MTRR_FIXED_WRBACK_BITS (MTRR_READ_MEM | MTRR_WRITE_MEM)
27#else
28#define MTRR_FIXED_WRBACK_BITS 0
29#endif
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000030
Tim Wawrzynczak6fcc46d2021-04-19 13:47:36 -060031#define MIN_MTRRS 8
32
Gabe Black7756fe72014-02-25 01:40:34 -080033/*
Isaac Christensen81f90c52014-09-24 14:59:32 -060034 * Static storage size for variable MTRRs. It's sized sufficiently large to
35 * handle different types of CPUs. Empirically, 16 variable MTRRs has not
Gabe Black7756fe72014-02-25 01:40:34 -080036 * yet been observed.
37 */
38#define NUM_MTRR_STATIC_STORAGE 16
Stefan Reinauerc00dfbc2012-04-03 16:24:37 -070039
Tim Wawrzynczak6fcc46d2021-04-19 13:47:36 -060040static int total_mtrrs;
Stefan Reinauerc00dfbc2012-04-03 16:24:37 -070041
42static void detect_var_mtrrs(void)
43{
44 msr_t msr;
45
Alexandru Gagniuc86091f92015-09-30 20:23:09 -070046 msr = rdmsr(MTRR_CAP_MSR);
Stefan Reinauerc00dfbc2012-04-03 16:24:37 -070047
48 total_mtrrs = msr.lo & 0xff;
Gabe Black7756fe72014-02-25 01:40:34 -080049
50 if (total_mtrrs > NUM_MTRR_STATIC_STORAGE) {
51 printk(BIOS_WARNING,
52 "MTRRs detected (%d) > NUM_MTRR_STATIC_STORAGE (%d)\n",
53 total_mtrrs, NUM_MTRR_STATIC_STORAGE);
54 total_mtrrs = NUM_MTRR_STATIC_STORAGE;
55 }
Stefan Reinauerc00dfbc2012-04-03 16:24:37 -070056}
57
Yinghai Lu13f1c2a2005-07-08 02:49:49 +000058void enable_fixed_mtrr(void)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000059{
60 msr_t msr;
61
Alexandru Gagniuc86091f92015-09-30 20:23:09 -070062 msr = rdmsr(MTRR_DEF_TYPE_MSR);
63 msr.lo |= MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN;
64 wrmsr(MTRR_DEF_TYPE_MSR, msr);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000065}
66
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -060067void fixed_mtrrs_expose_amd_rwdram(void)
68{
69 msr_t syscfg;
70
Julius Wernercd49cce2019-03-05 16:53:33 -080071 if (!CONFIG(X86_AMD_FIXED_MTRRS))
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -060072 return;
73
74 syscfg = rdmsr(SYSCFG_MSR);
75 syscfg.lo |= SYSCFG_MSR_MtrrFixDramModEn;
76 wrmsr(SYSCFG_MSR, syscfg);
77}
78
79void fixed_mtrrs_hide_amd_rwdram(void)
80{
81 msr_t syscfg;
82
Julius Wernercd49cce2019-03-05 16:53:33 -080083 if (!CONFIG(X86_AMD_FIXED_MTRRS))
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -060084 return;
85
86 syscfg = rdmsr(SYSCFG_MSR);
87 syscfg.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
88 wrmsr(SYSCFG_MSR, syscfg);
89}
90
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050091static void enable_var_mtrr(unsigned char deftype)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000092{
93 msr_t msr;
94
Alexandru Gagniuc86091f92015-09-30 20:23:09 -070095 msr = rdmsr(MTRR_DEF_TYPE_MSR);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050096 msr.lo &= ~0xff;
Alexandru Gagniuc86091f92015-09-30 20:23:09 -070097 msr.lo |= MTRR_DEF_TYPE_EN | deftype;
98 wrmsr(MTRR_DEF_TYPE_MSR, msr);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000099}
100
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500101#define MTRR_VERBOSE_LEVEL BIOS_NEVER
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000102
Jonathan Zhang320ad932020-10-14 15:07:51 -0700103/* MTRRs are at a 4KiB granularity. */
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500104#define RANGE_SHIFT 12
105#define ADDR_SHIFT_TO_RANGE_SHIFT(x) \
106 (((x) > RANGE_SHIFT) ? ((x) - RANGE_SHIFT) : RANGE_SHIFT)
107#define PHYS_TO_RANGE_ADDR(x) ((x) >> RANGE_SHIFT)
108#define RANGE_TO_PHYS_ADDR(x) (((resource_t)(x)) << RANGE_SHIFT)
109#define NUM_FIXED_MTRRS (NUM_FIXED_RANGES / RANGES_PER_FIXED_MTRR)
110
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500111/* Helpful constants. */
Jonathan Zhang320ad932020-10-14 15:07:51 -0700112#define RANGE_1MB PHYS_TO_RANGE_ADDR(1ULL << 20)
113#define RANGE_4GB (1ULL << (ADDR_SHIFT_TO_RANGE_SHIFT(32)))
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500114
Aaron Durbine3834422013-03-28 20:48:51 -0500115#define MTRR_ALGO_SHIFT (8)
116#define MTRR_TAG_MASK ((1 << MTRR_ALGO_SHIFT) - 1)
Aaron Durbine3834422013-03-28 20:48:51 -0500117
Jonathan Zhang320ad932020-10-14 15:07:51 -0700118static inline uint64_t range_entry_base_mtrr_addr(struct range_entry *r)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000119{
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500120 return PHYS_TO_RANGE_ADDR(range_entry_base(r));
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000121}
122
Jonathan Zhang320ad932020-10-14 15:07:51 -0700123static inline uint64_t range_entry_end_mtrr_addr(struct range_entry *r)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000124{
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500125 return PHYS_TO_RANGE_ADDR(range_entry_end(r));
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000126}
127
Aaron Durbine3834422013-03-28 20:48:51 -0500128static inline int range_entry_mtrr_type(struct range_entry *r)
129{
130 return range_entry_tag(r) & MTRR_TAG_MASK;
131}
132
Aaron Durbinca4f4b82014-02-08 15:41:52 -0600133static int filter_vga_wrcomb(struct device *dev, struct resource *res)
134{
135 /* Only handle PCI devices. */
136 if (dev->path.type != DEVICE_PATH_PCI)
137 return 0;
138
139 /* Only handle VGA class devices. */
140 if (((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA))
141 return 0;
142
143 /* Add resource as write-combining in the address space. */
144 return 1;
145}
146
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600147static void print_physical_address_space(const struct memranges *addr_space,
148 const char *identifier)
149{
150 const struct range_entry *r;
151
152 if (identifier)
153 printk(BIOS_DEBUG, "MTRR: %s Physical address space:\n",
154 identifier);
155 else
156 printk(BIOS_DEBUG, "MTRR: Physical address space:\n");
157
158 memranges_each_entry(r, addr_space)
159 printk(BIOS_DEBUG,
160 "0x%016llx - 0x%016llx size 0x%08llx type %ld\n",
161 range_entry_base(r), range_entry_end(r),
162 range_entry_size(r), range_entry_tag(r));
163}
164
/*
 * Build (once, lazily) and return the physical address space as a set of
 * tagged range entries, where each tag is the desired MTRR memory type.
 * Subsequent calls return the cached result. Not thread-safe; intended to
 * be populated on the BSP and then reused read-only by APs.
 */
static struct memranges *get_physical_address_space(void)
{
	/* NULL until first call; then points at addr_space_storage. */
	static struct memranges *addr_space;
	static struct memranges addr_space_storage;

	/* In order to handle some chipsets not being able to pre-determine
	 * uncacheable ranges, such as graphics memory, at resource insertion
	 * time remove uncacheable regions from the cacheable ones. */
	if (addr_space == NULL) {
		unsigned long mask;
		unsigned long match;

		addr_space = &addr_space_storage;

		mask = IORESOURCE_CACHEABLE;
		/* Collect cacheable and uncacheable address ranges. The
		 * uncacheable regions take precedence over the cacheable
		 * regions. */
		memranges_init(addr_space, mask, mask, MTRR_TYPE_WRBACK);
		memranges_add_resources(addr_space, mask, 0,
					MTRR_TYPE_UNCACHEABLE);

		/* Handle any write combining resources. Only prefetchable
		 * resources are appropriate for this MTRR type. */
		match = IORESOURCE_PREFETCH;
		mask |= match;
		/* filter_vga_wrcomb() narrows this further to PCI VGA
		 * devices only. */
		memranges_add_resources_filter(addr_space, mask, match,
					MTRR_TYPE_WRCOMB, filter_vga_wrcomb);

		/* The address space below 4GiB is special. It needs to be
		 * covered entirely by range entries so that MTRR calculations
		 * can be properly done for the full 32-bit address space.
		 * Therefore, ensure holes are filled up to 4GiB as
		 * uncacheable */
		memranges_fill_holes_up_to(addr_space,
					RANGE_TO_PHYS_ADDR(RANGE_4GB),
					MTRR_TYPE_UNCACHEABLE);

		print_physical_address_space(addr_space, NULL);
	}

	return addr_space;
}
208
/* Fixed MTRR descriptor. This structure defines the step size and begin
 * and end (exclusive) address covered by a set of fixed MTRR MSRs.
 * It also describes the offset in byte intervals to store the calculated MTRR
 * type in an array. */
struct fixed_mtrr_desc {
	uint32_t begin;		/* First range-unit address covered. */
	uint32_t end;		/* One past the last covered address. */
	uint32_t step;		/* Granularity of each sub-range, in range units. */
	int range_index;	/* Starting index into fixed_mtrr_types[]. */
	int msr_index_base;	/* MSR number of the first MSR in this group. */
};

/* Shared MTRR calculations. Can be reused by APs. */
static uint8_t fixed_mtrr_types[NUM_FIXED_RANGES];

/* Fixed MTRR descriptors: 64KiB ranges below 512KiB, 16KiB ranges up to
 * 768KiB, then 4KiB ranges up to 1MiB — matching the fixed MTRR layout. */
static const struct fixed_mtrr_desc fixed_mtrr_desc[] = {
	{ PHYS_TO_RANGE_ADDR(0x000000), PHYS_TO_RANGE_ADDR(0x080000),
	  PHYS_TO_RANGE_ADDR(64 * 1024), 0, MTRR_FIX_64K_00000 },
	{ PHYS_TO_RANGE_ADDR(0x080000), PHYS_TO_RANGE_ADDR(0x0C0000),
	  PHYS_TO_RANGE_ADDR(16 * 1024), 8, MTRR_FIX_16K_80000 },
	{ PHYS_TO_RANGE_ADDR(0x0C0000), PHYS_TO_RANGE_ADDR(0x100000),
	  PHYS_TO_RANGE_ADDR(4 * 1024), 24, MTRR_FIX_4K_C0000 },
};
233
/*
 * Walk the physical address space and fill fixed_mtrr_types[] with the
 * memory type of each fixed-MTRR sub-range below 1MiB. Runs only once;
 * the computed table is later written to MSRs by commit_fixed_mtrrs().
 */
static void calc_fixed_mtrrs(void)
{
	static int fixed_mtrr_types_initialized;
	struct memranges *phys_addr_space;
	struct range_entry *r;
	const struct fixed_mtrr_desc *desc;
	const struct fixed_mtrr_desc *last_desc;
	uint32_t begin;
	uint32_t end;
	int type_index;

	if (fixed_mtrr_types_initialized)
		return;

	phys_addr_space = get_physical_address_space();

	/* Set all fixed ranges to uncacheable first. */
	memset(&fixed_mtrr_types[0], MTRR_TYPE_UNCACHEABLE, NUM_FIXED_RANGES);

	desc = &fixed_mtrr_desc[0];
	last_desc = &fixed_mtrr_desc[ARRAY_SIZE(fixed_mtrr_desc) - 1];

	memranges_each_entry(r, phys_addr_space) {
		begin = range_entry_base_mtrr_addr(r);
		end = range_entry_end_mtrr_addr(r);

		/* Ranges at/above 1MiB don't affect fixed MTRRs; entries
		 * are sorted, so every later one is also out of reach. */
		if (begin >= last_desc->end)
			break;

		/* Clip the entry to the area the fixed MTRRs cover. */
		if (end > last_desc->end)
			end = last_desc->end;

		/* Get to the correct fixed mtrr descriptor. */
		while (begin >= desc->end)
			desc++;

		type_index = desc->range_index;
		type_index += (begin - desc->begin) / desc->step;

		/* Record the type once per `step`-sized sub-range,
		 * advancing to the next descriptor at each boundary. */
		while (begin != end) {
			unsigned char type;

			type = range_entry_tag(r);
			printk(MTRR_VERBOSE_LEVEL,
				"MTRR addr 0x%x-0x%x set to %d type @ %d\n",
				begin, begin + desc->step, type, type_index);
			if (type == MTRR_TYPE_WRBACK)
				type |= MTRR_FIXED_WRBACK_BITS;
			fixed_mtrr_types[type_index] = type;
			type_index++;
			begin += desc->step;
			if (begin == desc->end)
				desc++;
		}
	}
	fixed_mtrr_types_initialized = 1;
}
291
292static void commit_fixed_mtrrs(void)
293{
294 int i;
295 int j;
296 int msr_num;
297 int type_index;
298 /* 8 ranges per msr. */
299 msr_t fixed_msrs[NUM_FIXED_MTRRS];
300 unsigned long msr_index[NUM_FIXED_MTRRS];
301
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -0600302 fixed_mtrrs_expose_amd_rwdram();
303
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500304 memset(&fixed_msrs, 0, sizeof(fixed_msrs));
305
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500306 msr_num = 0;
307 type_index = 0;
308 for (i = 0; i < ARRAY_SIZE(fixed_mtrr_desc); i++) {
309 const struct fixed_mtrr_desc *desc;
310 int num_ranges;
311
312 desc = &fixed_mtrr_desc[i];
313 num_ranges = (desc->end - desc->begin) / desc->step;
314 for (j = 0; j < num_ranges; j += RANGES_PER_FIXED_MTRR) {
315 msr_index[msr_num] = desc->msr_index_base +
316 (j / RANGES_PER_FIXED_MTRR);
317 fixed_msrs[msr_num].lo |=
318 fixed_mtrr_types[type_index++] << 0;
319 fixed_msrs[msr_num].lo |=
320 fixed_mtrr_types[type_index++] << 8;
321 fixed_msrs[msr_num].lo |=
322 fixed_mtrr_types[type_index++] << 16;
323 fixed_msrs[msr_num].lo |=
324 fixed_mtrr_types[type_index++] << 24;
325 fixed_msrs[msr_num].hi |=
326 fixed_mtrr_types[type_index++] << 0;
327 fixed_msrs[msr_num].hi |=
328 fixed_mtrr_types[type_index++] << 8;
329 fixed_msrs[msr_num].hi |=
330 fixed_mtrr_types[type_index++] << 16;
331 fixed_msrs[msr_num].hi |=
332 fixed_mtrr_types[type_index++] << 24;
333 msr_num++;
334 }
335 }
336
Jacob Garber5b922722019-05-28 11:47:49 -0600337 /* Ensure that both arrays were fully initialized */
338 ASSERT(msr_num == NUM_FIXED_MTRRS)
339
Gabe Black7756fe72014-02-25 01:40:34 -0800340 for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500341 printk(BIOS_DEBUG, "MTRR: Fixed MSR 0x%lx 0x%08x%08x\n",
342 msr_index[i], fixed_msrs[i].hi, fixed_msrs[i].lo);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500343
Gabe Black7756fe72014-02-25 01:40:34 -0800344 disable_cache();
345 for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
346 wrmsr(msr_index[i], fixed_msrs[i]);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500347 enable_cache();
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -0600348 fixed_mtrrs_hide_amd_rwdram();
349
Eric Biedermanf8a2ddd2004-10-30 08:05:41 +0000350}
351
/* Compute and program the fixed MTRRs without setting the enable bits in
 * MTRR_DEF_TYPE_MSR; callers enable them separately when ready. */
void x86_setup_fixed_mtrrs_no_enable(void)
{
	calc_fixed_mtrrs();
	commit_fixed_mtrrs();
}
Stefan Reinauer7f86ed12009-02-12 16:02:16 +0000357
/* Compute, program, and enable the fixed MTRRs. */
void x86_setup_fixed_mtrrs(void)
{
	x86_setup_fixed_mtrrs_no_enable();

	printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
	enable_fixed_mtrr();
}
365
/* Image of one variable MTRR pair (PHYS_BASE/PHYS_MASK MSR values). */
struct var_mtrr_regs {
	msr_t base;
	msr_t mask;
};

/* A complete variable-MTRR programming: chosen default type plus the
 * prepared register images for each MTRR actually used. */
struct var_mtrr_solution {
	int mtrr_default_type;
	int num_used;
	struct var_mtrr_regs regs[NUM_MTRR_STATIC_STORAGE];
};

/* Global storage for variable MTRR solution. */
static struct var_mtrr_solution mtrr_global_solution;

/* Working state threaded through the variable-MTRR calculation passes. */
struct var_mtrr_state {
	struct memranges *addr_space;	/* Tagged physical address space. */
	int above4gb;		/* Nonzero: also process ranges above 4GiB. */
	int address_bits;	/* Physical address width of the CPU. */
	int prepare_msrs;	/* Nonzero: fill regs[]; zero: count only. */
	int mtrr_index;		/* Next MTRR slot / running MTRR count. */
	int def_mtrr_type;	/* Default type assumed for this pass. */
	struct var_mtrr_regs *regs;	/* Output array when preparing MSRs. */
};
Aaron Durbin57686f82013-03-20 15:50:59 -0500389
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500390static void clear_var_mtrr(int index)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000391{
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600392 msr_t msr = { .lo = 0, .hi = 0 };
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500393
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600394 wrmsr(MTRR_PHYS_BASE(index), msr);
395 wrmsr(MTRR_PHYS_MASK(index), msr);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500396}
397
Tim Wawrzynczak6fcc46d2021-04-19 13:47:36 -0600398static int get_os_reserved_mtrrs(void)
399{
400 return CONFIG(RESERVE_MTRRS_FOR_OS) ? 2 : 0;
401}
402
/*
 * Fill in the PHYS_BASE/PHYS_MASK register image for the next variable
 * MTRR, describing `size` range-units of `mtrr_type` memory at `base`
 * (both in 4KiB range units). Logs an error and bails if all MTRRs are
 * already consumed. Callers pass power-of-two sizes with base aligned to
 * size (ensured by calc_var_mtrr_range()).
 */
static void prep_var_mtrr(struct var_mtrr_state *var_state,
	uint64_t base, uint64_t size, int mtrr_type)
{
	struct var_mtrr_regs *regs;
	resource_t rbase;
	resource_t rsize;
	resource_t mask;

	if (var_state->mtrr_index >= total_mtrrs) {
		printk(BIOS_ERR, "Not enough MTRRs available! MTRR index is %d with %d MTRRs in total.\n",
			var_state->mtrr_index, total_mtrrs);
		return;
	}

	/*
	 * If desired, 2 variable MTRRs are attempted to be saved for the OS to
	 * use. However, it's more important to try to map the full address
	 * space properly.
	 */
	if (var_state->mtrr_index >= total_mtrrs - get_os_reserved_mtrrs())
		printk(BIOS_WARNING, "Taking a reserved OS MTRR.\n");

	rbase = base;
	rsize = size;

	/* Convert from 4KiB range units back to physical byte addresses. */
	rbase = RANGE_TO_PHYS_ADDR(rbase);
	rsize = RANGE_TO_PHYS_ADDR(rsize);
	/* For a power-of-two size, -size has ones in exactly the bits the
	 * PHYS_MASK must have set; clip to the CPU's physical address width. */
	rsize = -rsize;

	mask = (1ULL << var_state->address_bits) - 1;
	rsize = rsize & mask;

	printk(BIOS_DEBUG, "MTRR: %d base 0x%016llx mask 0x%016llx type %d\n",
		var_state->mtrr_index, rbase, rsize, mtrr_type);

	regs = &var_state->regs[var_state->mtrr_index];

	/* PHYS_BASE: base address with the memory type in the low byte. */
	regs->base.lo = rbase;
	regs->base.lo |= mtrr_type;
	regs->base.hi = rbase >> 32;

	/* PHYS_MASK: mask bits plus the valid bit. */
	regs->mask.lo = rsize;
	regs->mask.lo |= MTRR_PHYS_MASK_VALID;
	regs->mask.hi = rsize >> 32;
}
448
/*
 * fls64: find least significant bit set in a 64-bit word
 * As samples, fls64(0x0) = 64; fls64(0x4400) = 10;
 * fls64(0x40400000000) = 34.
 */
static uint32_t fls64(uint64_t x)
{
	const uint32_t lo = (uint32_t)x;
	const uint32_t hi = (uint32_t)(x >> 32);

	/* If any low-word bit is set, the answer lies entirely in `lo`;
	 * otherwise offset the high-word result by 32. */
	return lo ? fls(lo) : fls(hi) + 32;
}
462
/*
 * fms64: find most significant bit set in a 64-bit word
 * As samples, fms64(0x0) = 0; fms64(0x4400) = 14;
 * fms64(0x40400000000) = 42.
 */
static uint32_t fms64(uint64_t x)
{
	const uint32_t hi = (uint32_t)(x >> 32);

	/* A set bit in the high word always dominates the low word. */
	return hi ? fms(hi) + 32 : fms((uint32_t)x);
}
475
/*
 * Greedily cover [base, base + size) (in 4KiB range units) with
 * power-of-two MTRR chunks, each aligned to its own size. Increments
 * var_state->mtrr_index once per chunk; when prepare_msrs is set, the
 * register images are also filled in via prep_var_mtrr(). With
 * prepare_msrs clear this is a pure counting pass.
 */
static void calc_var_mtrr_range(struct var_mtrr_state *var_state,
	uint64_t base, uint64_t size, int mtrr_type)
{
	while (size != 0) {
		uint32_t addr_lsb;
		uint32_t size_msb;
		uint64_t mtrr_size;

		addr_lsb = fls64(base);
		size_msb = fms64(size);

		/* All MTRR entries need to have their base aligned to the mask
		 * size. The maximum size is calculated by a function of the
		 * min base bit set and maximum size bit set. */
		if (addr_lsb > size_msb)
			mtrr_size = 1ULL << size_msb;
		else
			mtrr_size = 1ULL << addr_lsb;

		if (var_state->prepare_msrs)
			prep_var_mtrr(var_state, base, mtrr_size, mtrr_type);

		size -= mtrr_size;
		base += mtrr_size;
		var_state->mtrr_index++;
	}
}
503
Jonathan Zhang320ad932020-10-14 15:07:51 -0700504static uint64_t optimize_var_mtrr_hole(const uint64_t base,
505 const uint64_t hole,
Nico Huberbd5fb662017-10-07 13:40:19 +0200506 const uint64_t limit,
507 const int carve_hole)
508{
509 /*
510 * With default type UC, we can potentially optimize a WB
511 * range with unaligned upper end, by aligning it up and
512 * carving the added "hole" out again.
513 *
514 * To optimize the upper end of the hole, we will test
515 * how many MTRRs calc_var_mtrr_range() will spend for any
516 * alignment of the hole's upper end.
517 *
518 * We take four parameters, the lower end of the WB range
519 * `base`, upper end of the WB range as start of the `hole`,
520 * a `limit` how far we may align the upper end of the hole
521 * up and a flag `carve_hole` whether we should count MTRRs
522 * for carving the hole out. We return the optimal upper end
523 * for the hole (which may be the same as the end of the WB
524 * range in case we don't gain anything by aligning up).
525 */
526
527 const int dont_care = 0;
528 struct var_mtrr_state var_state = { 0, };
529
530 unsigned int align, best_count;
531 uint32_t best_end = hole;
532
533 /* calculate MTRR count for the WB range alone (w/o a hole) */
534 calc_var_mtrr_range(&var_state, base, hole - base, dont_care);
535 best_count = var_state.mtrr_index;
536 var_state.mtrr_index = 0;
537
538 for (align = fls(hole) + 1; align <= fms(hole); ++align) {
539 const uint64_t hole_end = ALIGN_UP((uint64_t)hole, 1 << align);
540 if (hole_end > limit)
541 break;
542
543 /* calculate MTRR count for this alignment */
544 calc_var_mtrr_range(
545 &var_state, base, hole_end - base, dont_care);
546 if (carve_hole)
547 calc_var_mtrr_range(
548 &var_state, hole, hole_end - hole, dont_care);
549
550 if (var_state.mtrr_index < best_count) {
551 best_count = var_state.mtrr_index;
552 best_end = hole_end;
553 }
554 var_state.mtrr_index = 0;
555 }
556
557 return best_end;
558}
559
Aaron Durbine3834422013-03-28 20:48:51 -0500560static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700561 struct range_entry *r)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500562{
Jonathan Zhang320ad932020-10-14 15:07:51 -0700563 uint64_t a1, a2, b1, b2;
Nico Huberbd5fb662017-10-07 13:40:19 +0200564 int mtrr_type, carve_hole;
Aaron Durbine3834422013-03-28 20:48:51 -0500565
566 /*
Martin Roth4c3ab732013-07-08 16:23:54 -0600567 * Determine MTRRs based on the following algorithm for the given entry:
Aaron Durbine3834422013-03-28 20:48:51 -0500568 * +------------------+ b2 = ALIGN_UP(end)
569 * | 0 or more bytes | <-- hole is carved out between b1 and b2
Nico Huberbd5fb662017-10-07 13:40:19 +0200570 * +------------------+ a2 = b1 = original end
Aaron Durbine3834422013-03-28 20:48:51 -0500571 * | |
572 * +------------------+ a1 = begin
573 *
Nico Huberbd5fb662017-10-07 13:40:19 +0200574 * Thus, there are up to 2 sub-ranges to configure variable MTRRs for.
Aaron Durbine3834422013-03-28 20:48:51 -0500575 */
576 mtrr_type = range_entry_mtrr_type(r);
577
578 a1 = range_entry_base_mtrr_addr(r);
579 a2 = range_entry_end_mtrr_addr(r);
580
Aaron Durbina38677b2016-07-21 14:26:34 -0500581 /* The end address is within the first 1MiB. The fixed MTRRs take
Aaron Durbine3834422013-03-28 20:48:51 -0500582 * precedence over the variable ones. Therefore this range
583 * can be ignored. */
Aaron Durbina38677b2016-07-21 14:26:34 -0500584 if (a2 <= RANGE_1MB)
Aaron Durbine3834422013-03-28 20:48:51 -0500585 return;
586
587 /* Again, the fixed MTRRs take precedence so the beginning
Aaron Durbina38677b2016-07-21 14:26:34 -0500588 * of the range can be set to 0 if it starts at or below 1MiB. */
589 if (a1 <= RANGE_1MB)
Aaron Durbine3834422013-03-28 20:48:51 -0500590 a1 = 0;
591
592 /* If the range starts above 4GiB the processing is done. */
593 if (!var_state->above4gb && a1 >= RANGE_4GB)
594 return;
595
596 /* Clip the upper address to 4GiB if addresses above 4GiB
597 * are not being processed. */
598 if (!var_state->above4gb && a2 > RANGE_4GB)
599 a2 = RANGE_4GB;
600
601 b1 = a2;
Nico Huber64f0bcb2017-10-07 16:37:04 +0200602 b2 = a2;
603 carve_hole = 0;
Aaron Durbin53924242013-03-29 11:48:27 -0500604
Nico Huber64f0bcb2017-10-07 16:37:04 +0200605 /* We only consider WB type ranges for hole-carving. */
606 if (mtrr_type == MTRR_TYPE_WRBACK) {
607 struct range_entry *next;
608 uint64_t b2_limit;
609 /*
610 * Depending on the type of the next range, there are three
611 * different situations to handle:
612 *
613 * 1. WB range is last in address space:
614 * Aligning up, up to the next power of 2, may gain us
615 * something.
616 *
617 * 2. The next range is of type UC:
618 * We may align up, up to the _end_ of the next range. If
619 * there is a gap between the current and the next range,
620 * it would have been covered by the default type UC anyway.
621 *
622 * 3. The next range is not of type UC:
623 * We may align up, up to the _base_ of the next range. This
624 * may either be the end of the current range (if the next
625 * range follows immediately) or the end of the gap between
626 * the ranges.
627 */
628 next = memranges_next_entry(var_state->addr_space, r);
629 if (next == NULL) {
630 b2_limit = ALIGN_UP((uint64_t)b1, 1 << fms(b1));
631 /* If it's the last range above 4GiB, we won't carve
632 the hole out. If an OS wanted to move MMIO there,
633 it would have to override the MTRR setting using
634 PAT just like it would with WB as default type. */
635 carve_hole = a1 < RANGE_4GB;
636 } else if (range_entry_mtrr_type(next)
637 == MTRR_TYPE_UNCACHEABLE) {
638 b2_limit = range_entry_end_mtrr_addr(next);
639 carve_hole = 1;
640 } else {
641 b2_limit = range_entry_base_mtrr_addr(next);
642 carve_hole = 1;
643 }
644 b2 = optimize_var_mtrr_hole(a1, b1, b2_limit, carve_hole);
Aaron Durbin53924242013-03-29 11:48:27 -0500645 }
Aaron Durbine3834422013-03-28 20:48:51 -0500646
647 calc_var_mtrr_range(var_state, a1, b2 - a1, mtrr_type);
Nico Huberbd5fb662017-10-07 13:40:19 +0200648 if (carve_hole && b2 != b1) {
649 calc_var_mtrr_range(var_state, b1, b2 - b1,
650 MTRR_TYPE_UNCACHEABLE);
651 }
Aaron Durbine3834422013-03-28 20:48:51 -0500652}
653
/*
 * Counting pass: determine how many variable MTRRs the given address
 * space would need under each candidate default type (WB and UC).
 * Results are returned through num_def_wb_mtrrs / num_def_uc_mtrrs so
 * the caller can pick the cheaper default.
 */
static void __calc_var_mtrrs(struct memranges *addr_space,
	int above4gb, int address_bits,
	int *num_def_wb_mtrrs, int *num_def_uc_mtrrs)
{
	int wb_deftype_count;
	int uc_deftype_count;
	struct range_entry *r;
	struct var_mtrr_state var_state;

	/* The default MTRR cacheability type is determined by calculating
	 * the number of MTRRs required for each MTRR type as if it was the
	 * default. */
	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	/* Counting only — no MSR images are produced in this pass. */
	var_state.prepare_msrs = 0;

	wb_deftype_count = 0;
	uc_deftype_count = 0;

	/*
	 * For each range do 2 calculations:
	 *   1. UC as default type with possible holes at top of range.
	 *   2. WB as default.
	 * The lowest count is then used as default after totaling all
	 * MTRRs. UC takes precedence in the MTRR architecture. There-
	 * fore, only holes can be used when the type of the region is
	 * MTRR_TYPE_WRBACK with MTRR_TYPE_UNCACHEABLE as the default
	 * type.
	 */
	memranges_each_entry(r, var_state.addr_space) {
		int mtrr_type;

		mtrr_type = range_entry_mtrr_type(r);

		/* A range matching the default type needs no MTRR at all,
		 * so only count it under the other candidate default. */
		if (mtrr_type != MTRR_TYPE_UNCACHEABLE) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_UNCACHEABLE;
			calc_var_mtrrs_with_hole(&var_state, r);
			uc_deftype_count += var_state.mtrr_index;
		}

		if (mtrr_type != MTRR_TYPE_WRBACK) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_WRBACK;
			calc_var_mtrrs_with_hole(&var_state, r);
			wb_deftype_count += var_state.mtrr_index;
		}
	}

	*num_def_wb_mtrrs = wb_deftype_count;
	*num_def_uc_mtrrs = uc_deftype_count;
}
707
708static int calc_var_mtrrs(struct memranges *addr_space,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700709 int above4gb, int address_bits)
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600710{
711 int wb_deftype_count = 0;
712 int uc_deftype_count = 0;
713
714 __calc_var_mtrrs(addr_space, above4gb, address_bits, &wb_deftype_count,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700715 &uc_deftype_count);
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600716
Tim Wawrzynczak6fcc46d2021-04-19 13:47:36 -0600717 const int bios_mtrrs = total_mtrrs - get_os_reserved_mtrrs();
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600718 if (wb_deftype_count > bios_mtrrs && uc_deftype_count > bios_mtrrs) {
719 printk(BIOS_DEBUG, "MTRR: Removing WRCOMB type. "
720 "WB/UC MTRR counts: %d/%d > %d.\n",
721 wb_deftype_count, uc_deftype_count, bios_mtrrs);
722 memranges_update_tag(addr_space, MTRR_TYPE_WRCOMB,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700723 MTRR_TYPE_UNCACHEABLE);
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600724 __calc_var_mtrrs(addr_space, above4gb, address_bits,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700725 &wb_deftype_count, &uc_deftype_count);
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600726 }
Scott Duplichanf3cce2f2010-11-13 19:07:59 +0000727
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500728 printk(BIOS_DEBUG, "MTRR: default type WB/UC MTRR counts: %d/%d.\n",
729 wb_deftype_count, uc_deftype_count);
Kyösti Mälkkiffc1fb32012-07-11 14:40:19 +0300730
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500731 if (wb_deftype_count < uc_deftype_count) {
732 printk(BIOS_DEBUG, "MTRR: WB selected as default type.\n");
733 return MTRR_TYPE_WRBACK;
734 }
735 printk(BIOS_DEBUG, "MTRR: UC selected as default type.\n");
736 return MTRR_TYPE_UNCACHEABLE;
737}
Kyösti Mälkkiffc1fb32012-07-11 14:40:19 +0300738
Gabe Black7756fe72014-02-25 01:40:34 -0800739static void prepare_var_mtrrs(struct memranges *addr_space, int def_type,
740 int above4gb, int address_bits,
741 struct var_mtrr_solution *sol)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500742{
Aaron Durbine3834422013-03-28 20:48:51 -0500743 struct range_entry *r;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500744 struct var_mtrr_state var_state;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500745
746 var_state.addr_space = addr_space;
747 var_state.above4gb = above4gb;
748 var_state.address_bits = address_bits;
Gabe Black7756fe72014-02-25 01:40:34 -0800749 /* Prepare the MSRs. */
750 var_state.prepare_msrs = 1;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500751 var_state.mtrr_index = 0;
752 var_state.def_mtrr_type = def_type;
Gabe Black7756fe72014-02-25 01:40:34 -0800753 var_state.regs = &sol->regs[0];
Aaron Durbine3834422013-03-28 20:48:51 -0500754
755 memranges_each_entry(r, var_state.addr_space) {
756 if (range_entry_mtrr_type(r) == def_type)
757 continue;
Nico Huber64f0bcb2017-10-07 16:37:04 +0200758 calc_var_mtrrs_with_hole(&var_state, r);
Aaron Durbine3834422013-03-28 20:48:51 -0500759 }
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500760
Gabe Black7756fe72014-02-25 01:40:34 -0800761 /* Update the solution. */
762 sol->num_used = var_state.mtrr_index;
763}
764
Aaron Durbind9762f72017-06-12 12:48:38 -0500765static int commit_var_mtrrs(const struct var_mtrr_solution *sol)
Gabe Black7756fe72014-02-25 01:40:34 -0800766{
767 int i;
768
Aaron Durbind9762f72017-06-12 12:48:38 -0500769 if (sol->num_used > total_mtrrs) {
770 printk(BIOS_WARNING, "Not enough MTRRs: %d vs %d\n",
771 sol->num_used, total_mtrrs);
772 return -1;
773 }
774
Isaac Christensen81f90c52014-09-24 14:59:32 -0600775 /* Write out the variable MTRRs. */
Gabe Black7756fe72014-02-25 01:40:34 -0800776 disable_cache();
777 for (i = 0; i < sol->num_used; i++) {
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700778 wrmsr(MTRR_PHYS_BASE(i), sol->regs[i].base);
779 wrmsr(MTRR_PHYS_MASK(i), sol->regs[i].mask);
Gabe Black7756fe72014-02-25 01:40:34 -0800780 }
781 /* Clear the ones that are unused. */
782 for (; i < total_mtrrs; i++)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500783 clear_var_mtrr(i);
Isaac Christensen81f90c52014-09-24 14:59:32 -0600784 enable_var_mtrr(sol->mtrr_default_type);
Gabe Black7756fe72014-02-25 01:40:34 -0800785 enable_cache();
786
Aaron Durbind9762f72017-06-12 12:48:38 -0500787 return 0;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500788}
789
790void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
791{
Gabe Black7756fe72014-02-25 01:40:34 -0800792 static struct var_mtrr_solution *sol = NULL;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500793 struct memranges *addr_space;
794
795 addr_space = get_physical_address_space();
796
Gabe Black7756fe72014-02-25 01:40:34 -0800797 if (sol == NULL) {
Gabe Black7756fe72014-02-25 01:40:34 -0800798 sol = &mtrr_global_solution;
799 sol->mtrr_default_type =
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500800 calc_var_mtrrs(addr_space, !!above4gb, address_bits);
Gabe Black7756fe72014-02-25 01:40:34 -0800801 prepare_var_mtrrs(addr_space, sol->mtrr_default_type,
802 !!above4gb, address_bits, sol);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000803 }
Stefan Reinauer00093a82011-11-02 16:12:34 -0700804
Gabe Black7756fe72014-02-25 01:40:34 -0800805 commit_var_mtrrs(sol);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000806}
807
Aaron Durbin1ebbb162020-05-28 10:17:34 -0600808static void _x86_setup_mtrrs(unsigned int above4gb)
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000809{
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100810 int address_size;
Aaron Durbine63be892016-03-07 16:05:36 -0600811
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000812 x86_setup_fixed_mtrrs();
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100813 address_size = cpu_phys_address_size();
Aaron Durbine63be892016-03-07 16:05:36 -0600814 printk(BIOS_DEBUG, "CPU physical address size: %d bits\n",
815 address_size);
Aaron Durbin1ebbb162020-05-28 10:17:34 -0600816 x86_setup_var_mtrrs(address_size, above4gb);
817}
818
819void x86_setup_mtrrs(void)
820{
Tim Wawrzynczak6fcc46d2021-04-19 13:47:36 -0600821 /* Without detect, assume the minimum */
822 total_mtrrs = MIN_MTRRS;
Aaron Durbine63be892016-03-07 16:05:36 -0600823 /* Always handle addresses above 4GiB. */
Aaron Durbin1ebbb162020-05-28 10:17:34 -0600824 _x86_setup_mtrrs(1);
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000825}
826
Aaron Durbine63be892016-03-07 16:05:36 -0600827void x86_setup_mtrrs_with_detect(void)
828{
829 detect_var_mtrrs();
Aaron Durbin1ebbb162020-05-28 10:17:34 -0600830 /* Always handle addresses above 4GiB. */
831 _x86_setup_mtrrs(1);
832}
833
/* As x86_setup_mtrrs_with_detect(), but leave the space above 4GiB
 * uncovered by variable MTRRs. */
void x86_setup_mtrrs_with_detect_no_above_4gb(void)
{
	detect_var_mtrrs();
	_x86_setup_mtrrs(0);
}
839
Kyösti Mälkki38a8fb02014-06-30 13:48:18 +0300840void x86_mtrr_check(void)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000841{
842 /* Only Pentium Pro and later have MTRR */
843 msr_t msr;
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000844 printk(BIOS_DEBUG, "\nMTRR check\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000845
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700846 msr = rdmsr(MTRR_DEF_TYPE_MSR);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000847
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000848 printk(BIOS_DEBUG, "Fixed MTRRs : ");
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700849 if (msr.lo & MTRR_DEF_TYPE_FIX_EN)
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000850 printk(BIOS_DEBUG, "Enabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000851 else
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000852 printk(BIOS_DEBUG, "Disabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000853
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000854 printk(BIOS_DEBUG, "Variable MTRRs: ");
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700855 if (msr.lo & MTRR_DEF_TYPE_EN)
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000856 printk(BIOS_DEBUG, "Enabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000857 else
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000858 printk(BIOS_DEBUG, "Disabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000859
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000860 printk(BIOS_DEBUG, "\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000861
862 post_code(0x93);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000863}
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600864
865static bool put_back_original_solution;
866
867void mtrr_use_temp_range(uintptr_t begin, size_t size, int type)
868{
869 const struct range_entry *r;
870 const struct memranges *orig;
871 struct var_mtrr_solution sol;
872 struct memranges addr_space;
873 const int above4gb = 1; /* Cover above 4GiB by default. */
874 int address_bits;
875
876 /* Make a copy of the original address space and tweak it with the
877 * provided range. */
878 memranges_init_empty(&addr_space, NULL, 0);
879 orig = get_physical_address_space();
880 memranges_each_entry(r, orig) {
881 unsigned long tag = range_entry_tag(r);
882
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600883 /* Remove any write combining MTRRs from the temporary
884 * solution as it just fragments the address space. */
885 if (tag == MTRR_TYPE_WRCOMB)
886 tag = MTRR_TYPE_UNCACHEABLE;
887
888 memranges_insert(&addr_space, range_entry_base(r),
889 range_entry_size(r), tag);
890 }
891
892 /* Place new range into the address space. */
893 memranges_insert(&addr_space, begin, size, type);
894
895 print_physical_address_space(&addr_space, "TEMPORARY");
896
897 /* Calculate a new solution with the updated address space. */
898 address_bits = cpu_phys_address_size();
899 memset(&sol, 0, sizeof(sol));
900 sol.mtrr_default_type =
901 calc_var_mtrrs(&addr_space, above4gb, address_bits);
902 prepare_var_mtrrs(&addr_space, sol.mtrr_default_type,
903 above4gb, address_bits, &sol);
Aaron Durbind9762f72017-06-12 12:48:38 -0500904
905 if (commit_var_mtrrs(&sol) < 0)
906 printk(BIOS_WARNING, "Unable to insert temporary MTRR range: 0x%016llx - 0x%016llx size 0x%08llx type %d\n",
907 (long long)begin, (long long)begin + size,
908 (long long)size, type);
909 else
910 put_back_original_solution = true;
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600911
912 memranges_teardown(&addr_space);
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600913}
914
915static void remove_temp_solution(void *unused)
916{
917 if (put_back_original_solution)
918 commit_var_mtrrs(&mtrr_global_solution);
919}
920
921BOOT_STATE_INIT_ENTRY(BS_OS_RESUME, BS_ON_ENTRY, remove_temp_solution, NULL);
922BOOT_STATE_INIT_ENTRY(BS_PAYLOAD_LOAD, BS_ON_EXIT, remove_temp_solution, NULL);