/* SPDX-License-Identifier: GPL-2.0-or-later */

/*
 * mtrr.c: setting MTRR to decent values for cache initialization on P6
 * Derived from intel_set_mtrr in intel_subr.c and mtrr.c in linux kernel
 *
 * Reference: Intel Architecture Software Developer's Manual, Volume 3: System
 * Programming
 */

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <bootstate.h>
#include <commonlib/helpers.h>
#include <console/console.h>
#include <device/device.h>
#include <device/pci_ids.h>
#include <cpu/cpu.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/cache.h>
#include <memrange.h>
#include <cpu/amd/mtrr.h>
#include <assert.h>
#if CONFIG(X86_AMD_FIXED_MTRRS)
#define MTRR_FIXED_WRBACK_BITS (MTRR_READ_MEM | MTRR_WRITE_MEM)
#else
#define MTRR_FIXED_WRBACK_BITS 0
#endif

#define MIN_MTRRS	8

/*
 * Static storage size for variable MTRRs. It's sized sufficiently large to
 * handle different types of CPUs. Empirically, more than 16 variable MTRRs
 * have not yet been observed.
 */
#define NUM_MTRR_STATIC_STORAGE 16

static int total_mtrrs;

static void detect_var_mtrrs(void)
{
	total_mtrrs = get_var_mtrr_count();

	if (total_mtrrs > NUM_MTRR_STATIC_STORAGE) {
		printk(BIOS_WARNING,
			"MTRRs detected (%d) > NUM_MTRR_STATIC_STORAGE (%d)\n",
			total_mtrrs, NUM_MTRR_STATIC_STORAGE);
		total_mtrrs = NUM_MTRR_STATIC_STORAGE;
	}
}

void enable_fixed_mtrr(void)
{
	msr_t msr;

	msr = rdmsr(MTRR_DEF_TYPE_MSR);
	msr.lo |= MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN;
	wrmsr(MTRR_DEF_TYPE_MSR, msr);
}

void fixed_mtrrs_expose_amd_rwdram(void)
{
	msr_t syscfg;

	if (!CONFIG(X86_AMD_FIXED_MTRRS))
		return;

	syscfg = rdmsr(SYSCFG_MSR);
	syscfg.lo |= SYSCFG_MSR_MtrrFixDramModEn;
	wrmsr(SYSCFG_MSR, syscfg);
}

void fixed_mtrrs_hide_amd_rwdram(void)
{
	msr_t syscfg;

	if (!CONFIG(X86_AMD_FIXED_MTRRS))
		return;

	syscfg = rdmsr(SYSCFG_MSR);
	syscfg.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
	wrmsr(SYSCFG_MSR, syscfg);
}

static void enable_var_mtrr(unsigned char deftype)
{
	msr_t msr;

	msr = rdmsr(MTRR_DEF_TYPE_MSR);
	msr.lo &= ~0xff;
	msr.lo |= MTRR_DEF_TYPE_EN | deftype;
	wrmsr(MTRR_DEF_TYPE_MSR, msr);
}

#define MTRR_VERBOSE_LEVEL BIOS_NEVER

/* MTRRs are at a 4KiB granularity. */
#define RANGE_SHIFT 12
#define ADDR_SHIFT_TO_RANGE_SHIFT(x) \
	(((x) > RANGE_SHIFT) ? ((x) - RANGE_SHIFT) : RANGE_SHIFT)
#define PHYS_TO_RANGE_ADDR(x) ((x) >> RANGE_SHIFT)
#define RANGE_TO_PHYS_ADDR(x) (((resource_t)(x)) << RANGE_SHIFT)
#define NUM_FIXED_MTRRS (NUM_FIXED_RANGES / RANGES_PER_FIXED_MTRR)

/* Helpful constants. */
#define RANGE_1MB PHYS_TO_RANGE_ADDR(1ULL << 20)
#define RANGE_4GB (1ULL << (ADDR_SHIFT_TO_RANGE_SHIFT(32)))
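/*
 * Worked example (illustrative): with RANGE_SHIFT == 12, range addresses
 * count 4KiB pages, so PHYS_TO_RANGE_ADDR(0x100000) == 0x100 (1MiB is 256
 * pages) and RANGE_4GB == 1 << 20 (the 32-bit space holds 2^20 pages).
 */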

#define MTRR_ALGO_SHIFT (8)
#define MTRR_TAG_MASK ((1 << MTRR_ALGO_SHIFT) - 1)

static inline uint64_t range_entry_base_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_base(r));
}

static inline uint64_t range_entry_end_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_end(r));
}

static inline int range_entry_mtrr_type(struct range_entry *r)
{
	return range_entry_tag(r) & MTRR_TAG_MASK;
}

static int filter_vga_wrcomb(struct device *dev, struct resource *res)
{
	/* Only handle PCI devices. */
	if (dev->path.type != DEVICE_PATH_PCI)
		return 0;

	/* Only handle VGA class devices. */
	if (((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA))
		return 0;

	/* Add resource as write-combining in the address space. */
	return 1;
}

static void print_physical_address_space(const struct memranges *addr_space,
					 const char *identifier)
{
	const struct range_entry *r;

	if (identifier)
		printk(BIOS_DEBUG, "MTRR: %s Physical address space:\n",
			identifier);
	else
		printk(BIOS_DEBUG, "MTRR: Physical address space:\n");

	memranges_each_entry(r, addr_space)
		printk(BIOS_DEBUG,
			"0x%016llx - 0x%016llx size 0x%08llx type %ld\n",
			range_entry_base(r), range_entry_end(r) - 1,
			range_entry_size(r), range_entry_tag(r));
}

static struct memranges *get_physical_address_space(void)
{
	static struct memranges *addr_space;
	static struct memranges addr_space_storage;

	/* In order to handle some chipsets not being able to pre-determine
	 * uncacheable ranges, such as graphics memory, at resource insertion
	 * time, remove uncacheable regions from the cacheable ones. */
	if (addr_space == NULL) {
		unsigned long mask;
		unsigned long match;

		addr_space = &addr_space_storage;

		mask = IORESOURCE_CACHEABLE;
		/* Collect cacheable and uncacheable address ranges. The
		 * uncacheable regions take precedence over the cacheable
		 * regions. */
		memranges_init(addr_space, mask, mask, MTRR_TYPE_WRBACK);
		memranges_add_resources(addr_space, mask, 0,
					MTRR_TYPE_UNCACHEABLE);

		/* Handle any write combining resources. Only prefetchable
		 * resources are appropriate for this MTRR type. */
		match = IORESOURCE_PREFETCH;
		mask |= match;
		memranges_add_resources_filter(addr_space, mask, match,
				MTRR_TYPE_WRCOMB, filter_vga_wrcomb);

		/* The address space below 4GiB is special. It needs to be
		 * covered entirely by range entries so that MTRR calculations
		 * can be properly done for the full 32-bit address space.
		 * Therefore, ensure holes are filled up to 4GiB as
		 * uncacheable. */
		memranges_fill_holes_up_to(addr_space,
					   RANGE_TO_PHYS_ADDR(RANGE_4GB),
					   MTRR_TYPE_UNCACHEABLE);

		print_physical_address_space(addr_space, NULL);
	}

	return addr_space;
}

/* Fixed MTRR descriptor. This structure defines the step size and the begin
 * and end (exclusive) addresses covered by a set of fixed MTRR MSRs.
 * It also describes the offset in byte intervals to store the calculated MTRR
 * type in an array. */
struct fixed_mtrr_desc {
	uint32_t begin;
	uint32_t end;
	uint32_t step;
	int range_index;
	int msr_index_base;
};

/* Shared MTRR calculations. Can be reused by APs. */
static uint8_t fixed_mtrr_types[NUM_FIXED_RANGES];

/* Fixed MTRR descriptors. */
static const struct fixed_mtrr_desc fixed_mtrr_desc[] = {
	{ PHYS_TO_RANGE_ADDR(0x000000), PHYS_TO_RANGE_ADDR(0x080000),
	  PHYS_TO_RANGE_ADDR(64 * 1024), 0, MTRR_FIX_64K_00000 },
	{ PHYS_TO_RANGE_ADDR(0x080000), PHYS_TO_RANGE_ADDR(0x0C0000),
	  PHYS_TO_RANGE_ADDR(16 * 1024), 8, MTRR_FIX_16K_80000 },
	{ PHYS_TO_RANGE_ADDR(0x0C0000), PHYS_TO_RANGE_ADDR(0x100000),
	  PHYS_TO_RANGE_ADDR(4 * 1024), 24, MTRR_FIX_4K_C0000 },
};
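/*
 * Layout implied by the descriptors above: 8 x 64KiB ranges cover 0-512KiB,
 * 16 x 16KiB ranges cover 512-768KiB and 64 x 4KiB ranges cover
 * 768KiB-1MiB. That is 88 fixed ranges in total, packed eight per MSR into
 * 11 fixed-range MTRR MSRs.
 */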

static void calc_fixed_mtrrs(void)
{
	static int fixed_mtrr_types_initialized;
	struct memranges *phys_addr_space;
	struct range_entry *r;
	const struct fixed_mtrr_desc *desc;
	const struct fixed_mtrr_desc *last_desc;
	uint32_t begin;
	uint32_t end;
	int type_index;

	if (fixed_mtrr_types_initialized)
		return;

	phys_addr_space = get_physical_address_space();

	/* Set all fixed ranges to uncacheable first. */
	memset(&fixed_mtrr_types[0], MTRR_TYPE_UNCACHEABLE, NUM_FIXED_RANGES);

	desc = &fixed_mtrr_desc[0];
	last_desc = &fixed_mtrr_desc[ARRAY_SIZE(fixed_mtrr_desc) - 1];

	memranges_each_entry(r, phys_addr_space) {
		begin = range_entry_base_mtrr_addr(r);
		end = range_entry_end_mtrr_addr(r);

		if (begin >= last_desc->end)
			break;

		if (end > last_desc->end)
			end = last_desc->end;

		/* Get to the correct fixed mtrr descriptor. */
		while (begin >= desc->end)
			desc++;

		type_index = desc->range_index;
		type_index += (begin - desc->begin) / desc->step;

		while (begin != end) {
			unsigned char type;

			type = range_entry_tag(r);
			printk(MTRR_VERBOSE_LEVEL,
				"MTRR addr 0x%x-0x%x set to %d type @ %d\n",
				begin, begin + desc->step - 1, type, type_index);
			if (type == MTRR_TYPE_WRBACK)
				type |= MTRR_FIXED_WRBACK_BITS;
			fixed_mtrr_types[type_index] = type;
			type_index++;
			begin += desc->step;
			if (begin == desc->end)
				desc++;
		}
	}
	fixed_mtrr_types_initialized = 1;
}

static void commit_fixed_mtrrs(void)
{
	int i;
	int j;
	int msr_num;
	int type_index;
	/* 8 ranges per msr. */
	msr_t fixed_msrs[NUM_FIXED_MTRRS];
	unsigned long msr_index[NUM_FIXED_MTRRS];

	fixed_mtrrs_expose_amd_rwdram();

	memset(&fixed_msrs, 0, sizeof(fixed_msrs));

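	/*
	 * Each fixed-range MSR packs eight one-byte memory types, one per
	 * covered sub-range: the four lower-addressed sub-ranges land in
	 * .lo and the four higher-addressed ones in .hi, as filled in below.
	 */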
	msr_num = 0;
	type_index = 0;
	for (i = 0; i < ARRAY_SIZE(fixed_mtrr_desc); i++) {
		const struct fixed_mtrr_desc *desc;
		int num_ranges;

		desc = &fixed_mtrr_desc[i];
		num_ranges = (desc->end - desc->begin) / desc->step;
		for (j = 0; j < num_ranges; j += RANGES_PER_FIXED_MTRR) {
			msr_index[msr_num] = desc->msr_index_base +
				(j / RANGES_PER_FIXED_MTRR);
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 0;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 8;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 16;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 24;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 0;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 8;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 16;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 24;
			msr_num++;
		}
	}

	/* Ensure that both arrays were fully initialized */
	ASSERT(msr_num == NUM_FIXED_MTRRS)

	for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
		printk(BIOS_DEBUG, "MTRR: Fixed MSR 0x%lx 0x%08x%08x\n",
		       msr_index[i], fixed_msrs[i].hi, fixed_msrs[i].lo);

	disable_cache();
	for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
		wrmsr(msr_index[i], fixed_msrs[i]);
	enable_cache();
	fixed_mtrrs_hide_amd_rwdram();
}

void x86_setup_fixed_mtrrs_no_enable(void)
{
	calc_fixed_mtrrs();
	commit_fixed_mtrrs();
}

void x86_setup_fixed_mtrrs(void)
{
	x86_setup_fixed_mtrrs_no_enable();

	printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
	enable_fixed_mtrr();
}

struct var_mtrr_regs {
	msr_t base;
	msr_t mask;
};

struct var_mtrr_solution {
	int mtrr_default_type;
	int num_used;
	struct var_mtrr_regs regs[NUM_MTRR_STATIC_STORAGE];
};

/* Global storage for variable MTRR solution. */
static struct var_mtrr_solution mtrr_global_solution;

struct var_mtrr_state {
	struct memranges *addr_space;
	int above4gb;
	int address_bits;
	int prepare_msrs;
	int mtrr_index;
	int def_mtrr_type;
	struct var_mtrr_regs *regs;
};

static void clear_var_mtrr(int index)
{
	msr_t msr = { .lo = 0, .hi = 0 };

	wrmsr(MTRR_PHYS_BASE(index), msr);
	wrmsr(MTRR_PHYS_MASK(index), msr);
}

static int get_os_reserved_mtrrs(void)
{
	return CONFIG(RESERVE_MTRRS_FOR_OS) ? 2 : 0;
}

static void prep_var_mtrr(struct var_mtrr_state *var_state,
			  uint64_t base, uint64_t size, int mtrr_type)
{
	struct var_mtrr_regs *regs;
	resource_t rbase;
	resource_t rsize;
	resource_t mask;

	if (var_state->mtrr_index >= total_mtrrs) {
		printk(BIOS_ERR, "Not enough MTRRs available! MTRR index is %d with %d MTRRs in total.\n",
		       var_state->mtrr_index, total_mtrrs);
		return;
	}

	/*
	 * If desired, we try to save 2 variable MTRRs for the OS to use.
	 * However, it is more important to map the full address space
	 * properly.
	 */
	if (var_state->mtrr_index >= total_mtrrs - get_os_reserved_mtrrs())
		printk(BIOS_WARNING, "Taking a reserved OS MTRR.\n");

	rbase = base;
	rsize = size;

	rbase = RANGE_TO_PHYS_ADDR(rbase);
	rsize = RANGE_TO_PHYS_ADDR(rsize);
	rsize = -rsize;

	mask = (1ULL << var_state->address_bits) - 1;
	rsize = rsize & mask;

	printk(BIOS_DEBUG, "MTRR: %d base 0x%016llx mask 0x%016llx type %d\n",
	       var_state->mtrr_index, rbase, rsize, mtrr_type);

	regs = &var_state->regs[var_state->mtrr_index];

	regs->base.lo = rbase;
	regs->base.lo |= mtrr_type;
	regs->base.hi = rbase >> 32;

	regs->mask.lo = rsize;
	regs->mask.lo |= MTRR_PHYS_MASK_VALID;
	regs->mask.hi = rsize >> 32;
}

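/*
 * Illustrative mask math (assuming 36 physical address bits): for a 256MiB
 * range at physical base 0xd0000000, rsize = -0x10000000 masked to 36 bits
 * is 0xff0000000, so the programmed mask matches exactly those addresses
 * where (addr & 0xff0000000) == 0xd0000000.
 */
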
/*
 * fls64: find least significant bit set in a 64-bit word
 * As samples, fls64(0x0) = 64; fls64(0x4400) = 10;
 * fls64(0x40400000000) = 34.
 */
static uint32_t fls64(uint64_t x)
{
	uint32_t lo = (uint32_t)x;
	if (lo)
		return fls(lo);
	uint32_t hi = x >> 32;
	return fls(hi) + 32;
}

/*
 * fms64: find most significant bit set in a 64-bit word
 * As samples, fms64(0x0) = 0; fms64(0x4400) = 14;
 * fms64(0x40400000000) = 42.
 */
static uint32_t fms64(uint64_t x)
{
	uint32_t hi = (uint32_t)(x >> 32);
	if (!hi)
		return fms((uint32_t)x);
	return fms(hi) + 32;
}

static void calc_var_mtrr_range(struct var_mtrr_state *var_state,
				uint64_t base, uint64_t size, int mtrr_type)
{
	while (size != 0) {
		uint32_t addr_lsb;
		uint32_t size_msb;
		uint64_t mtrr_size;

		addr_lsb = fls64(base);
		size_msb = fms64(size);

		/* All MTRR entries need to have their base aligned to the mask
		 * size. The maximum size is calculated as a function of the
		 * lowest bit set in the base and the highest bit set in the
		 * size. */
		if (addr_lsb > size_msb)
			mtrr_size = 1ULL << size_msb;
		else
			mtrr_size = 1ULL << addr_lsb;

		if (var_state->prepare_msrs)
			prep_var_mtrr(var_state, base, mtrr_size, mtrr_type);

		size -= mtrr_size;
		base += mtrr_size;
		var_state->mtrr_index++;
	}
}

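/*
 * Worked example (illustrative): a request for base 0 and size 0xc8000000
 * (3.125GiB) splits into aligned power-of-two chunks 2GiB @ 0, 1GiB @
 * 0x80000000 and 128MiB @ 0xc0000000, consuming three variable MTRRs.
 */
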
static uint64_t optimize_var_mtrr_hole(const uint64_t base,
				       const uint64_t hole,
				       const uint64_t limit,
				       const int carve_hole)
{
	/*
	 * With default type UC, we can potentially optimize a WB
	 * range with unaligned upper end, by aligning it up and
	 * carving the added "hole" out again.
	 *
	 * To optimize the upper end of the hole, we will test
	 * how many MTRRs calc_var_mtrr_range() will spend for any
	 * alignment of the hole's upper end.
	 *
	 * We take four parameters: the lower end of the WB range
	 * `base`, the upper end of the WB range as start of the `hole`,
	 * a `limit` for how far we may align the upper end of the hole
	 * up, and a flag `carve_hole` whether we should count MTRRs
	 * for carving the hole out. We return the optimal upper end
	 * for the hole (which may be the same as the end of the WB
	 * range in case we don't gain anything by aligning up).
	 */

	const int dont_care = 0;
	struct var_mtrr_state var_state = { 0, };

	unsigned int align, best_count;
	uint32_t best_end = hole;

	/* calculate MTRR count for the WB range alone (w/o a hole) */
	calc_var_mtrr_range(&var_state, base, hole - base, dont_care);
	best_count = var_state.mtrr_index;
	var_state.mtrr_index = 0;

	for (align = fls(hole) + 1; align <= fms(hole); ++align) {
		const uint64_t hole_end = ALIGN_UP((uint64_t)hole, 1 << align);
		if (hole_end > limit)
			break;

		/* calculate MTRR count for this alignment */
		calc_var_mtrr_range(
			&var_state, base, hole_end - base, dont_care);
		if (carve_hole)
			calc_var_mtrr_range(
				&var_state, hole, hole_end - hole, dont_care);

		if (var_state.mtrr_index < best_count) {
			best_count = var_state.mtrr_index;
			best_end = hole_end;
		}
		var_state.mtrr_index = 0;
	}

	return best_end;
}

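/*
 * Illustrative pay-off: a WB range covering 3GiB minus 8MiB with UC as
 * default type takes 8 MTRRs when described directly (2GiB + 512MiB +
 * 256MiB + ... + 8MiB). Aligning its end up to 3GiB and carving the 8MiB
 * hole back out as UC needs only 3: WB 2GiB + WB 1GiB + UC 8MiB.
 */
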
static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
				     struct range_entry *r)
{
	uint64_t a1, a2, b1, b2;
	int mtrr_type, carve_hole;

	/*
	 * Determine MTRRs based on the following algorithm for the given entry:
	 * +------------------+ b2 = ALIGN_UP(end)
	 * | 0 or more bytes  | <-- hole is carved out between b1 and b2
	 * +------------------+ a2 = b1 = original end
	 * |                  |
	 * +------------------+ a1 = begin
	 *
	 * Thus, there are up to 2 sub-ranges to configure variable MTRRs for.
	 */
	mtrr_type = range_entry_mtrr_type(r);

	a1 = range_entry_base_mtrr_addr(r);
	a2 = range_entry_end_mtrr_addr(r);

	/* The end address is within the first 1MiB. The fixed MTRRs take
	 * precedence over the variable ones. Therefore this range
	 * can be ignored. */
	if (a2 <= RANGE_1MB)
		return;

	/* Again, the fixed MTRRs take precedence so the beginning
	 * of the range can be set to 0 if it starts at or below 1MiB. */
	if (a1 <= RANGE_1MB)
		a1 = 0;

	/* If the range starts above 4GiB the processing is done. */
	if (!var_state->above4gb && a1 >= RANGE_4GB)
		return;

	/* Clip the upper address to 4GiB if addresses above 4GiB
	 * are not being processed. */
	if (!var_state->above4gb && a2 > RANGE_4GB)
		a2 = RANGE_4GB;

	b1 = a2;
	b2 = a2;
	carve_hole = 0;

	/* We only consider WB type ranges for hole-carving. */
	if (mtrr_type == MTRR_TYPE_WRBACK) {
		struct range_entry *next;
		uint64_t b2_limit;
		/*
		 * Depending on the type of the next range, there are three
		 * different situations to handle:
		 *
		 * 1. WB range is last in address space:
		 *    Aligning up, up to the next power of 2, may gain us
		 *    something.
		 *
		 * 2. The next range is of type UC:
		 *    We may align up, up to the _end_ of the next range. If
		 *    there is a gap between the current and the next range,
		 *    it would have been covered by the default type UC anyway.
		 *
		 * 3. The next range is not of type UC:
		 *    We may align up, up to the _base_ of the next range. This
		 *    may either be the end of the current range (if the next
		 *    range follows immediately) or the end of the gap between
		 *    the ranges.
		 */
		next = memranges_next_entry(var_state->addr_space, r);
		if (next == NULL) {
			b2_limit = ALIGN_UP((uint64_t)b1, 1 << fms(b1));
			/* If it's the last range above 4GiB, we won't carve
			   the hole out. If an OS wanted to move MMIO there,
			   it would have to override the MTRR setting using
			   PAT just like it would with WB as default type. */
			carve_hole = a1 < RANGE_4GB;
		} else if (range_entry_mtrr_type(next)
			   == MTRR_TYPE_UNCACHEABLE) {
			b2_limit = range_entry_end_mtrr_addr(next);
			carve_hole = 1;
		} else {
			b2_limit = range_entry_base_mtrr_addr(next);
			carve_hole = 1;
		}
		b2 = optimize_var_mtrr_hole(a1, b1, b2_limit, carve_hole);
	}

	calc_var_mtrr_range(var_state, a1, b2 - a1, mtrr_type);
	if (carve_hole && b2 != b1) {
		calc_var_mtrr_range(var_state, b1, b2 - b1,
				    MTRR_TYPE_UNCACHEABLE);
	}
}

static void __calc_var_mtrrs(struct memranges *addr_space,
			     int above4gb, int address_bits,
			     int *num_def_wb_mtrrs, int *num_def_uc_mtrrs)
{
	int wb_deftype_count;
	int uc_deftype_count;
	struct range_entry *r;
	struct var_mtrr_state var_state;

	/* The default MTRR cacheability type is determined by calculating
	 * the number of MTRRs required for each MTRR type as if it was the
	 * default. */
	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	var_state.prepare_msrs = 0;

	wb_deftype_count = 0;
	uc_deftype_count = 0;

	/*
	 * For each range do 2 calculations:
	 *   1. UC as default type with possible holes at top of range.
	 *   2. WB as default.
	 * The lowest count is then used as default after totaling all
	 * MTRRs. UC takes precedence in the MTRR architecture. There-
	 * fore, only holes can be used when the type of the region is
	 * MTRR_TYPE_WRBACK with MTRR_TYPE_UNCACHEABLE as the default
	 * type.
	 */
	memranges_each_entry(r, var_state.addr_space) {
		int mtrr_type;

		mtrr_type = range_entry_mtrr_type(r);

		if (mtrr_type != MTRR_TYPE_UNCACHEABLE) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_UNCACHEABLE;
			calc_var_mtrrs_with_hole(&var_state, r);
			uc_deftype_count += var_state.mtrr_index;
		}

		if (mtrr_type != MTRR_TYPE_WRBACK) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_WRBACK;
			calc_var_mtrrs_with_hole(&var_state, r);
			wb_deftype_count += var_state.mtrr_index;
		}
	}

	*num_def_wb_mtrrs = wb_deftype_count;
	*num_def_uc_mtrrs = uc_deftype_count;
}

static int calc_var_mtrrs(struct memranges *addr_space,
			  int above4gb, int address_bits)
{
	int wb_deftype_count = 0;
	int uc_deftype_count = 0;

	__calc_var_mtrrs(addr_space, above4gb, address_bits, &wb_deftype_count,
			 &uc_deftype_count);

	const int bios_mtrrs = total_mtrrs - get_os_reserved_mtrrs();
	if (wb_deftype_count > bios_mtrrs && uc_deftype_count > bios_mtrrs) {
		printk(BIOS_DEBUG, "MTRR: Removing WRCOMB type. "
		       "WB/UC MTRR counts: %d/%d > %d.\n",
		       wb_deftype_count, uc_deftype_count, bios_mtrrs);
		memranges_update_tag(addr_space, MTRR_TYPE_WRCOMB,
				     MTRR_TYPE_UNCACHEABLE);
		__calc_var_mtrrs(addr_space, above4gb, address_bits,
				 &wb_deftype_count, &uc_deftype_count);
	}

	printk(BIOS_DEBUG, "MTRR: default type WB/UC MTRR counts: %d/%d.\n",
	       wb_deftype_count, uc_deftype_count);

	if (wb_deftype_count < uc_deftype_count) {
		printk(BIOS_DEBUG, "MTRR: WB selected as default type.\n");
		return MTRR_TYPE_WRBACK;
	}
	printk(BIOS_DEBUG, "MTRR: UC selected as default type.\n");
	return MTRR_TYPE_UNCACHEABLE;
}

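/*
 * Example of the trade-off above (hypothetical map): with WB DRAM at
 * 0-2GiB and 4GiB-6GiB and a UC MMIO window at 2GiB-4GiB, a WB default
 * spends variable MTRRs only on the UC window, while a UC default spends
 * them on every WB range; the default type with the smaller total wins.
 */
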
static void prepare_var_mtrrs(struct memranges *addr_space, int def_type,
			      int above4gb, int address_bits,
			      struct var_mtrr_solution *sol)
{
	struct range_entry *r;
	struct var_mtrr_state var_state;

	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	/* Prepare the MSRs. */
	var_state.prepare_msrs = 1;
	var_state.mtrr_index = 0;
	var_state.def_mtrr_type = def_type;
	var_state.regs = &sol->regs[0];

	memranges_each_entry(r, var_state.addr_space) {
		if (range_entry_mtrr_type(r) == def_type)
			continue;
		calc_var_mtrrs_with_hole(&var_state, r);
	}

	/* Update the solution. */
	sol->num_used = var_state.mtrr_index;
}

static int commit_var_mtrrs(const struct var_mtrr_solution *sol)
{
	int i;

	if (sol->num_used > total_mtrrs) {
		printk(BIOS_WARNING, "Not enough MTRRs: %d vs %d\n",
			sol->num_used, total_mtrrs);
		return -1;
	}

	/* Write out the variable MTRRs. */
	disable_cache();
	for (i = 0; i < sol->num_used; i++) {
		wrmsr(MTRR_PHYS_BASE(i), sol->regs[i].base);
		wrmsr(MTRR_PHYS_MASK(i), sol->regs[i].mask);
	}
	/* Clear the ones that are unused. */
	for (; i < total_mtrrs; i++)
		clear_var_mtrr(i);
	enable_var_mtrr(sol->mtrr_default_type);
	enable_cache();

	return 0;
}

void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
{
	static struct var_mtrr_solution *sol = NULL;
	struct memranges *addr_space;

	addr_space = get_physical_address_space();

	if (sol == NULL) {
		sol = &mtrr_global_solution;
		sol->mtrr_default_type =
			calc_var_mtrrs(addr_space, !!above4gb, address_bits);
		prepare_var_mtrrs(addr_space, sol->mtrr_default_type,
				  !!above4gb, address_bits, sol);
	}

	commit_var_mtrrs(sol);
}

static void _x86_setup_mtrrs(unsigned int above4gb)
{
	int address_size;

	x86_setup_fixed_mtrrs();
	address_size = cpu_phys_address_size();
	printk(BIOS_DEBUG, "CPU physical address size: %d bits\n",
		address_size);
	x86_setup_var_mtrrs(address_size, above4gb);
}

void x86_setup_mtrrs(void)
{
	/* Without detect, assume the minimum */
	total_mtrrs = MIN_MTRRS;
	/* Always handle addresses above 4GiB. */
	_x86_setup_mtrrs(1);
}

void x86_setup_mtrrs_with_detect(void)
{
	detect_var_mtrrs();
	/* Always handle addresses above 4GiB. */
	_x86_setup_mtrrs(1);
}

void x86_setup_mtrrs_with_detect_no_above_4gb(void)
{
	detect_var_mtrrs();
	_x86_setup_mtrrs(0);
}

Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000837{
838 /* Only Pentium Pro and later have MTRR */
839 msr_t msr;
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000840 printk(BIOS_DEBUG, "\nMTRR check\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000841
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700842 msr = rdmsr(MTRR_DEF_TYPE_MSR);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000843
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000844 printk(BIOS_DEBUG, "Fixed MTRRs : ");
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700845 if (msr.lo & MTRR_DEF_TYPE_FIX_EN)
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000846 printk(BIOS_DEBUG, "Enabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000847 else
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000848 printk(BIOS_DEBUG, "Disabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000849
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000850 printk(BIOS_DEBUG, "Variable MTRRs: ");
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700851 if (msr.lo & MTRR_DEF_TYPE_EN)
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000852 printk(BIOS_DEBUG, "Enabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000853 else
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000854 printk(BIOS_DEBUG, "Disabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000855
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000856 printk(BIOS_DEBUG, "\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000857
858 post_code(0x93);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000859}
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600860
861static bool put_back_original_solution;
862
863void mtrr_use_temp_range(uintptr_t begin, size_t size, int type)
864{
865 const struct range_entry *r;
866 const struct memranges *orig;
867 struct var_mtrr_solution sol;
868 struct memranges addr_space;
869 const int above4gb = 1; /* Cover above 4GiB by default. */
870 int address_bits;
Arthur Heymans4ed22602022-04-11 18:58:09 +0200871 static struct temp_range {
872 uintptr_t begin;
873 size_t size;
874 int type;
875 } temp_ranges[10];
876
877 if (size == 0)
878 return;
879
880 int i;
881 for (i = 0; i < ARRAY_SIZE(temp_ranges); i++) {
882 if (temp_ranges[i].size == 0) {
883 temp_ranges[i].begin = begin;
884 temp_ranges[i].size = size;
885 temp_ranges[i].type = type;
886 break;
887 }
888 }
889 if (i == ARRAY_SIZE(temp_ranges)) {
890 printk(BIOS_ERR, "Out of temporary ranges for MTRR use\n");
891 return;
892 }
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600893
894 /* Make a copy of the original address space and tweak it with the
895 * provided range. */
896 memranges_init_empty(&addr_space, NULL, 0);
897 orig = get_physical_address_space();
898 memranges_each_entry(r, orig) {
899 unsigned long tag = range_entry_tag(r);
900
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600901 /* Remove any write combining MTRRs from the temporary
902 * solution as it just fragments the address space. */
903 if (tag == MTRR_TYPE_WRCOMB)
904 tag = MTRR_TYPE_UNCACHEABLE;
905
906 memranges_insert(&addr_space, range_entry_base(r),
907 range_entry_size(r), tag);
908 }
909
910 /* Place new range into the address space. */
Arthur Heymans4ed22602022-04-11 18:58:09 +0200911 for (i = 0; i < ARRAY_SIZE(temp_ranges); i++) {
912 if (temp_ranges[i].size != 0)
913 memranges_insert(&addr_space, temp_ranges[i].begin,
914 temp_ranges[i].size, temp_ranges[i].type);
915 }
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600916
917 print_physical_address_space(&addr_space, "TEMPORARY");
918
919 /* Calculate a new solution with the updated address space. */
920 address_bits = cpu_phys_address_size();
921 memset(&sol, 0, sizeof(sol));
922 sol.mtrr_default_type =
923 calc_var_mtrrs(&addr_space, above4gb, address_bits);
924 prepare_var_mtrrs(&addr_space, sol.mtrr_default_type,
925 above4gb, address_bits, &sol);
Aaron Durbind9762f72017-06-12 12:48:38 -0500926
927 if (commit_var_mtrrs(&sol) < 0)
928 printk(BIOS_WARNING, "Unable to insert temporary MTRR range: 0x%016llx - 0x%016llx size 0x%08llx type %d\n",
Werner Zeheaf11c92022-04-11 08:35:06 +0200929 (long long)begin, (long long)begin + size - 1,
Aaron Durbind9762f72017-06-12 12:48:38 -0500930 (long long)size, type);
931 else
Arthur Heymans29aa1e12022-05-30 18:51:45 +0200932 put_back_original_solution = true;
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600933
934 memranges_teardown(&addr_space);
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600935}
936
937static void remove_temp_solution(void *unused)
938{
939 if (put_back_original_solution)
940 commit_var_mtrrs(&mtrr_global_solution);
941}
942
943BOOT_STATE_INIT_ENTRY(BS_OS_RESUME, BS_ON_ENTRY, remove_temp_solution, NULL);
Subrata Banik1f09a2a2022-03-31 00:06:07 +0530944BOOT_STATE_INIT_ENTRY(BS_PAYLOAD_BOOT, BS_ON_ENTRY, remove_temp_solution, NULL);