blob: a3a89fe0cd9a2312140b95e4392ff6353a9c3e75 [file] [log] [blame]
Elyes HAOUAS3a7346c2020-05-07 07:46:17 +02001/* SPDX-License-Identifier: GPL-2.0-or-later */
2
Eric Biedermanfcd5ace2004-10-14 19:29:29 +00003/*
Martin Rothd57ace22019-08-31 10:48:37 -06004 * mtrr.c: setting MTRR to decent values for cache initialization on P6
5 * Derived from intel_set_mtrr in intel_subr.c and mtrr.c in linux kernel
Eric Biedermanfcd5ace2004-10-14 19:29:29 +00006 *
Lee Leahyc5917072017-03-15 16:38:51 -07007 * Reference: Intel Architecture Software Developer's Manual, Volume 3: System
8 * Programming
Eric Biedermanfcd5ace2004-10-14 19:29:29 +00009 */
Yinghai Lu13f1c2a2005-07-08 02:49:49 +000010
Elyes Haouas69c02b02022-10-02 09:57:08 +020011#include <assert.h>
Aaron Durbinbebf6692013-04-24 20:59:43 -050012#include <bootstate.h>
Elyes HAOUASd26844c2019-06-21 07:31:40 +020013#include <commonlib/helpers.h>
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000014#include <console/console.h>
Elyes Haouas69c02b02022-10-02 09:57:08 +020015#include <cpu/amd/mtrr.h>
Aaron Durbinebf142a2013-03-29 16:23:23 -050016#include <cpu/cpu.h>
Elyes Haouas69c02b02022-10-02 09:57:08 +020017#include <cpu/x86/cache.h>
Felix Held447f5772022-12-14 23:07:52 +010018#include <cpu/x86/lapic.h>
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000019#include <cpu/x86/msr.h>
20#include <cpu/x86/mtrr.h>
Elyes Haouas69c02b02022-10-02 09:57:08 +020021#include <device/device.h>
22#include <device/pci_ids.h>
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050023#include <memrange.h>
Elyes Haouas69c02b02022-10-02 09:57:08 +020024#include <string.h>
25#include <types.h>
26
Julius Wernercd49cce2019-03-05 16:53:33 -080027#if CONFIG(X86_AMD_FIXED_MTRRS)
Aaron Durbin57686f82013-03-20 15:50:59 -050028#define MTRR_FIXED_WRBACK_BITS (MTRR_READ_MEM | MTRR_WRITE_MEM)
29#else
30#define MTRR_FIXED_WRBACK_BITS 0
31#endif
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000032
/* Lower bound of variable MTRRs any supported CPU is expected to have. */
#define MIN_MTRRS	8

/*
 * Static storage size for variable MTRRs. It's sized sufficiently large to
 * handle different types of CPUs. Empirically, 16 variable MTRRs has not
 * yet been observed.
 */
#define NUM_MTRR_STATIC_STORAGE 16

/* Number of variable MTRRs detected on this CPU, clamped to
 * NUM_MTRR_STATIC_STORAGE by detect_var_mtrrs(). */
static int total_mtrrs;
Stefan Reinauerc00dfbc2012-04-03 16:24:37 -070043
44static void detect_var_mtrrs(void)
45{
Subrata Banik7578ea42022-03-30 23:57:37 +053046 total_mtrrs = get_var_mtrr_count();
Gabe Black7756fe72014-02-25 01:40:34 -080047
48 if (total_mtrrs > NUM_MTRR_STATIC_STORAGE) {
49 printk(BIOS_WARNING,
50 "MTRRs detected (%d) > NUM_MTRR_STATIC_STORAGE (%d)\n",
51 total_mtrrs, NUM_MTRR_STATIC_STORAGE);
52 total_mtrrs = NUM_MTRR_STATIC_STORAGE;
53 }
Stefan Reinauerc00dfbc2012-04-03 16:24:37 -070054}
55
/* Turn on both the global MTRR enable and the fixed-range MTRR enable in
 * IA32_MTRR_DEF_TYPE. The default memory type bits are left untouched. */
void enable_fixed_mtrr(void)
{
	msr_t msr;

	msr = rdmsr(MTRR_DEF_TYPE_MSR);
	msr.lo |= MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN;
	wrmsr(MTRR_DEF_TYPE_MSR, msr);
}
64
/* On AMD parts, set SYSCFG.MtrrFixDramModEn so that the RdDram/WrDram
 * attribute bits of the fixed MTRRs become visible and writable.
 * No-op when X86_AMD_FIXED_MTRRS is not selected. */
void fixed_mtrrs_expose_amd_rwdram(void)
{
	msr_t syscfg;

	if (!CONFIG(X86_AMD_FIXED_MTRRS))
		return;

	syscfg = rdmsr(SYSCFG_MSR);
	syscfg.lo |= SYSCFG_MSR_MtrrFixDramModEn;
	wrmsr(SYSCFG_MSR, syscfg);
}
76
/* Counterpart of fixed_mtrrs_expose_amd_rwdram(): clear
 * SYSCFG.MtrrFixDramModEn again once the fixed MTRRs are committed.
 * No-op when X86_AMD_FIXED_MTRRS is not selected. */
void fixed_mtrrs_hide_amd_rwdram(void)
{
	msr_t syscfg;

	if (!CONFIG(X86_AMD_FIXED_MTRRS))
		return;

	syscfg = rdmsr(SYSCFG_MSR);
	syscfg.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
	wrmsr(SYSCFG_MSR, syscfg);
}
88
/* Enable variable MTRRs with the given default memory type written into
 * the low byte of IA32_MTRR_DEF_TYPE. */
static void enable_var_mtrr(unsigned char deftype)
{
	msr_t msr;

	msr = rdmsr(MTRR_DEF_TYPE_MSR);
	/* Clear the previous default type (low 8 bits) before OR-ing in
	 * the new one together with the global enable bit. */
	msr.lo &= ~0xff;
	msr.lo |= MTRR_DEF_TYPE_EN | deftype;
	wrmsr(MTRR_DEF_TYPE_MSR, msr);
}
98
#define MTRR_VERBOSE_LEVEL BIOS_NEVER

/* MTRRs are at a 4KiB granularity. */
#define RANGE_SHIFT 12
/* Convert an address-bit count to a shift within range-address space,
 * never shifting below the 4KiB granularity. */
#define ADDR_SHIFT_TO_RANGE_SHIFT(x) \
	(((x) > RANGE_SHIFT) ? ((x) - RANGE_SHIFT) : RANGE_SHIFT)
/* Physical address <-> range-address (4KiB units) conversions. */
#define PHYS_TO_RANGE_ADDR(x) ((x) >> RANGE_SHIFT)
#define RANGE_TO_PHYS_ADDR(x) (((resource_t)(x)) << RANGE_SHIFT)

/* Helpful constants. */
#define RANGE_1MB PHYS_TO_RANGE_ADDR(1ULL << 20)
#define RANGE_4GB (1ULL << (ADDR_SHIFT_TO_RANGE_SHIFT(32)))

/* The low byte of a range-entry tag holds the MTRR memory type. */
#define MTRR_ALGO_SHIFT (8)
#define MTRR_TAG_MASK ((1 << MTRR_ALGO_SHIFT) - 1)
Aaron Durbine3834422013-03-28 20:48:51 -0500114
/* Base of a range entry converted to range-address (4KiB) units. */
static inline uint64_t range_entry_base_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_base(r));
}
119
/* End (exclusive) of a range entry converted to range-address (4KiB) units. */
static inline uint64_t range_entry_end_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_end(r));
}
124
/* Extract the MTRR memory type stored in the low byte of the entry's tag. */
static inline int range_entry_mtrr_type(struct range_entry *r)
{
	return range_entry_tag(r) & MTRR_TAG_MASK;
}
129
Aaron Durbinca4f4b82014-02-08 15:41:52 -0600130static int filter_vga_wrcomb(struct device *dev, struct resource *res)
131{
132 /* Only handle PCI devices. */
133 if (dev->path.type != DEVICE_PATH_PCI)
134 return 0;
135
136 /* Only handle VGA class devices. */
137 if (((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA))
138 return 0;
139
140 /* Add resource as write-combining in the address space. */
141 return 1;
142}
143
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600144static void print_physical_address_space(const struct memranges *addr_space,
145 const char *identifier)
146{
147 const struct range_entry *r;
148
149 if (identifier)
150 printk(BIOS_DEBUG, "MTRR: %s Physical address space:\n",
151 identifier);
152 else
153 printk(BIOS_DEBUG, "MTRR: Physical address space:\n");
154
155 memranges_each_entry(r, addr_space)
156 printk(BIOS_DEBUG,
157 "0x%016llx - 0x%016llx size 0x%08llx type %ld\n",
Werner Zeheaf11c92022-04-11 08:35:06 +0200158 range_entry_base(r), range_entry_end(r) - 1,
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600159 range_entry_size(r), range_entry_tag(r));
160}
161
/* Build (once) and return the tagged physical address space used for all
 * MTRR calculations. Cacheable, uncacheable and write-combining resources
 * are collected from the device tree; holes below 4GiB are filled as UC. */
static struct memranges *get_physical_address_space(void)
{
	/* NULL until first call; afterwards points at the static storage. */
	static struct memranges *addr_space;
	static struct memranges addr_space_storage;

	/* In order to handle some chipsets not being able to pre-determine
	 * uncacheable ranges, such as graphics memory, at resource insertion
	 * time remove uncacheable regions from the cacheable ones. */
	if (addr_space == NULL) {
		unsigned long mask;
		unsigned long match;

		addr_space = &addr_space_storage;

		mask = IORESOURCE_CACHEABLE;
		/* Collect cacheable and uncacheable address ranges. The
		 * uncacheable regions take precedence over the cacheable
		 * regions. */
		memranges_init(addr_space, mask, mask, MTRR_TYPE_WRBACK);
		memranges_add_resources(addr_space, mask, 0,
					MTRR_TYPE_UNCACHEABLE);

		/* Handle any write combining resources. Only prefetchable
		 * resources are appropriate for this MTRR type. */
		match = IORESOURCE_PREFETCH;
		mask |= match;
		memranges_add_resources_filter(addr_space, mask, match,
				MTRR_TYPE_WRCOMB, filter_vga_wrcomb);

		/* The address space below 4GiB is special. It needs to be
		 * covered entirely by range entries so that MTRR calculations
		 * can be properly done for the full 32-bit address space.
		 * Therefore, ensure holes are filled up to 4GiB as
		 * uncacheable */
		memranges_fill_holes_up_to(addr_space,
					   RANGE_TO_PHYS_ADDR(RANGE_4GB),
					   MTRR_TYPE_UNCACHEABLE);

		print_physical_address_space(addr_space, NULL);
	}

	return addr_space;
}
205
/* Fixed MTRR descriptor. This structure defines the step size and begin
 * and end (exclusive) address covered by a set of fixed MTRR MSRs.
 * It also describes the offset in byte intervals to store the calculated MTRR
 * type in an array. */
struct fixed_mtrr_desc {
	uint32_t begin;		/* Start of covered region (range units). */
	uint32_t end;		/* End, exclusive (range units). */
	uint32_t step;		/* Size of one fixed range (range units). */
	int range_index;	/* First index into fixed_mtrr_types[]. */
	int msr_index_base;	/* MSR number of the first MSR in the set. */
};

/* Shared MTRR calculations. Can be reused by APs. */
static uint8_t fixed_mtrr_types[NUM_FIXED_RANGES];

/* Fixed MTRR descriptors: 64KiB ranges below 512KiB, 16KiB ranges up to
 * 768KiB, 4KiB ranges up to 1MiB — per the x86 fixed MTRR layout. */
static const struct fixed_mtrr_desc fixed_mtrr_desc[] = {
	{ PHYS_TO_RANGE_ADDR(0x000000), PHYS_TO_RANGE_ADDR(0x080000),
	  PHYS_TO_RANGE_ADDR(64 * 1024), 0, MTRR_FIX_64K_00000 },
	{ PHYS_TO_RANGE_ADDR(0x080000), PHYS_TO_RANGE_ADDR(0x0C0000),
	  PHYS_TO_RANGE_ADDR(16 * 1024), 8, MTRR_FIX_16K_80000 },
	{ PHYS_TO_RANGE_ADDR(0x0C0000), PHYS_TO_RANGE_ADDR(0x100000),
	  PHYS_TO_RANGE_ADDR(4 * 1024), 24, MTRR_FIX_4K_C0000 },
};
230
/* Walk the physical address space and fill fixed_mtrr_types[] with the
 * memory type of every fixed range below 1MiB. Computed once; the result
 * is shared so APs can reuse it via commit_fixed_mtrrs(). */
static void calc_fixed_mtrrs(void)
{
	static int fixed_mtrr_types_initialized;
	struct memranges *phys_addr_space;
	struct range_entry *r;
	const struct fixed_mtrr_desc *desc;
	const struct fixed_mtrr_desc *last_desc;
	uint32_t begin;
	uint32_t end;
	int type_index;

	if (fixed_mtrr_types_initialized)
		return;

	phys_addr_space = get_physical_address_space();

	/* Set all fixed ranges to uncacheable first. */
	memset(&fixed_mtrr_types[0], MTRR_TYPE_UNCACHEABLE, NUM_FIXED_RANGES);

	desc = &fixed_mtrr_desc[0];
	last_desc = &fixed_mtrr_desc[ARRAY_SIZE(fixed_mtrr_desc) - 1];

	memranges_each_entry(r, phys_addr_space) {
		begin = range_entry_base_mtrr_addr(r);
		end = range_entry_end_mtrr_addr(r);

		/* Entries are sorted; past 1MiB nothing maps to fixed MTRRs. */
		if (begin >= last_desc->end)
			break;

		/* Clip the entry to the fixed-MTRR covered region. */
		if (end > last_desc->end)
			end = last_desc->end;

		/* Get to the correct fixed mtrr descriptor. */
		while (begin >= desc->end)
			desc++;

		type_index = desc->range_index;
		type_index += (begin - desc->begin) / desc->step;

		/* Stamp the entry's type into each step-sized slot,
		 * advancing to the next descriptor at its boundary. */
		while (begin != end) {
			unsigned char type;

			type = range_entry_tag(r);
			printk(MTRR_VERBOSE_LEVEL,
				"MTRR addr 0x%x-0x%x set to %d type @ %d\n",
				begin, begin + desc->step - 1, type, type_index);
			/* AMD parts need RdDram/WrDram set on WB ranges. */
			if (type == MTRR_TYPE_WRBACK)
				type |= MTRR_FIXED_WRBACK_BITS;
			fixed_mtrr_types[type_index] = type;
			type_index++;
			begin += desc->step;
			if (begin == desc->end)
				desc++;
		}
	}
	fixed_mtrr_types_initialized = 1;
}
288
289static void commit_fixed_mtrrs(void)
290{
291 int i;
292 int j;
293 int msr_num;
294 int type_index;
Felix Heldca261092022-12-15 00:44:54 +0100295 const unsigned int lapic_id = lapicid();
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500296 /* 8 ranges per msr. */
297 msr_t fixed_msrs[NUM_FIXED_MTRRS];
298 unsigned long msr_index[NUM_FIXED_MTRRS];
299
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -0600300 fixed_mtrrs_expose_amd_rwdram();
301
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500302 memset(&fixed_msrs, 0, sizeof(fixed_msrs));
303
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500304 msr_num = 0;
305 type_index = 0;
306 for (i = 0; i < ARRAY_SIZE(fixed_mtrr_desc); i++) {
307 const struct fixed_mtrr_desc *desc;
308 int num_ranges;
309
310 desc = &fixed_mtrr_desc[i];
311 num_ranges = (desc->end - desc->begin) / desc->step;
312 for (j = 0; j < num_ranges; j += RANGES_PER_FIXED_MTRR) {
313 msr_index[msr_num] = desc->msr_index_base +
314 (j / RANGES_PER_FIXED_MTRR);
315 fixed_msrs[msr_num].lo |=
316 fixed_mtrr_types[type_index++] << 0;
317 fixed_msrs[msr_num].lo |=
318 fixed_mtrr_types[type_index++] << 8;
319 fixed_msrs[msr_num].lo |=
320 fixed_mtrr_types[type_index++] << 16;
321 fixed_msrs[msr_num].lo |=
322 fixed_mtrr_types[type_index++] << 24;
323 fixed_msrs[msr_num].hi |=
324 fixed_mtrr_types[type_index++] << 0;
325 fixed_msrs[msr_num].hi |=
326 fixed_mtrr_types[type_index++] << 8;
327 fixed_msrs[msr_num].hi |=
328 fixed_mtrr_types[type_index++] << 16;
329 fixed_msrs[msr_num].hi |=
330 fixed_mtrr_types[type_index++] << 24;
331 msr_num++;
332 }
333 }
334
Jacob Garber5b922722019-05-28 11:47:49 -0600335 /* Ensure that both arrays were fully initialized */
336 ASSERT(msr_num == NUM_FIXED_MTRRS)
337
Gabe Black7756fe72014-02-25 01:40:34 -0800338 for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
Felix Held447f5772022-12-14 23:07:52 +0100339 printk(BIOS_DEBUG, "apic_id 0x%x: MTRR: Fixed MSR 0x%lx 0x%08x%08x\n",
Felix Heldca261092022-12-15 00:44:54 +0100340 lapic_id, msr_index[i], fixed_msrs[i].hi, fixed_msrs[i].lo);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500341
Gabe Black7756fe72014-02-25 01:40:34 -0800342 disable_cache();
343 for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
344 wrmsr(msr_index[i], fixed_msrs[i]);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500345 enable_cache();
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -0600346 fixed_mtrrs_hide_amd_rwdram();
Eric Biedermanf8a2ddd2004-10-30 08:05:41 +0000347}
348
/* Calculate and write the fixed MTRRs without setting the enable bit —
 * callers that batch MSR programming enable them separately. */
static void x86_setup_fixed_mtrrs_no_enable(void)
{
	calc_fixed_mtrrs();
	commit_fixed_mtrrs();
}
Stefan Reinauer7f86ed12009-02-12 16:02:16 +0000354
/* Calculate, write and enable the fixed MTRRs on the calling CPU. */
static void x86_setup_fixed_mtrrs(void)
{
	x86_setup_fixed_mtrrs_no_enable();

	printk(BIOS_SPEW, "apic_id 0x%x call enable_fixed_mtrr()\n", lapicid());
	enable_fixed_mtrr();
}
362
/* Image of one variable MTRR pair (IA32_MTRR_PHYSBASEn/PHYSMASKn). */
struct var_mtrr_regs {
	msr_t base;
	msr_t mask;
};

/* A complete variable-MTRR programming: default type plus the register
 * images actually used. */
struct var_mtrr_solution {
	int mtrr_default_type;
	int num_used;
	struct var_mtrr_regs regs[NUM_MTRR_STATIC_STORAGE];
};

/* Global storage for variable MTRR solution. */
static struct var_mtrr_solution mtrr_global_solution;

/* Working state threaded through the variable-MTRR calculations. */
struct var_mtrr_state {
	struct memranges *addr_space;	/* Tagged address space to cover. */
	int above4gb;			/* Nonzero: also process >4GiB ranges. */
	int address_bits;		/* CPU physical address width. */
	int prepare_msrs;		/* Nonzero: fill regs, not just count. */
	int mtrr_index;			/* Next MTRR slot / running count. */
	int def_mtrr_type;		/* Default type assumed for counting. */
	struct var_mtrr_regs *regs;	/* Output register images. */
};
Aaron Durbin57686f82013-03-20 15:50:59 -0500386
/* Zero one variable MTRR pair; writing 0 clears the valid bit in the
 * mask MSR, disabling the entry. */
static void clear_var_mtrr(int index)
{
	msr_t msr = { .lo = 0, .hi = 0 };

	wrmsr(MTRR_PHYS_BASE(index), msr);
	wrmsr(MTRR_PHYS_MASK(index), msr);
}
394
/* Number of variable MTRRs deliberately left free for the OS to use
 * (2 when RESERVE_MTRRS_FOR_OS is selected, otherwise 0). */
static int get_os_reserved_mtrrs(void)
{
	return CONFIG(RESERVE_MTRRS_FOR_OS) ? 2 : 0;
}
399
/* Fill the register image for the next variable MTRR slot with the given
 * base/size (in range-address units) and memory type. Only logs and
 * returns if the CPU has run out of variable MTRRs. */
static void prep_var_mtrr(struct var_mtrr_state *var_state,
			  uint64_t base, uint64_t size, int mtrr_type)
{
	struct var_mtrr_regs *regs;
	resource_t rbase;
	resource_t rsize;
	resource_t mask;

	if (var_state->mtrr_index >= total_mtrrs) {
		printk(BIOS_ERR, "Not enough MTRRs available! MTRR index is %d with %d MTRRs in total.\n",
		       var_state->mtrr_index, total_mtrrs);
		return;
	}

	/*
	 * If desired, 2 variable MTRRs are attempted to be saved for the OS to
	 * use. However, it's more important to try to map the full address
	 * space properly.
	 */
	if (var_state->mtrr_index >= total_mtrrs - get_os_reserved_mtrrs())
		printk(BIOS_WARNING, "Taking a reserved OS MTRR.\n");

	/* Convert back from 4KiB range units to physical addresses. */
	rbase = base;
	rsize = size;

	rbase = RANGE_TO_PHYS_ADDR(rbase);
	rsize = RANGE_TO_PHYS_ADDR(rsize);
	/* Negating a power-of-2 size yields the MTRR mask pattern; the
	 * caller guarantees size is a power of 2 aligned to base. */
	rsize = -rsize;

	/* Truncate the mask to the CPU's physical address width. */
	mask = (1ULL << var_state->address_bits) - 1;
	rsize = rsize & mask;

	printk(BIOS_DEBUG, "MTRR: %d base 0x%016llx mask 0x%016llx type %d\n",
	       var_state->mtrr_index, rbase, rsize, mtrr_type);

	regs = &var_state->regs[var_state->mtrr_index];

	/* PHYSBASE: base address with the type in the low byte. */
	regs->base.lo = rbase;
	regs->base.lo |= mtrr_type;
	regs->base.hi = rbase >> 32;

	/* PHYSMASK: mask with the valid bit set. */
	regs->mask.lo = rsize;
	regs->mask.lo |= MTRR_PHYS_MASK_VALID;
	regs->mask.hi = rsize >> 32;
}
445
/*
 * fls64: find least significant bit set in a 64-bit word
 * As samples, fls64(0x0) = 64; fls64(0x4400) = 10;
 * fls64(0x40400000000) = 34.
 */
static uint32_t fls64(uint64_t x)
{
	const uint32_t lower = (uint32_t)x;

	if (lower != 0)
		return fls(lower);

	return fls((uint32_t)(x >> 32)) + 32;
}
459
/*
 * fms64: find most significant bit set in a 64-bit word
 * As samples, fms64(0x0) = 0; fms64(0x4400) = 14;
 * fms64(0x40400000000) = 42.
 */
static uint32_t fms64(uint64_t x)
{
	const uint32_t upper = (uint32_t)(x >> 32);

	if (upper != 0)
		return fms(upper) + 32;

	return fms((uint32_t)x);
}
472
/* Cover [base, base + size) (range-address units) with one or more
 * power-of-2, base-aligned chunks — the shape variable MTRRs require.
 * Increments mtrr_index per chunk; writes register images only when
 * var_state->prepare_msrs is set, so the same code doubles as a counter. */
static void calc_var_mtrr_range(struct var_mtrr_state *var_state,
				uint64_t base, uint64_t size, int mtrr_type)
{
	while (size != 0) {
		uint32_t addr_lsb;
		uint32_t size_msb;
		uint64_t mtrr_size;

		addr_lsb = fls64(base);
		size_msb = fms64(size);

		/* All MTRR entries need to have their base aligned to the mask
		 * size. The maximum size is calculated by a function of the
		 * min base bit set and maximum size bit set. */
		if (addr_lsb > size_msb)
			mtrr_size = 1ULL << size_msb;
		else
			mtrr_size = 1ULL << addr_lsb;

		if (var_state->prepare_msrs)
			prep_var_mtrr(var_state, base, mtrr_size, mtrr_type);

		size -= mtrr_size;
		base += mtrr_size;
		var_state->mtrr_index++;
	}
}
500
/* Find the cheapest upper end for a WB range's "hole" (see block comment
 * below). Returns the chosen end in range-address units. */
static uint64_t optimize_var_mtrr_hole(const uint64_t base,
				       const uint64_t hole,
				       const uint64_t limit,
				       const int carve_hole)
{
	/*
	 * With default type UC, we can potentially optimize a WB
	 * range with unaligned upper end, by aligning it up and
	 * carving the added "hole" out again.
	 *
	 * To optimize the upper end of the hole, we will test
	 * how many MTRRs calc_var_mtrr_range() will spend for any
	 * alignment of the hole's upper end.
	 *
	 * We take four parameters, the lower end of the WB range
	 * `base`, upper end of the WB range as start of the `hole`,
	 * a `limit` how far we may align the upper end of the hole
	 * up and a flag `carve_hole` whether we should count MTRRs
	 * for carving the hole out. We return the optimal upper end
	 * for the hole (which may be the same as the end of the WB
	 * range in case we don't gain anything by aligning up).
	 */

	const int dont_care = 0;
	struct var_mtrr_state var_state = { 0, };

	unsigned int align, best_count;
	/* NOTE(review): `best_end`, `fls(hole)`/`fms(hole)` and `1 << align`
	 * are 32-bit while the parameters are 64-bit. Range addresses are
	 * phys >> 12, so 32 bits cover 16TiB — presumably sufficient here,
	 * but confirm for platforms with larger physical address spaces. */
	uint32_t best_end = hole;

	/* calculate MTRR count for the WB range alone (w/o a hole) */
	calc_var_mtrr_range(&var_state, base, hole - base, dont_care);
	best_count = var_state.mtrr_index;
	var_state.mtrr_index = 0;

	for (align = fls(hole) + 1; align <= fms(hole); ++align) {
		const uint64_t hole_end = ALIGN_UP((uint64_t)hole, 1 << align);
		if (hole_end > limit)
			break;

		/* calculate MTRR count for this alignment */
		calc_var_mtrr_range(
			&var_state, base, hole_end - base, dont_care);
		if (carve_hole)
			calc_var_mtrr_range(
				&var_state, hole, hole_end - hole, dont_care);

		if (var_state.mtrr_index < best_count) {
			best_count = var_state.mtrr_index;
			best_end = hole_end;
		}
		var_state.mtrr_index = 0;
	}

	return best_end;
}
556
/* Count (or prepare, depending on var_state->prepare_msrs) the variable
 * MTRRs for one range entry, optionally over-aligning a WB range's upper
 * end and carving the excess back out as UC. */
static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
				     struct range_entry *r)
{
	uint64_t a1, a2, b1, b2;
	int mtrr_type, carve_hole;

	/*
	 * Determine MTRRs based on the following algorithm for the given entry:
	 * +------------------+ b2 = ALIGN_UP(end)
	 * | 0 or more bytes  | <-- hole is carved out between b1 and b2
	 * +------------------+ a2 = b1 = original end
	 * |                  |
	 * +------------------+ a1 = begin
	 *
	 * Thus, there are up to 2 sub-ranges to configure variable MTRRs for.
	 */
	mtrr_type = range_entry_mtrr_type(r);

	a1 = range_entry_base_mtrr_addr(r);
	a2 = range_entry_end_mtrr_addr(r);

	/* The end address is within the first 1MiB. The fixed MTRRs take
	 * precedence over the variable ones. Therefore this range
	 * can be ignored. */
	if (a2 <= RANGE_1MB)
		return;

	/* Again, the fixed MTRRs take precedence so the beginning
	 * of the range can be set to 0 if it starts at or below 1MiB. */
	if (a1 <= RANGE_1MB)
		a1 = 0;

	/* If the range starts above 4GiB the processing is done. */
	if (!var_state->above4gb && a1 >= RANGE_4GB)
		return;

	/* Clip the upper address to 4GiB if addresses above 4GiB
	 * are not being processed. */
	if (!var_state->above4gb && a2 > RANGE_4GB)
		a2 = RANGE_4GB;

	b1 = a2;
	b2 = a2;
	carve_hole = 0;

	/* We only consider WB type ranges for hole-carving. */
	if (mtrr_type == MTRR_TYPE_WRBACK) {
		struct range_entry *next;
		uint64_t b2_limit;
		/*
		 * Depending on the type of the next range, there are three
		 * different situations to handle:
		 *
		 * 1. WB range is last in address space:
		 *    Aligning up, up to the next power of 2, may gain us
		 *    something.
		 *
		 * 2. The next range is of type UC:
		 *    We may align up, up to the _end_ of the next range. If
		 *    there is a gap between the current and the next range,
		 *    it would have been covered by the default type UC anyway.
		 *
		 * 3. The next range is not of type UC:
		 *    We may align up, up to the _base_ of the next range. This
		 *    may either be the end of the current range (if the next
		 *    range follows immediately) or the end of the gap between
		 *    the ranges.
		 */
		next = memranges_next_entry(var_state->addr_space, r);
		if (next == NULL) {
			b2_limit = ALIGN_UP((uint64_t)b1, 1 << fms(b1));
			/* If it's the last range above 4GiB, we won't carve
			   the hole out. If an OS wanted to move MMIO there,
			   it would have to override the MTRR setting using
			   PAT just like it would with WB as default type. */
			carve_hole = a1 < RANGE_4GB;
		} else if (range_entry_mtrr_type(next)
				== MTRR_TYPE_UNCACHEABLE) {
			b2_limit = range_entry_end_mtrr_addr(next);
			carve_hole = 1;
		} else {
			b2_limit = range_entry_base_mtrr_addr(next);
			carve_hole = 1;
		}
		b2 = optimize_var_mtrr_hole(a1, b1, b2_limit, carve_hole);
	}

	calc_var_mtrr_range(var_state, a1, b2 - a1, mtrr_type);
	if (carve_hole && b2 != b1) {
		calc_var_mtrr_range(var_state, b1, b2 - b1,
				    MTRR_TYPE_UNCACHEABLE);
	}
}
650
/* Count how many variable MTRRs the address space would need with WB as
 * the default type and, separately, with UC as the default type. The
 * totals are returned through the two out-parameters. */
static void __calc_var_mtrrs(struct memranges *addr_space,
			     int above4gb, int address_bits,
			     int *num_def_wb_mtrrs, int *num_def_uc_mtrrs)
{
	int wb_deftype_count;
	int uc_deftype_count;
	struct range_entry *r;
	struct var_mtrr_state var_state;

	/* The default MTRR cacheability type is determined by calculating
	 * the number of MTRRs required for each MTRR type as if it was the
	 * default. */
	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	/* Counting only — no register images are produced here. */
	var_state.prepare_msrs = 0;

	wb_deftype_count = 0;
	uc_deftype_count = 0;

	/*
	 * For each range do 2 calculations:
	 *   1. UC as default type with possible holes at top of range.
	 *   2. WB as default.
	 * The lowest count is then used as default after totaling all
	 * MTRRs. UC takes precedence in the MTRR architecture. There-
	 * fore, only holes can be used when the type of the region is
	 * MTRR_TYPE_WRBACK with MTRR_TYPE_UNCACHEABLE as the default
	 * type.
	 */
	memranges_each_entry(r, var_state.addr_space) {
		int mtrr_type;

		mtrr_type = range_entry_mtrr_type(r);

		/* A range matching the default type costs no MTRR, so it
		 * is skipped for that default. */
		if (mtrr_type != MTRR_TYPE_UNCACHEABLE) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_UNCACHEABLE;
			calc_var_mtrrs_with_hole(&var_state, r);
			uc_deftype_count += var_state.mtrr_index;
		}

		if (mtrr_type != MTRR_TYPE_WRBACK) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_WRBACK;
			calc_var_mtrrs_with_hole(&var_state, r);
			wb_deftype_count += var_state.mtrr_index;
		}
	}

	*num_def_wb_mtrrs = wb_deftype_count;
	*num_def_uc_mtrrs = uc_deftype_count;
}
704
705static int calc_var_mtrrs(struct memranges *addr_space,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700706 int above4gb, int address_bits)
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600707{
708 int wb_deftype_count = 0;
709 int uc_deftype_count = 0;
710
711 __calc_var_mtrrs(addr_space, above4gb, address_bits, &wb_deftype_count,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700712 &uc_deftype_count);
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600713
Tim Wawrzynczak6fcc46d2021-04-19 13:47:36 -0600714 const int bios_mtrrs = total_mtrrs - get_os_reserved_mtrrs();
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600715 if (wb_deftype_count > bios_mtrrs && uc_deftype_count > bios_mtrrs) {
716 printk(BIOS_DEBUG, "MTRR: Removing WRCOMB type. "
717 "WB/UC MTRR counts: %d/%d > %d.\n",
718 wb_deftype_count, uc_deftype_count, bios_mtrrs);
719 memranges_update_tag(addr_space, MTRR_TYPE_WRCOMB,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700720 MTRR_TYPE_UNCACHEABLE);
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600721 __calc_var_mtrrs(addr_space, above4gb, address_bits,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700722 &wb_deftype_count, &uc_deftype_count);
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600723 }
Scott Duplichanf3cce2f2010-11-13 19:07:59 +0000724
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500725 printk(BIOS_DEBUG, "MTRR: default type WB/UC MTRR counts: %d/%d.\n",
726 wb_deftype_count, uc_deftype_count);
Kyösti Mälkkiffc1fb32012-07-11 14:40:19 +0300727
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500728 if (wb_deftype_count < uc_deftype_count) {
729 printk(BIOS_DEBUG, "MTRR: WB selected as default type.\n");
730 return MTRR_TYPE_WRBACK;
731 }
732 printk(BIOS_DEBUG, "MTRR: UC selected as default type.\n");
733 return MTRR_TYPE_UNCACHEABLE;
734}
Kyösti Mälkkiffc1fb32012-07-11 14:40:19 +0300735
Gabe Black7756fe72014-02-25 01:40:34 -0800736static void prepare_var_mtrrs(struct memranges *addr_space, int def_type,
737 int above4gb, int address_bits,
738 struct var_mtrr_solution *sol)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500739{
Aaron Durbine3834422013-03-28 20:48:51 -0500740 struct range_entry *r;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500741 struct var_mtrr_state var_state;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500742
743 var_state.addr_space = addr_space;
744 var_state.above4gb = above4gb;
745 var_state.address_bits = address_bits;
Gabe Black7756fe72014-02-25 01:40:34 -0800746 /* Prepare the MSRs. */
747 var_state.prepare_msrs = 1;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500748 var_state.mtrr_index = 0;
749 var_state.def_mtrr_type = def_type;
Gabe Black7756fe72014-02-25 01:40:34 -0800750 var_state.regs = &sol->regs[0];
Aaron Durbine3834422013-03-28 20:48:51 -0500751
752 memranges_each_entry(r, var_state.addr_space) {
753 if (range_entry_mtrr_type(r) == def_type)
754 continue;
Nico Huber64f0bcb2017-10-07 16:37:04 +0200755 calc_var_mtrrs_with_hole(&var_state, r);
Aaron Durbine3834422013-03-28 20:48:51 -0500756 }
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500757
Gabe Black7756fe72014-02-25 01:40:34 -0800758 /* Update the solution. */
759 sol->num_used = var_state.mtrr_index;
760}
761
Aaron Durbind9762f72017-06-12 12:48:38 -0500762static int commit_var_mtrrs(const struct var_mtrr_solution *sol)
Gabe Black7756fe72014-02-25 01:40:34 -0800763{
764 int i;
765
Aaron Durbind9762f72017-06-12 12:48:38 -0500766 if (sol->num_used > total_mtrrs) {
767 printk(BIOS_WARNING, "Not enough MTRRs: %d vs %d\n",
768 sol->num_used, total_mtrrs);
769 return -1;
770 }
771
Isaac Christensen81f90c52014-09-24 14:59:32 -0600772 /* Write out the variable MTRRs. */
Gabe Black7756fe72014-02-25 01:40:34 -0800773 disable_cache();
774 for (i = 0; i < sol->num_used; i++) {
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700775 wrmsr(MTRR_PHYS_BASE(i), sol->regs[i].base);
776 wrmsr(MTRR_PHYS_MASK(i), sol->regs[i].mask);
Gabe Black7756fe72014-02-25 01:40:34 -0800777 }
778 /* Clear the ones that are unused. */
779 for (; i < total_mtrrs; i++)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500780 clear_var_mtrr(i);
Isaac Christensen81f90c52014-09-24 14:59:32 -0600781 enable_var_mtrr(sol->mtrr_default_type);
Gabe Black7756fe72014-02-25 01:40:34 -0800782 enable_cache();
783
Aaron Durbind9762f72017-06-12 12:48:38 -0500784 return 0;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500785}
786
787void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
788{
Gabe Black7756fe72014-02-25 01:40:34 -0800789 static struct var_mtrr_solution *sol = NULL;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500790 struct memranges *addr_space;
791
792 addr_space = get_physical_address_space();
793
Gabe Black7756fe72014-02-25 01:40:34 -0800794 if (sol == NULL) {
Gabe Black7756fe72014-02-25 01:40:34 -0800795 sol = &mtrr_global_solution;
796 sol->mtrr_default_type =
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500797 calc_var_mtrrs(addr_space, !!above4gb, address_bits);
Gabe Black7756fe72014-02-25 01:40:34 -0800798 prepare_var_mtrrs(addr_space, sol->mtrr_default_type,
799 !!above4gb, address_bits, sol);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000800 }
Stefan Reinauer00093a82011-11-02 16:12:34 -0700801
Gabe Black7756fe72014-02-25 01:40:34 -0800802 commit_var_mtrrs(sol);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000803}
804
Aaron Durbin1ebbb162020-05-28 10:17:34 -0600805static void _x86_setup_mtrrs(unsigned int above4gb)
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000806{
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100807 int address_size;
Aaron Durbine63be892016-03-07 16:05:36 -0600808
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000809 x86_setup_fixed_mtrrs();
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100810 address_size = cpu_phys_address_size();
Felix Held447f5772022-12-14 23:07:52 +0100811 printk(BIOS_DEBUG, "apic_id 0x%x setup mtrr for CPU physical address size: %d bits\n",
812 lapicid(), address_size);
Aaron Durbin1ebbb162020-05-28 10:17:34 -0600813 x86_setup_var_mtrrs(address_size, above4gb);
814}
815
/*
 * MTRR setup without probing the CPU for its variable-MTRR count;
 * assumes only the minimum number of variable MTRRs is available.
 */
void x86_setup_mtrrs(void)
{
	/* Without detect, assume the minimum */
	total_mtrrs = MIN_MTRRS;
	/* Always handle addresses above 4GiB. */
	_x86_setup_mtrrs(1);
}
823
Aaron Durbine63be892016-03-07 16:05:36 -0600824void x86_setup_mtrrs_with_detect(void)
825{
826 detect_var_mtrrs();
Aaron Durbin1ebbb162020-05-28 10:17:34 -0600827 /* Always handle addresses above 4GiB. */
828 _x86_setup_mtrrs(1);
829}
830
/*
 * MTRR setup with variable-MTRR detection, but leaving memory above
 * 4GiB uncovered by the variable-MTRR solution.
 */
void x86_setup_mtrrs_with_detect_no_above_4gb(void)
{
	detect_var_mtrrs();
	_x86_setup_mtrrs(0);
}
836
Kyösti Mälkki38a8fb02014-06-30 13:48:18 +0300837void x86_mtrr_check(void)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000838{
839 /* Only Pentium Pro and later have MTRR */
840 msr_t msr;
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000841 printk(BIOS_DEBUG, "\nMTRR check\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000842
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700843 msr = rdmsr(MTRR_DEF_TYPE_MSR);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000844
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000845 printk(BIOS_DEBUG, "Fixed MTRRs : ");
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700846 if (msr.lo & MTRR_DEF_TYPE_FIX_EN)
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000847 printk(BIOS_DEBUG, "Enabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000848 else
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000849 printk(BIOS_DEBUG, "Disabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000850
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000851 printk(BIOS_DEBUG, "Variable MTRRs: ");
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700852 if (msr.lo & MTRR_DEF_TYPE_EN)
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000853 printk(BIOS_DEBUG, "Enabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000854 else
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000855 printk(BIOS_DEBUG, "Disabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000856
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000857 printk(BIOS_DEBUG, "\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000858
859 post_code(0x93);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000860}
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600861
/*
 * Set once a temporary MTRR solution has been committed; tells
 * remove_temp_solution() to restore mtrr_global_solution at handoff.
 */
static bool put_back_original_solution;
863
/*
 * Temporarily overlay [begin, begin + size) with MTRR @type on top of
 * the normal physical address space and reprogram the variable MTRRs
 * accordingly.  Up to 10 such ranges accumulate across calls (each call
 * re-applies all recorded ranges).  On success the original solution is
 * flagged for restoration at OS resume / payload boot; on failure a
 * warning is logged and the MTRRs are left as they were.
 */
void mtrr_use_temp_range(uintptr_t begin, size_t size, int type)
{
	const struct range_entry *r;
	const struct memranges *orig;
	struct var_mtrr_solution sol;
	struct memranges addr_space;
	const int above4gb = 1; /* Cover above 4GiB by default. */
	int address_bits;
	/* Static so ranges persist and accumulate across calls. */
	static struct temp_range {
		uintptr_t begin;
		size_t size;
		int type;
	} temp_ranges[10];

	if (size == 0)
		return;

	/* Record the new range in the first free slot (size == 0). */
	int i;
	for (i = 0; i < ARRAY_SIZE(temp_ranges); i++) {
		if (temp_ranges[i].size == 0) {
			temp_ranges[i].begin = begin;
			temp_ranges[i].size = size;
			temp_ranges[i].type = type;
			break;
		}
	}
	if (i == ARRAY_SIZE(temp_ranges)) {
		printk(BIOS_ERR, "Out of temporary ranges for MTRR use\n");
		return;
	}

	/* Make a copy of the original address space and tweak it with the
	 * provided range. */
	memranges_init_empty(&addr_space, NULL, 0);
	orig = get_physical_address_space();
	memranges_each_entry(r, orig) {
		unsigned long tag = range_entry_tag(r);

		/* Remove any write combining MTRRs from the temporary
		 * solution as it just fragments the address space. */
		if (tag == MTRR_TYPE_WRCOMB)
			tag = MTRR_TYPE_UNCACHEABLE;

		memranges_insert(&addr_space, range_entry_base(r),
				 range_entry_size(r), tag);
	}

	/* Place new range into the address space.  All previously recorded
	 * temporary ranges are re-applied, not just the newest one. */
	for (i = 0; i < ARRAY_SIZE(temp_ranges); i++) {
		if (temp_ranges[i].size != 0)
			memranges_insert(&addr_space, temp_ranges[i].begin,
					 temp_ranges[i].size, temp_ranges[i].type);
	}

	print_physical_address_space(&addr_space, "TEMPORARY");

	/* Calculate a new solution with the updated address space. */
	address_bits = cpu_phys_address_size();
	memset(&sol, 0, sizeof(sol));
	sol.mtrr_default_type =
		calc_var_mtrrs(&addr_space, above4gb, address_bits);
	prepare_var_mtrrs(&addr_space, sol.mtrr_default_type,
			  above4gb, address_bits, &sol);

	if (commit_var_mtrrs(&sol) < 0)
		printk(BIOS_WARNING, "Unable to insert temporary MTRR range: 0x%016llx - 0x%016llx size 0x%08llx type %d\n",
			(long long)begin, (long long)begin + size - 1,
			(long long)size, type);
	else
		/* Remember to restore the cached global solution later. */
		put_back_original_solution = true;

	memranges_teardown(&addr_space);
}
937
938static void remove_temp_solution(void *unused)
939{
940 if (put_back_original_solution)
941 commit_var_mtrrs(&mtrr_global_solution);
942}
943
/* Restore the original MTRR solution before resuming the OS or booting
 * the payload, so temporary ranges never leak past firmware. */
BOOT_STATE_INIT_ENTRY(BS_OS_RESUME, BS_ON_ENTRY, remove_temp_solution, NULL);
BOOT_STATE_INIT_ENTRY(BS_PAYLOAD_BOOT, BS_ON_ENTRY, remove_temp_solution, NULL);