blob: 53b640088b54a364ef9b32494e11fd0f8a1e722c [file] [log] [blame]
Patrick Georgi02363b52020-05-05 20:48:50 +02001/* This file is part of the coreboot project. */
Elyes HAOUAS3a7346c2020-05-07 07:46:17 +02002/* SPDX-License-Identifier: GPL-2.0-or-later */
3
Eric Biedermanfcd5ace2004-10-14 19:29:29 +00004/*
Martin Rothd57ace22019-08-31 10:48:37 -06005 * mtrr.c: setting MTRR to decent values for cache initialization on P6
6 * Derived from intel_set_mtrr in intel_subr.c and mtrr.c in linux kernel
Eric Biedermanfcd5ace2004-10-14 19:29:29 +00007 *
Lee Leahyc5917072017-03-15 16:38:51 -07008 * Reference: Intel Architecture Software Developer's Manual, Volume 3: System
9 * Programming
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000010 */
Yinghai Lu13f1c2a2005-07-08 02:49:49 +000011
Eric Biedermanf8a2ddd2004-10-30 08:05:41 +000012#include <stddef.h>
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050013#include <string.h>
Aaron Durbinbebf6692013-04-24 20:59:43 -050014#include <bootstate.h>
Elyes HAOUASd26844c2019-06-21 07:31:40 +020015#include <commonlib/helpers.h>
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000016#include <console/console.h>
17#include <device/device.h>
Aaron Durbinca4f4b82014-02-08 15:41:52 -060018#include <device/pci_ids.h>
Aaron Durbinebf142a2013-03-29 16:23:23 -050019#include <cpu/cpu.h>
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000020#include <cpu/x86/msr.h>
21#include <cpu/x86/mtrr.h>
22#include <cpu/x86/cache.h>
Stefan Reinauer00093a82011-11-02 16:12:34 -070023#include <cpu/x86/lapic.h>
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050024#include <memrange.h>
Aaron Durbin57686f82013-03-20 15:50:59 -050025#include <cpu/amd/mtrr.h>
Richard Spiegelb28025a2019-02-20 11:00:19 -070026#include <assert.h>
Julius Wernercd49cce2019-03-05 16:53:33 -080027#if CONFIG(X86_AMD_FIXED_MTRRS)
Aaron Durbin57686f82013-03-20 15:50:59 -050028#define MTRR_FIXED_WRBACK_BITS (MTRR_READ_MEM | MTRR_WRITE_MEM)
29#else
30#define MTRR_FIXED_WRBACK_BITS 0
31#endif
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000032
/* 2 MTRRS are reserved for the operating system */
#define BIOS_MTRRS 6
#define OS_MTRRS 2
#define MTRRS (BIOS_MTRRS + OS_MTRRS)
/*
 * Static storage size for variable MTRRs. It's sized sufficiently large to
 * handle different types of CPUs. Empirically, 16 variable MTRRs has not
 * yet been observed.
 */
#define NUM_MTRR_STATIC_STORAGE 16

/* Runtime MTRR counts. The defaults below are conservative fallbacks;
 * detect_var_mtrrs() overwrites them with the value read from MTRRcap. */
static int total_mtrrs = MTRRS;
static int bios_mtrrs = BIOS_MTRRS;
46
47static void detect_var_mtrrs(void)
48{
49 msr_t msr;
50
Alexandru Gagniuc86091f92015-09-30 20:23:09 -070051 msr = rdmsr(MTRR_CAP_MSR);
Stefan Reinauerc00dfbc2012-04-03 16:24:37 -070052
53 total_mtrrs = msr.lo & 0xff;
Gabe Black7756fe72014-02-25 01:40:34 -080054
55 if (total_mtrrs > NUM_MTRR_STATIC_STORAGE) {
56 printk(BIOS_WARNING,
57 "MTRRs detected (%d) > NUM_MTRR_STATIC_STORAGE (%d)\n",
58 total_mtrrs, NUM_MTRR_STATIC_STORAGE);
59 total_mtrrs = NUM_MTRR_STATIC_STORAGE;
60 }
Stefan Reinauerc00dfbc2012-04-03 16:24:37 -070061 bios_mtrrs = total_mtrrs - OS_MTRRS;
62}
63
Yinghai Lu13f1c2a2005-07-08 02:49:49 +000064void enable_fixed_mtrr(void)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000065{
66 msr_t msr;
67
Alexandru Gagniuc86091f92015-09-30 20:23:09 -070068 msr = rdmsr(MTRR_DEF_TYPE_MSR);
69 msr.lo |= MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN;
70 wrmsr(MTRR_DEF_TYPE_MSR, msr);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000071}
72
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -060073void fixed_mtrrs_expose_amd_rwdram(void)
74{
75 msr_t syscfg;
76
Julius Wernercd49cce2019-03-05 16:53:33 -080077 if (!CONFIG(X86_AMD_FIXED_MTRRS))
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -060078 return;
79
80 syscfg = rdmsr(SYSCFG_MSR);
81 syscfg.lo |= SYSCFG_MSR_MtrrFixDramModEn;
82 wrmsr(SYSCFG_MSR, syscfg);
83}
84
85void fixed_mtrrs_hide_amd_rwdram(void)
86{
87 msr_t syscfg;
88
Julius Wernercd49cce2019-03-05 16:53:33 -080089 if (!CONFIG(X86_AMD_FIXED_MTRRS))
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -060090 return;
91
92 syscfg = rdmsr(SYSCFG_MSR);
93 syscfg.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
94 wrmsr(SYSCFG_MSR, syscfg);
95}
96
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050097static void enable_var_mtrr(unsigned char deftype)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000098{
99 msr_t msr;
100
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700101 msr = rdmsr(MTRR_DEF_TYPE_MSR);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500102 msr.lo &= ~0xff;
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700103 msr.lo |= MTRR_DEF_TYPE_EN | deftype;
104 wrmsr(MTRR_DEF_TYPE_MSR, msr);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000105}
106
/* Log level for the very chatty per-range MTRR trace output. */
#define MTRR_VERBOSE_LEVEL BIOS_NEVER

/* MTRRs are at a 4KiB granularity. Therefore all address calculations can
 * be done with 32-bit numbers. This allows for the MTRR code to handle
 * up to 2^44 bytes (16 TiB) of address space. */
#define RANGE_SHIFT 12
#define ADDR_SHIFT_TO_RANGE_SHIFT(x) \
	(((x) > RANGE_SHIFT) ? ((x) - RANGE_SHIFT) : RANGE_SHIFT)
#define PHYS_TO_RANGE_ADDR(x) ((x) >> RANGE_SHIFT)
#define RANGE_TO_PHYS_ADDR(x) (((resource_t)(x)) << RANGE_SHIFT)
/* Number of fixed MTRR MSRs (each MSR covers RANGES_PER_FIXED_MTRR ranges). */
#define NUM_FIXED_MTRRS (NUM_FIXED_RANGES / RANGES_PER_FIXED_MTRR)

/* Helpful constants. */
#define RANGE_1MB PHYS_TO_RANGE_ADDR(1 << 20)
#define RANGE_4GB (1 << (ADDR_SHIFT_TO_RANGE_SHIFT(32)))

/* The low byte of a range entry's tag holds the MTRR memory type. */
#define MTRR_ALGO_SHIFT (8)
#define MTRR_TAG_MASK ((1 << MTRR_ALGO_SHIFT) - 1)
Aaron Durbine3834422013-03-28 20:48:51 -0500125
/* Return a range entry's base address in 4KiB MTRR-range units. */
static inline uint32_t range_entry_base_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_base(r));
}
130
/* Return a range entry's end address (exclusive) in 4KiB MTRR-range units. */
static inline uint32_t range_entry_end_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_end(r));
}
135
/* Extract the MTRR memory type stored in the low byte of the entry's tag. */
static inline int range_entry_mtrr_type(struct range_entry *r)
{
	return range_entry_tag(r) & MTRR_TAG_MASK;
}
140
Aaron Durbinca4f4b82014-02-08 15:41:52 -0600141static int filter_vga_wrcomb(struct device *dev, struct resource *res)
142{
143 /* Only handle PCI devices. */
144 if (dev->path.type != DEVICE_PATH_PCI)
145 return 0;
146
147 /* Only handle VGA class devices. */
148 if (((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA))
149 return 0;
150
151 /* Add resource as write-combining in the address space. */
152 return 1;
153}
154
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600155static void print_physical_address_space(const struct memranges *addr_space,
156 const char *identifier)
157{
158 const struct range_entry *r;
159
160 if (identifier)
161 printk(BIOS_DEBUG, "MTRR: %s Physical address space:\n",
162 identifier);
163 else
164 printk(BIOS_DEBUG, "MTRR: Physical address space:\n");
165
166 memranges_each_entry(r, addr_space)
167 printk(BIOS_DEBUG,
168 "0x%016llx - 0x%016llx size 0x%08llx type %ld\n",
169 range_entry_base(r), range_entry_end(r),
170 range_entry_size(r), range_entry_tag(r));
171}
172
/* Build (once) and return the physical address space map used for all MTRR
 * calculations. Ranges are tagged with the desired MTRR type. The map is
 * constructed lazily on first call and cached in static storage, so later
 * callers (e.g. APs) reuse the same solution input. */
static struct memranges *get_physical_address_space(void)
{
	static struct memranges *addr_space;
	static struct memranges addr_space_storage;

	/* In order to handle some chipsets not being able to pre-determine
	 * uncacheable ranges, such as graphics memory, at resource insertion
	 * time remove uncacheable regions from the cacheable ones. */
	if (addr_space == NULL) {
		unsigned long mask;
		unsigned long match;

		addr_space = &addr_space_storage;

		mask = IORESOURCE_CACHEABLE;
		/* Collect cacheable and uncacheable address ranges. The
		 * uncacheable regions take precedence over the cacheable
		 * regions. */
		memranges_init(addr_space, mask, mask, MTRR_TYPE_WRBACK);
		memranges_add_resources(addr_space, mask, 0,
					MTRR_TYPE_UNCACHEABLE);

		/* Handle any write combining resources. Only prefetchable
		 * resources are appropriate for this MTRR type. */
		match = IORESOURCE_PREFETCH;
		mask |= match;
		memranges_add_resources_filter(addr_space, mask, match,
				MTRR_TYPE_WRCOMB, filter_vga_wrcomb);

		/* The address space below 4GiB is special. It needs to be
		 * covered entirely by range entries so that MTRR calculations
		 * can be properly done for the full 32-bit address space.
		 * Therefore, ensure holes are filled up to 4GiB as
		 * uncacheable */
		memranges_fill_holes_up_to(addr_space,
					   RANGE_TO_PHYS_ADDR(RANGE_4GB),
					   MTRR_TYPE_UNCACHEABLE);

		print_physical_address_space(addr_space, NULL);
	}

	return addr_space;
}
216
/* Fixed MTRR descriptor. This structure defines the step size and begin
 * and end (exclusive) address covered by a set of fixed MTRR MSRs.
 * It also describes the offset in byte intervals to store the calculated MTRR
 * type in an array. */
struct fixed_mtrr_desc {
	uint32_t begin;		/* First covered address (range units). */
	uint32_t end;		/* End address, exclusive (range units). */
	uint32_t step;		/* Size covered per type slot (range units). */
	int range_index;	/* First slot index in fixed_mtrr_types[]. */
	int msr_index_base;	/* MSR number of the set's first MSR. */
};

/* Shared MTRR calculations. Can be reused by APs. */
static uint8_t fixed_mtrr_types[NUM_FIXED_RANGES];

/* Fixed MTRR descriptors: 64KiB steps below 512KiB, 16KiB steps up to
 * 768KiB, 4KiB steps up to 1MiB — matching the x86 fixed-MTRR layout. */
static const struct fixed_mtrr_desc fixed_mtrr_desc[] = {
	{ PHYS_TO_RANGE_ADDR(0x000000), PHYS_TO_RANGE_ADDR(0x080000),
	  PHYS_TO_RANGE_ADDR(64 * 1024), 0, MTRR_FIX_64K_00000 },
	{ PHYS_TO_RANGE_ADDR(0x080000), PHYS_TO_RANGE_ADDR(0x0C0000),
	  PHYS_TO_RANGE_ADDR(16 * 1024), 8, MTRR_FIX_16K_80000 },
	{ PHYS_TO_RANGE_ADDR(0x0C0000), PHYS_TO_RANGE_ADDR(0x100000),
	  PHYS_TO_RANGE_ADDR(4 * 1024), 24, MTRR_FIX_4K_C0000 },
};
241
/* Fill fixed_mtrr_types[] with the MTRR type of every fixed range below
 * 1MiB, by walking the physical address space map against the fixed MTRR
 * descriptors. Idempotent: the table is computed once and reused (e.g. by
 * APs) on subsequent calls. */
static void calc_fixed_mtrrs(void)
{
	static int fixed_mtrr_types_initialized;
	struct memranges *phys_addr_space;
	struct range_entry *r;
	const struct fixed_mtrr_desc *desc;
	const struct fixed_mtrr_desc *last_desc;
	uint32_t begin;
	uint32_t end;
	int type_index;

	if (fixed_mtrr_types_initialized)
		return;

	phys_addr_space = get_physical_address_space();

	/* Set all fixed ranges to uncacheable first. */
	memset(&fixed_mtrr_types[0], MTRR_TYPE_UNCACHEABLE, NUM_FIXED_RANGES);

	desc = &fixed_mtrr_desc[0];
	last_desc = &fixed_mtrr_desc[ARRAY_SIZE(fixed_mtrr_desc) - 1];

	memranges_each_entry(r, phys_addr_space) {
		begin = range_entry_base_mtrr_addr(r);
		end = range_entry_end_mtrr_addr(r);

		/* Entries are sorted; past 1MiB nothing affects fixed MTRRs. */
		if (begin >= last_desc->end)
			break;

		if (end > last_desc->end)
			end = last_desc->end;

		/* Get to the correct fixed mtrr descriptor. */
		while (begin >= desc->end)
			desc++;

		type_index = desc->range_index;
		type_index += (begin - desc->begin) / desc->step;

		/* Stamp this entry's type into each slot it covers, advancing
		 * to the next descriptor when a set's boundary is crossed. */
		while (begin != end) {
			unsigned char type;

			type = range_entry_tag(r);
			printk(MTRR_VERBOSE_LEVEL,
				"MTRR addr 0x%x-0x%x set to %d type @ %d\n",
				begin, begin + desc->step, type, type_index);
			if (type == MTRR_TYPE_WRBACK)
				type |= MTRR_FIXED_WRBACK_BITS;
			fixed_mtrr_types[type_index] = type;
			type_index++;
			begin += desc->step;
			if (begin == desc->end)
				desc++;
		}
	}
	fixed_mtrr_types_initialized = 1;
}
299
300static void commit_fixed_mtrrs(void)
301{
302 int i;
303 int j;
304 int msr_num;
305 int type_index;
306 /* 8 ranges per msr. */
307 msr_t fixed_msrs[NUM_FIXED_MTRRS];
308 unsigned long msr_index[NUM_FIXED_MTRRS];
309
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -0600310 fixed_mtrrs_expose_amd_rwdram();
311
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500312 memset(&fixed_msrs, 0, sizeof(fixed_msrs));
313
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500314 msr_num = 0;
315 type_index = 0;
316 for (i = 0; i < ARRAY_SIZE(fixed_mtrr_desc); i++) {
317 const struct fixed_mtrr_desc *desc;
318 int num_ranges;
319
320 desc = &fixed_mtrr_desc[i];
321 num_ranges = (desc->end - desc->begin) / desc->step;
322 for (j = 0; j < num_ranges; j += RANGES_PER_FIXED_MTRR) {
323 msr_index[msr_num] = desc->msr_index_base +
324 (j / RANGES_PER_FIXED_MTRR);
325 fixed_msrs[msr_num].lo |=
326 fixed_mtrr_types[type_index++] << 0;
327 fixed_msrs[msr_num].lo |=
328 fixed_mtrr_types[type_index++] << 8;
329 fixed_msrs[msr_num].lo |=
330 fixed_mtrr_types[type_index++] << 16;
331 fixed_msrs[msr_num].lo |=
332 fixed_mtrr_types[type_index++] << 24;
333 fixed_msrs[msr_num].hi |=
334 fixed_mtrr_types[type_index++] << 0;
335 fixed_msrs[msr_num].hi |=
336 fixed_mtrr_types[type_index++] << 8;
337 fixed_msrs[msr_num].hi |=
338 fixed_mtrr_types[type_index++] << 16;
339 fixed_msrs[msr_num].hi |=
340 fixed_mtrr_types[type_index++] << 24;
341 msr_num++;
342 }
343 }
344
Jacob Garber5b922722019-05-28 11:47:49 -0600345 /* Ensure that both arrays were fully initialized */
346 ASSERT(msr_num == NUM_FIXED_MTRRS)
347
Gabe Black7756fe72014-02-25 01:40:34 -0800348 for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500349 printk(BIOS_DEBUG, "MTRR: Fixed MSR 0x%lx 0x%08x%08x\n",
350 msr_index[i], fixed_msrs[i].hi, fixed_msrs[i].lo);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500351
Gabe Black7756fe72014-02-25 01:40:34 -0800352 disable_cache();
353 for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
354 wrmsr(msr_index[i], fixed_msrs[i]);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500355 enable_cache();
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -0600356 fixed_mtrrs_hide_amd_rwdram();
357
Eric Biedermanf8a2ddd2004-10-30 08:05:41 +0000358}
359
/* Compute and program the fixed MTRRs without setting the enable bits in
 * MTRR_DEF_TYPE_MSR; callers that want them active use x86_setup_fixed_mtrrs(). */
void x86_setup_fixed_mtrrs_no_enable(void)
{
	calc_fixed_mtrrs();
	commit_fixed_mtrrs();
}
Stefan Reinauer7f86ed12009-02-12 16:02:16 +0000365
/* Compute, program and enable the fixed MTRRs. */
void x86_setup_fixed_mtrrs(void)
{
	x86_setup_fixed_mtrrs_no_enable();

	printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
	enable_fixed_mtrr();
}
373
/* Image of one variable MTRR pair (IA32_MTRR_PHYSBASEn/PHYSMASKn). */
struct var_mtrr_regs {
	msr_t base;
	msr_t mask;
};

/* A complete variable-MTRR solution: the chosen default memory type and
 * the prepared MSR pairs ready to be committed to hardware. */
struct var_mtrr_solution {
	int mtrr_default_type;
	int num_used;	/* Number of valid entries in regs[]. */
	struct var_mtrr_regs regs[NUM_MTRR_STATIC_STORAGE];
};

/* Global storage for variable MTRR solution. */
static struct var_mtrr_solution mtrr_global_solution;

/* Working state threaded through the variable-MTRR calculation passes. */
struct var_mtrr_state {
	struct memranges *addr_space;	/* Address space being mapped. */
	int above4gb;		/* Non-zero: also map ranges above 4GiB. */
	int address_bits;	/* CPU physical address width. */
	int prepare_msrs;	/* Non-zero: fill regs[]; zero: count only. */
	int mtrr_index;		/* Next MTRR slot / running MTRR count. */
	int def_mtrr_type;	/* Default type assumed for this pass. */
	struct var_mtrr_regs *regs;	/* Output array when preparing MSRs. */
};
Aaron Durbin57686f82013-03-20 15:50:59 -0500397
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500398static void clear_var_mtrr(int index)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000399{
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600400 msr_t msr = { .lo = 0, .hi = 0 };
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500401
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600402 wrmsr(MTRR_PHYS_BASE(index), msr);
403 wrmsr(MTRR_PHYS_MASK(index), msr);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500404}
405
/* Encode one variable MTRR (base/mask MSR pair) for the given naturally
 * aligned range and store it in var_state->regs at the current index.
 * Addresses arrive in 4KiB range units and are converted back to physical
 * byte addresses here. Silently drops the range (after logging) when the
 * hardware has no MTRR slot left. */
static void prep_var_mtrr(struct var_mtrr_state *var_state,
			  uint32_t base, uint32_t size, int mtrr_type)
{
	struct var_mtrr_regs *regs;
	resource_t rbase;
	resource_t rsize;
	resource_t mask;

	/* Some variable MTRRs are attempted to be saved for the OS use.
	 * However, it's more important to try to map the full address space
	 * properly. */
	if (var_state->mtrr_index >= bios_mtrrs)
		printk(BIOS_WARNING, "Taking a reserved OS MTRR.\n");
	if (var_state->mtrr_index >= total_mtrrs) {
		printk(BIOS_ERR, "ERROR: Not enough MTRRs available! MTRR index is %d with %d MTRRs in total.\n",
			var_state->mtrr_index, total_mtrrs);
		return;
	}

	rbase = base;
	rsize = size;

	rbase = RANGE_TO_PHYS_ADDR(rbase);
	rsize = RANGE_TO_PHYS_ADDR(rsize);
	/* For a power-of-two size, -size yields the two's-complement bit
	 * pattern of the MTRR mask; truncate it to the CPU's physical
	 * address width below. */
	rsize = -rsize;

	mask = (1ULL << var_state->address_bits) - 1;
	rsize = rsize & mask;

	printk(BIOS_DEBUG, "MTRR: %d base 0x%016llx mask 0x%016llx type %d\n",
		var_state->mtrr_index, rbase, rsize, mtrr_type);

	regs = &var_state->regs[var_state->mtrr_index];

	/* PHYSBASE: base address with the memory type in the low byte. */
	regs->base.lo = rbase;
	regs->base.lo |= mtrr_type;
	regs->base.hi = rbase >> 32;

	/* PHYSMASK: size mask with the valid bit set. */
	regs->mask.lo = rsize;
	regs->mask.lo |= MTRR_PHYS_MASK_VALID;
	regs->mask.hi = rsize >> 32;
}
448
/* Greedily cover [base, base + size) with naturally aligned power-of-two
 * MTRR chunks, advancing var_state->mtrr_index per chunk. When
 * var_state->prepare_msrs is set, each chunk is also encoded into the MSR
 * images via prep_var_mtrr(); otherwise this only counts the MTRRs needed. */
static void calc_var_mtrr_range(struct var_mtrr_state *var_state,
				uint32_t base, uint32_t size, int mtrr_type)
{
	while (size != 0) {
		uint32_t addr_lsb;
		uint32_t size_msb;
		uint32_t mtrr_size;

		addr_lsb = fls(base);
		size_msb = fms(size);

		/* All MTRR entries need to have their base aligned to the mask
		 * size. The maximum size is calculated by a function of the
		 * min base bit set and maximum size bit set. */
		if (addr_lsb > size_msb)
			mtrr_size = 1 << size_msb;
		else
			mtrr_size = 1 << addr_lsb;

		if (var_state->prepare_msrs)
			prep_var_mtrr(var_state, base, mtrr_size, mtrr_type);

		size -= mtrr_size;
		base += mtrr_size;
		var_state->mtrr_index++;
	}
}
476
/* Find the cheapest (fewest-MTRR) alignment for the upper end of a WB
 * range, optionally counting the MTRRs needed to carve the added hole
 * back out as UC. Returns the best upper end for the hole, which may be
 * `hole` itself when no alignment saves MTRRs. All addresses are in 4KiB
 * range units; `limit` bounds how far the end may be aligned up. */
static uint32_t optimize_var_mtrr_hole(const uint32_t base,
				       const uint32_t hole,
				       const uint64_t limit,
				       const int carve_hole)
{
	/*
	 * With default type UC, we can potentially optimize a WB
	 * range with unaligned upper end, by aligning it up and
	 * carving the added "hole" out again.
	 *
	 * To optimize the upper end of the hole, we will test
	 * how many MTRRs calc_var_mtrr_range() will spend for any
	 * alignment of the hole's upper end.
	 *
	 * We take four parameters, the lower end of the WB range
	 * `base`, upper end of the WB range as start of the `hole`,
	 * a `limit` how far we may align the upper end of the hole
	 * up and a flag `carve_hole` whether we should count MTRRs
	 * for carving the hole out. We return the optimal upper end
	 * for the hole (which may be the same as the end of the WB
	 * range in case we don't gain anything by aligning up).
	 */

	const int dont_care = 0;	/* Type is irrelevant when only counting. */
	struct var_mtrr_state var_state = { 0, };

	unsigned int align, best_count;
	uint32_t best_end = hole;

	/* calculate MTRR count for the WB range alone (w/o a hole) */
	calc_var_mtrr_range(&var_state, base, hole - base, dont_care);
	best_count = var_state.mtrr_index;
	var_state.mtrr_index = 0;

	/* Try every alignment coarser than the hole's natural alignment. */
	for (align = fls(hole) + 1; align <= fms(hole); ++align) {
		const uint64_t hole_end = ALIGN_UP((uint64_t)hole, 1 << align);
		if (hole_end > limit)
			break;

		/* calculate MTRR count for this alignment */
		calc_var_mtrr_range(
			&var_state, base, hole_end - base, dont_care);
		if (carve_hole)
			calc_var_mtrr_range(
				&var_state, hole, hole_end - hole, dont_care);

		if (var_state.mtrr_index < best_count) {
			best_count = var_state.mtrr_index;
			best_end = hole_end;
		}
		var_state.mtrr_index = 0;
	}

	return best_end;
}
532
/* Count (or, with prepare_msrs set, encode) the variable MTRRs for one
 * range entry, possibly aligning a WB range's upper end up and carving
 * the excess back out as a UC "hole" when that spends fewer MTRRs. */
static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
				     struct range_entry *r)
{
	uint32_t a1, a2, b1, b2;
	int mtrr_type, carve_hole;

	/*
	 * Determine MTRRs based on the following algorithm for the given entry:
	 * +------------------+ b2 = ALIGN_UP(end)
	 * | 0 or more bytes  | <-- hole is carved out between b1 and b2
	 * +------------------+ a2 = b1 = original end
	 * |                  |
	 * +------------------+ a1 = begin
	 *
	 * Thus, there are up to 2 sub-ranges to configure variable MTRRs for.
	 */
	mtrr_type = range_entry_mtrr_type(r);

	a1 = range_entry_base_mtrr_addr(r);
	a2 = range_entry_end_mtrr_addr(r);

	/* The end address is within the first 1MiB. The fixed MTRRs take
	 * precedence over the variable ones. Therefore this range
	 * can be ignored. */
	if (a2 <= RANGE_1MB)
		return;

	/* Again, the fixed MTRRs take precedence so the beginning
	 * of the range can be set to 0 if it starts at or below 1MiB. */
	if (a1 <= RANGE_1MB)
		a1 = 0;

	/* If the range starts above 4GiB the processing is done. */
	if (!var_state->above4gb && a1 >= RANGE_4GB)
		return;

	/* Clip the upper address to 4GiB if addresses above 4GiB
	 * are not being processed. */
	if (!var_state->above4gb && a2 > RANGE_4GB)
		a2 = RANGE_4GB;

	b1 = a2;
	b2 = a2;
	carve_hole = 0;

	/* We only consider WB type ranges for hole-carving. */
	if (mtrr_type == MTRR_TYPE_WRBACK) {
		struct range_entry *next;
		uint64_t b2_limit;
		/*
		 * Depending on the type of the next range, there are three
		 * different situations to handle:
		 *
		 * 1. WB range is last in address space:
		 *    Aligning up, up to the next power of 2, may gain us
		 *    something.
		 *
		 * 2. The next range is of type UC:
		 *    We may align up, up to the _end_ of the next range. If
		 *    there is a gap between the current and the next range,
		 *    it would have been covered by the default type UC anyway.
		 *
		 * 3. The next range is not of type UC:
		 *    We may align up, up to the _base_ of the next range. This
		 *    may either be the end of the current range (if the next
		 *    range follows immediately) or the end of the gap between
		 *    the ranges.
		 */
		next = memranges_next_entry(var_state->addr_space, r);
		if (next == NULL) {
			b2_limit = ALIGN_UP((uint64_t)b1, 1 << fms(b1));
			/* If it's the last range above 4GiB, we won't carve
			   the hole out. If an OS wanted to move MMIO there,
			   it would have to override the MTRR setting using
			   PAT just like it would with WB as default type. */
			carve_hole = a1 < RANGE_4GB;
		} else if (range_entry_mtrr_type(next)
				== MTRR_TYPE_UNCACHEABLE) {
			b2_limit = range_entry_end_mtrr_addr(next);
			carve_hole = 1;
		} else {
			b2_limit = range_entry_base_mtrr_addr(next);
			carve_hole = 1;
		}
		b2 = optimize_var_mtrr_hole(a1, b1, b2_limit, carve_hole);
	}

	/* Map the (possibly extended) WB range, then carve out the UC hole. */
	calc_var_mtrr_range(var_state, a1, b2 - a1, mtrr_type);
	if (carve_hole && b2 != b1) {
		calc_var_mtrr_range(var_state, b1, b2 - b1,
				    MTRR_TYPE_UNCACHEABLE);
	}
}
626
/* Count how many variable MTRRs the address space would need with UC as
 * the default memory type versus with WB as the default. Results are
 * returned through the two out-parameters; no MSRs are prepared. */
static void __calc_var_mtrrs(struct memranges *addr_space,
			     int above4gb, int address_bits,
			     int *num_def_wb_mtrrs, int *num_def_uc_mtrrs)
{
	int wb_deftype_count;
	int uc_deftype_count;
	struct range_entry *r;
	struct var_mtrr_state var_state;

	/* The default MTRR cacheability type is determined by calculating
	 * the number of MTRRs required for each MTRR type as if it was the
	 * default. */
	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	var_state.prepare_msrs = 0;	/* Counting pass only. */

	wb_deftype_count = 0;
	uc_deftype_count = 0;

	/*
	 * For each range do 2 calculations:
	 *   1. UC as default type with possible holes at top of range.
	 *   2. WB as default.
	 * The lowest count is then used as default after totaling all
	 * MTRRs. UC takes precedence in the MTRR architecture. There-
	 * fore, only holes can be used when the type of the region is
	 * MTRR_TYPE_WRBACK with MTRR_TYPE_UNCACHEABLE as the default
	 * type.
	 */
	memranges_each_entry(r, var_state.addr_space) {
		int mtrr_type;

		mtrr_type = range_entry_mtrr_type(r);

		/* A range matching the default type needs no MTRR. */
		if (mtrr_type != MTRR_TYPE_UNCACHEABLE) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_UNCACHEABLE;
			calc_var_mtrrs_with_hole(&var_state, r);
			uc_deftype_count += var_state.mtrr_index;
		}

		if (mtrr_type != MTRR_TYPE_WRBACK) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_WRBACK;
			calc_var_mtrrs_with_hole(&var_state, r);
			wb_deftype_count += var_state.mtrr_index;
		}
	}
	*num_def_wb_mtrrs = wb_deftype_count;
	*num_def_uc_mtrrs = uc_deftype_count;
}
679
/* Choose the default memory type (WB or UC) that needs the fewest variable
 * MTRRs for the given address space. If both candidates exceed the MTRRs
 * available to the BIOS, demote all WRCOMB ranges to UC and recount.
 * Returns the selected MTRR_TYPE_* default. */
static int calc_var_mtrrs(struct memranges *addr_space,
			  int above4gb, int address_bits)
{
	int wb_deftype_count = 0;
	int uc_deftype_count = 0;

	__calc_var_mtrrs(addr_space, above4gb, address_bits, &wb_deftype_count,
			 &uc_deftype_count);

	if (wb_deftype_count > bios_mtrrs && uc_deftype_count > bios_mtrrs) {
		printk(BIOS_DEBUG, "MTRR: Removing WRCOMB type. "
		       "WB/UC MTRR counts: %d/%d > %d.\n",
		       wb_deftype_count, uc_deftype_count, bios_mtrrs);
		memranges_update_tag(addr_space, MTRR_TYPE_WRCOMB,
				     MTRR_TYPE_UNCACHEABLE);
		__calc_var_mtrrs(addr_space, above4gb, address_bits,
				 &wb_deftype_count, &uc_deftype_count);
	}

	printk(BIOS_DEBUG, "MTRR: default type WB/UC MTRR counts: %d/%d.\n",
	       wb_deftype_count, uc_deftype_count);

	if (wb_deftype_count < uc_deftype_count) {
		printk(BIOS_DEBUG, "MTRR: WB selected as default type.\n");
		return MTRR_TYPE_WRBACK;
	}
	printk(BIOS_DEBUG, "MTRR: UC selected as default type.\n");
	return MTRR_TYPE_UNCACHEABLE;
}
Kyösti Mälkkiffc1fb32012-07-11 14:40:19 +0300709
/* Encode the variable MTRR MSR pairs for every range whose type differs
 * from the chosen default, storing them in *sol. The hardware is not
 * touched here; commit_var_mtrrs() writes the solution out. */
static void prepare_var_mtrrs(struct memranges *addr_space, int def_type,
			      int above4gb, int address_bits,
			      struct var_mtrr_solution *sol)
{
	struct range_entry *r;
	struct var_mtrr_state var_state;

	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	/* Prepare the MSRs. */
	var_state.prepare_msrs = 1;
	var_state.mtrr_index = 0;
	var_state.def_mtrr_type = def_type;
	var_state.regs = &sol->regs[0];

	memranges_each_entry(r, var_state.addr_space) {
		/* Ranges of the default type need no MTRR. */
		if (range_entry_mtrr_type(r) == def_type)
			continue;
		calc_var_mtrrs_with_hole(&var_state, r);
	}

	/* Update the solution. */
	sol->num_used = var_state.mtrr_index;
}
735
Aaron Durbind9762f72017-06-12 12:48:38 -0500736static int commit_var_mtrrs(const struct var_mtrr_solution *sol)
Gabe Black7756fe72014-02-25 01:40:34 -0800737{
738 int i;
739
Aaron Durbind9762f72017-06-12 12:48:38 -0500740 if (sol->num_used > total_mtrrs) {
741 printk(BIOS_WARNING, "Not enough MTRRs: %d vs %d\n",
742 sol->num_used, total_mtrrs);
743 return -1;
744 }
745
Isaac Christensen81f90c52014-09-24 14:59:32 -0600746 /* Write out the variable MTRRs. */
Gabe Black7756fe72014-02-25 01:40:34 -0800747 disable_cache();
748 for (i = 0; i < sol->num_used; i++) {
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700749 wrmsr(MTRR_PHYS_BASE(i), sol->regs[i].base);
750 wrmsr(MTRR_PHYS_MASK(i), sol->regs[i].mask);
Gabe Black7756fe72014-02-25 01:40:34 -0800751 }
752 /* Clear the ones that are unused. */
753 for (; i < total_mtrrs; i++)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500754 clear_var_mtrr(i);
Isaac Christensen81f90c52014-09-24 14:59:32 -0600755 enable_var_mtrr(sol->mtrr_default_type);
Gabe Black7756fe72014-02-25 01:40:34 -0800756 enable_cache();
757
Aaron Durbind9762f72017-06-12 12:48:38 -0500758 return 0;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500759}
760
761void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
762{
Gabe Black7756fe72014-02-25 01:40:34 -0800763 static struct var_mtrr_solution *sol = NULL;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500764 struct memranges *addr_space;
765
766 addr_space = get_physical_address_space();
767
Gabe Black7756fe72014-02-25 01:40:34 -0800768 if (sol == NULL) {
Gabe Black7756fe72014-02-25 01:40:34 -0800769 sol = &mtrr_global_solution;
770 sol->mtrr_default_type =
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500771 calc_var_mtrrs(addr_space, !!above4gb, address_bits);
Gabe Black7756fe72014-02-25 01:40:34 -0800772 prepare_var_mtrrs(addr_space, sol->mtrr_default_type,
773 !!above4gb, address_bits, sol);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000774 }
Stefan Reinauer00093a82011-11-02 16:12:34 -0700775
Gabe Black7756fe72014-02-25 01:40:34 -0800776 commit_var_mtrrs(sol);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000777}
778
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100779void x86_setup_mtrrs(void)
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000780{
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100781 int address_size;
Aaron Durbine63be892016-03-07 16:05:36 -0600782
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000783 x86_setup_fixed_mtrrs();
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100784 address_size = cpu_phys_address_size();
Aaron Durbine63be892016-03-07 16:05:36 -0600785 printk(BIOS_DEBUG, "CPU physical address size: %d bits\n",
786 address_size);
787 /* Always handle addresses above 4GiB. */
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100788 x86_setup_var_mtrrs(address_size, 1);
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000789}
790
/* Detect the number of variable MTRRs this CPU provides, then perform
 * the full fixed + variable MTRR setup. */
void x86_setup_mtrrs_with_detect(void)
{
	detect_var_mtrrs();
	x86_setup_mtrrs();
}
796
Kyösti Mälkki38a8fb02014-06-30 13:48:18 +0300797void x86_mtrr_check(void)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000798{
799 /* Only Pentium Pro and later have MTRR */
800 msr_t msr;
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000801 printk(BIOS_DEBUG, "\nMTRR check\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000802
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700803 msr = rdmsr(MTRR_DEF_TYPE_MSR);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000804
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000805 printk(BIOS_DEBUG, "Fixed MTRRs : ");
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700806 if (msr.lo & MTRR_DEF_TYPE_FIX_EN)
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000807 printk(BIOS_DEBUG, "Enabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000808 else
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000809 printk(BIOS_DEBUG, "Disabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000810
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000811 printk(BIOS_DEBUG, "Variable MTRRs: ");
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700812 if (msr.lo & MTRR_DEF_TYPE_EN)
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000813 printk(BIOS_DEBUG, "Enabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000814 else
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000815 printk(BIOS_DEBUG, "Disabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000816
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000817 printk(BIOS_DEBUG, "\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000818
819 post_code(0x93);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000820}
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600821
/* Set when a temporary MTRR solution has been committed by
 * mtrr_use_temp_range(); tells remove_temp_solution() that the global
 * solution must be restored. */
static bool put_back_original_solution;
823
824void mtrr_use_temp_range(uintptr_t begin, size_t size, int type)
825{
826 const struct range_entry *r;
827 const struct memranges *orig;
828 struct var_mtrr_solution sol;
829 struct memranges addr_space;
830 const int above4gb = 1; /* Cover above 4GiB by default. */
831 int address_bits;
832
833 /* Make a copy of the original address space and tweak it with the
834 * provided range. */
835 memranges_init_empty(&addr_space, NULL, 0);
836 orig = get_physical_address_space();
837 memranges_each_entry(r, orig) {
838 unsigned long tag = range_entry_tag(r);
839
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600840 /* Remove any write combining MTRRs from the temporary
841 * solution as it just fragments the address space. */
842 if (tag == MTRR_TYPE_WRCOMB)
843 tag = MTRR_TYPE_UNCACHEABLE;
844
845 memranges_insert(&addr_space, range_entry_base(r),
846 range_entry_size(r), tag);
847 }
848
849 /* Place new range into the address space. */
850 memranges_insert(&addr_space, begin, size, type);
851
852 print_physical_address_space(&addr_space, "TEMPORARY");
853
854 /* Calculate a new solution with the updated address space. */
855 address_bits = cpu_phys_address_size();
856 memset(&sol, 0, sizeof(sol));
857 sol.mtrr_default_type =
858 calc_var_mtrrs(&addr_space, above4gb, address_bits);
859 prepare_var_mtrrs(&addr_space, sol.mtrr_default_type,
860 above4gb, address_bits, &sol);
Aaron Durbind9762f72017-06-12 12:48:38 -0500861
862 if (commit_var_mtrrs(&sol) < 0)
863 printk(BIOS_WARNING, "Unable to insert temporary MTRR range: 0x%016llx - 0x%016llx size 0x%08llx type %d\n",
864 (long long)begin, (long long)begin + size,
865 (long long)size, type);
866 else
867 put_back_original_solution = true;
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600868
869 memranges_teardown(&addr_space);
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600870}
871
/* Boot-state callback: restore the permanent MTRR solution if a
 * temporary range was installed via mtrr_use_temp_range(). */
static void remove_temp_solution(void *unused)
{
	if (put_back_original_solution)
		commit_var_mtrrs(&mtrr_global_solution);
}
877
/* Tear down any temporary MTRR solution before control leaves coreboot:
 * on entry to OS resume and on exit of payload load. */
BOOT_STATE_INIT_ENTRY(BS_OS_RESUME, BS_ON_ENTRY, remove_temp_solution, NULL);
BOOT_STATE_INIT_ENTRY(BS_PAYLOAD_LOAD, BS_ON_EXIT, remove_temp_solution, NULL);