blob: b26e31a1d4ac238cd8bbca593c56add683878114 [file] [log] [blame]
Eric Biedermanfcd5ace2004-10-14 19:29:29 +00001/*
Martin Rothd57ace22019-08-31 10:48:37 -06002 * This file is part of the coreboot project.
Eric Biedermanfcd5ace2004-10-14 19:29:29 +00003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
Martin Rothd57ace22019-08-31 10:48:37 -060014 * mtrr.c: setting MTRR to decent values for cache initialization on P6
15 * Derived from intel_set_mtrr in intel_subr.c and mtrr.c in linux kernel
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000016 *
Lee Leahyc5917072017-03-15 16:38:51 -070017 * Reference: Intel Architecture Software Developer's Manual, Volume 3: System
18 * Programming
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000019 */
Yinghai Lu13f1c2a2005-07-08 02:49:49 +000020
Eric Biedermanf8a2ddd2004-10-30 08:05:41 +000021#include <stddef.h>
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050022#include <string.h>
Aaron Durbinbebf6692013-04-24 20:59:43 -050023#include <bootstate.h>
Elyes HAOUASd26844c2019-06-21 07:31:40 +020024#include <commonlib/helpers.h>
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000025#include <console/console.h>
26#include <device/device.h>
Aaron Durbinca4f4b82014-02-08 15:41:52 -060027#include <device/pci_ids.h>
Aaron Durbinebf142a2013-03-29 16:23:23 -050028#include <cpu/cpu.h>
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000029#include <cpu/x86/msr.h>
30#include <cpu/x86/mtrr.h>
31#include <cpu/x86/cache.h>
Stefan Reinauer00093a82011-11-02 16:12:34 -070032#include <cpu/x86/lapic.h>
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050033#include <memrange.h>
Aaron Durbin57686f82013-03-20 15:50:59 -050034#include <cpu/amd/mtrr.h>
Richard Spiegelb28025a2019-02-20 11:00:19 -070035#include <assert.h>
Julius Wernercd49cce2019-03-05 16:53:33 -080036#if CONFIG(X86_AMD_FIXED_MTRRS)
Aaron Durbin57686f82013-03-20 15:50:59 -050037#define MTRR_FIXED_WRBACK_BITS (MTRR_READ_MEM | MTRR_WRITE_MEM)
38#else
39#define MTRR_FIXED_WRBACK_BITS 0
40#endif
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000041
Stefan Reinauerc00dfbc2012-04-03 16:24:37 -070042/* 2 MTRRS are reserved for the operating system */
43#define BIOS_MTRRS 6
44#define OS_MTRRS 2
45#define MTRRS (BIOS_MTRRS + OS_MTRRS)
Gabe Black7756fe72014-02-25 01:40:34 -080046/*
Isaac Christensen81f90c52014-09-24 14:59:32 -060047 * Static storage size for variable MTRRs. It's sized sufficiently large to
48 * handle different types of CPUs. Empirically, 16 variable MTRRs has not
Gabe Black7756fe72014-02-25 01:40:34 -080049 * yet been observed.
50 */
51#define NUM_MTRR_STATIC_STORAGE 16
Stefan Reinauerc00dfbc2012-04-03 16:24:37 -070052
53static int total_mtrrs = MTRRS;
54static int bios_mtrrs = BIOS_MTRRS;
55
/* Read the variable MTRR count from MTRR_CAP_MSR, clamp it to the static
 * storage available, and derive how many MTRRs remain for BIOS use after
 * reserving OS_MTRRS for the operating system. */
static void detect_var_mtrrs(void)
{
	msr_t msr;

	msr = rdmsr(MTRR_CAP_MSR);

	/* Bits 7:0 of MTRRcap hold the number of variable range registers. */
	total_mtrrs = msr.lo & 0xff;

	if (total_mtrrs > NUM_MTRR_STATIC_STORAGE) {
		printk(BIOS_WARNING,
			"MTRRs detected (%d) > NUM_MTRR_STATIC_STORAGE (%d)\n",
			total_mtrrs, NUM_MTRR_STATIC_STORAGE);
		total_mtrrs = NUM_MTRR_STATIC_STORAGE;
	}
	bios_mtrrs = total_mtrrs - OS_MTRRS;
}
72
/* Turn on both the global MTRR enable and the fixed-range MTRR enable bits
 * in the default-type MSR. */
void enable_fixed_mtrr(void)
{
	msr_t msr;

	msr = rdmsr(MTRR_DEF_TYPE_MSR);
	msr.lo |= MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN;
	wrmsr(MTRR_DEF_TYPE_MSR, msr);
}
81
/* On AMD parts, set SYSCFG.MtrrFixDramModEn so the RdMem/WrMem bits of the
 * fixed MTRRs become accessible. No-op unless X86_AMD_FIXED_MTRRS is set. */
void fixed_mtrrs_expose_amd_rwdram(void)
{
	msr_t syscfg;

	if (!CONFIG(X86_AMD_FIXED_MTRRS))
		return;

	syscfg = rdmsr(SYSCFG_MSR);
	syscfg.lo |= SYSCFG_MSR_MtrrFixDramModEn;
	wrmsr(SYSCFG_MSR, syscfg);
}
93
/* Counterpart to fixed_mtrrs_expose_amd_rwdram(): clear MtrrFixDramModEn so
 * the fixed MTRR RdMem/WrMem bits are hidden again. No-op unless
 * X86_AMD_FIXED_MTRRS is set. */
void fixed_mtrrs_hide_amd_rwdram(void)
{
	msr_t syscfg;

	if (!CONFIG(X86_AMD_FIXED_MTRRS))
		return;

	syscfg = rdmsr(SYSCFG_MSR);
	syscfg.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
	wrmsr(SYSCFG_MSR, syscfg);
}
105
/* Enable variable MTRRs and program the default memory type (the type used
 * for any address not covered by an MTRR). */
static void enable_var_mtrr(unsigned char deftype)
{
	msr_t msr;

	msr = rdmsr(MTRR_DEF_TYPE_MSR);
	/* Replace the current default type (low byte) with the given one. */
	msr.lo &= ~0xff;
	msr.lo |= MTRR_DEF_TYPE_EN | deftype;
	wrmsr(MTRR_DEF_TYPE_MSR, msr);
}
115
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500116#define MTRR_VERBOSE_LEVEL BIOS_NEVER
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000117
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500118/* MTRRs are at a 4KiB granularity. Therefore all address calculations can
119 * be done with 32-bit numbers. This allows for the MTRR code to handle
120 * up to 2^44 bytes (16 TiB) of address space. */
121#define RANGE_SHIFT 12
122#define ADDR_SHIFT_TO_RANGE_SHIFT(x) \
123 (((x) > RANGE_SHIFT) ? ((x) - RANGE_SHIFT) : RANGE_SHIFT)
124#define PHYS_TO_RANGE_ADDR(x) ((x) >> RANGE_SHIFT)
125#define RANGE_TO_PHYS_ADDR(x) (((resource_t)(x)) << RANGE_SHIFT)
126#define NUM_FIXED_MTRRS (NUM_FIXED_RANGES / RANGES_PER_FIXED_MTRR)
127
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500128/* Helpful constants. */
129#define RANGE_1MB PHYS_TO_RANGE_ADDR(1 << 20)
130#define RANGE_4GB (1 << (ADDR_SHIFT_TO_RANGE_SHIFT(32)))
131
Aaron Durbine3834422013-03-28 20:48:51 -0500132#define MTRR_ALGO_SHIFT (8)
133#define MTRR_TAG_MASK ((1 << MTRR_ALGO_SHIFT) - 1)
Aaron Durbine3834422013-03-28 20:48:51 -0500134
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500135static inline uint32_t range_entry_base_mtrr_addr(struct range_entry *r)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000136{
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500137 return PHYS_TO_RANGE_ADDR(range_entry_base(r));
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000138}
139
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500140static inline uint32_t range_entry_end_mtrr_addr(struct range_entry *r)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000141{
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500142 return PHYS_TO_RANGE_ADDR(range_entry_end(r));
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000143}
144
Aaron Durbine3834422013-03-28 20:48:51 -0500145static inline int range_entry_mtrr_type(struct range_entry *r)
146{
147 return range_entry_tag(r) & MTRR_TAG_MASK;
148}
149
Aaron Durbinca4f4b82014-02-08 15:41:52 -0600150static int filter_vga_wrcomb(struct device *dev, struct resource *res)
151{
152 /* Only handle PCI devices. */
153 if (dev->path.type != DEVICE_PATH_PCI)
154 return 0;
155
156 /* Only handle VGA class devices. */
157 if (((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA))
158 return 0;
159
160 /* Add resource as write-combining in the address space. */
161 return 1;
162}
163
/* Dump every entry of the given physical address space at BIOS_DEBUG level.
 * 'identifier' optionally prefixes the header line and may be NULL. */
static void print_physical_address_space(const struct memranges *addr_space,
					 const char *identifier)
{
	const struct range_entry *r;

	if (identifier)
		printk(BIOS_DEBUG, "MTRR: %s Physical address space:\n",
			identifier);
	else
		printk(BIOS_DEBUG, "MTRR: Physical address space:\n");

	memranges_each_entry(r, addr_space)
		printk(BIOS_DEBUG,
			"0x%016llx - 0x%016llx size 0x%08llx type %ld\n",
			range_entry_base(r), range_entry_end(r),
			range_entry_size(r), range_entry_tag(r));
}
181
/* Build (on first call) and return the system physical address space tagged
 * with MTRR memory types. Cacheable resources become WRBACK, uncacheable
 * resources override them as UNCACHEABLE, prefetchable VGA resources become
 * WRCOMB, and any holes below 4GiB are filled as UNCACHEABLE. The result is
 * cached in static storage and reused on subsequent calls. */
static struct memranges *get_physical_address_space(void)
{
	static struct memranges *addr_space;
	static struct memranges addr_space_storage;

	/* In order to handle some chipsets not being able to pre-determine
	 * uncacheable ranges, such as graphics memory, at resource insertion
	 * time remove uncacheable regions from the cacheable ones. */
	if (addr_space == NULL) {
		unsigned long mask;
		unsigned long match;

		addr_space = &addr_space_storage;

		mask = IORESOURCE_CACHEABLE;
		/* Collect cacheable and uncacheable address ranges. The
		 * uncacheable regions take precedence over the cacheable
		 * regions. */
		memranges_init(addr_space, mask, mask, MTRR_TYPE_WRBACK);
		memranges_add_resources(addr_space, mask, 0,
					MTRR_TYPE_UNCACHEABLE);

		/* Handle any write combining resources. Only prefetchable
		 * resources are appropriate for this MTRR type. */
		match = IORESOURCE_PREFETCH;
		mask |= match;
		memranges_add_resources_filter(addr_space, mask, match,
				MTRR_TYPE_WRCOMB, filter_vga_wrcomb);

		/* The address space below 4GiB is special. It needs to be
		 * covered entirely by range entries so that MTRR calculations
		 * can be properly done for the full 32-bit address space.
		 * Therefore, ensure holes are filled up to 4GiB as
		 * uncacheable */
		memranges_fill_holes_up_to(addr_space,
					   RANGE_TO_PHYS_ADDR(RANGE_4GB),
					   MTRR_TYPE_UNCACHEABLE);

		print_physical_address_space(addr_space, NULL);
	}

	return addr_space;
}
225
/* Fixed MTRR descriptor. This structure defines the step size and begin
 * and end (exclusive) address covered by a set of fixed MTRR MSRs.
 * It also describes the offset in byte intervals to store the calculated MTRR
 * type in an array. */
struct fixed_mtrr_desc {
	uint32_t begin;		/* First address covered, in range units. */
	uint32_t end;		/* End address (exclusive), in range units. */
	uint32_t step;		/* Size of one fixed range within the set. */
	int range_index;	/* Starting offset into fixed_mtrr_types[]. */
	int msr_index_base;	/* MSR number of the first MSR in the set. */
};

/* Shared MTRR calculations. Can be reused by APs. */
static uint8_t fixed_mtrr_types[NUM_FIXED_RANGES];

/* Fixed MTRR descriptors: 64KiB ranges below 512KiB, 16KiB ranges up to
 * 768KiB, and 4KiB ranges up to 1MiB, per the x86 fixed MTRR layout. */
static const struct fixed_mtrr_desc fixed_mtrr_desc[] = {
	{ PHYS_TO_RANGE_ADDR(0x000000), PHYS_TO_RANGE_ADDR(0x080000),
	  PHYS_TO_RANGE_ADDR(64 * 1024), 0, MTRR_FIX_64K_00000 },
	{ PHYS_TO_RANGE_ADDR(0x080000), PHYS_TO_RANGE_ADDR(0x0C0000),
	  PHYS_TO_RANGE_ADDR(16 * 1024), 8, MTRR_FIX_16K_80000 },
	{ PHYS_TO_RANGE_ADDR(0x0C0000), PHYS_TO_RANGE_ADDR(0x100000),
	  PHYS_TO_RANGE_ADDR(4 * 1024), 24, MTRR_FIX_4K_C0000 },
};
250
/* Walk the physical address space and fill fixed_mtrr_types[] with the MTRR
 * type of each fixed range below 1MiB. Runs once; the result is shared so
 * APs can reuse it via commit_fixed_mtrrs(). */
static void calc_fixed_mtrrs(void)
{
	static int fixed_mtrr_types_initialized;
	struct memranges *phys_addr_space;
	struct range_entry *r;
	const struct fixed_mtrr_desc *desc;
	const struct fixed_mtrr_desc *last_desc;
	uint32_t begin;
	uint32_t end;
	int type_index;

	if (fixed_mtrr_types_initialized)
		return;

	phys_addr_space = get_physical_address_space();

	/* Set all fixed ranges to uncacheable first. */
	memset(&fixed_mtrr_types[0], MTRR_TYPE_UNCACHEABLE, NUM_FIXED_RANGES);

	desc = &fixed_mtrr_desc[0];
	last_desc = &fixed_mtrr_desc[ARRAY_SIZE(fixed_mtrr_desc) - 1];

	memranges_each_entry(r, phys_addr_space) {
		begin = range_entry_base_mtrr_addr(r);
		end = range_entry_end_mtrr_addr(r);

		/* Entries are sorted; once past 1MiB nothing else applies. */
		if (begin >= last_desc->end)
			break;

		/* Clip the entry to the fixed MTRR coverage (first 1MiB). */
		if (end > last_desc->end)
			end = last_desc->end;

		/* Get to the correct fixed mtrr descriptor. */
		while (begin >= desc->end)
			desc++;

		type_index = desc->range_index;
		type_index += (begin - desc->begin) / desc->step;

		/* Record the type for each step-sized slot in the entry,
		 * advancing to the next descriptor at set boundaries. */
		while (begin != end) {
			unsigned char type;

			type = range_entry_tag(r);
			printk(MTRR_VERBOSE_LEVEL,
				"MTRR addr 0x%x-0x%x set to %d type @ %d\n",
				begin, begin + desc->step, type, type_index);
			if (type == MTRR_TYPE_WRBACK)
				type |= MTRR_FIXED_WRBACK_BITS;
			fixed_mtrr_types[type_index] = type;
			type_index++;
			begin += desc->step;
			if (begin == desc->end)
				desc++;
		}
	}
	fixed_mtrr_types_initialized = 1;
}
308
309static void commit_fixed_mtrrs(void)
310{
311 int i;
312 int j;
313 int msr_num;
314 int type_index;
315 /* 8 ranges per msr. */
316 msr_t fixed_msrs[NUM_FIXED_MTRRS];
317 unsigned long msr_index[NUM_FIXED_MTRRS];
318
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -0600319 fixed_mtrrs_expose_amd_rwdram();
320
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500321 memset(&fixed_msrs, 0, sizeof(fixed_msrs));
322
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500323 msr_num = 0;
324 type_index = 0;
325 for (i = 0; i < ARRAY_SIZE(fixed_mtrr_desc); i++) {
326 const struct fixed_mtrr_desc *desc;
327 int num_ranges;
328
329 desc = &fixed_mtrr_desc[i];
330 num_ranges = (desc->end - desc->begin) / desc->step;
331 for (j = 0; j < num_ranges; j += RANGES_PER_FIXED_MTRR) {
332 msr_index[msr_num] = desc->msr_index_base +
333 (j / RANGES_PER_FIXED_MTRR);
334 fixed_msrs[msr_num].lo |=
335 fixed_mtrr_types[type_index++] << 0;
336 fixed_msrs[msr_num].lo |=
337 fixed_mtrr_types[type_index++] << 8;
338 fixed_msrs[msr_num].lo |=
339 fixed_mtrr_types[type_index++] << 16;
340 fixed_msrs[msr_num].lo |=
341 fixed_mtrr_types[type_index++] << 24;
342 fixed_msrs[msr_num].hi |=
343 fixed_mtrr_types[type_index++] << 0;
344 fixed_msrs[msr_num].hi |=
345 fixed_mtrr_types[type_index++] << 8;
346 fixed_msrs[msr_num].hi |=
347 fixed_mtrr_types[type_index++] << 16;
348 fixed_msrs[msr_num].hi |=
349 fixed_mtrr_types[type_index++] << 24;
350 msr_num++;
351 }
352 }
353
Jacob Garber5b922722019-05-28 11:47:49 -0600354 /* Ensure that both arrays were fully initialized */
355 ASSERT(msr_num == NUM_FIXED_MTRRS)
356
Gabe Black7756fe72014-02-25 01:40:34 -0800357 for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500358 printk(BIOS_DEBUG, "MTRR: Fixed MSR 0x%lx 0x%08x%08x\n",
359 msr_index[i], fixed_msrs[i].hi, fixed_msrs[i].lo);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500360
Gabe Black7756fe72014-02-25 01:40:34 -0800361 disable_cache();
362 for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
363 wrmsr(msr_index[i], fixed_msrs[i]);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500364 enable_cache();
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -0600365 fixed_mtrrs_hide_amd_rwdram();
366
Eric Biedermanf8a2ddd2004-10-30 08:05:41 +0000367}
368
/* Compute and write the fixed MTRRs without enabling them; the caller is
 * responsible for calling enable_fixed_mtrr() afterwards. */
void x86_setup_fixed_mtrrs_no_enable(void)
{
	calc_fixed_mtrrs();
	commit_fixed_mtrrs();
}
Stefan Reinauer7f86ed12009-02-12 16:02:16 +0000374
/* Compute, write, and enable the fixed MTRRs. */
void x86_setup_fixed_mtrrs(void)
{
	x86_setup_fixed_mtrrs_no_enable();

	printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
	enable_fixed_mtrr();
}
382
/* One variable MTRR register pair as written to hardware. */
struct var_mtrr_regs {
	msr_t base;	/* MTRR_PHYS_BASE: base address and memory type. */
	msr_t mask;	/* MTRR_PHYS_MASK: size mask and valid bit. */
};

/* A complete variable MTRR configuration ready to be committed. */
struct var_mtrr_solution {
	int mtrr_default_type;	/* Default memory type for uncovered space. */
	int num_used;		/* Number of valid entries in regs[]. */
	struct var_mtrr_regs regs[NUM_MTRR_STATIC_STORAGE];
};

/* Global storage for variable MTRR solution. */
static struct var_mtrr_solution mtrr_global_solution;
396
/* Working state threaded through the variable MTRR calculations. */
struct var_mtrr_state {
	struct memranges *addr_space;	/* Address space being covered. */
	int above4gb;		/* Non-zero: also cover ranges above 4GiB. */
	int address_bits;	/* Physical address width of the CPU. */
	int prepare_msrs;	/* Non-zero: fill regs[], else only count. */
	int mtrr_index;		/* Next MTRR slot / running MTRR count. */
	int def_mtrr_type;	/* Default type assumed for this pass. */
	struct var_mtrr_regs *regs;	/* Output register pairs. */
};
Aaron Durbin57686f82013-03-20 15:50:59 -0500406
/* Disable one variable MTRR by zeroing its base and mask MSRs (clearing the
 * mask's valid bit). */
static void clear_var_mtrr(int index)
{
	msr_t msr = { .lo = 0, .hi = 0 };

	wrmsr(MTRR_PHYS_BASE(index), msr);
	wrmsr(MTRR_PHYS_MASK(index), msr);
}
414
/* Encode one variable MTRR (base/mask register pair) for the given range
 * and memory type into var_state->regs at the current mtrr_index.
 * 'base' and 'size' are in MTRR range units (4KiB granularity). */
static void prep_var_mtrr(struct var_mtrr_state *var_state,
			  uint32_t base, uint32_t size, int mtrr_type)
{
	struct var_mtrr_regs *regs;
	resource_t rbase;
	resource_t rsize;
	resource_t mask;

	/* Some variable MTRRs are attempted to be saved for the OS use.
	 * However, it's more important to try to map the full address space
	 * properly. */
	if (var_state->mtrr_index >= bios_mtrrs)
		printk(BIOS_WARNING, "Taking a reserved OS MTRR.\n");
	if (var_state->mtrr_index >= total_mtrrs) {
		printk(BIOS_ERR, "ERROR: Not enough MTRRs available! MTRR index is %d with %d MTRRs in total.\n",
			var_state->mtrr_index, total_mtrrs);
		return;
	}

	rbase = base;
	rsize = size;

	/* Convert range units back to physical addresses. */
	rbase = RANGE_TO_PHYS_ADDR(rbase);
	rsize = RANGE_TO_PHYS_ADDR(rsize);
	/* The MTRR mask is the two's complement of the (power-of-two) size,
	 * truncated to the CPU's physical address width. */
	rsize = -rsize;

	mask = (1ULL << var_state->address_bits) - 1;
	rsize = rsize & mask;

	printk(BIOS_DEBUG, "MTRR: %d base 0x%016llx mask 0x%016llx type %d\n",
		var_state->mtrr_index, rbase, rsize, mtrr_type);

	regs = &var_state->regs[var_state->mtrr_index];

	/* Base register: physical base with the memory type in the low bits. */
	regs->base.lo = rbase;
	regs->base.lo |= mtrr_type;
	regs->base.hi = rbase >> 32;

	/* Mask register: size mask with the valid bit set. */
	regs->mask.lo = rsize;
	regs->mask.lo |= MTRR_PHYS_MASK_VALID;
	regs->mask.hi = rsize >> 32;
}
457
458static void calc_var_mtrr_range(struct var_mtrr_state *var_state,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700459 uint32_t base, uint32_t size, int mtrr_type)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500460{
461 while (size != 0) {
462 uint32_t addr_lsb;
463 uint32_t size_msb;
464 uint32_t mtrr_size;
465
466 addr_lsb = fls(base);
467 size_msb = fms(size);
468
469 /* All MTRR entries need to have their base aligned to the mask
470 * size. The maximum size is calculated by a function of the
471 * min base bit set and maximum size bit set. */
472 if (addr_lsb > size_msb)
473 mtrr_size = 1 << size_msb;
474 else
475 mtrr_size = 1 << addr_lsb;
476
Gabe Black7756fe72014-02-25 01:40:34 -0800477 if (var_state->prepare_msrs)
478 prep_var_mtrr(var_state, base, mtrr_size, mtrr_type);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500479
480 size -= mtrr_size;
481 base += mtrr_size;
482 var_state->mtrr_index++;
483 }
484}
485
static uint32_t optimize_var_mtrr_hole(const uint32_t base,
				       const uint32_t hole,
				       const uint64_t limit,
				       const int carve_hole)
{
	/*
	 * With default type UC, we can potentially optimize a WB
	 * range with unaligned upper end, by aligning it up and
	 * carving the added "hole" out again.
	 *
	 * To optimize the upper end of the hole, we will test
	 * how many MTRRs calc_var_mtrr_range() will spend for any
	 * alignment of the hole's upper end.
	 *
	 * We take four parameters, the lower end of the WB range
	 * `base`, upper end of the WB range as start of the `hole`,
	 * a `limit` how far we may align the upper end of the hole
	 * up and a flag `carve_hole` whether we should count MTRRs
	 * for carving the hole out. We return the optimal upper end
	 * for the hole (which may be the same as the end of the WB
	 * range in case we don't gain anything by aligning up).
	 */

	/* Counting pass only: prepare_msrs stays 0 in the zeroed state. */
	const int dont_care = 0;
	struct var_mtrr_state var_state = { 0, };

	unsigned int align, best_count;
	uint32_t best_end = hole;

	/* calculate MTRR count for the WB range alone (w/o a hole) */
	calc_var_mtrr_range(&var_state, base, hole - base, dont_care);
	best_count = var_state.mtrr_index;
	var_state.mtrr_index = 0;

	/* Try every alignment above the hole's natural alignment, up to its
	 * highest set bit.
	 * NOTE(review): `1 << align` is an int shift; for align == 31 this
	 * would overflow int — confirm range addresses here never reach
	 * bit 31 (would require > 8TiB of address space). */
	for (align = fls(hole) + 1; align <= fms(hole); ++align) {
		const uint64_t hole_end = ALIGN_UP((uint64_t)hole, 1 << align);
		if (hole_end > limit)
			break;

		/* calculate MTRR count for this alignment */
		calc_var_mtrr_range(
			&var_state, base, hole_end - base, dont_care);
		if (carve_hole)
			calc_var_mtrr_range(
				&var_state, hole, hole_end - hole, dont_care);

		if (var_state.mtrr_index < best_count) {
			best_count = var_state.mtrr_index;
			best_end = hole_end;
		}
		var_state.mtrr_index = 0;
	}

	return best_end;
}
541
/* Count (or prepare, depending on var_state->prepare_msrs) the variable
 * MTRRs needed to cover one range entry, optionally carving a UC "hole"
 * above a WB range when that lowers the total MTRR count. */
static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
				     struct range_entry *r)
{
	uint32_t a1, a2, b1, b2;
	int mtrr_type, carve_hole;

	/*
	 * Determine MTRRs based on the following algorithm for the given entry:
	 * +------------------+ b2 = ALIGN_UP(end)
	 * | 0 or more bytes  | <-- hole is carved out between b1 and b2
	 * +------------------+ a2 = b1 = original end
	 * |                  |
	 * +------------------+ a1 = begin
	 *
	 * Thus, there are up to 2 sub-ranges to configure variable MTRRs for.
	 */
	mtrr_type = range_entry_mtrr_type(r);

	a1 = range_entry_base_mtrr_addr(r);
	a2 = range_entry_end_mtrr_addr(r);

	/* The end address is within the first 1MiB. The fixed MTRRs take
	 * precedence over the variable ones. Therefore this range
	 * can be ignored. */
	if (a2 <= RANGE_1MB)
		return;

	/* Again, the fixed MTRRs take precedence so the beginning
	 * of the range can be set to 0 if it starts at or below 1MiB. */
	if (a1 <= RANGE_1MB)
		a1 = 0;

	/* If the range starts above 4GiB the processing is done. */
	if (!var_state->above4gb && a1 >= RANGE_4GB)
		return;

	/* Clip the upper address to 4GiB if addresses above 4GiB
	 * are not being processed. */
	if (!var_state->above4gb && a2 > RANGE_4GB)
		a2 = RANGE_4GB;

	b1 = a2;
	b2 = a2;
	carve_hole = 0;

	/* We only consider WB type ranges for hole-carving. */
	if (mtrr_type == MTRR_TYPE_WRBACK) {
		struct range_entry *next;
		uint64_t b2_limit;
		/*
		 * Depending on the type of the next range, there are three
		 * different situations to handle:
		 *
		 * 1. WB range is last in address space:
		 *    Aligning up, up to the next power of 2, may gain us
		 *    something.
		 *
		 * 2. The next range is of type UC:
		 *    We may align up, up to the _end_ of the next range. If
		 *    there is a gap between the current and the next range,
		 *    it would have been covered by the default type UC anyway.
		 *
		 * 3. The next range is not of type UC:
		 *    We may align up, up to the _base_ of the next range. This
		 *    may either be the end of the current range (if the next
		 *    range follows immediately) or the end of the gap between
		 *    the ranges.
		 */
		next = memranges_next_entry(var_state->addr_space, r);
		if (next == NULL) {
			b2_limit = ALIGN_UP((uint64_t)b1, 1 << fms(b1));
			/* If it's the last range above 4GiB, we won't carve
			   the hole out. If an OS wanted to move MMIO there,
			   it would have to override the MTRR setting using
			   PAT just like it would with WB as default type. */
			carve_hole = a1 < RANGE_4GB;
		} else if (range_entry_mtrr_type(next)
				== MTRR_TYPE_UNCACHEABLE) {
			b2_limit = range_entry_end_mtrr_addr(next);
			carve_hole = 1;
		} else {
			b2_limit = range_entry_base_mtrr_addr(next);
			carve_hole = 1;
		}
		b2 = optimize_var_mtrr_hole(a1, b1, b2_limit, carve_hole);
	}

	/* Cover the (possibly extended) range, then carve the hole back
	 * out as UC if extending actually happened. */
	calc_var_mtrr_range(var_state, a1, b2 - a1, mtrr_type);
	if (carve_hole && b2 != b1) {
		calc_var_mtrr_range(var_state, b1, b2 - b1,
				    MTRR_TYPE_UNCACHEABLE);
	}
}
635
/* Count how many variable MTRRs would be required to cover the address
 * space with WB as the default type and with UC as the default type,
 * returning both counts through the out-parameters. */
static void __calc_var_mtrrs(struct memranges *addr_space,
			     int above4gb, int address_bits,
			     int *num_def_wb_mtrrs, int *num_def_uc_mtrrs)
{
	int wb_deftype_count;
	int uc_deftype_count;
	struct range_entry *r;
	struct var_mtrr_state var_state;

	/* The default MTRR cacheability type is determined by calculating
	 * the number of MTRRs required for each MTRR type as if it was the
	 * default. */
	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	var_state.prepare_msrs = 0;

	wb_deftype_count = 0;
	uc_deftype_count = 0;

	/*
	 * For each range do 2 calculations:
	 *   1. UC as default type with possible holes at top of range.
	 *   2. WB as default.
	 * The lowest count is then used as default after totaling all
	 * MTRRs. UC takes precedence in the MTRR architecture. There-
	 * fore, only holes can be used when the type of the region is
	 * MTRR_TYPE_WRBACK with MTRR_TYPE_UNCACHEABLE as the default
	 * type.
	 */
	memranges_each_entry(r, var_state.addr_space) {
		int mtrr_type;

		mtrr_type = range_entry_mtrr_type(r);

		/* A range matching the default type needs no MTRR, so only
		 * count it for the default types it does not match. */
		if (mtrr_type != MTRR_TYPE_UNCACHEABLE) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_UNCACHEABLE;
			calc_var_mtrrs_with_hole(&var_state, r);
			uc_deftype_count += var_state.mtrr_index;
		}

		if (mtrr_type != MTRR_TYPE_WRBACK) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_WRBACK;
			calc_var_mtrrs_with_hole(&var_state, r);
			wb_deftype_count += var_state.mtrr_index;
		}
	}
	*num_def_wb_mtrrs = wb_deftype_count;
	*num_def_uc_mtrrs = uc_deftype_count;
}
688
/* Choose the default MTRR memory type (WB or UC) that requires the fewest
 * variable MTRRs. If neither fits in the BIOS budget, demote WRCOMB ranges
 * to UC and recount before deciding. */
static int calc_var_mtrrs(struct memranges *addr_space,
			  int above4gb, int address_bits)
{
	int wb_deftype_count = 0;
	int uc_deftype_count = 0;

	__calc_var_mtrrs(addr_space, above4gb, address_bits, &wb_deftype_count,
			 &uc_deftype_count);

	if (wb_deftype_count > bios_mtrrs && uc_deftype_count > bios_mtrrs) {
		printk(BIOS_DEBUG, "MTRR: Removing WRCOMB type. "
			"WB/UC MTRR counts: %d/%d > %d.\n",
			wb_deftype_count, uc_deftype_count, bios_mtrrs);
		memranges_update_tag(addr_space, MTRR_TYPE_WRCOMB,
				     MTRR_TYPE_UNCACHEABLE);
		__calc_var_mtrrs(addr_space, above4gb, address_bits,
				 &wb_deftype_count, &uc_deftype_count);
	}

	printk(BIOS_DEBUG, "MTRR: default type WB/UC MTRR counts: %d/%d.\n",
		wb_deftype_count, uc_deftype_count);

	if (wb_deftype_count < uc_deftype_count) {
		printk(BIOS_DEBUG, "MTRR: WB selected as default type.\n");
		return MTRR_TYPE_WRBACK;
	}
	printk(BIOS_DEBUG, "MTRR: UC selected as default type.\n");
	return MTRR_TYPE_UNCACHEABLE;
}
Kyösti Mälkkiffc1fb32012-07-11 14:40:19 +0300718
Gabe Black7756fe72014-02-25 01:40:34 -0800719static void prepare_var_mtrrs(struct memranges *addr_space, int def_type,
720 int above4gb, int address_bits,
721 struct var_mtrr_solution *sol)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500722{
Aaron Durbine3834422013-03-28 20:48:51 -0500723 struct range_entry *r;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500724 struct var_mtrr_state var_state;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500725
726 var_state.addr_space = addr_space;
727 var_state.above4gb = above4gb;
728 var_state.address_bits = address_bits;
Gabe Black7756fe72014-02-25 01:40:34 -0800729 /* Prepare the MSRs. */
730 var_state.prepare_msrs = 1;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500731 var_state.mtrr_index = 0;
732 var_state.def_mtrr_type = def_type;
Gabe Black7756fe72014-02-25 01:40:34 -0800733 var_state.regs = &sol->regs[0];
Aaron Durbine3834422013-03-28 20:48:51 -0500734
735 memranges_each_entry(r, var_state.addr_space) {
736 if (range_entry_mtrr_type(r) == def_type)
737 continue;
Nico Huber64f0bcb2017-10-07 16:37:04 +0200738 calc_var_mtrrs_with_hole(&var_state, r);
Aaron Durbine3834422013-03-28 20:48:51 -0500739 }
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500740
Gabe Black7756fe72014-02-25 01:40:34 -0800741 /* Update the solution. */
742 sol->num_used = var_state.mtrr_index;
743}
744
Aaron Durbind9762f72017-06-12 12:48:38 -0500745static int commit_var_mtrrs(const struct var_mtrr_solution *sol)
Gabe Black7756fe72014-02-25 01:40:34 -0800746{
747 int i;
748
Aaron Durbind9762f72017-06-12 12:48:38 -0500749 if (sol->num_used > total_mtrrs) {
750 printk(BIOS_WARNING, "Not enough MTRRs: %d vs %d\n",
751 sol->num_used, total_mtrrs);
752 return -1;
753 }
754
Isaac Christensen81f90c52014-09-24 14:59:32 -0600755 /* Write out the variable MTRRs. */
Gabe Black7756fe72014-02-25 01:40:34 -0800756 disable_cache();
757 for (i = 0; i < sol->num_used; i++) {
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700758 wrmsr(MTRR_PHYS_BASE(i), sol->regs[i].base);
759 wrmsr(MTRR_PHYS_MASK(i), sol->regs[i].mask);
Gabe Black7756fe72014-02-25 01:40:34 -0800760 }
761 /* Clear the ones that are unused. */
762 for (; i < total_mtrrs; i++)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500763 clear_var_mtrr(i);
Isaac Christensen81f90c52014-09-24 14:59:32 -0600764 enable_var_mtrr(sol->mtrr_default_type);
Gabe Black7756fe72014-02-25 01:40:34 -0800765 enable_cache();
766
Aaron Durbind9762f72017-06-12 12:48:38 -0500767 return 0;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500768}
769
770void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
771{
Gabe Black7756fe72014-02-25 01:40:34 -0800772 static struct var_mtrr_solution *sol = NULL;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500773 struct memranges *addr_space;
774
775 addr_space = get_physical_address_space();
776
Gabe Black7756fe72014-02-25 01:40:34 -0800777 if (sol == NULL) {
Gabe Black7756fe72014-02-25 01:40:34 -0800778 sol = &mtrr_global_solution;
779 sol->mtrr_default_type =
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500780 calc_var_mtrrs(addr_space, !!above4gb, address_bits);
Gabe Black7756fe72014-02-25 01:40:34 -0800781 prepare_var_mtrrs(addr_space, sol->mtrr_default_type,
782 !!above4gb, address_bits, sol);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000783 }
Stefan Reinauer00093a82011-11-02 16:12:34 -0700784
Gabe Black7756fe72014-02-25 01:40:34 -0800785 commit_var_mtrrs(sol);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000786}
787
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100788void x86_setup_mtrrs(void)
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000789{
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100790 int address_size;
Aaron Durbine63be892016-03-07 16:05:36 -0600791
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000792 x86_setup_fixed_mtrrs();
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100793 address_size = cpu_phys_address_size();
Aaron Durbine63be892016-03-07 16:05:36 -0600794 printk(BIOS_DEBUG, "CPU physical address size: %d bits\n",
795 address_size);
796 /* Always handle addresses above 4GiB. */
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100797 x86_setup_var_mtrrs(address_size, 1);
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000798}
799
Aaron Durbine63be892016-03-07 16:05:36 -0600800void x86_setup_mtrrs_with_detect(void)
801{
802 detect_var_mtrrs();
803 x86_setup_mtrrs();
804}
805
Kyösti Mälkki38a8fb02014-06-30 13:48:18 +0300806void x86_mtrr_check(void)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000807{
808 /* Only Pentium Pro and later have MTRR */
809 msr_t msr;
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000810 printk(BIOS_DEBUG, "\nMTRR check\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000811
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700812 msr = rdmsr(MTRR_DEF_TYPE_MSR);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000813
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000814 printk(BIOS_DEBUG, "Fixed MTRRs : ");
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700815 if (msr.lo & MTRR_DEF_TYPE_FIX_EN)
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000816 printk(BIOS_DEBUG, "Enabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000817 else
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000818 printk(BIOS_DEBUG, "Disabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000819
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000820 printk(BIOS_DEBUG, "Variable MTRRs: ");
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700821 if (msr.lo & MTRR_DEF_TYPE_EN)
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000822 printk(BIOS_DEBUG, "Enabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000823 else
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000824 printk(BIOS_DEBUG, "Disabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000825
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000826 printk(BIOS_DEBUG, "\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000827
828 post_code(0x93);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000829}
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600830
/* Set when a temporary MTRR solution is active and the boot-time
 * solution must be restored before OS handoff (see remove_temp_solution). */
static bool put_back_original_solution;
832
833void mtrr_use_temp_range(uintptr_t begin, size_t size, int type)
834{
835 const struct range_entry *r;
836 const struct memranges *orig;
837 struct var_mtrr_solution sol;
838 struct memranges addr_space;
839 const int above4gb = 1; /* Cover above 4GiB by default. */
840 int address_bits;
841
842 /* Make a copy of the original address space and tweak it with the
843 * provided range. */
844 memranges_init_empty(&addr_space, NULL, 0);
845 orig = get_physical_address_space();
846 memranges_each_entry(r, orig) {
847 unsigned long tag = range_entry_tag(r);
848
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600849 /* Remove any write combining MTRRs from the temporary
850 * solution as it just fragments the address space. */
851 if (tag == MTRR_TYPE_WRCOMB)
852 tag = MTRR_TYPE_UNCACHEABLE;
853
854 memranges_insert(&addr_space, range_entry_base(r),
855 range_entry_size(r), tag);
856 }
857
858 /* Place new range into the address space. */
859 memranges_insert(&addr_space, begin, size, type);
860
861 print_physical_address_space(&addr_space, "TEMPORARY");
862
863 /* Calculate a new solution with the updated address space. */
864 address_bits = cpu_phys_address_size();
865 memset(&sol, 0, sizeof(sol));
866 sol.mtrr_default_type =
867 calc_var_mtrrs(&addr_space, above4gb, address_bits);
868 prepare_var_mtrrs(&addr_space, sol.mtrr_default_type,
869 above4gb, address_bits, &sol);
Aaron Durbind9762f72017-06-12 12:48:38 -0500870
871 if (commit_var_mtrrs(&sol) < 0)
872 printk(BIOS_WARNING, "Unable to insert temporary MTRR range: 0x%016llx - 0x%016llx size 0x%08llx type %d\n",
873 (long long)begin, (long long)begin + size,
874 (long long)size, type);
875 else
876 put_back_original_solution = true;
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600877
878 memranges_teardown(&addr_space);
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600879}
880
881static void remove_temp_solution(void *unused)
882{
883 if (put_back_original_solution)
884 commit_var_mtrrs(&mtrr_global_solution);
885}
886
/* Restore the original MTRR solution before resuming the OS or exiting
 * payload load, so temporary ranges never leak past firmware. */
BOOT_STATE_INIT_ENTRY(BS_OS_RESUME, BS_ON_ENTRY, remove_temp_solution, NULL);
BOOT_STATE_INIT_ENTRY(BS_PAYLOAD_LOAD, BS_ON_EXIT, remove_temp_solution, NULL);