blob: e9efc8b1de51650f3775d5cfc656957ebab6e597 [file] [log] [blame]
/*
 * mtrr.c: setting MTRR to decent values for cache initialization on P6
 *
 * Derived from intel_set_mtrr in intel_subr.c and mtrr.c in linux kernel
 *
 * Copyright 2000 Silicon Integrated System Corporation
 * Copyright 2013 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Reference: Intel Architecture Software Developer's Manual, Volume 3: System
 * Programming
 */
Yinghai Lu13f1c2a2005-07-08 02:49:49 +000023
Eric Biedermanf8a2ddd2004-10-30 08:05:41 +000024#include <stddef.h>
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050025#include <stdlib.h>
26#include <string.h>
Aaron Durbinbebf6692013-04-24 20:59:43 -050027#include <bootstate.h>
Elyes HAOUASd26844c2019-06-21 07:31:40 +020028#include <commonlib/helpers.h>
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000029#include <console/console.h>
30#include <device/device.h>
Aaron Durbinca4f4b82014-02-08 15:41:52 -060031#include <device/pci_ids.h>
Aaron Durbinebf142a2013-03-29 16:23:23 -050032#include <cpu/cpu.h>
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000033#include <cpu/x86/msr.h>
34#include <cpu/x86/mtrr.h>
35#include <cpu/x86/cache.h>
Stefan Reinauer00093a82011-11-02 16:12:34 -070036#include <cpu/x86/lapic.h>
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050037#include <memrange.h>
Aaron Durbin57686f82013-03-20 15:50:59 -050038#include <cpu/amd/mtrr.h>
Richard Spiegelb28025a2019-02-20 11:00:19 -070039#include <assert.h>
Julius Wernercd49cce2019-03-05 16:53:33 -080040#if CONFIG(X86_AMD_FIXED_MTRRS)
Aaron Durbin57686f82013-03-20 15:50:59 -050041#define MTRR_FIXED_WRBACK_BITS (MTRR_READ_MEM | MTRR_WRITE_MEM)
42#else
43#define MTRR_FIXED_WRBACK_BITS 0
44#endif
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000045
Stefan Reinauerc00dfbc2012-04-03 16:24:37 -070046/* 2 MTRRS are reserved for the operating system */
47#define BIOS_MTRRS 6
48#define OS_MTRRS 2
49#define MTRRS (BIOS_MTRRS + OS_MTRRS)
Gabe Black7756fe72014-02-25 01:40:34 -080050/*
Isaac Christensen81f90c52014-09-24 14:59:32 -060051 * Static storage size for variable MTRRs. It's sized sufficiently large to
52 * handle different types of CPUs. Empirically, 16 variable MTRRs has not
Gabe Black7756fe72014-02-25 01:40:34 -080053 * yet been observed.
54 */
55#define NUM_MTRR_STATIC_STORAGE 16
Stefan Reinauerc00dfbc2012-04-03 16:24:37 -070056
57static int total_mtrrs = MTRRS;
58static int bios_mtrrs = BIOS_MTRRS;
59
60static void detect_var_mtrrs(void)
61{
62 msr_t msr;
63
Alexandru Gagniuc86091f92015-09-30 20:23:09 -070064 msr = rdmsr(MTRR_CAP_MSR);
Stefan Reinauerc00dfbc2012-04-03 16:24:37 -070065
66 total_mtrrs = msr.lo & 0xff;
Gabe Black7756fe72014-02-25 01:40:34 -080067
68 if (total_mtrrs > NUM_MTRR_STATIC_STORAGE) {
69 printk(BIOS_WARNING,
70 "MTRRs detected (%d) > NUM_MTRR_STATIC_STORAGE (%d)\n",
71 total_mtrrs, NUM_MTRR_STATIC_STORAGE);
72 total_mtrrs = NUM_MTRR_STATIC_STORAGE;
73 }
Stefan Reinauerc00dfbc2012-04-03 16:24:37 -070074 bios_mtrrs = total_mtrrs - OS_MTRRS;
75}
76
/* Enable MTRRs globally and enable the fixed-range MTRRs by setting the
 * E and FE bits in IA32_MTRR_DEF_TYPE. */
void enable_fixed_mtrr(void)
{
	msr_t msr;

	msr = rdmsr(MTRR_DEF_TYPE_MSR);
	msr.lo |= MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN;
	wrmsr(MTRR_DEF_TYPE_MSR, msr);
}
85
/* On AMD parts, set SYSCFG.MtrrFixDramModEn so the RdMem/WrMem fields of
 * the fixed MTRRs become writable. No-op unless X86_AMD_FIXED_MTRRS. */
void fixed_mtrrs_expose_amd_rwdram(void)
{
	msr_t syscfg;

	if (!CONFIG(X86_AMD_FIXED_MTRRS))
		return;

	syscfg = rdmsr(SYSCFG_MSR);
	syscfg.lo |= SYSCFG_MSR_MtrrFixDramModEn;
	wrmsr(SYSCFG_MSR, syscfg);
}
97
/* Counterpart of fixed_mtrrs_expose_amd_rwdram(): clear
 * SYSCFG.MtrrFixDramModEn again once the fixed MTRRs are committed. */
void fixed_mtrrs_hide_amd_rwdram(void)
{
	msr_t syscfg;

	if (!CONFIG(X86_AMD_FIXED_MTRRS))
		return;

	syscfg = rdmsr(SYSCFG_MSR);
	syscfg.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
	wrmsr(SYSCFG_MSR, syscfg);
}
109
/* Enable MTRRs with the given default memory type. The low byte of
 * IA32_MTRR_DEF_TYPE holds the default type; it is replaced, not OR-ed. */
static void enable_var_mtrr(unsigned char deftype)
{
	msr_t msr;

	msr = rdmsr(MTRR_DEF_TYPE_MSR);
	msr.lo &= ~0xff;
	msr.lo |= MTRR_DEF_TYPE_EN | deftype;
	wrmsr(MTRR_DEF_TYPE_MSR, msr);
}
119
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500120#define MTRR_VERBOSE_LEVEL BIOS_NEVER
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000121
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500122/* MTRRs are at a 4KiB granularity. Therefore all address calculations can
123 * be done with 32-bit numbers. This allows for the MTRR code to handle
124 * up to 2^44 bytes (16 TiB) of address space. */
125#define RANGE_SHIFT 12
126#define ADDR_SHIFT_TO_RANGE_SHIFT(x) \
127 (((x) > RANGE_SHIFT) ? ((x) - RANGE_SHIFT) : RANGE_SHIFT)
128#define PHYS_TO_RANGE_ADDR(x) ((x) >> RANGE_SHIFT)
129#define RANGE_TO_PHYS_ADDR(x) (((resource_t)(x)) << RANGE_SHIFT)
130#define NUM_FIXED_MTRRS (NUM_FIXED_RANGES / RANGES_PER_FIXED_MTRR)
131
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500132/* Helpful constants. */
133#define RANGE_1MB PHYS_TO_RANGE_ADDR(1 << 20)
134#define RANGE_4GB (1 << (ADDR_SHIFT_TO_RANGE_SHIFT(32)))
135
Aaron Durbine3834422013-03-28 20:48:51 -0500136#define MTRR_ALGO_SHIFT (8)
137#define MTRR_TAG_MASK ((1 << MTRR_ALGO_SHIFT) - 1)
Aaron Durbine3834422013-03-28 20:48:51 -0500138
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500139static inline uint32_t range_entry_base_mtrr_addr(struct range_entry *r)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000140{
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500141 return PHYS_TO_RANGE_ADDR(range_entry_base(r));
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000142}
143
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500144static inline uint32_t range_entry_end_mtrr_addr(struct range_entry *r)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000145{
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500146 return PHYS_TO_RANGE_ADDR(range_entry_end(r));
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000147}
148
Aaron Durbine3834422013-03-28 20:48:51 -0500149static inline int range_entry_mtrr_type(struct range_entry *r)
150{
151 return range_entry_tag(r) & MTRR_TAG_MASK;
152}
153
Aaron Durbinca4f4b82014-02-08 15:41:52 -0600154static int filter_vga_wrcomb(struct device *dev, struct resource *res)
155{
156 /* Only handle PCI devices. */
157 if (dev->path.type != DEVICE_PATH_PCI)
158 return 0;
159
160 /* Only handle VGA class devices. */
161 if (((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA))
162 return 0;
163
164 /* Add resource as write-combining in the address space. */
165 return 1;
166}
167
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600168static void print_physical_address_space(const struct memranges *addr_space,
169 const char *identifier)
170{
171 const struct range_entry *r;
172
173 if (identifier)
174 printk(BIOS_DEBUG, "MTRR: %s Physical address space:\n",
175 identifier);
176 else
177 printk(BIOS_DEBUG, "MTRR: Physical address space:\n");
178
179 memranges_each_entry(r, addr_space)
180 printk(BIOS_DEBUG,
181 "0x%016llx - 0x%016llx size 0x%08llx type %ld\n",
182 range_entry_base(r), range_entry_end(r),
183 range_entry_size(r), range_entry_tag(r));
184}
185
/* Build (once, lazily) and return the memranges describing the physical
 * address space with the MTRR type of each range stored in its tag.
 * Precedence when ranges overlap: UC over WRCOMB over WB. */
static struct memranges *get_physical_address_space(void)
{
	static struct memranges *addr_space;
	static struct memranges addr_space_storage;

	/* In order to handle some chipsets not being able to pre-determine
	 * uncacheable ranges, such as graphics memory, at resource insertion
	 * time remove uncacheable regions from the cacheable ones. */
	if (addr_space == NULL) {
		unsigned long mask;
		unsigned long match;

		addr_space = &addr_space_storage;

		mask = IORESOURCE_CACHEABLE;
		/* Collect cacheable and uncacheable address ranges. The
		 * uncacheable regions take precedence over the cacheable
		 * regions. */
		memranges_init(addr_space, mask, mask, MTRR_TYPE_WRBACK);
		memranges_add_resources(addr_space, mask, 0,
					MTRR_TYPE_UNCACHEABLE);

		/* Handle any write combining resources. Only prefetchable
		 * resources are appropriate for this MTRR type. */
		match = IORESOURCE_PREFETCH;
		mask |= match;
		memranges_add_resources_filter(addr_space, mask, match,
				MTRR_TYPE_WRCOMB, filter_vga_wrcomb);

		/* The address space below 4GiB is special. It needs to be
		 * covered entirely by range entries so that MTRR calculations
		 * can be properly done for the full 32-bit address space.
		 * Therefore, ensure holes are filled up to 4GiB as
		 * uncacheable */
		memranges_fill_holes_up_to(addr_space,
					RANGE_TO_PHYS_ADDR(RANGE_4GB),
					MTRR_TYPE_UNCACHEABLE);

		print_physical_address_space(addr_space, NULL);
	}

	return addr_space;
}
229
/* Fixed MTRR descriptor. This structure defines the step size and begin
 * and end (exclusive) address covered by a set of fixed MTRR MSRs.
 * It also describes the offset in byte intervals to store the calculated MTRR
 * type in an array. */
struct fixed_mtrr_desc {
	uint32_t begin;		/* First covered address (range units, inclusive). */
	uint32_t end;		/* Last covered address (range units, exclusive). */
	uint32_t step;		/* Size of one fixed-MTRR sub-range (range units). */
	int range_index;	/* Index into fixed_mtrr_types[] for 'begin'. */
	int msr_index_base;	/* MSR number of the first MSR in this set. */
};
241
/* Shared MTRR calculations. Can be reused by APs. */
static uint8_t fixed_mtrr_types[NUM_FIXED_RANGES];

/* Fixed MTRR descriptors: one 64KiB-granular set covering 0-512KiB,
 * one 16KiB set for 512KiB-768KiB, one 4KiB set for 768KiB-1MiB. */
static const struct fixed_mtrr_desc fixed_mtrr_desc[] = {
	{ PHYS_TO_RANGE_ADDR(0x000000), PHYS_TO_RANGE_ADDR(0x080000),
	  PHYS_TO_RANGE_ADDR(64 * 1024), 0, MTRR_FIX_64K_00000 },
	{ PHYS_TO_RANGE_ADDR(0x080000), PHYS_TO_RANGE_ADDR(0x0C0000),
	  PHYS_TO_RANGE_ADDR(16 * 1024), 8, MTRR_FIX_16K_80000 },
	{ PHYS_TO_RANGE_ADDR(0x0C0000), PHYS_TO_RANGE_ADDR(0x100000),
	  PHYS_TO_RANGE_ADDR(4 * 1024), 24, MTRR_FIX_4K_C0000 },
};
254
/* Fill fixed_mtrr_types[] from the physical address space map. Runs once;
 * subsequent calls (e.g. from APs) return immediately. Every sub-range
 * defaults to UC and is then overwritten per the range entries below 1MiB. */
static void calc_fixed_mtrrs(void)
{
	static int fixed_mtrr_types_initialized;
	struct memranges *phys_addr_space;
	struct range_entry *r;
	const struct fixed_mtrr_desc *desc;
	const struct fixed_mtrr_desc *last_desc;
	uint32_t begin;
	uint32_t end;
	int type_index;

	if (fixed_mtrr_types_initialized)
		return;

	phys_addr_space = get_physical_address_space();

	/* Set all fixed ranges to uncacheable first. */
	memset(&fixed_mtrr_types[0], MTRR_TYPE_UNCACHEABLE, NUM_FIXED_RANGES);

	desc = &fixed_mtrr_desc[0];
	last_desc = &fixed_mtrr_desc[ARRAY_SIZE(fixed_mtrr_desc) - 1];

	memranges_each_entry(r, phys_addr_space) {
		begin = range_entry_base_mtrr_addr(r);
		end = range_entry_end_mtrr_addr(r);

		/* Entries are sorted; past 1MiB nothing is fixed-MTRR mapped. */
		if (begin >= last_desc->end)
			break;

		if (end > last_desc->end)
			end = last_desc->end;

		/* Get to the correct fixed mtrr descriptor. */
		while (begin >= desc->end)
			desc++;

		type_index = desc->range_index;
		type_index += (begin - desc->begin) / desc->step;

		/* Walk the entry one fixed-MTRR step at a time, recording
		 * its type (plus AMD RdMem/WrMem bits for WB ranges). */
		while (begin != end) {
			unsigned char type;

			type = range_entry_tag(r);
			printk(MTRR_VERBOSE_LEVEL,
				"MTRR addr 0x%x-0x%x set to %d type @ %d\n",
				begin, begin + desc->step, type, type_index);
			if (type == MTRR_TYPE_WRBACK)
				type |= MTRR_FIXED_WRBACK_BITS;
			fixed_mtrr_types[type_index] = type;
			type_index++;
			begin += desc->step;
			if (begin == desc->end)
				desc++;
		}
	}
	fixed_mtrr_types_initialized = 1;
}
312
/* Pack fixed_mtrr_types[] into the fixed MTRR MSR images (8 one-byte types
 * per MSR) and write them to hardware with caches disabled. */
static void commit_fixed_mtrrs(void)
{
	int i;
	int j;
	int msr_num;
	int type_index;
	/* 8 ranges per msr. */
	msr_t fixed_msrs[NUM_FIXED_MTRRS];
	unsigned long msr_index[NUM_FIXED_MTRRS];

	/* Make AMD RdMem/WrMem bits writable while programming. */
	fixed_mtrrs_expose_amd_rwdram();

	memset(&fixed_msrs, 0, sizeof(fixed_msrs));

	msr_num = 0;
	type_index = 0;
	for (i = 0; i < ARRAY_SIZE(fixed_mtrr_desc); i++) {
		const struct fixed_mtrr_desc *desc;
		int num_ranges;

		desc = &fixed_mtrr_desc[i];
		num_ranges = (desc->end - desc->begin) / desc->step;
		/* Each MSR covers RANGES_PER_FIXED_MTRR consecutive types:
		 * 4 bytes in .lo, 4 bytes in .hi. */
		for (j = 0; j < num_ranges; j += RANGES_PER_FIXED_MTRR) {
			msr_index[msr_num] = desc->msr_index_base +
				(j / RANGES_PER_FIXED_MTRR);
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 0;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 8;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 16;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 24;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 0;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 8;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 16;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 24;
			msr_num++;
		}
	}

	/* Ensure that both arrays were fully initialized */
	ASSERT(msr_num == NUM_FIXED_MTRRS)

	for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
		printk(BIOS_DEBUG, "MTRR: Fixed MSR 0x%lx 0x%08x%08x\n",
		       msr_index[i], fixed_msrs[i].hi, fixed_msrs[i].lo);

	/* Caches must be off while the fixed MTRRs are rewritten. */
	disable_cache();
	for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
		wrmsr(msr_index[i], fixed_msrs[i]);
	enable_cache();
	fixed_mtrrs_hide_amd_rwdram();
}
372
/* Calculate and write the fixed MTRRs without enabling them; the caller
 * is responsible for calling enable_fixed_mtrr() later. */
void x86_setup_fixed_mtrrs_no_enable(void)
{
	calc_fixed_mtrrs();
	commit_fixed_mtrrs();
}
Stefan Reinauer7f86ed12009-02-12 16:02:16 +0000378
/* Calculate, write, and enable the fixed MTRRs in one step. */
void x86_setup_fixed_mtrrs(void)
{
	x86_setup_fixed_mtrrs_no_enable();

	printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
	enable_fixed_mtrr();
}
386
/* One variable MTRR: the PHYS_BASE and PHYS_MASK MSR images. */
struct var_mtrr_regs {
	msr_t base;
	msr_t mask;
};

/* A complete variable-MTRR solution: chosen default type plus the
 * prepared MSR pairs. */
struct var_mtrr_solution {
	int mtrr_default_type;	/* Default memory type for IA32_MTRR_DEF_TYPE. */
	int num_used;		/* Number of entries in regs[] actually used. */
	struct var_mtrr_regs regs[NUM_MTRR_STATIC_STORAGE];
};

/* Global storage for variable MTRR solution. */
static struct var_mtrr_solution mtrr_global_solution;

/* Working state threaded through the variable-MTRR calculation passes. */
struct var_mtrr_state {
	struct memranges *addr_space;	/* Address space being mapped. */
	int above4gb;		/* Nonzero: also map ranges above 4GiB. */
	int address_bits;	/* Physical address width of the CPU. */
	int prepare_msrs;	/* Nonzero: fill regs[]; zero: only count. */
	int mtrr_index;		/* Next MTRR slot / running MTRR count. */
	int def_mtrr_type;	/* Assumed default type for this pass. */
	struct var_mtrr_regs *regs;	/* Output MSR images (prepare pass). */
};
Aaron Durbin57686f82013-03-20 15:50:59 -0500410
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500411static void clear_var_mtrr(int index)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000412{
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600413 msr_t msr = { .lo = 0, .hi = 0 };
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500414
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600415 wrmsr(MTRR_PHYS_BASE(index), msr);
416 wrmsr(MTRR_PHYS_MASK(index), msr);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500417}
418
/* Fill in the PHYS_BASE/PHYS_MASK MSR images for one variable MTRR entry.
 * base/size are in 4KiB range units; mtrr_type is the memory type. */
static void prep_var_mtrr(struct var_mtrr_state *var_state,
			uint32_t base, uint32_t size, int mtrr_type)
{
	struct var_mtrr_regs *regs;
	resource_t rbase;
	resource_t rsize;
	resource_t mask;

	/* Some variable MTRRs are attempted to be saved for the OS use.
	 * However, it's more important to try to map the full address space
	 * properly. */
	if (var_state->mtrr_index >= bios_mtrrs)
		printk(BIOS_WARNING, "Taking a reserved OS MTRR.\n");
	if (var_state->mtrr_index >= total_mtrrs) {
		printk(BIOS_ERR, "ERROR: Not enough MTRRs available! MTRR index is %d with %d MTRRs in total.\n",
			var_state->mtrr_index, total_mtrrs);
		return;
	}

	rbase = base;
	rsize = size;

	rbase = RANGE_TO_PHYS_ADDR(rbase);
	rsize = RANGE_TO_PHYS_ADDR(rsize);
	/* The MTRR mask is ~(size - 1) truncated to the physical address
	 * width; -size equals that complement for power-of-two sizes. */
	rsize = -rsize;

	mask = (1ULL << var_state->address_bits) - 1;
	rsize = rsize & mask;

	printk(BIOS_DEBUG, "MTRR: %d base 0x%016llx mask 0x%016llx type %d\n",
		var_state->mtrr_index, rbase, rsize, mtrr_type);

	regs = &var_state->regs[var_state->mtrr_index];

	/* PHYS_BASE: address in the high bits, memory type in the low byte. */
	regs->base.lo = rbase;
	regs->base.lo |= mtrr_type;
	regs->base.hi = rbase >> 32;

	/* PHYS_MASK: mask plus the valid bit. */
	regs->mask.lo = rsize;
	regs->mask.lo |= MTRR_PHYS_MASK_VALID;
	regs->mask.hi = rsize >> 32;
}
461
/* Cover [base, base+size) with as many variable MTRRs as needed. Each MTRR
 * must be a power-of-two size with its base aligned to that size; the loop
 * peels off the largest legal chunk each iteration. Only counts MTRRs
 * (mtrr_index) unless var_state->prepare_msrs is set. */
static void calc_var_mtrr_range(struct var_mtrr_state *var_state,
				uint32_t base, uint32_t size, int mtrr_type)
{
	while (size != 0) {
		uint32_t addr_lsb;
		uint32_t size_msb;
		uint32_t mtrr_size;

		addr_lsb = fls(base);
		size_msb = fms(size);

		/* All MTRR entries need to have their base aligned to the mask
		 * size. The maximum size is calculated by a function of the
		 * min base bit set and maximum size bit set. */
		if (addr_lsb > size_msb)
			mtrr_size = 1 << size_msb;
		else
			mtrr_size = 1 << addr_lsb;

		if (var_state->prepare_msrs)
			prep_var_mtrr(var_state, base, mtrr_size, mtrr_type);

		size -= mtrr_size;
		base += mtrr_size;
		var_state->mtrr_index++;
	}
}
489
static uint32_t optimize_var_mtrr_hole(const uint32_t base,
				       const uint32_t hole,
				       const uint64_t limit,
				       const int carve_hole)
{
	/*
	 * With default type UC, we can potentially optimize a WB
	 * range with unaligned upper end, by aligning it up and
	 * carving the added "hole" out again.
	 *
	 * To optimize the upper end of the hole, we will test
	 * how many MTRRs calc_var_mtrr_range() will spend for any
	 * alignment of the hole's upper end.
	 *
	 * We take four parameters, the lower end of the WB range
	 * `base`, upper end of the WB range as start of the `hole`,
	 * a `limit` how far we may align the upper end of the hole
	 * up and a flag `carve_hole` whether we should count MTRRs
	 * for carving the hole out. We return the optimal upper end
	 * for the hole (which may be the same as the end of the WB
	 * range in case we don't gain anything by aligning up).
	 */

	/* Counting pass only: the memory type is never written out. */
	const int dont_care = 0;
	struct var_mtrr_state var_state = { 0, };

	unsigned int align, best_count;
	uint32_t best_end = hole;

	/* calculate MTRR count for the WB range alone (w/o a hole) */
	calc_var_mtrr_range(&var_state, base, hole - base, dont_care);
	best_count = var_state.mtrr_index;
	var_state.mtrr_index = 0;

	/* Try every alignment from just above the hole's lowest set bit
	 * up to its highest set bit, keeping the cheapest total. */
	for (align = fls(hole) + 1; align <= fms(hole); ++align) {
		const uint64_t hole_end = ALIGN_UP((uint64_t)hole, 1 << align);
		if (hole_end > limit)
			break;

		/* calculate MTRR count for this alignment */
		calc_var_mtrr_range(
			&var_state, base, hole_end - base, dont_care);
		if (carve_hole)
			calc_var_mtrr_range(
				&var_state, hole, hole_end - hole, dont_care);

		if (var_state.mtrr_index < best_count) {
			best_count = var_state.mtrr_index;
			best_end = hole_end;
		}
		var_state.mtrr_index = 0;
	}

	return best_end;
}
545
/* Count (or prepare, per var_state->prepare_msrs) the variable MTRRs for a
 * single range entry, optionally rounding a WB range up and carving the
 * excess back out as a UC "hole" when that saves MTRRs. */
static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
				struct range_entry *r)
{
	uint32_t a1, a2, b1, b2;
	int mtrr_type, carve_hole;

	/*
	 * Determine MTRRs based on the following algorithm for the given entry:
	 * +------------------+ b2 = ALIGN_UP(end)
	 * |  0 or more bytes | <-- hole is carved out between b1 and b2
	 * +------------------+ a2 = b1 = original end
	 * |                  |
	 * +------------------+ a1 = begin
	 *
	 * Thus, there are up to 2 sub-ranges to configure variable MTRRs for.
	 */
	mtrr_type = range_entry_mtrr_type(r);

	a1 = range_entry_base_mtrr_addr(r);
	a2 = range_entry_end_mtrr_addr(r);

	/* The end address is within the first 1MiB. The fixed MTRRs take
	 * precedence over the variable ones. Therefore this range
	 * can be ignored. */
	if (a2 <= RANGE_1MB)
		return;

	/* Again, the fixed MTRRs take precedence so the beginning
	 * of the range can be set to 0 if it starts at or below 1MiB. */
	if (a1 <= RANGE_1MB)
		a1 = 0;

	/* If the range starts above 4GiB the processing is done. */
	if (!var_state->above4gb && a1 >= RANGE_4GB)
		return;

	/* Clip the upper address to 4GiB if addresses above 4GiB
	 * are not being processed. */
	if (!var_state->above4gb && a2 > RANGE_4GB)
		a2 = RANGE_4GB;

	b1 = a2;
	b2 = a2;
	carve_hole = 0;

	/* We only consider WB type ranges for hole-carving. */
	if (mtrr_type == MTRR_TYPE_WRBACK) {
		struct range_entry *next;
		uint64_t b2_limit;
		/*
		 * Depending on the type of the next range, there are three
		 * different situations to handle:
		 *
		 * 1. WB range is last in address space:
		 *    Aligning up, up to the next power of 2, may gain us
		 *    something.
		 *
		 * 2. The next range is of type UC:
		 *    We may align up, up to the _end_ of the next range. If
		 *    there is a gap between the current and the next range,
		 *    it would have been covered by the default type UC anyway.
		 *
		 * 3. The next range is not of type UC:
		 *    We may align up, up to the _base_ of the next range. This
		 *    may either be the end of the current range (if the next
		 *    range follows immediately) or the end of the gap between
		 *    the ranges.
		 */
		next = memranges_next_entry(var_state->addr_space, r);
		if (next == NULL) {
			b2_limit = ALIGN_UP((uint64_t)b1, 1 << fms(b1));
			/* If it's the last range above 4GiB, we won't carve
			   the hole out. If an OS wanted to move MMIO there,
			   it would have to override the MTRR setting using
			   PAT just like it would with WB as default type. */
			carve_hole = a1 < RANGE_4GB;
		} else if (range_entry_mtrr_type(next)
				== MTRR_TYPE_UNCACHEABLE) {
			b2_limit = range_entry_end_mtrr_addr(next);
			carve_hole = 1;
		} else {
			b2_limit = range_entry_base_mtrr_addr(next);
			carve_hole = 1;
		}
		b2 = optimize_var_mtrr_hole(a1, b1, b2_limit, carve_hole);
	}

	calc_var_mtrr_range(var_state, a1, b2 - a1, mtrr_type);
	if (carve_hole && b2 != b1) {
		calc_var_mtrr_range(var_state, b1, b2 - b1,
				MTRR_TYPE_UNCACHEABLE);
	}
}
639
/* Count how many variable MTRRs the address space would require with WB as
 * the default type and with UC as the default type; results are returned
 * through num_def_wb_mtrrs / num_def_uc_mtrrs. Pure counting pass — no MSR
 * images are prepared. */
static void __calc_var_mtrrs(struct memranges *addr_space,
				int above4gb, int address_bits,
				int *num_def_wb_mtrrs, int *num_def_uc_mtrrs)
{
	int wb_deftype_count;
	int uc_deftype_count;
	struct range_entry *r;
	struct var_mtrr_state var_state;

	/* The default MTRR cacheability type is determined by calculating
	 * the number of MTRRs required for each MTRR type as if it was the
	 * default. */
	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	var_state.prepare_msrs = 0;

	wb_deftype_count = 0;
	uc_deftype_count = 0;

	/*
	 * For each range do 2 calculations:
	 *   1. UC as default type with possible holes at top of range.
	 *   2. WB as default.
	 * The lowest count is then used as default after totaling all
	 * MTRRs. UC takes precedence in the MTRR architecture. There-
	 * fore, only holes can be used when the type of the region is
	 * MTRR_TYPE_WRBACK with MTRR_TYPE_UNCACHEABLE as the default
	 * type.
	 */
	memranges_each_entry(r, var_state.addr_space) {
		int mtrr_type;

		mtrr_type = range_entry_mtrr_type(r);

		/* A range matching the default type needs no MTRR, so only
		 * ranges that differ from it contribute to each count. */
		if (mtrr_type != MTRR_TYPE_UNCACHEABLE) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_UNCACHEABLE;
			calc_var_mtrrs_with_hole(&var_state, r);
			uc_deftype_count += var_state.mtrr_index;
		}

		if (mtrr_type != MTRR_TYPE_WRBACK) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_WRBACK;
			calc_var_mtrrs_with_hole(&var_state, r);
			wb_deftype_count += var_state.mtrr_index;
		}
	}
	*num_def_wb_mtrrs = wb_deftype_count;
	*num_def_uc_mtrrs = uc_deftype_count;
}
692
/* Pick the default memory type (WB or UC) that needs the fewest variable
 * MTRRs. If both choices exceed the BIOS MTRR budget, demote WRCOMB ranges
 * to UC and recount. Returns the chosen MTRR default type. */
static int calc_var_mtrrs(struct memranges *addr_space,
				int above4gb, int address_bits)
{
	int wb_deftype_count = 0;
	int uc_deftype_count = 0;

	__calc_var_mtrrs(addr_space, above4gb, address_bits, &wb_deftype_count,
				&uc_deftype_count);

	if (wb_deftype_count > bios_mtrrs && uc_deftype_count > bios_mtrrs) {
		printk(BIOS_DEBUG, "MTRR: Removing WRCOMB type. "
			"WB/UC MTRR counts: %d/%d > %d.\n",
			wb_deftype_count, uc_deftype_count, bios_mtrrs);
		memranges_update_tag(addr_space, MTRR_TYPE_WRCOMB,
					MTRR_TYPE_UNCACHEABLE);
		__calc_var_mtrrs(addr_space, above4gb, address_bits,
					&wb_deftype_count, &uc_deftype_count);
	}

	printk(BIOS_DEBUG, "MTRR: default type WB/UC MTRR counts: %d/%d.\n",
		wb_deftype_count, uc_deftype_count);

	if (wb_deftype_count < uc_deftype_count) {
		printk(BIOS_DEBUG, "MTRR: WB selected as default type.\n");
		return MTRR_TYPE_WRBACK;
	}
	printk(BIOS_DEBUG, "MTRR: UC selected as default type.\n");
	return MTRR_TYPE_UNCACHEABLE;
}
Kyösti Mälkkiffc1fb32012-07-11 14:40:19 +0300722
Gabe Black7756fe72014-02-25 01:40:34 -0800723static void prepare_var_mtrrs(struct memranges *addr_space, int def_type,
724 int above4gb, int address_bits,
725 struct var_mtrr_solution *sol)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500726{
Aaron Durbine3834422013-03-28 20:48:51 -0500727 struct range_entry *r;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500728 struct var_mtrr_state var_state;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500729
730 var_state.addr_space = addr_space;
731 var_state.above4gb = above4gb;
732 var_state.address_bits = address_bits;
Gabe Black7756fe72014-02-25 01:40:34 -0800733 /* Prepare the MSRs. */
734 var_state.prepare_msrs = 1;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500735 var_state.mtrr_index = 0;
736 var_state.def_mtrr_type = def_type;
Gabe Black7756fe72014-02-25 01:40:34 -0800737 var_state.regs = &sol->regs[0];
Aaron Durbine3834422013-03-28 20:48:51 -0500738
739 memranges_each_entry(r, var_state.addr_space) {
740 if (range_entry_mtrr_type(r) == def_type)
741 continue;
Nico Huber64f0bcb2017-10-07 16:37:04 +0200742 calc_var_mtrrs_with_hole(&var_state, r);
Aaron Durbine3834422013-03-28 20:48:51 -0500743 }
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500744
Gabe Black7756fe72014-02-25 01:40:34 -0800745 /* Update the solution. */
746 sol->num_used = var_state.mtrr_index;
747}
748
Aaron Durbind9762f72017-06-12 12:48:38 -0500749static int commit_var_mtrrs(const struct var_mtrr_solution *sol)
Gabe Black7756fe72014-02-25 01:40:34 -0800750{
751 int i;
752
Aaron Durbind9762f72017-06-12 12:48:38 -0500753 if (sol->num_used > total_mtrrs) {
754 printk(BIOS_WARNING, "Not enough MTRRs: %d vs %d\n",
755 sol->num_used, total_mtrrs);
756 return -1;
757 }
758
Isaac Christensen81f90c52014-09-24 14:59:32 -0600759 /* Write out the variable MTRRs. */
Gabe Black7756fe72014-02-25 01:40:34 -0800760 disable_cache();
761 for (i = 0; i < sol->num_used; i++) {
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700762 wrmsr(MTRR_PHYS_BASE(i), sol->regs[i].base);
763 wrmsr(MTRR_PHYS_MASK(i), sol->regs[i].mask);
Gabe Black7756fe72014-02-25 01:40:34 -0800764 }
765 /* Clear the ones that are unused. */
766 for (; i < total_mtrrs; i++)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500767 clear_var_mtrr(i);
Isaac Christensen81f90c52014-09-24 14:59:32 -0600768 enable_var_mtrr(sol->mtrr_default_type);
Gabe Black7756fe72014-02-25 01:40:34 -0800769 enable_cache();
770
Aaron Durbind9762f72017-06-12 12:48:38 -0500771 return 0;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500772}
773
774void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
775{
Gabe Black7756fe72014-02-25 01:40:34 -0800776 static struct var_mtrr_solution *sol = NULL;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500777 struct memranges *addr_space;
778
779 addr_space = get_physical_address_space();
780
Gabe Black7756fe72014-02-25 01:40:34 -0800781 if (sol == NULL) {
Gabe Black7756fe72014-02-25 01:40:34 -0800782 sol = &mtrr_global_solution;
783 sol->mtrr_default_type =
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500784 calc_var_mtrrs(addr_space, !!above4gb, address_bits);
Gabe Black7756fe72014-02-25 01:40:34 -0800785 prepare_var_mtrrs(addr_space, sol->mtrr_default_type,
786 !!above4gb, address_bits, sol);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000787 }
Stefan Reinauer00093a82011-11-02 16:12:34 -0700788
Gabe Black7756fe72014-02-25 01:40:34 -0800789 commit_var_mtrrs(sol);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000790}
791
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100792void x86_setup_mtrrs(void)
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000793{
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100794 int address_size;
Aaron Durbine63be892016-03-07 16:05:36 -0600795
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000796 x86_setup_fixed_mtrrs();
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100797 address_size = cpu_phys_address_size();
Aaron Durbine63be892016-03-07 16:05:36 -0600798 printk(BIOS_DEBUG, "CPU physical address size: %d bits\n",
799 address_size);
800 /* Always handle addresses above 4GiB. */
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100801 x86_setup_var_mtrrs(address_size, 1);
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000802}
803
/* Detect the number of variable MTRRs this CPU provides, then run the
 * full fixed + variable MTRR setup. */
void x86_setup_mtrrs_with_detect(void)
{
	detect_var_mtrrs();
	x86_setup_mtrrs();
}
809
Kyösti Mälkki38a8fb02014-06-30 13:48:18 +0300810void x86_mtrr_check(void)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000811{
812 /* Only Pentium Pro and later have MTRR */
813 msr_t msr;
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000814 printk(BIOS_DEBUG, "\nMTRR check\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000815
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700816 msr = rdmsr(MTRR_DEF_TYPE_MSR);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000817
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000818 printk(BIOS_DEBUG, "Fixed MTRRs : ");
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700819 if (msr.lo & MTRR_DEF_TYPE_FIX_EN)
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000820 printk(BIOS_DEBUG, "Enabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000821 else
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000822 printk(BIOS_DEBUG, "Disabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000823
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000824 printk(BIOS_DEBUG, "Variable MTRRs: ");
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700825 if (msr.lo & MTRR_DEF_TYPE_EN)
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000826 printk(BIOS_DEBUG, "Enabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000827 else
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000828 printk(BIOS_DEBUG, "Disabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000829
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000830 printk(BIOS_DEBUG, "\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000831
832 post_code(0x93);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000833}
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600834
/* Set when mtrr_use_temp_range() successfully commits a temporary MTRR
 * solution; checked later to decide whether mtrr_global_solution must
 * be restored. */
static bool put_back_original_solution;
836
837void mtrr_use_temp_range(uintptr_t begin, size_t size, int type)
838{
839 const struct range_entry *r;
840 const struct memranges *orig;
841 struct var_mtrr_solution sol;
842 struct memranges addr_space;
843 const int above4gb = 1; /* Cover above 4GiB by default. */
844 int address_bits;
845
846 /* Make a copy of the original address space and tweak it with the
847 * provided range. */
848 memranges_init_empty(&addr_space, NULL, 0);
849 orig = get_physical_address_space();
850 memranges_each_entry(r, orig) {
851 unsigned long tag = range_entry_tag(r);
852
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600853 /* Remove any write combining MTRRs from the temporary
854 * solution as it just fragments the address space. */
855 if (tag == MTRR_TYPE_WRCOMB)
856 tag = MTRR_TYPE_UNCACHEABLE;
857
858 memranges_insert(&addr_space, range_entry_base(r),
859 range_entry_size(r), tag);
860 }
861
862 /* Place new range into the address space. */
863 memranges_insert(&addr_space, begin, size, type);
864
865 print_physical_address_space(&addr_space, "TEMPORARY");
866
867 /* Calculate a new solution with the updated address space. */
868 address_bits = cpu_phys_address_size();
869 memset(&sol, 0, sizeof(sol));
870 sol.mtrr_default_type =
871 calc_var_mtrrs(&addr_space, above4gb, address_bits);
872 prepare_var_mtrrs(&addr_space, sol.mtrr_default_type,
873 above4gb, address_bits, &sol);
Aaron Durbind9762f72017-06-12 12:48:38 -0500874
875 if (commit_var_mtrrs(&sol) < 0)
876 printk(BIOS_WARNING, "Unable to insert temporary MTRR range: 0x%016llx - 0x%016llx size 0x%08llx type %d\n",
877 (long long)begin, (long long)begin + size,
878 (long long)size, type);
879 else
880 put_back_original_solution = true;
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600881
882 memranges_teardown(&addr_space);
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600883}
884
/* Boot-state callback: re-commit the cached global MTRR solution if a
 * temporary range was installed via mtrr_use_temp_range(). */
static void remove_temp_solution(void *unused)
{
	if (put_back_original_solution)
		commit_var_mtrrs(&mtrr_global_solution);
}

/* Undo any temporary MTRR ranges both when resuming the OS and when
 * payload loading finishes. */
BOOT_STATE_INIT_ENTRY(BS_OS_RESUME, BS_ON_ENTRY, remove_temp_solution, NULL);
BOOT_STATE_INIT_ENTRY(BS_PAYLOAD_LOAD, BS_ON_EXIT, remove_temp_solution, NULL);