blob: 7b1b65909756ebe17b143b7deef2495d68d4aa85 [file] [log] [blame]
Eric Biedermanfcd5ace2004-10-14 19:29:29 +00001/*
Stefan Reinauercdc5cc62007-04-24 18:40:02 +00002 * mtrr.c: setting MTRR to decent values for cache initialization on P6
Eric Biedermanfcd5ace2004-10-14 19:29:29 +00003 *
4 * Derived from intel_set_mtrr in intel_subr.c and mtrr.c in linux kernel
5 *
6 * Copyright 2000 Silicon Integrated System Corporation
Aaron Durbinbb4e79a2013-03-26 14:09:47 -05007 * Copyright 2013 Google Inc.
Eric Biedermanfcd5ace2004-10-14 19:29:29 +00008 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000019 *
Lee Leahyc5917072017-03-15 16:38:51 -070020 * Reference: Intel Architecture Software Developer's Manual, Volume 3: System
21 * Programming
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000022 */
Yinghai Lu13f1c2a2005-07-08 02:49:49 +000023
Eric Biedermanf8a2ddd2004-10-30 08:05:41 +000024#include <stddef.h>
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050025#include <stdlib.h>
26#include <string.h>
Aaron Durbinbebf6692013-04-24 20:59:43 -050027#include <bootstate.h>
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000028#include <console/console.h>
29#include <device/device.h>
Aaron Durbinca4f4b82014-02-08 15:41:52 -060030#include <device/pci_ids.h>
Aaron Durbinebf142a2013-03-29 16:23:23 -050031#include <cpu/cpu.h>
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000032#include <cpu/x86/msr.h>
33#include <cpu/x86/mtrr.h>
34#include <cpu/x86/cache.h>
Stefan Reinauer00093a82011-11-02 16:12:34 -070035#include <cpu/x86/lapic.h>
Sven Schnelleadfbcb792012-01-10 12:01:43 +010036#include <arch/cpu.h>
Stefan Reinauer00093a82011-11-02 16:12:34 -070037#include <arch/acpi.h>
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050038#include <memrange.h>
Aaron Durbin57686f82013-03-20 15:50:59 -050039#include <cpu/amd/mtrr.h>
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -060040#if IS_ENABLED(CONFIG_X86_AMD_FIXED_MTRRS)
Aaron Durbin57686f82013-03-20 15:50:59 -050041#define MTRR_FIXED_WRBACK_BITS (MTRR_READ_MEM | MTRR_WRITE_MEM)
42#else
43#define MTRR_FIXED_WRBACK_BITS 0
44#endif
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000045
/* 2 MTRRS are reserved for the operating system */
#define BIOS_MTRRS 6
#define OS_MTRRS 2
#define MTRRS (BIOS_MTRRS + OS_MTRRS)
/*
 * Static storage size for variable MTRRs. It's sized sufficiently large to
 * handle different types of CPUs. Empirically, 16 variable MTRRs has not
 * yet been observed.
 */
#define NUM_MTRR_STATIC_STORAGE 16

/* Runtime-detected variable-MTRR counts. They start at the compile-time
 * defaults above and are refined by detect_var_mtrrs() from MTRR_CAP_MSR. */
static int total_mtrrs = MTRRS;
static int bios_mtrrs = BIOS_MTRRS;
59
60static void detect_var_mtrrs(void)
61{
62 msr_t msr;
63
Alexandru Gagniuc86091f92015-09-30 20:23:09 -070064 msr = rdmsr(MTRR_CAP_MSR);
Stefan Reinauerc00dfbc2012-04-03 16:24:37 -070065
66 total_mtrrs = msr.lo & 0xff;
Gabe Black7756fe72014-02-25 01:40:34 -080067
68 if (total_mtrrs > NUM_MTRR_STATIC_STORAGE) {
69 printk(BIOS_WARNING,
70 "MTRRs detected (%d) > NUM_MTRR_STATIC_STORAGE (%d)\n",
71 total_mtrrs, NUM_MTRR_STATIC_STORAGE);
72 total_mtrrs = NUM_MTRR_STATIC_STORAGE;
73 }
Stefan Reinauerc00dfbc2012-04-03 16:24:37 -070074 bios_mtrrs = total_mtrrs - OS_MTRRS;
75}
76
Yinghai Lu13f1c2a2005-07-08 02:49:49 +000077void enable_fixed_mtrr(void)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000078{
79 msr_t msr;
80
Alexandru Gagniuc86091f92015-09-30 20:23:09 -070081 msr = rdmsr(MTRR_DEF_TYPE_MSR);
82 msr.lo |= MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN;
83 wrmsr(MTRR_DEF_TYPE_MSR, msr);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +000084}
85
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -060086void fixed_mtrrs_expose_amd_rwdram(void)
87{
88 msr_t syscfg;
89
90 if (!IS_ENABLED(CONFIG_X86_AMD_FIXED_MTRRS))
91 return;
92
93 syscfg = rdmsr(SYSCFG_MSR);
94 syscfg.lo |= SYSCFG_MSR_MtrrFixDramModEn;
95 wrmsr(SYSCFG_MSR, syscfg);
96}
97
98void fixed_mtrrs_hide_amd_rwdram(void)
99{
100 msr_t syscfg;
101
102 if (!IS_ENABLED(CONFIG_X86_AMD_FIXED_MTRRS))
103 return;
104
105 syscfg = rdmsr(SYSCFG_MSR);
106 syscfg.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
107 wrmsr(SYSCFG_MSR, syscfg);
108}
109
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500110static void enable_var_mtrr(unsigned char deftype)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000111{
112 msr_t msr;
113
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700114 msr = rdmsr(MTRR_DEF_TYPE_MSR);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500115 msr.lo &= ~0xff;
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700116 msr.lo |= MTRR_DEF_TYPE_EN | deftype;
117 wrmsr(MTRR_DEF_TYPE_MSR, msr);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000118}
119
/* Log level used for the per-sub-range MTRR chatter below. */
#define MTRR_VERBOSE_LEVEL BIOS_NEVER

/* MTRRs are at a 4KiB granularity. Therefore all address calculations can
 * be done with 32-bit numbers. This allows for the MTRR code to handle
 * up to 2^44 bytes (16 TiB) of address space. */
#define RANGE_SHIFT 12
#define ADDR_SHIFT_TO_RANGE_SHIFT(x) \
	(((x) > RANGE_SHIFT) ? ((x) - RANGE_SHIFT) : RANGE_SHIFT)
#define PHYS_TO_RANGE_ADDR(x) ((x) >> RANGE_SHIFT)
#define RANGE_TO_PHYS_ADDR(x) (((resource_t)(x)) << RANGE_SHIFT)
#define NUM_FIXED_MTRRS (NUM_FIXED_RANGES / RANGES_PER_FIXED_MTRR)

/* Helpful constants, expressed in 4KiB range-address units. */
#define RANGE_1MB PHYS_TO_RANGE_ADDR(1 << 20)
#define RANGE_4GB (1 << (ADDR_SHIFT_TO_RANGE_SHIFT(32)))

/*
 * The default MTRR type selection uses 3 approaches for selecting the
 * optimal number of variable MTRRs.  For each range do 3 calculations:
 * 1. UC as default type with no holes at top of range.
 * 2. UC as default using holes at top of range.
 * 3. WB as default.
 * If using holes is optimal for a range when UC is the default type the
 * tag is updated to direct the commit routine to use a hole at the top
 * of a range.
 */
/* The algorithm choice is stored in the range tag above the type bits. */
#define MTRR_ALGO_SHIFT (8)
#define MTRR_TAG_MASK ((1 << MTRR_ALGO_SHIFT) - 1)
/* If the default type is UC use the hole carving algorithm for a range. */
#define MTRR_RANGE_UC_USE_HOLE (1 << MTRR_ALGO_SHIFT)
150
/* Return a range entry's base address in 4KiB range-address units. */
static inline uint32_t range_entry_base_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_base(r));
}
155
/* Return a range entry's end address in 4KiB range-address units. */
static inline uint32_t range_entry_end_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_end(r));
}
160
/* Extract the MTRR memory type from a range entry's tag, masking off the
 * algorithm-selection bits above MTRR_ALGO_SHIFT. */
static inline int range_entry_mtrr_type(struct range_entry *r)
{
	return range_entry_tag(r) & MTRR_TAG_MASK;
}
165
Aaron Durbinca4f4b82014-02-08 15:41:52 -0600166static int filter_vga_wrcomb(struct device *dev, struct resource *res)
167{
168 /* Only handle PCI devices. */
169 if (dev->path.type != DEVICE_PATH_PCI)
170 return 0;
171
172 /* Only handle VGA class devices. */
173 if (((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA))
174 return 0;
175
176 /* Add resource as write-combining in the address space. */
177 return 1;
178}
179
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600180static void print_physical_address_space(const struct memranges *addr_space,
181 const char *identifier)
182{
183 const struct range_entry *r;
184
185 if (identifier)
186 printk(BIOS_DEBUG, "MTRR: %s Physical address space:\n",
187 identifier);
188 else
189 printk(BIOS_DEBUG, "MTRR: Physical address space:\n");
190
191 memranges_each_entry(r, addr_space)
192 printk(BIOS_DEBUG,
193 "0x%016llx - 0x%016llx size 0x%08llx type %ld\n",
194 range_entry_base(r), range_entry_end(r),
195 range_entry_size(r), range_entry_tag(r));
196}
197
/*
 * Build (once, lazily) and return the physical address space map used for
 * all MTRR calculations. Entries are tagged with the MTRR type to apply:
 * cacheable resources as WB, uncacheable ones as UC (UC wins on overlap),
 * prefetchable VGA resources as WC, and all holes below 4GiB as UC.
 */
static struct memranges *get_physical_address_space(void)
{
	/* NULL until first call; points at the static storage afterwards. */
	static struct memranges *addr_space;
	static struct memranges addr_space_storage;

	/* In order to handle some chipsets not being able to pre-determine
	 * uncacheable ranges, such as graphics memory, at resource insertion
	 * time remove uncacheable regions from the cacheable ones. */
	if (addr_space == NULL) {
		unsigned long mask;
		unsigned long match;

		addr_space = &addr_space_storage;

		mask = IORESOURCE_CACHEABLE;
		/* Collect cacheable and uncacheable address ranges. The
		 * uncacheable regions take precedence over the cacheable
		 * regions. */
		memranges_init(addr_space, mask, mask, MTRR_TYPE_WRBACK);
		memranges_add_resources(addr_space, mask, 0,
					MTRR_TYPE_UNCACHEABLE);

		/* Handle any write combining resources. Only prefetchable
		 * resources are appropriate for this MTRR type. */
		match = IORESOURCE_PREFETCH;
		mask |= match;
		memranges_add_resources_filter(addr_space, mask, match,
				MTRR_TYPE_WRCOMB, filter_vga_wrcomb);

		/* The address space below 4GiB is special. It needs to be
		 * covered entirely by range entries so that MTRR calculations
		 * can be properly done for the full 32-bit address space.
		 * Therefore, ensure holes are filled up to 4GiB as
		 * uncacheable */
		memranges_fill_holes_up_to(addr_space,
					RANGE_TO_PHYS_ADDR(RANGE_4GB),
					MTRR_TYPE_UNCACHEABLE);

		print_physical_address_space(addr_space, NULL);
	}

	return addr_space;
}
241
/* Fixed MTRR descriptor. This structure defines the step size and begin
 * and end (exclusive) address covered by a set of fixed MTRR MSRs.
 * It also describes the offset in byte intervals to store the calculated MTRR
 * type in an array. */
struct fixed_mtrr_desc {
	uint32_t begin;		/* First covered address (4KiB units). */
	uint32_t end;		/* Exclusive end address (4KiB units). */
	uint32_t step;		/* Size of one sub-range (4KiB units). */
	int range_index;	/* Index into fixed_mtrr_types[] for begin. */
	int msr_index_base;	/* MSR number of the first MSR of this set. */
};

/* Shared MTRR calculations. Can be reused by APs. */
static uint8_t fixed_mtrr_types[NUM_FIXED_RANGES];

/* Fixed MTRR descriptors: 8 x 64KiB below 512KiB, 16 x 16KiB up to 768KiB,
 * then 64 x 4KiB up to 1MiB — matching the architectural fixed ranges. */
static const struct fixed_mtrr_desc fixed_mtrr_desc[] = {
	{ PHYS_TO_RANGE_ADDR(0x000000), PHYS_TO_RANGE_ADDR(0x080000),
	  PHYS_TO_RANGE_ADDR(64 * 1024), 0, MTRR_FIX_64K_00000 },
	{ PHYS_TO_RANGE_ADDR(0x080000), PHYS_TO_RANGE_ADDR(0x0C0000),
	  PHYS_TO_RANGE_ADDR(16 * 1024), 8, MTRR_FIX_16K_80000 },
	{ PHYS_TO_RANGE_ADDR(0x0C0000), PHYS_TO_RANGE_ADDR(0x100000),
	  PHYS_TO_RANGE_ADDR(4 * 1024), 24, MTRR_FIX_4K_C0000 },
};
266
/*
 * Walk the physical address space map and record, for every fixed-MTRR
 * sub-range below 1MiB, the memory type to program into fixed_mtrr_types[].
 * The work is done only once (guarded by the initialized flag) so the
 * cached result can be reused, e.g. by APs.
 */
static void calc_fixed_mtrrs(void)
{
	static int fixed_mtrr_types_initialized;
	struct memranges *phys_addr_space;
	struct range_entry *r;
	const struct fixed_mtrr_desc *desc;
	const struct fixed_mtrr_desc *last_desc;
	uint32_t begin;
	uint32_t end;
	int type_index;

	if (fixed_mtrr_types_initialized)
		return;

	phys_addr_space = get_physical_address_space();

	/* Set all fixed ranges to uncacheable first. */
	memset(&fixed_mtrr_types[0], MTRR_TYPE_UNCACHEABLE, NUM_FIXED_RANGES);

	desc = &fixed_mtrr_desc[0];
	last_desc = &fixed_mtrr_desc[ARRAY_SIZE(fixed_mtrr_desc) - 1];

	memranges_each_entry(r, phys_addr_space) {
		begin = range_entry_base_mtrr_addr(r);
		end = range_entry_end_mtrr_addr(r);

		/* Ranges are sorted; past 1MiB nothing maps to fixed MTRRs. */
		if (begin >= last_desc->end)
			break;

		/* Clip the entry to the area the fixed MTRRs cover. */
		if (end > last_desc->end)
			end = last_desc->end;

		/* Get to the correct fixed mtrr descriptor. */
		while (begin >= desc->end)
			desc++;

		/* Index of the sub-range containing `begin` within the
		 * flat fixed_mtrr_types[] array. */
		type_index = desc->range_index;
		type_index += (begin - desc->begin) / desc->step;

		while (begin != end) {
			unsigned char type;

			type = range_entry_tag(r);
			printk(MTRR_VERBOSE_LEVEL,
				"MTRR addr 0x%x-0x%x set to %d type @ %d\n",
				begin, begin + desc->step, type, type_index);
			/* On AMD, WB DRAM additionally needs the
			 * RdDram/WrDram bits (no-op bits elsewhere). */
			if (type == MTRR_TYPE_WRBACK)
				type |= MTRR_FIXED_WRBACK_BITS;
			fixed_mtrr_types[type_index] = type;
			type_index++;
			begin += desc->step;
			/* Crossed into the next descriptor's territory. */
			if (begin == desc->end)
				desc++;
		}
	}
	fixed_mtrr_types_initialized = 1;
}
324
/*
 * Pack the cached fixed_mtrr_types[] bytes into fixed MTRR MSR images
 * (8 one-byte types per MSR: 4 in .lo, 4 in .hi) and write them to the
 * hardware with the cache disabled.  On AMD, the RdDram/WrDram bits are
 * exposed around the writes and hidden again afterwards.
 */
static void commit_fixed_mtrrs(void)
{
	int i;
	int j;
	int msr_num;
	int type_index;
	/* 8 ranges per msr. */
	msr_t fixed_msrs[NUM_FIXED_MTRRS];
	unsigned long msr_index[NUM_FIXED_MTRRS];

	fixed_mtrrs_expose_amd_rwdram();

	memset(&fixed_msrs, 0, sizeof(fixed_msrs));

	msr_num = 0;
	type_index = 0;
	for (i = 0; i < ARRAY_SIZE(fixed_mtrr_desc); i++) {
		const struct fixed_mtrr_desc *desc;
		int num_ranges;

		desc = &fixed_mtrr_desc[i];
		num_ranges = (desc->end - desc->begin) / desc->step;
		for (j = 0; j < num_ranges; j += RANGES_PER_FIXED_MTRR) {
			msr_index[msr_num] = desc->msr_index_base +
				(j / RANGES_PER_FIXED_MTRR);
			/* One type byte per sub-range, low word first. */
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 0;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 8;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 16;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 24;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 0;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 8;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 16;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 24;
			msr_num++;
		}
	}

	for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
		printk(BIOS_DEBUG, "MTRR: Fixed MSR 0x%lx 0x%08x%08x\n",
			msr_index[i], fixed_msrs[i].hi, fixed_msrs[i].lo);

	/* MTRR writes must happen with caching disabled. */
	disable_cache();
	for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
		wrmsr(msr_index[i], fixed_msrs[i]);
	enable_cache();
	fixed_mtrrs_hide_amd_rwdram();

}
381
/* Calculate and write the fixed MTRRs without touching the enable bits
 * in the default-type MSR (the caller enables them later). */
void x86_setup_fixed_mtrrs_no_enable(void)
{
	calc_fixed_mtrrs();
	commit_fixed_mtrrs();
}
Stefan Reinauer7f86ed12009-02-12 16:02:16 +0000387
/* Calculate, write, and enable the fixed MTRRs. */
void x86_setup_fixed_mtrrs(void)
{
	x86_setup_fixed_mtrrs_no_enable();

	printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
	enable_fixed_mtrr();
}
395
/* Image of one variable MTRR: the PHYS_BASE/PHYS_MASK MSR pair. */
struct var_mtrr_regs {
	msr_t base;
	msr_t mask;
};

/* A complete variable-MTRR programming: chosen default type plus the
 * register values for every used MTRR. */
struct var_mtrr_solution {
	int mtrr_default_type;
	int num_used;
	struct var_mtrr_regs regs[NUM_MTRR_STATIC_STORAGE];
};

/* Global storage for variable MTRR solution. */
static struct var_mtrr_solution mtrr_global_solution;

/* Working state threaded through the variable-MTRR calculations. */
struct var_mtrr_state {
	struct memranges *addr_space;	/* Address space being mapped. */
	int above4gb;		/* Nonzero: also process ranges above 4GiB. */
	int address_bits;	/* CPU physical address width. */
	int prepare_msrs;	/* Nonzero: fill regs[]; zero: only count. */
	int mtrr_index;		/* Next MTRR slot / running MTRR count. */
	int def_mtrr_type;	/* Assumed default type for this pass. */
	struct var_mtrr_regs *regs;	/* Output array when preparing. */
};
Aaron Durbin57686f82013-03-20 15:50:59 -0500419
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500420static void clear_var_mtrr(int index)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000421{
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600422 msr_t msr = { .lo = 0, .hi = 0 };
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500423
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600424 wrmsr(MTRR_PHYS_BASE(index), msr);
425 wrmsr(MTRR_PHYS_MASK(index), msr);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500426}
427
Gabe Black7756fe72014-02-25 01:40:34 -0800428static void prep_var_mtrr(struct var_mtrr_state *var_state,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700429 uint32_t base, uint32_t size, int mtrr_type)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500430{
Gabe Black7756fe72014-02-25 01:40:34 -0800431 struct var_mtrr_regs *regs;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500432 resource_t rbase;
433 resource_t rsize;
434 resource_t mask;
435
436 /* Some variable MTRRs are attempted to be saved for the OS use.
437 * However, it's more important to try to map the full address space
438 * properly. */
439 if (var_state->mtrr_index >= bios_mtrrs)
440 printk(BIOS_WARNING, "Taking a reserved OS MTRR.\n");
441 if (var_state->mtrr_index >= total_mtrrs) {
Paul Menzel6a70dbc2015-10-15 12:41:53 +0200442 printk(BIOS_ERR, "ERROR: Not enough MTRRs available! MTRR index"
443 "is %d with %d MTTRs in total.\n",
444 var_state->mtrr_index, total_mtrrs);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500445 return;
446 }
447
448 rbase = base;
449 rsize = size;
450
451 rbase = RANGE_TO_PHYS_ADDR(rbase);
452 rsize = RANGE_TO_PHYS_ADDR(rsize);
453 rsize = -rsize;
454
455 mask = (1ULL << var_state->address_bits) - 1;
456 rsize = rsize & mask;
457
458 printk(BIOS_DEBUG, "MTRR: %d base 0x%016llx mask 0x%016llx type %d\n",
459 var_state->mtrr_index, rbase, rsize, mtrr_type);
460
Gabe Black7756fe72014-02-25 01:40:34 -0800461 regs = &var_state->regs[var_state->mtrr_index];
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500462
Gabe Black7756fe72014-02-25 01:40:34 -0800463 regs->base.lo = rbase;
464 regs->base.lo |= mtrr_type;
465 regs->base.hi = rbase >> 32;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500466
Gabe Black7756fe72014-02-25 01:40:34 -0800467 regs->mask.lo = rsize;
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700468 regs->mask.lo |= MTRR_PHYS_MASK_VALID;
Gabe Black7756fe72014-02-25 01:40:34 -0800469 regs->mask.hi = rsize >> 32;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500470}
471
472static void calc_var_mtrr_range(struct var_mtrr_state *var_state,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700473 uint32_t base, uint32_t size, int mtrr_type)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500474{
475 while (size != 0) {
476 uint32_t addr_lsb;
477 uint32_t size_msb;
478 uint32_t mtrr_size;
479
480 addr_lsb = fls(base);
481 size_msb = fms(size);
482
483 /* All MTRR entries need to have their base aligned to the mask
484 * size. The maximum size is calculated by a function of the
485 * min base bit set and maximum size bit set. */
486 if (addr_lsb > size_msb)
487 mtrr_size = 1 << size_msb;
488 else
489 mtrr_size = 1 << addr_lsb;
490
Gabe Black7756fe72014-02-25 01:40:34 -0800491 if (var_state->prepare_msrs)
492 prep_var_mtrr(var_state, base, mtrr_size, mtrr_type);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500493
494 size -= mtrr_size;
495 base += mtrr_size;
496 var_state->mtrr_index++;
497 }
498}
499
static uint32_t optimize_var_mtrr_hole(const uint32_t base,
				       const uint32_t hole,
				       const uint64_t limit,
				       const int carve_hole)
{
	/*
	 * With default type UC, we can potentially optimize a WB
	 * range with unaligned upper end, by aligning it up and
	 * carving the added "hole" out again.
	 *
	 * To optimize the upper end of the hole, we will test
	 * how many MTRRs calc_var_mtrr_range() will spend for any
	 * alignment of the hole's upper end.
	 *
	 * We take four parameters, the lower end of the WB range
	 * `base`, upper end of the WB range as start of the `hole`,
	 * a `limit` how far we may align the upper end of the hole
	 * up and a flag `carve_hole` whether we should count MTRRs
	 * for carving the hole out. We return the optimal upper end
	 * for the hole (which may be the same as the end of the WB
	 * range in case we don't gain anything by aligning up).
	 */

	const int dont_care = 0;
	/* Zero-initialized state: prepare_msrs is 0, so the
	 * calc_var_mtrr_range() calls below only count MTRRs. */
	struct var_mtrr_state var_state = { 0, };

	unsigned int align, best_count;
	uint32_t best_end = hole;

	/* calculate MTRR count for the WB range alone (w/o a hole) */
	calc_var_mtrr_range(&var_state, base, hole - base, dont_care);
	best_count = var_state.mtrr_index;
	var_state.mtrr_index = 0;

	/* Probe every alignment coarser than hole's natural alignment. */
	for (align = fls(hole) + 1; align <= fms(hole); ++align) {
		const uint64_t hole_end = ALIGN_UP((uint64_t)hole, 1 << align);
		if (hole_end > limit)
			break;

		/* calculate MTRR count for this alignment */
		calc_var_mtrr_range(
			&var_state, base, hole_end - base, dont_care);
		if (carve_hole)
			calc_var_mtrr_range(
				&var_state, hole, hole_end - hole, dont_care);

		if (var_state.mtrr_index < best_count) {
			best_count = var_state.mtrr_index;
			best_end = hole_end;
		}
		var_state.mtrr_index = 0;
	}

	return best_end;
}
555
/*
 * Count/prepare variable MTRRs for one range entry, possibly rounding the
 * entry's end up to a cheaper alignment and carving the created "hole"
 * out again as UC.  Used when UC is the assumed default type.
 */
static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
				struct range_entry *r)
{
	uint32_t a1, a2, b1, b2;
	uint64_t b2_limit;
	int mtrr_type, carve_hole;
	struct range_entry *next;

	/*
	 * Determine MTRRs based on the following algorithm for the given entry:
	 * +------------------+ b2 = ALIGN_UP(end)
	 * | 0 or more bytes  | <-- hole is carved out between b1 and b2
	 * +------------------+ a2 = b1 = original end
	 * |                  |
	 * +------------------+ a1 = begin
	 *
	 * Thus, there are up to 2 sub-ranges to configure variable MTRRs for.
	 */
	mtrr_type = range_entry_mtrr_type(r);

	a1 = range_entry_base_mtrr_addr(r);
	a2 = range_entry_end_mtrr_addr(r);

	/* The end address is within the first 1MiB. The fixed MTRRs take
	 * precedence over the variable ones. Therefore this range
	 * can be ignored. */
	if (a2 <= RANGE_1MB)
		return;

	/* Again, the fixed MTRRs take precedence so the beginning
	 * of the range can be set to 0 if it starts at or below 1MiB. */
	if (a1 <= RANGE_1MB)
		a1 = 0;

	/* If the range starts above 4GiB the processing is done. */
	if (!var_state->above4gb && a1 >= RANGE_4GB)
		return;

	/* Clip the upper address to 4GiB if addresses above 4GiB
	 * are not being processed. */
	if (!var_state->above4gb && a2 > RANGE_4GB)
		a2 = RANGE_4GB;

	b1 = a2;

	/*
	 * Depending on the type of the next range, there are three
	 * different situations to handle:
	 *
	 * 1. WB range is last in address space:
	 *    Aligning up, up to the next power of 2, may gain us
	 *    something.
	 *
	 * 2. The next range is of type UC:
	 *    We may align up, up to the _end_ of the next range. If
	 *    there is a gap between the current and the next range,
	 *    it would have been covered by the default type UC anyway.
	 *
	 * 3. The next range is not of type UC:
	 *    We may align up, up to the _base_ of the next range. This
	 *    may either be the end of the current range (if the next
	 *    range follows immediately) or the end of the gap between
	 *    the ranges.
	 */
	next = memranges_next_entry(var_state->addr_space, r);
	if (next == NULL) {
		b2_limit = ALIGN_UP((uint64_t)b1, 1 << fms(b1));
		/* If it's the last range above 4GiB, we won't carve
		   the hole out. If an OS wanted to move MMIO there,
		   it would have to override the MTRR setting using
		   PAT just like it would with WB as default type. */
		carve_hole = a1 < RANGE_4GB;
	} else if (range_entry_mtrr_type(next) == MTRR_TYPE_UNCACHEABLE) {
		b2_limit = range_entry_end_mtrr_addr(next);
		carve_hole = 1;
	} else {
		b2_limit = range_entry_base_mtrr_addr(next);
		carve_hole = 1;
	}
	/* Pick the cheapest end for the (possibly empty) hole. */
	b2 = optimize_var_mtrr_hole(a1, b1, b2_limit, carve_hole);

	calc_var_mtrr_range(var_state, a1, b2 - a1, mtrr_type);
	if (carve_hole && b2 != b1) {
		calc_var_mtrr_range(var_state, b1, b2 - b1,
				MTRR_TYPE_UNCACHEABLE);
	}
}
643
644static void calc_var_mtrrs_without_hole(struct var_mtrr_state *var_state,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700645 struct range_entry *r)
Aaron Durbine3834422013-03-28 20:48:51 -0500646{
Nico Huberceb52712017-10-06 19:08:51 +0200647 const int mtrr_type = range_entry_mtrr_type(r);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500648
Nico Huberceb52712017-10-06 19:08:51 +0200649 uint32_t base = range_entry_base_mtrr_addr(r);
650 uint32_t end = range_entry_end_mtrr_addr(r);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500651
Aaron Durbina38677b2016-07-21 14:26:34 -0500652 /* The end address is within the first 1MiB. The fixed MTRRs take
Aaron Durbine3834422013-03-28 20:48:51 -0500653 * precedence over the variable ones. Therefore this range
654 * can be ignored. */
Nico Huberceb52712017-10-06 19:08:51 +0200655 if (end <= RANGE_1MB)
Aaron Durbine3834422013-03-28 20:48:51 -0500656 return;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500657
Aaron Durbine3834422013-03-28 20:48:51 -0500658 /* Again, the fixed MTRRs take precedence so the beginning
Aaron Durbina38677b2016-07-21 14:26:34 -0500659 * of the range can be set to 0 if it starts at or below 1MiB. */
Nico Huberceb52712017-10-06 19:08:51 +0200660 if (base <= RANGE_1MB)
661 base = 0;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500662
Aaron Durbine3834422013-03-28 20:48:51 -0500663 /* If the range starts above 4GiB the processing is done. */
Nico Huberceb52712017-10-06 19:08:51 +0200664 if (!var_state->above4gb && base >= RANGE_4GB)
Aaron Durbine3834422013-03-28 20:48:51 -0500665 return;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500666
Aaron Durbine3834422013-03-28 20:48:51 -0500667 /* Clip the upper address to 4GiB if addresses above 4GiB
668 * are not being processed. */
Nico Huberceb52712017-10-06 19:08:51 +0200669 if (!var_state->above4gb && end > RANGE_4GB)
670 end = RANGE_4GB;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500671
Nico Huberceb52712017-10-06 19:08:51 +0200672 calc_var_mtrr_range(var_state, base, end - base, mtrr_type);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500673}
674
/*
 * Count how many variable MTRRs would be needed to describe the given
 * address space under each candidate default type, returning the totals
 * through num_def_wb_mtrrs (WB default) and num_def_uc_mtrrs (UC default).
 * Nothing is written to hardware; this pass only counts and tags ranges.
 */
static void __calc_var_mtrrs(struct memranges *addr_space,
			int above4gb, int address_bits,
			int *num_def_wb_mtrrs, int *num_def_uc_mtrrs)
{
	int wb_deftype_count;
	int uc_deftype_count;
	struct range_entry *r;
	struct var_mtrr_state var_state;

	/* The default MTRR cacheability type is determined by calculating
	 * the number of MTRRs required for each MTRR type as if it was the
	 * default. */
	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	/* Count-only pass: never prepare MSR values here. */
	var_state.prepare_msrs = 0;

	wb_deftype_count = 0;
	uc_deftype_count = 0;

	/*
	 * For each range do 3 calculations:
	 * 1. UC as default type with no holes at top of range.
	 * 2. UC as default using holes at top of range.
	 * 3. WB as default.
	 * The lowest count is then used as default after totaling all
	 * MTRRs. Note that the optimal algorithm for UC default is marked in
	 * the tag of each range regardless of final decision. UC takes
	 * precedence in the MTRR architecture. Therefore, only holes can be
	 * used when the type of the region is MTRR_TYPE_WRBACK with
	 * MTRR_TYPE_UNCACHEABLE as the default type.
	 */
	memranges_each_entry(r, var_state.addr_space) {
		int mtrr_type;

		mtrr_type = range_entry_mtrr_type(r);

		/* With a UC default, UC ranges need no MTRR of their own. */
		if (mtrr_type != MTRR_TYPE_UNCACHEABLE) {
			int uc_hole_count;
			int uc_no_hole_count;

			var_state.def_mtrr_type = MTRR_TYPE_UNCACHEABLE;
			var_state.mtrr_index = 0;

			/* No hole calculation. */
			calc_var_mtrrs_without_hole(&var_state, r);
			uc_no_hole_count = var_state.mtrr_index;

			/* Hole calculation only if type is WB. The 64 number
			 * is a count that is unachievable, thus making it
			 * a default large number in the case of not doing
			 * the hole calculation. */
			uc_hole_count = 64;
			if (mtrr_type == MTRR_TYPE_WRBACK) {
				var_state.mtrr_index = 0;
				calc_var_mtrrs_with_hole(&var_state, r);
				uc_hole_count = var_state.mtrr_index;
			}

			/* Mark the entry with the optimal algorithm. */
			if (uc_no_hole_count < uc_hole_count) {
				uc_deftype_count += uc_no_hole_count;
			} else {
				unsigned long new_tag;

				new_tag = mtrr_type | MTRR_RANGE_UC_USE_HOLE;
				range_entry_update_tag(r, new_tag);
				uc_deftype_count += uc_hole_count;
			}
		}

		/* With a WB default, WB ranges need no MTRR of their own. */
		if (mtrr_type != MTRR_TYPE_WRBACK) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_WRBACK;
			calc_var_mtrrs_without_hole(&var_state, r);
			wb_deftype_count += var_state.mtrr_index;
		}
	}
	*num_def_wb_mtrrs = wb_deftype_count;
	*num_def_uc_mtrrs = uc_deftype_count;
}
756
757static int calc_var_mtrrs(struct memranges *addr_space,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700758 int above4gb, int address_bits)
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600759{
760 int wb_deftype_count = 0;
761 int uc_deftype_count = 0;
762
763 __calc_var_mtrrs(addr_space, above4gb, address_bits, &wb_deftype_count,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700764 &uc_deftype_count);
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600765
766 if (wb_deftype_count > bios_mtrrs && uc_deftype_count > bios_mtrrs) {
767 printk(BIOS_DEBUG, "MTRR: Removing WRCOMB type. "
768 "WB/UC MTRR counts: %d/%d > %d.\n",
769 wb_deftype_count, uc_deftype_count, bios_mtrrs);
770 memranges_update_tag(addr_space, MTRR_TYPE_WRCOMB,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700771 MTRR_TYPE_UNCACHEABLE);
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600772 __calc_var_mtrrs(addr_space, above4gb, address_bits,
Lee Leahya07d0dd2017-03-15 14:25:22 -0700773 &wb_deftype_count, &uc_deftype_count);
Aaron Durbin5b9e3b62014-02-05 16:00:43 -0600774 }
Scott Duplichanf3cce2f2010-11-13 19:07:59 +0000775
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500776 printk(BIOS_DEBUG, "MTRR: default type WB/UC MTRR counts: %d/%d.\n",
777 wb_deftype_count, uc_deftype_count);
Kyösti Mälkkiffc1fb32012-07-11 14:40:19 +0300778
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500779 if (wb_deftype_count < uc_deftype_count) {
780 printk(BIOS_DEBUG, "MTRR: WB selected as default type.\n");
781 return MTRR_TYPE_WRBACK;
782 }
783 printk(BIOS_DEBUG, "MTRR: UC selected as default type.\n");
784 return MTRR_TYPE_UNCACHEABLE;
785}
Kyösti Mälkkiffc1fb32012-07-11 14:40:19 +0300786
Gabe Black7756fe72014-02-25 01:40:34 -0800787static void prepare_var_mtrrs(struct memranges *addr_space, int def_type,
788 int above4gb, int address_bits,
789 struct var_mtrr_solution *sol)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500790{
Aaron Durbine3834422013-03-28 20:48:51 -0500791 struct range_entry *r;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500792 struct var_mtrr_state var_state;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500793
794 var_state.addr_space = addr_space;
795 var_state.above4gb = above4gb;
796 var_state.address_bits = address_bits;
Gabe Black7756fe72014-02-25 01:40:34 -0800797 /* Prepare the MSRs. */
798 var_state.prepare_msrs = 1;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500799 var_state.mtrr_index = 0;
800 var_state.def_mtrr_type = def_type;
Gabe Black7756fe72014-02-25 01:40:34 -0800801 var_state.regs = &sol->regs[0];
Aaron Durbine3834422013-03-28 20:48:51 -0500802
803 memranges_each_entry(r, var_state.addr_space) {
804 if (range_entry_mtrr_type(r) == def_type)
805 continue;
806
807 if (def_type == MTRR_TYPE_UNCACHEABLE &&
808 (range_entry_tag(r) & MTRR_RANGE_UC_USE_HOLE))
809 calc_var_mtrrs_with_hole(&var_state, r);
810 else
811 calc_var_mtrrs_without_hole(&var_state, r);
812 }
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500813
Gabe Black7756fe72014-02-25 01:40:34 -0800814 /* Update the solution. */
815 sol->num_used = var_state.mtrr_index;
816}
817
Aaron Durbind9762f72017-06-12 12:48:38 -0500818static int commit_var_mtrrs(const struct var_mtrr_solution *sol)
Gabe Black7756fe72014-02-25 01:40:34 -0800819{
820 int i;
821
Aaron Durbind9762f72017-06-12 12:48:38 -0500822 if (sol->num_used > total_mtrrs) {
823 printk(BIOS_WARNING, "Not enough MTRRs: %d vs %d\n",
824 sol->num_used, total_mtrrs);
825 return -1;
826 }
827
Isaac Christensen81f90c52014-09-24 14:59:32 -0600828 /* Write out the variable MTRRs. */
Gabe Black7756fe72014-02-25 01:40:34 -0800829 disable_cache();
830 for (i = 0; i < sol->num_used; i++) {
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700831 wrmsr(MTRR_PHYS_BASE(i), sol->regs[i].base);
832 wrmsr(MTRR_PHYS_MASK(i), sol->regs[i].mask);
Gabe Black7756fe72014-02-25 01:40:34 -0800833 }
834 /* Clear the ones that are unused. */
835 for (; i < total_mtrrs; i++)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500836 clear_var_mtrr(i);
Isaac Christensen81f90c52014-09-24 14:59:32 -0600837 enable_var_mtrr(sol->mtrr_default_type);
Gabe Black7756fe72014-02-25 01:40:34 -0800838 enable_cache();
839
Aaron Durbind9762f72017-06-12 12:48:38 -0500840 return 0;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500841}
842
843void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
844{
Gabe Black7756fe72014-02-25 01:40:34 -0800845 static struct var_mtrr_solution *sol = NULL;
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500846 struct memranges *addr_space;
847
848 addr_space = get_physical_address_space();
849
Gabe Black7756fe72014-02-25 01:40:34 -0800850 if (sol == NULL) {
Gabe Black7756fe72014-02-25 01:40:34 -0800851 sol = &mtrr_global_solution;
852 sol->mtrr_default_type =
Aaron Durbinbb4e79a2013-03-26 14:09:47 -0500853 calc_var_mtrrs(addr_space, !!above4gb, address_bits);
Gabe Black7756fe72014-02-25 01:40:34 -0800854 prepare_var_mtrrs(addr_space, sol->mtrr_default_type,
855 !!above4gb, address_bits, sol);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000856 }
Stefan Reinauer00093a82011-11-02 16:12:34 -0700857
Gabe Black7756fe72014-02-25 01:40:34 -0800858 commit_var_mtrrs(sol);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000859}
860
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100861void x86_setup_mtrrs(void)
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000862{
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100863 int address_size;
Aaron Durbine63be892016-03-07 16:05:36 -0600864
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000865 x86_setup_fixed_mtrrs();
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100866 address_size = cpu_phys_address_size();
Aaron Durbine63be892016-03-07 16:05:36 -0600867 printk(BIOS_DEBUG, "CPU physical address size: %d bits\n",
868 address_size);
869 /* Always handle addresses above 4GiB. */
Sven Schnelleadfbcb792012-01-10 12:01:43 +0100870 x86_setup_var_mtrrs(address_size, 1);
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000871}
872
/* Detect the number of variable MTRRs first, then perform the full
 * fixed + variable MTRR setup. */
void x86_setup_mtrrs_with_detect(void)
{
	detect_var_mtrrs();
	x86_setup_mtrrs();
}
878
Kyösti Mälkki38a8fb02014-06-30 13:48:18 +0300879void x86_mtrr_check(void)
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000880{
881 /* Only Pentium Pro and later have MTRR */
882 msr_t msr;
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000883 printk(BIOS_DEBUG, "\nMTRR check\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000884
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700885 msr = rdmsr(MTRR_DEF_TYPE_MSR);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000886
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000887 printk(BIOS_DEBUG, "Fixed MTRRs : ");
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700888 if (msr.lo & MTRR_DEF_TYPE_FIX_EN)
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000889 printk(BIOS_DEBUG, "Enabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000890 else
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000891 printk(BIOS_DEBUG, "Disabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000892
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000893 printk(BIOS_DEBUG, "Variable MTRRs: ");
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700894 if (msr.lo & MTRR_DEF_TYPE_EN)
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000895 printk(BIOS_DEBUG, "Enabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000896 else
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000897 printk(BIOS_DEBUG, "Disabled\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000898
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000899 printk(BIOS_DEBUG, "\n");
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000900
901 post_code(0x93);
Eric Biedermanfcd5ace2004-10-14 19:29:29 +0000902}
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600903
/* Set by mtrr_use_temp_range() once a temporary solution has been
 * committed; tells remove_temp_solution() to restore the boot-time
 * mtrr_global_solution. */
static bool put_back_original_solution;
905
906void mtrr_use_temp_range(uintptr_t begin, size_t size, int type)
907{
908 const struct range_entry *r;
909 const struct memranges *orig;
910 struct var_mtrr_solution sol;
911 struct memranges addr_space;
912 const int above4gb = 1; /* Cover above 4GiB by default. */
913 int address_bits;
914
915 /* Make a copy of the original address space and tweak it with the
916 * provided range. */
917 memranges_init_empty(&addr_space, NULL, 0);
918 orig = get_physical_address_space();
919 memranges_each_entry(r, orig) {
920 unsigned long tag = range_entry_tag(r);
921
922 /* Remove any special tags from original solution. */
923 tag &= ~MTRR_RANGE_UC_USE_HOLE;
924
925 /* Remove any write combining MTRRs from the temporary
926 * solution as it just fragments the address space. */
927 if (tag == MTRR_TYPE_WRCOMB)
928 tag = MTRR_TYPE_UNCACHEABLE;
929
930 memranges_insert(&addr_space, range_entry_base(r),
931 range_entry_size(r), tag);
932 }
933
934 /* Place new range into the address space. */
935 memranges_insert(&addr_space, begin, size, type);
936
937 print_physical_address_space(&addr_space, "TEMPORARY");
938
939 /* Calculate a new solution with the updated address space. */
940 address_bits = cpu_phys_address_size();
941 memset(&sol, 0, sizeof(sol));
942 sol.mtrr_default_type =
943 calc_var_mtrrs(&addr_space, above4gb, address_bits);
944 prepare_var_mtrrs(&addr_space, sol.mtrr_default_type,
945 above4gb, address_bits, &sol);
Aaron Durbind9762f72017-06-12 12:48:38 -0500946
947 if (commit_var_mtrrs(&sol) < 0)
948 printk(BIOS_WARNING, "Unable to insert temporary MTRR range: 0x%016llx - 0x%016llx size 0x%08llx type %d\n",
949 (long long)begin, (long long)begin + size,
950 (long long)size, type);
951 else
952 put_back_original_solution = true;
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600953
954 memranges_teardown(&addr_space);
Aaron Durbin2bebd7b2016-11-10 15:15:35 -0600955}
956
/* Boot-state callback: if a temporary MTRR solution was committed,
 * restore the cached boot-time solution. */
static void remove_temp_solution(void *unused)
{
	if (put_back_original_solution)
		commit_var_mtrrs(&mtrr_global_solution);
}

/* Ensure any temporary MTRR setup is undone both on S3 resume entry and
 * just before the payload runs. */
BOOT_STATE_INIT_ENTRY(BS_OS_RESUME, BS_ON_ENTRY, remove_temp_solution, NULL);
BOOT_STATE_INIT_ENTRY(BS_PAYLOAD_LOAD, BS_ON_EXIT, remove_temp_solution, NULL);