blob: b2bc2c86b1effc508926b56c4a61a97dca382556 [file] [log] [blame]
Eric Biedermanc84c1902004-10-14 20:13:01 +00001#ifndef CPU_X86_MTRR_H
2#define CPU_X86_MTRR_H
3
Nico Huberca74f8f2018-06-01 21:57:54 +02004#ifndef __ASSEMBLER__
5#include <cpu/x86/msr.h>
6#include <arch/cpu.h>
7#endif
Nico Huber6197b762018-05-26 20:34:21 +02008
Eric Biedermanc84c1902004-10-14 20:13:01 +00009/* These are the region types */
Alexandru Gagniuc86091f92015-09-30 20:23:09 -070010#define MTRR_TYPE_UNCACHEABLE 0
11#define MTRR_TYPE_WRCOMB 1
12#define MTRR_TYPE_WRTHROUGH 4
13#define MTRR_TYPE_WRPROT 5
14#define MTRR_TYPE_WRBACK 6
15#define MTRR_NUM_TYPES 7
Eric Biedermanc84c1902004-10-14 20:13:01 +000016
Alexandru Gagniuc86091f92015-09-30 20:23:09 -070017#define MTRR_CAP_MSR 0x0fe
Lee Leahybfdf2482015-06-18 10:55:19 -070018
Kyösti Mälkkieadd2512020-06-11 09:52:45 +030019#define MTRR_CAP_PRMRR (1 << 12)
Alexandru Gagniuc86091f92015-09-30 20:23:09 -070020#define MTRR_CAP_SMRR (1 << 11)
21#define MTRR_CAP_WC (1 << 10)
22#define MTRR_CAP_FIX (1 << 8)
23#define MTRR_CAP_VCNT 0xff
Lee Leahybfdf2482015-06-18 10:55:19 -070024
Alexandru Gagniuc86091f92015-09-30 20:23:09 -070025#define MTRR_DEF_TYPE_MSR 0x2ff
26#define MTRR_DEF_TYPE_MASK 0xff
27#define MTRR_DEF_TYPE_EN (1 << 11)
28#define MTRR_DEF_TYPE_FIX_EN (1 << 10)
Eric Biedermanc84c1902004-10-14 20:13:01 +000029
Arthur Heymanse750b38e2018-07-20 23:31:59 +020030#define IA32_SMRR_PHYS_BASE 0x1f2
31#define IA32_SMRR_PHYS_MASK 0x1f3
Tim Wawrzynczak62669a22020-09-01 16:08:02 -060032#define SMRR_PHYS_MASK_LOCK (1 << 10)
Duncan Laurie7b678922012-01-09 22:05:18 -080033
Arthur Heymanseaaa5492020-10-28 19:30:36 +010034/* Specific to model_6fx and model_1067x.
35 These are named MSR_SMRR_PHYSBASE in the SDM. */
36#define CORE2_SMRR_PHYS_BASE 0xa0
37#define CORE2_SMRR_PHYS_MASK 0xa1
Arthur Heymans06f818c2018-07-20 23:41:54 +020038
Lee Leahy84d20d02017-03-07 15:00:18 -080039#define MTRR_PHYS_BASE(reg) (0x200 + 2 * (reg))
40#define MTRR_PHYS_MASK(reg) (MTRR_PHYS_BASE(reg) + 1)
Alexandru Gagniuc86091f92015-09-30 20:23:09 -070041#define MTRR_PHYS_MASK_VALID (1 << 11)
Eric Biedermanc84c1902004-10-14 20:13:01 +000042
Lee Leahy84d20d02017-03-07 15:00:18 -080043#define NUM_FIXED_RANGES 88
44#define RANGES_PER_FIXED_MTRR 8
Alexandru Gagniuc86091f92015-09-30 20:23:09 -070045#define MTRR_FIX_64K_00000 0x250
46#define MTRR_FIX_16K_80000 0x258
47#define MTRR_FIX_16K_A0000 0x259
48#define MTRR_FIX_4K_C0000 0x268
49#define MTRR_FIX_4K_C8000 0x269
50#define MTRR_FIX_4K_D0000 0x26a
51#define MTRR_FIX_4K_D8000 0x26b
52#define MTRR_FIX_4K_E0000 0x26c
53#define MTRR_FIX_4K_E8000 0x26d
54#define MTRR_FIX_4K_F0000 0x26e
55#define MTRR_FIX_4K_F8000 0x26f
Eric Biedermanc84c1902004-10-14 20:13:01 +000056
Arthur Heymans1cb9cd52019-11-28 16:05:08 +010057#if !defined(__ASSEMBLER__)
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050058
Aaron Durbin2bebd7b2016-11-10 15:15:35 -060059#include <stdint.h>
60#include <stddef.h>
61
/*
 * The MTRR code has some side effects that the callers should be aware of.
 * 1. The call sequence matters. x86_setup_mtrrs() calls
 *    x86_setup_fixed_mtrrs_no_enable() then enable_fixed_mtrrs() (equivalent
 *    of x86_setup_fixed_mtrrs()) then x86_setup_var_mtrrs(). If the callers
 *    want to call the components of x86_setup_mtrrs() because of other
 *    requirements, the ordering should still be preserved.
 * 2. enable_fixed_mtrr() will enable both variable and fixed MTRRs because
 *    of the nature of the global MTRR enable flag. Therefore, all direct
 *    or indirect callers of enable_fixed_mtrr() should ensure that the
 *    variable MTRR MSRs do not contain bad ranges.
 *
 * Note that this function sets up MTRRs for addresses above 4GiB.
 */
Sven Schnelleadfbcb792012-01-10 12:01:43 +010076void x86_setup_mtrrs(void);
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050077/*
Aaron Durbine63be892016-03-07 16:05:36 -060078 * x86_setup_mtrrs_with_detect() does the same thing as x86_setup_mtrrs(), but
79 * it always dynamically detects the number of variable MTRRs available.
80 */
81void x86_setup_mtrrs_with_detect(void);
Aaron Durbin1ebbb162020-05-28 10:17:34 -060082void x86_setup_mtrrs_with_detect_no_above_4gb(void);
Aaron Durbine63be892016-03-07 16:05:36 -060083/*
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050084 * x86_setup_var_mtrrs() parameters:
85 * address_bits - number of physical address bits supported by cpu
Aaron Durbine63be892016-03-07 16:05:36 -060086 * above4gb - if set setup MTRRs for addresses above 4GiB else ignore
87 * memory ranges above 4GiB
Aaron Durbinbb4e79a2013-03-26 14:09:47 -050088 */
89void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb);
90void enable_fixed_mtrr(void);
Marshall Dawsonc0dbeda2017-10-19 09:45:16 -060091/* Unhide Rd/WrDram bits and allow modification for AMD. */
92void fixed_mtrrs_expose_amd_rwdram(void);
93/* Hide Rd/WrDram bits and allow modification for AMD. */
94void fixed_mtrrs_hide_amd_rwdram(void);
Maciej Pijankaea921852009-10-27 14:29:29 +000095void x86_setup_fixed_mtrrs(void);
Aaron Durbin57686f82013-03-20 15:50:59 -050096/* Set up fixed MTRRs but do not enable them. */
97void x86_setup_fixed_mtrrs_no_enable(void);
Kyösti Mälkki38a8fb02014-06-30 13:48:18 +030098void x86_mtrr_check(void);
Aaron Durbin2bebd7b2016-11-10 15:15:35 -060099
100/* Insert a temporary MTRR range for the duration of coreboot's runtime.
101 * This function needs to be called after the first MTRR solution is derived. */
102void mtrr_use_temp_range(uintptr_t begin, size_t size, int type);
Eric Biedermanc84c1902004-10-14 20:13:01 +0000103
Nico Huberd67edca2018-11-13 19:28:07 +0100104static inline int get_var_mtrr_count(void)
105{
106 return rdmsr(MTRR_CAP_MSR).lo & MTRR_CAP_VCNT;
107}
108
Lee Leahy0ca2a062017-03-06 18:01:04 -0800109void set_var_mtrr(unsigned int reg, unsigned int base, unsigned int size,
110 unsigned int type);
Furquan Shaikh331ac1b2016-03-16 16:12:06 -0700111int get_free_var_mtrr(void);
Raul E Rangel3ae3ff22020-04-27 15:47:18 -0600112void clear_all_var_mtrr(void);
Rizwan Qureshi8453c4f2016-09-07 20:11:11 +0530113
Nico Huberd67edca2018-11-13 19:28:07 +0100114asmlinkage void display_mtrrs(void);
115
/* Accumulates a set of variable MTRR settings (filled in by var_mtrr_set())
   before they are written to hardware by commit_mtrr_setup(). */
struct var_mtrr_context {
	/* Capacity of mtrr[] — presumably the CPU's variable MTRR count as
	   filled in by var_mtrr_context_init(); confirm against the .c file. */
	uint32_t max_var_mtrrs;
	/* Number of mtrr[] entries currently populated. */
	uint32_t used_var_mtrrs;
	struct {
		msr_t base;	/* Value for MTRR_PHYS_BASE(reg) */
		msr_t mask;	/* Value for MTRR_PHYS_MASK(reg) */
	} mtrr[];	/* Flexible array member; caller allocates the storage. */
};
124
Arthur Heymans46b409d2021-05-14 13:19:43 +0200125void var_mtrr_context_init(struct var_mtrr_context *ctx);
Aaron Durbinfa5eded2020-05-28 21:21:49 -0600126int var_mtrr_set(struct var_mtrr_context *ctx, uintptr_t addr, size_t size, int type);
Arthur Heymans46b409d2021-05-14 13:19:43 +0200127void commit_mtrr_setup(const struct var_mtrr_context *ctx);
128void postcar_mtrr_setup(void);
Nico Huberca74f8f2018-06-01 21:57:54 +0200129
/* fms: find most significant bit set, stolen from Linux Kernel Source.
   Returns the bit index (0..31) of the highest set bit in x via BSR.
   BSR sets ZF when x == 0, so the fall-through path returns 0 in that
   case (indistinguishable from fms(1)). */
static inline unsigned int fms(unsigned int x)
{
	unsigned int msb_index;

	__asm__("bsrl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $0,%0\n"
		"1:"
		: "=r" (msb_index)
		: "mr" (x));

	return msb_index;
}
141
/* fls: find least significant bit set.
   NOTE: unlike the Linux helper of the same name, this one scans from the
   low end (BSF). Returns the bit index (0..31) of the lowest set bit in x;
   BSF sets ZF when x == 0, so the fall-through path returns 32 then. */
static inline unsigned int fls(unsigned int x)
{
	unsigned int lsb_index;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $32,%0\n"
		"1:"
		: "=r" (lsb_index)
		: "mr" (x));

	return lsb_index;
}
Arthur Heymans1cb9cd52019-11-28 16:05:08 +0100153#endif /* !defined(__ASSEMBLER__) */
Rizwan Qureshi8453c4f2016-09-07 20:11:11 +0530154
Arthur Heymans1cb9cd52019-11-28 16:05:08 +0100155/* Align up/down to next power of 2, suitable for assembler
Nico Huberb4953a92018-05-26 17:47:42 +0200156 too. Range of result 256kB to 128MB is good enough here. */
Kyösti Mälkki107f72e2014-01-06 11:06:26 +0200157#define _POW2_MASK(x) ((x>>1)|(x>>2)|(x>>3)|(x>>4)|(x>>5)| \
158 (x>>6)|(x>>7)|(x>>8)|((1<<18)-1))
159#define _ALIGN_UP_POW2(x) ((x + _POW2_MASK(x)) & ~_POW2_MASK(x))
Nico Huberb4953a92018-05-26 17:47:42 +0200160#define _ALIGN_DOWN_POW2(x) ((x) & ~_POW2_MASK(x))
161
162/* Calculate `4GiB - x` (e.g. absolute address for offset from 4GiB) */
Nico Huber540a6642019-02-10 20:14:57 +0100163#define _FROM_4G_TOP(x) ((0xffffffff - (x)) + 1)
Kyösti Mälkki107f72e2014-01-06 11:06:26 +0200164
Elyes HAOUAS918535a2016-07-28 21:25:21 +0200165/* At the end of romstage, low RAM 0..CACHE_TM_RAMTOP may be set
Kyösti Mälkki65cc5262016-06-19 20:38:41 +0300166 * as write-back cacheable to speed up ramstage decompression.
167 * Note MTRR boundaries, must be power of two.
168 */
169#define CACHE_TMP_RAMTOP (16<<20)
Stefan Reinauer8f2c6162010-04-06 21:50:21 +0000170
Nico Huberb4953a92018-05-26 17:47:42 +0200171/* For ROM caching, generally, try to use the next power of 2. */
172#define OPTIMAL_CACHE_ROM_SIZE _ALIGN_UP_POW2(CONFIG_ROM_SIZE)
173#define OPTIMAL_CACHE_ROM_BASE _FROM_4G_TOP(OPTIMAL_CACHE_ROM_SIZE)
174#if (OPTIMAL_CACHE_ROM_SIZE < CONFIG_ROM_SIZE) || \
175 (OPTIMAL_CACHE_ROM_SIZE >= (2 * CONFIG_ROM_SIZE))
176# error "Optimal CACHE_ROM_SIZE can't be derived, _POW2_MASK needs refinement."
177#endif
178
179/* Make sure it doesn't overlap CAR, though. If the gap between
180 CAR and 4GiB is too small, make it at most the size of this
181 gap. As we can't align up (might overlap again), align down
182 to get a power of 2 again, for a single MTRR. */
183#define CAR_END (CONFIG_DCACHE_RAM_BASE + CONFIG_DCACHE_RAM_SIZE)
184#if CAR_END > OPTIMAL_CACHE_ROM_BASE
Nico Huber6197b762018-05-26 20:34:21 +0200185# define CAR_CACHE_ROM_SIZE _ALIGN_DOWN_POW2(_FROM_4G_TOP(CAR_END))
Kyösti Mälkki107f72e2014-01-06 11:06:26 +0200186#else
Nico Huber6197b762018-05-26 20:34:21 +0200187# define CAR_CACHE_ROM_SIZE OPTIMAL_CACHE_ROM_SIZE
Nico Huberb4953a92018-05-26 17:47:42 +0200188#endif
Nico Huber6197b762018-05-26 20:34:21 +0200189#if ((CAR_CACHE_ROM_SIZE & (CAR_CACHE_ROM_SIZE - 1)) != 0)
190# error "CAR CACHE_ROM_SIZE is not a power of 2, _POW2_MASK needs refinement."
191#endif
192
193/* Last but not least, most (if not all) chipsets have MMIO
194 between 0xfe000000 and 0xff000000, so limit to 16MiB. */
Nico Huberb7816802020-05-27 17:29:30 +0200195#if CAR_CACHE_ROM_SIZE >= 16 << 20
196# define CACHE_ROM_SIZE (16 << 20)
Nico Huber6197b762018-05-26 20:34:21 +0200197#else
198# define CACHE_ROM_SIZE CAR_CACHE_ROM_SIZE
Kyösti Mälkki5458b9d2012-06-30 11:41:08 +0300199#endif
200
Nico Huberb4953a92018-05-26 17:47:42 +0200201#define CACHE_ROM_BASE _FROM_4G_TOP(CACHE_ROM_SIZE)
Kyösti Mälkki107f72e2014-01-06 11:06:26 +0200202
Eric Biedermanc84c1902004-10-14 20:13:01 +0000203#endif /* CPU_X86_MTRR_H */