#ifndef CPU_X86_MTRR_H
#define CPU_X86_MTRR_H

/* These are the region types */
#define MTRR_TYPE_UNCACHEABLE 0
#define MTRR_TYPE_WRCOMB 1
/*#define MTRR_TYPE_ 2*/
/*#define MTRR_TYPE_ 3*/
#define MTRR_TYPE_WRTHROUGH 4
#define MTRR_TYPE_WRPROT 5
#define MTRR_TYPE_WRBACK 6
#define MTRR_NUM_TYPES 7

#define MTRRcap_MSR 0x0fe

#define MTRRcapSmrr (1 << 11)
#define MTRRcapWc (1 << 10)
#define MTRRcapFix (1 << 8)
#define MTRRcapVcnt 0xff

#define MTRRdefType_MSR 0x2ff

#define MTRRdefTypeEn (1 << 11)
#define MTRRdefTypeFixEn (1 << 10)
#define MTRRdefTypeType 0xff

#define SMRRphysBase_MSR 0x1f2
#define SMRRphysMask_MSR 0x1f3

#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)

#define MTRRphysMaskValid (1 << 11)

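/* The 88 fixed ranges below are covered by 11 fixed-range MTRR MSRs (one
 * 64KiB, two 16KiB and eight 4KiB registers), each describing 8 consecutive
 * sub-ranges.
 */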
#define NUM_FIXED_RANGES 88
#define RANGES_PER_FIXED_MTRR 8
#define MTRRfix64K_00000_MSR 0x250
#define MTRRfix16K_80000_MSR 0x258
#define MTRRfix16K_A0000_MSR 0x259
#define MTRRfix4K_C0000_MSR 0x268
#define MTRRfix4K_C8000_MSR 0x269
#define MTRRfix4K_D0000_MSR 0x26a
#define MTRRfix4K_D8000_MSR 0x26b
#define MTRRfix4K_E0000_MSR 0x26c
#define MTRRfix4K_E8000_MSR 0x26d
#define MTRRfix4K_F0000_MSR 0x26e
#define MTRRfix4K_F8000_MSR 0x26f

#if !defined(__ASSEMBLER__) && !defined(__PRE_RAM__)

/*
 * The MTRR code has some side effects that the callers should be aware of.
 * 1. The call sequence matters. x86_setup_mtrrs() calls
 *    x86_setup_fixed_mtrrs_no_enable() then enable_fixed_mtrr() (the
 *    equivalent of x86_setup_fixed_mtrrs()) then x86_setup_var_mtrrs(). If
 *    callers want to call the components of x86_setup_mtrrs() because of
 *    other requirements, the ordering should still be preserved.
 * 2. enable_fixed_mtrr() will enable both variable and fixed MTRRs because
 *    of the nature of the global MTRR enable flag. Therefore, all direct
 *    or indirect callers of enable_fixed_mtrr() should ensure that the
 *    variable MTRR MSRs do not contain bad ranges.
 */
void x86_setup_mtrrs(void);
/*
 * x86_setup_var_mtrrs() parameters:
 * address_bits - number of physical address bits supported by the CPU
 * above4gb - 2 means dynamically detect number of variable MTRRs available.
 *            non-zero means handle memory ranges above 4GiB.
 *            0 means ignore memory ranges above 4GiB.
 */
void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb);
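/* Illustrative only, assuming a CPU that reports 36 physical address bits:
 * x86_setup_var_mtrrs(36, 2) covers memory above 4GiB and detects the number
 * of available variable MTRRs at runtime, while x86_setup_var_mtrrs(36, 0)
 * ignores memory ranges above 4GiB.
 */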
void enable_fixed_mtrr(void);
void x86_setup_fixed_mtrrs(void);
/* Set up fixed MTRRs but do not enable them. */
void x86_setup_fixed_mtrrs_no_enable(void);
void x86_mtrr_check(void);
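/*
 * A sketch of the ordering requirement described above: a caller that cannot
 * use x86_setup_mtrrs() directly should still mirror its sequence, e.g.
 *
 *   x86_setup_fixed_mtrrs_no_enable();
 *   enable_fixed_mtrr();        (this also sets the global MTRR enable flag)
 *   x86_setup_var_mtrrs(address_bits, 2);
 *
 * where address_bits is assumed to come from CPUID leaf 0x80000008 or a
 * platform default.
 */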
#endif

#if !defined(__ASSEMBLER__) && defined(__PRE_RAM__) && !defined(__ROMCC__)
void set_var_mtrr(unsigned reg, unsigned base, unsigned size, unsigned type);
#endif
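/* A minimal pre-RAM sketch (the register index and cache type are
 * illustrative, not a requirement of this API): cache the boot ROM
 * write-protected with one variable MTRR:
 *
 *   set_var_mtrr(0, CACHE_ROM_BASE, CACHE_ROM_SIZE, MTRR_TYPE_WRPROT);
 */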

/* Align up to next power of 2, suitable for ROMCC and assembler too.
 * Range of result 256kB to 128MB is good enough here.
 */
#define _POW2_MASK(x) ((x>>1)|(x>>2)|(x>>3)|(x>>4)|(x>>5)| \
                       (x>>6)|(x>>7)|(x>>8)|((1<<18)-1))
#define _ALIGN_UP_POW2(x) ((x + _POW2_MASK(x)) & ~_POW2_MASK(x))
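/* Worked example: _ALIGN_UP_POW2(0xc00000) (12MB) evaluates to 0x1000000
 * (16MB); a value that is already a power of 2, e.g. 0x400000, is unchanged.
 */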

#if !defined(CONFIG_RAMTOP)
# error "CONFIG_RAMTOP not defined"
#endif

#if ((CONFIG_XIP_ROM_SIZE & (CONFIG_XIP_ROM_SIZE - 1)) != 0)
# error "CONFIG_XIP_ROM_SIZE is not a power of 2"
#endif

/* Select CACHE_ROM_SIZE to use with MTRR setup. For most cases this
 * resolves to a suitable CONFIG_ROM_SIZE but some odd cases need to
 * use CONFIG_CACHE_ROM_SIZE_OVERRIDE in the mainboard Kconfig.
 */
#if (CONFIG_CACHE_ROM_SIZE_OVERRIDE != 0)
# define CACHE_ROM_SIZE CONFIG_CACHE_ROM_SIZE_OVERRIDE
#else
# if ((CONFIG_ROM_SIZE & (CONFIG_ROM_SIZE - 1)) == 0)
#  define CACHE_ROM_SIZE CONFIG_ROM_SIZE
# else
#  define CACHE_ROM_SIZE _ALIGN_UP_POW2(CONFIG_ROM_SIZE)
#  if (CACHE_ROM_SIZE < CONFIG_ROM_SIZE) || (CACHE_ROM_SIZE >= (2 * CONFIG_ROM_SIZE))
#   error "CACHE_ROM_SIZE is not optimal."
#  endif
# endif
#endif
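/* For example, with no override set: an 8MB CONFIG_ROM_SIZE is already a
 * power of 2 and is used as-is, while a 12MB ROM is rounded up to a 16MB
 * CACHE_ROM_SIZE, which still passes the "< 2 * CONFIG_ROM_SIZE" sanity check.
 */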

#if ((CACHE_ROM_SIZE & (CACHE_ROM_SIZE - 1)) != 0)
# error "CACHE_ROM_SIZE is not a power of 2."
#endif

#define CACHE_ROM_BASE (((1 << 20) - (CACHE_ROM_SIZE >> 12)) << 12)
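/* CACHE_ROM_BASE places the cached ROM window directly below 4GiB, with the
 * arithmetic done in 4KiB pages, presumably so it stays within 32 bits. For
 * example, a 16MB CACHE_ROM_SIZE yields a CACHE_ROM_BASE of 0xff000000.
 */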

#if (CONFIG_RAMTOP & (CONFIG_RAMTOP - 1)) != 0
# error "CONFIG_RAMTOP must be a power of 2"
#endif

#endif /* CPU_X86_MTRR_H */