#ifndef CPU_X86_MSR_H
#define CPU_X86_MSR_H

#if defined(__ROMCC__)
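/* ROMCC provides the MSR type and accessors as compiler builtins, so
 * no inline assembly is needed in this case. */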

typedef __builtin_msr_t msr_t;

static msr_t rdmsr(unsigned long index)
{
	return __builtin_rdmsr(index);
}

static void wrmsr(unsigned long index, msr_t msr)
{
	__builtin_wrmsr(index, msr.lo, msr.hi);
}

#else

typedef struct msr_struct
{
	unsigned int lo;
	unsigned int hi;
} msr_t;

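/*
 * An MSR is architecturally a 64-bit register; the hi/lo fields mirror
 * the EDX:EAX halves used by the rdmsr/wrmsr instructions. A minimal
 * sketch of recombining the halves into one value (msr is assumed to
 * hold the result of an rdmsr() call):
 *
 *	unsigned long long value =
 *		((unsigned long long)msr.hi << 32) | msr.lo;
 */
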
typedef struct msrinit_struct
{
	unsigned int index;
	msr_t msr;
} msrinit_t;

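/*
 * Example of an msrinit_t table (a sketch; the index 0x277, IA32_PAT,
 * and its power-on default value are chosen only for illustration):
 *
 *	static const msrinit_t msr_table[] = {
 *		{ 0x277, { .lo = 0x00070406, .hi = 0x00070406 } },
 *	};
 */
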
#if IS_ENABLED(CONFIG_SOC_SETS_MSRS)
msr_t soc_msr_read(unsigned int index);
void soc_msr_write(unsigned int index, msr_t msr);

/* Route MSR accesses from the rest of the source code through the
 * SoC-specific handlers. */
static inline __attribute__((always_inline)) msr_t rdmsr(unsigned int index)
{
	return soc_msr_read(index);
}

static inline __attribute__((always_inline)) void wrmsr(unsigned int index, msr_t msr)
{
	soc_msr_write(index, msr);
}
#else /* CONFIG_SOC_SETS_MSRS */

/* The following functions require always_inline because of the AMD
 * STOP_CAR_AND_CPU function: once it disables cache-as-RAM, the
 * cache-as-RAM stack can no longer be used, so called functions
 * must be inlined to avoid stack usage. The compiler must also
 * keep local variables in registers rather than allocating them
 * on the stack. With gcc 4.5.0, some functions declared as inline
 * were not being inlined; the __attribute__((always_inline))
 * qualifier forces them to always be inlined.
 */
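/* Both instructions select the MSR with the ECX register and move the
 * 64-bit value through the EDX:EAX register pair, which matches the
 * hi/lo fields of msr_t. */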
static inline __attribute__((always_inline)) msr_t rdmsr(unsigned int index)
{
	msr_t result;
	__asm__ __volatile__ (
		"rdmsr"
		: "=a" (result.lo), "=d" (result.hi)
		: "c" (index)
	);
	return result;
}

static inline __attribute__((always_inline)) void wrmsr(unsigned int index, msr_t msr)
{
	__asm__ __volatile__ (
		"wrmsr"
		: /* No outputs */
		: "c" (index), "a" (msr.lo), "d" (msr.hi)
	);
}

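/*
 * Typical read-modify-write usage (a sketch; MSR 0x1B is IA32_APIC_BASE
 * and bit 11 its global-enable bit, chosen here only as an example):
 *
 *	msr_t msr = rdmsr(0x1B);
 *	msr.lo |= (1 << 11);
 *	wrmsr(0x1B, msr);
 */
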
#endif /* CONFIG_SOC_SETS_MSRS */
#endif /* __ROMCC__ */

#endif /* CPU_X86_MSR_H */