include/cpu/x86: Split MSR access into separate file

To allow testing of code that uses MSR calls, move the actual calls
into a separate header file. This allows tests to emulate MSR access
without replacing the rest of the msr.h definitions.
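
For example, a unit test build could place its own msr_access.h
earlier on the include path so that msr.h picks it up instead of the
real one. A minimal sketch of such a test-local header follows; the
fake_msr_* helpers and the header-override approach are illustrative
only and not part of this change:

  /* Hypothetical test-only stand-in for <cpu/x86/msr_access.h>. */
  #ifndef CPU_X86_MSR_ACCESS_H
  #define CPU_X86_MSR_ACCESS_H

  typedef struct msr_struct {
  	unsigned int lo;
  	unsigned int hi;
  } msr_t;

  /* Provided by the test; backs rdmsr/wrmsr with an in-memory table. */
  msr_t fake_msr_read(unsigned int index);
  void fake_msr_write(unsigned int index, msr_t msr);

  static inline msr_t rdmsr(unsigned int index)
  {
  	return fake_msr_read(index);
  }

  static inline void wrmsr(unsigned int index, msr_t msr)
  {
  	fake_msr_write(index, msr);
  }

  #endif /* CPU_X86_MSR_ACCESS_H */

The test can then seed whatever register contents the code under test
expects without ever executing a real rdmsr/wrmsr instruction.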

Signed-off-by: Martin Roth <gaumless@gmail.com>
Change-Id: I102709fec346f18040baf9f2ce6e6d7eb094682d
Reviewed-on: https://review.coreboot.org/c/coreboot/+/67917
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Jakub Czapiga <jacz@semihalf.com>
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
diff --git a/src/include/cpu/x86/msr.h b/src/include/cpu/x86/msr.h
index ca2a37f..999fdaa 100644
--- a/src/include/cpu/x86/msr.h
+++ b/src/include/cpu/x86/msr.h
@@ -3,6 +3,8 @@
 #ifndef CPU_X86_MSR_H
 #define CPU_X86_MSR_H
 
+#include <cpu/x86/msr_access.h>
+
 /* Intel SDM: Table 2-1
  * IA-32 architectural MSR: Extended Feature Enable Register
  *
@@ -107,66 +109,12 @@
 #define IA32_CR_SF_QOS_MASK_2		0x1892
 
 #ifndef __ASSEMBLER__
-#include <types.h>
-
-typedef struct msr_struct {
-	unsigned int lo;
-	unsigned int hi;
-} msr_t;
 
 typedef struct msrinit_struct {
 	unsigned int index;
 	msr_t msr;
 } msrinit_t;
 
-#if CONFIG(SOC_SETS_MSRS)
-msr_t soc_msr_read(unsigned int index);
-void soc_msr_write(unsigned int index, msr_t msr);
-
-/* Handle MSR references in the other source code */
-static __always_inline msr_t rdmsr(unsigned int index)
-{
-	return soc_msr_read(index);
-}
-
-static __always_inline void wrmsr(unsigned int index, msr_t msr)
-{
-	soc_msr_write(index, msr);
-}
-#else /* CONFIG_SOC_SETS_MSRS */
-
-/* The following functions require the __always_inline due to AMD
- * function STOP_CAR_AND_CPU that disables cache as
- * RAM, the cache as RAM stack can no longer be used. Called
- * functions must be inlined to avoid stack usage. Also, the
- * compiler must keep local variables register based and not
- * allocated them from the stack. With gcc 4.5.0, some functions
- * declared as inline are not being inlined. This patch forces
- * these functions to always be inlined by adding the qualifier
- * __always_inline to their declaration.
- */
-static __always_inline msr_t rdmsr(unsigned int index)
-{
-	msr_t result;
-	__asm__ __volatile__ (
-		"rdmsr"
-		: "=a" (result.lo), "=d" (result.hi)
-		: "c" (index)
-		);
-	return result;
-}
-
-static __always_inline void wrmsr(unsigned int index, msr_t msr)
-{
-	__asm__ __volatile__ (
-		"wrmsr"
-		: /* No outputs */
-		: "c" (index), "a" (msr.lo), "d" (msr.hi)
-		);
-}
-
-#endif /* CONFIG_SOC_SETS_MSRS */
-
 /* Get MCA bank count from MSR */
 static inline unsigned int mca_get_bank_count(void)
 {
diff --git a/src/include/cpu/x86/msr_access.h b/src/include/cpu/x86/msr_access.h
new file mode 100644
index 0000000..a7f72fd
--- /dev/null
+++ b/src/include/cpu/x86/msr_access.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef CPU_X86_MSR_ACCESS_H
+#define CPU_X86_MSR_ACCESS_H
+
+#ifndef __ASSEMBLER__
+#include <types.h>
+
+typedef struct msr_struct {
+	unsigned int lo;
+	unsigned int hi;
+} msr_t;
+
+#if CONFIG(SOC_SETS_MSRS)
+msr_t soc_msr_read(unsigned int index);
+void soc_msr_write(unsigned int index, msr_t msr);
+
+/* Handle MSR references in the other source code */
+static __always_inline msr_t rdmsr(unsigned int index)
+{
+	return soc_msr_read(index);
+}
+
+static __always_inline void wrmsr(unsigned int index, msr_t msr)
+{
+	soc_msr_write(index, msr);
+}
+#else /* CONFIG_SOC_SETS_MSRS */
+
+/* The following functions require the __always_inline due to AMD
+ * function STOP_CAR_AND_CPU that disables cache as
+ * RAM, the cache as RAM stack can no longer be used. Called
+ * functions must be inlined to avoid stack usage. Also, the
+ * compiler must keep local variables register based and not
+ * allocated them from the stack. With gcc 4.5.0, some functions
+ * declared as inline are not being inlined. This patch forces
+ * these functions to always be inlined by adding the qualifier
+ * __always_inline to their declaration.
+ */
+static __always_inline msr_t rdmsr(unsigned int index)
+{
+	msr_t result;
+	__asm__ __volatile__ (
+		"rdmsr"
+		: "=a" (result.lo), "=d" (result.hi)
+		: "c" (index)
+		);
+	return result;
+}
+
+static __always_inline void wrmsr(unsigned int index, msr_t msr)
+{
+	__asm__ __volatile__ (
+		"wrmsr"
+		: /* No outputs */
+		: "c" (index), "a" (msr.lo), "d" (msr.hi)
+		);
+}
+
+#endif /* CONFIG_SOC_SETS_MSRS */
+#endif /* __ASSEMBLER__ */
+#endif /* CPU_X86_MSR_ACCESS_H */