- Renamed cpu header files


git-svn-id: svn://svn.coreboot.org/coreboot/trunk@1659 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
diff --git a/src/include/cpu/x86/bist.h b/src/include/cpu/x86/bist.h
new file mode 100644
index 0000000..6a62150
--- /dev/null
+++ b/src/include/cpu/x86/bist.h
@@ -0,0 +1,13 @@
+#ifndef CPU_X86_BIST_H
+#define CPU_X86_BIST_H
+
+static void report_bist_failure(unsigned long bist)
+{
+	if (bist != 0) {
+		print_emerg("BIST failed: ");
+		print_emerg_hex32(bist);
+		die("\r\n");
+	}
+}
+
+#endif /* CPU_X86_BIST_H */
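
Usage sketch (illustrative, not part of the patch): report_bist_failure() is meant to be called very early with the BIST value the CPU leaves in EAX at reset. The romstage entry shown here is a hypothetical signature; real boards pass the saved BIST value in their own way.

	#include <cpu/x86/bist.h>

	static void main(unsigned long bist)
	{
		/* Halts with an error message if any self-test bit is set. */
		report_bist_failure(bist);
		/* ... continue with early chipset and RAM init ... */
	}
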
diff --git a/src/include/cpu/x86/cache.h b/src/include/cpu/x86/cache.h
new file mode 100644
index 0000000..623ef97
--- /dev/null
+++ b/src/include/cpu/x86/cache.h
@@ -0,0 +1,48 @@
+#ifndef CPU_X86_CACHE
+#define CPU_X86_CACHE
+
+static inline unsigned long read_cr0(void)
+{
+	unsigned long cr0;
+	asm volatile ("movl %%cr0, %0" : "=r" (cr0));
+	return cr0;
+}
+
+static inline void write_cr0(unsigned long cr0)
+{
+	asm volatile ("movl %0, %%cr0" : : "r" (cr0));
+}
+
+static inline void invd(void)
+{
+	asm volatile("invd" ::: "memory");
+}
+static inline void wbinvd(void)
+{
+	asm volatile ("wbinvd");
+}
+
+static inline void enable_cache(void)
+{
+	unsigned long cr0;
+	cr0 = read_cr0();
+	cr0 &= 0x9fffffff;	/* clear CD (bit 30) and NW (bit 29) */
+	write_cr0(cr0);
+}
+
+static inline void disable_cache(void)
+{
+	/* Disable and write back the cache */
+	unsigned long cr0;
+	cr0 = read_cr0();
+	cr0 |= 0x40000000;	/* set CD (bit 30) to disable the cache */
+	wbinvd();
+	write_cr0(cr0);
+	wbinvd();
+}
+
+#ifndef __ROMCC__
+void x86_enable_cache(void);
+#endif /* !__ROMCC__ */
+
+#endif /* CPU_X86_CACHE */
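
For reference, the masks in enable_cache()/disable_cache() touch the CD (bit 30) and NW (bit 29) flags of CR0. A sketch with named constants (CR0_CD and CR0_NW are illustrative names, not provided by this header):

	#include <cpu/x86/cache.h>

	#define CR0_NW	(1UL << 29)	/* not write-through */
	#define CR0_CD	(1UL << 30)	/* cache disable */

	static inline void enable_cache_named(void)
	{
		/* Same effect as the cr0 &= 0x9fffffff above. */
		write_cr0(read_cr0() & ~(CR0_CD | CR0_NW));
	}
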
diff --git a/src/include/cpu/x86/lapic.h b/src/include/cpu/x86/lapic.h
new file mode 100644
index 0000000..12ca518
--- /dev/null
+++ b/src/include/cpu/x86/lapic.h
@@ -0,0 +1,168 @@
+#ifndef CPU_X86_LAPIC_H
+#define CPU_X86_LAPIC_H
+
+#include <cpu/x86/lapic_def.h>
+#include <cpu/x86/msr.h>
+#include <arch/hlt.h>
+
+/* See if I need to initialize the local apic */
+#if CONFIG_SMP || CONFIG_IOAPIC
+#  define NEED_LAPIC 1
+#endif
+
+static inline unsigned long lapic_read(unsigned long reg)
+{
+	return *((volatile unsigned long *)(LAPIC_DEFAULT_BASE+reg));
+}
+
+static inline void lapic_write(unsigned long reg, unsigned long v)
+{
+	*((volatile unsigned long *)(LAPIC_DEFAULT_BASE+reg)) = v;
+}
+
+static inline void lapic_wait_icr_idle(void)
+{
+	do { } while ( lapic_read( LAPIC_ICR ) & LAPIC_ICR_BUSY );
+}
+
+
+
+static inline void enable_lapic(void)
+{
+
+	msr_t msr;
+	msr = rdmsr(LAPIC_BASE_MSR);
+	msr.hi &= 0xffffff00;	/* clear base address bits 39:32 */
+	msr.lo &= 0x000007ff;	/* clear the base address and the enable bit */
+	msr.lo |= LAPIC_DEFAULT_BASE | (1 << 11);	/* default base, APIC globally enabled */
+	wrmsr(LAPIC_BASE_MSR, msr);
+}
+
+static inline void disable_lapic(void)
+{
+	msr_t msr;
+	msr = rdmsr(LAPIC_BASE_MSR);
+	msr.lo &= ~(1 << 11);
+	wrmsr(LAPIC_BASE_MSR, msr);
+}
+
+static inline unsigned long lapicid(void)
+{
+	return lapic_read(LAPIC_ID) >> 24;
+}
+
+static inline void stop_this_cpu(void)
+{
+	unsigned apicid;
+	apicid = lapicid();
+
+	/* Send an APIC INIT to myself */
+	lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
+	lapic_write(LAPIC_ICR, LAPIC_INT_LEVELTRIG | LAPIC_INT_ASSERT | LAPIC_DM_INIT);
+	/* Wait for the ipi send to finish */
+	lapic_wait_icr_idle();
+
+	/* Deassert the APIC INIT */
+	lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
+	lapic_write(LAPIC_ICR,  LAPIC_INT_LEVELTRIG | LAPIC_DM_INIT);
+	/* Wait for the ipi send to finish */
+	lapic_wait_icr_idle();
+
+	/* If the self INIT did not stop this CPU, halt in a loop forever */
+	for(;;) {
+		hlt();
+	}
+}
+
+#if ! defined (__ROMCC__)
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
+ *	  but generally the primitive is invalid, *ptr is output argument. --ANK
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+	switch (size) {
+		case 1:
+			__asm__ __volatile__("xchgb %b0,%1"
+				:"=q" (x)
+				:"m" (*__xg(ptr)), "0" (x)
+				:"memory");
+			break;
+		case 2:
+			__asm__ __volatile__("xchgw %w0,%1"
+				:"=r" (x)
+				:"m" (*__xg(ptr)), "0" (x)
+				:"memory");
+			break;
+		case 4:
+			__asm__ __volatile__("xchgl %0,%1"
+				:"=r" (x)
+				:"m" (*__xg(ptr)), "0" (x)
+				:"memory");
+			break;
+	}
+	return x;
+}
+
+
+extern inline void lapic_write_atomic(unsigned long reg, unsigned long v)
+{
+	xchg((volatile unsigned long *)(LAPIC_DEFAULT_BASE+reg), v);
+}
+
+
+#ifdef CONFIG_X86_GOOD_APIC
+# define FORCE_READ_AROUND_WRITE 0
+# define lapic_read_around(x) lapic_read(x)
+# define lapic_write_around(x,y) lapic_write((x),(y))
+#else
+# define FORCE_READ_AROUND_WRITE 1
+# define lapic_read_around(x) lapic_read(x)
+# define lapic_write_around(x,y) lapic_write_atomic((x),(y))
+#endif
+
+static inline int lapic_remote_read(int apicid, int reg, unsigned long *pvalue)
+{
+	int timeout;
+	unsigned long status;
+	int result;
+	lapic_wait_icr_idle();
+	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
+	lapic_write_around(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
+	timeout = 0;
+	do {
+#if 0
+		udelay(100);
+#endif
+		status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
+	} while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);
+
+	result = -1;
+	if (status == LAPIC_ICR_RR_VALID) {
+		*pvalue = lapic_read(LAPIC_RRR);
+		result = 0;
+	}
+	return result;
+}
+
+
+void setup_lapic(void);
+
+
+#if CONFIG_SMP == 1
+struct device;
+int start_cpu(struct device *cpu);
+
+#endif /* CONFIG_SMP */
+
+
+#endif /* !__ROMCC__ */
+
+#endif /* CPU_X86_LAPIC_H */
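
Usage sketch for the LAPIC helpers (assumes a ramstage/GCC build where printk_debug() is available from <console/console.h>; enable_lapic() also forces the base back to LAPIC_DEFAULT_BASE):

	#include <cpu/x86/lapic.h>
	#include <console/console.h>

	static void show_lapic(void)
	{
		enable_lapic();
		printk_debug("LAPIC ID %02lx, version register %08lx\n",
			lapicid(), lapic_read(LAPIC_LVR));
	}
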
diff --git a/src/include/cpu/x86/lapic_def.h b/src/include/cpu/x86/lapic_def.h
new file mode 100644
index 0000000..6035273
--- /dev/null
+++ b/src/include/cpu/x86/lapic_def.h
@@ -0,0 +1,92 @@
+#ifndef CPU_X86_LAPIC_DEF_H
+#define CPU_X86_LAPIC_DEF_H
+
+#define LAPIC_BASE_MSR 0x1B
+#define LAPIC_BASE_MSR_BOOTSTRAP_PROCESSOR (1 << 8)
+#define LAPIC_BASE_MSR_ENABLE (1 << 11)
+#define LAPIC_BASE_MSR_ADDR_MASK 0xFFFFF000
+
+#define LAPIC_DEFAULT_BASE 0xfee00000
+
+#define LAPIC_ID		0x020
+#define LAPIC_LVR	0x030
+#define	LAPIC_TASKPRI	0x80
+#define		LAPIC_TPRI_MASK		0xFF
+#define LAPIC_ARBID	0x090
+#define	LAPIC_RRR	0x0C0
+#define LAPIC_SVR	0x0f0
+#define LAPIC_SPIV	0x0f0
+#define 	LAPIC_SPIV_ENABLE  0x100
+#define LAPIC_ESR	0x280
+#define		LAPIC_ESR_SEND_CS	0x00001
+#define		LAPIC_ESR_RECV_CS	0x00002
+#define		LAPIC_ESR_SEND_ACC	0x00004
+#define		LAPIC_ESR_RECV_ACC	0x00008
+#define		LAPIC_ESR_SENDILL	0x00020
+#define		LAPIC_ESR_RECVILL	0x00040
+#define		LAPIC_ESR_ILLREGA	0x00080
+#define LAPIC_ICR 	0x300
+#define		LAPIC_DEST_SELF		0x40000
+#define		LAPIC_DEST_ALLINC	0x80000
+#define		LAPIC_DEST_ALLBUT	0xC0000
+#define		LAPIC_ICR_RR_MASK	0x30000
+#define		LAPIC_ICR_RR_INVALID	0x00000
+#define		LAPIC_ICR_RR_INPROG	0x10000
+#define		LAPIC_ICR_RR_VALID	0x20000
+#define		LAPIC_INT_LEVELTRIG	0x08000
+#define		LAPIC_INT_ASSERT		0x04000
+#define		LAPIC_ICR_BUSY		0x01000
+#define		LAPIC_DEST_LOGICAL	0x00800
+#define		LAPIC_DM_FIXED		0x00000
+#define		LAPIC_DM_LOWEST		0x00100
+#define		LAPIC_DM_SMI		0x00200
+#define		LAPIC_DM_REMRD		0x00300
+#define		LAPIC_DM_NMI		0x00400
+#define		LAPIC_DM_INIT		0x00500
+#define		LAPIC_DM_STARTUP		0x00600
+#define		LAPIC_DM_EXTINT		0x00700
+#define		LAPIC_VECTOR_MASK	0x000FF
+#define LAPIC_ICR2	0x310
+#define		GET_LAPIC_DEST_FIELD(x)	(((x)>>24)&0xFF)
+#define		SET_LAPIC_DEST_FIELD(x)	((x)<<24)
+#define LAPIC_LVTT	0x320
+#define LAPIC_LVTPC	0x340
+#define LAPIC_LVT0	0x350
+#define		LAPIC_LVT_TIMER_BASE_MASK	(0x3<<18)
+#define		GET_LAPIC_TIMER_BASE(x)		(((x)>>18)&0x3)
+#define		SET_LAPIC_TIMER_BASE(x)		(((x)<<18))
+#define		LAPIC_TIMER_BASE_CLKIN		0x0
+#define		LAPIC_TIMER_BASE_TMBASE		0x1
+#define		LAPIC_TIMER_BASE_DIV		0x2
+#define		LAPIC_LVT_TIMER_PERIODIC		(1<<17)
+#define		LAPIC_LVT_MASKED			(1<<16)
+#define		LAPIC_LVT_LEVEL_TRIGGER		(1<<15)
+#define		LAPIC_LVT_REMOTE_IRR		(1<<14)
+#define		LAPIC_INPUT_POLARITY		(1<<13)
+#define		LAPIC_SEND_PENDING		(1<<12)
+#define		LAPIC_LVT_RESERVED_1		(1<<11)
+#define		LAPIC_DELIVERY_MODE_MASK		(7<<8)
+#define		LAPIC_DELIVERY_MODE_FIXED	(0<<8)
+#define		LAPIC_DELIVERY_MODE_NMI		(4<<8)
+#define		LAPIC_DELIVERY_MODE_EXTINT	(7<<8)
+#define		GET_LAPIC_DELIVERY_MODE(x)	(((x)>>8)&0x7)
+#define		SET_LAPIC_DELIVERY_MODE(x,y)	(((x)&~0x700)|((y)<<8))
+#define			LAPIC_MODE_FIXED		0x0
+#define			LAPIC_MODE_NMI		0x4
+#define			LAPIC_MODE_EXINT		0x7
+#define LAPIC_LVT1	0x360
+#define LAPIC_LVTERR	0x370
+#define	LAPIC_TMICT	0x380
+#define	LAPIC_TMCCT	0x390
+#define	LAPIC_TDCR	0x3E0
+#define		LAPIC_TDR_DIV_TMBASE	(1<<2)
+#define		LAPIC_TDR_DIV_1		0xB
+#define		LAPIC_TDR_DIV_2		0x0
+#define		LAPIC_TDR_DIV_4		0x1
+#define		LAPIC_TDR_DIV_8		0x2
+#define		LAPIC_TDR_DIV_16		0x3
+#define		LAPIC_TDR_DIV_32		0x8
+#define		LAPIC_TDR_DIV_64		0x9
+#define		LAPIC_TDR_DIV_128	0xA
+
+#endif /* CPU_X86_LAPIC_DEF_H */
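
These constants are meant to be OR-ed together into ICR writes. A sketch of sending a fixed-vector IPI to a single CPU, using the helpers from <cpu/x86/lapic.h> (send_fixed_ipi() is an illustrative name):

	#include <cpu/x86/lapic.h>

	static void send_fixed_ipi(unsigned apicid, unsigned vector)
	{
		lapic_wait_icr_idle();
		lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
		lapic_write(LAPIC_ICR, LAPIC_DM_FIXED | (vector & LAPIC_VECTOR_MASK));
		lapic_wait_icr_idle();	/* wait for the IPI to be delivered */
	}
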
diff --git a/src/include/cpu/x86/mem.h b/src/include/cpu/x86/mem.h
new file mode 100644
index 0000000..4849ed2
--- /dev/null
+++ b/src/include/cpu/x86/mem.h
@@ -0,0 +1,18 @@
+#ifndef CPU_X86_MEM_H
+#define CPU_X86_MEM_H
+
+/* Optimized generic x86 assembly for clearing memory; size must be a nonzero multiple of four bytes. */
+static inline void clear_memory(void *addr, unsigned long size)
+{
+	asm volatile(
+		"1: \n\t"
+		"movl %2, (%0)\n\t"
+		"addl $4, %0\n\t"
+		"subl $4, %1\n\t"
+		"jnz 1b\n\t"
+		: "+D" (addr), "+c" (size)	/* both are modified by the loop */
+		: "a" (0)
+		: "memory");
+}
+
+#endif /* CPU_X86_MEM_H */
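
Usage sketch: clear_memory() expects a dword-aligned buffer and a size that is a nonzero multiple of four bytes, since the loop stores one dword per iteration and only stops when the count reaches exactly zero.

	#include <cpu/x86/mem.h>

	static char scratch[4096];

	static void wipe_scratch(void)
	{
		clear_memory(scratch, sizeof(scratch));
	}
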
diff --git a/src/include/cpu/x86/msr.h b/src/include/cpu/x86/msr.h
new file mode 100644
index 0000000..4f481bd
--- /dev/null
+++ b/src/include/cpu/x86/msr.h
@@ -0,0 +1,52 @@
+#ifndef CPU_X86_MSR_H
+#define CPU_X86_MSR_H
+
+
+#ifdef __ROMCC__
+
+typedef __builtin_msr_t msr_t;
+
+static msr_t rdmsr(unsigned long index)
+{
+	return __builtin_rdmsr(index);
+}
+
+static void wrmsr(unsigned long index, msr_t msr)
+{
+	__builtin_wrmsr(index, msr.lo, msr.hi);
+}
+
+#endif /* __ROMCC__ */
+
+#if defined(__GNUC__) && !defined(__ROMCC__)
+
+typedef struct msr_struct 
+{
+	unsigned lo;
+	unsigned hi;
+} msr_t;
+
+static inline msr_t rdmsr(unsigned index)
+{
+	msr_t result;
+	__asm__ __volatile__ (
+		"rdmsr"
+		: "=a" (result.lo), "=d" (result.hi)
+		: "c" (index)
+		);
+	return result;
+}
+
+static inline void wrmsr(unsigned index, msr_t msr)
+{
+	__asm__ __volatile__ (
+		"wrmsr"
+		: /* No outputs */
+		: "c" (index), "a" (msr.lo), "d" (msr.hi)
+		);
+}
+
+#endif /* __GNUC__ && !__ROMCC__ */
+
+
+#endif /* CPU_X86_MSR_H */
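
Example of the rdmsr()/wrmsr() pair in a GCC build, reading the LAPIC base MSR and setting its global enable bit (the same bit enable_lapic() sets in lapic.h):

	#include <cpu/x86/msr.h>
	#include <cpu/x86/lapic_def.h>

	static void apic_global_enable(void)
	{
		msr_t msr = rdmsr(LAPIC_BASE_MSR);
		msr.lo |= LAPIC_BASE_MSR_ENABLE;
		wrmsr(LAPIC_BASE_MSR, msr);
	}
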
diff --git a/src/include/cpu/x86/mtrr.h b/src/include/cpu/x86/mtrr.h
new file mode 100644
index 0000000..eb9bcb4
--- /dev/null
+++ b/src/include/cpu/x86/mtrr.h
@@ -0,0 +1,43 @@
+#ifndef CPU_X86_MTRR_H
+#define CPU_X86_MTRR_H
+
+
+/*  These are the region types  */
+#define MTRR_TYPE_UNCACHEABLE 0
+#define MTRR_TYPE_WRCOMB     1
+/* MTRR type 2 is reserved */
+/* MTRR type 3 is reserved */
+#define MTRR_TYPE_WRTHROUGH  4
+#define MTRR_TYPE_WRPROT     5
+#define MTRR_TYPE_WRBACK     6
+#define MTRR_NUM_TYPES       7
+
+#define MTRRcap_MSR     0x0fe
+#define MTRRdefType_MSR 0x2ff
+
+#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
+#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
+
+#define NUM_FIXED_RANGES 88
+#define MTRRfix64K_00000_MSR 0x250
+#define MTRRfix16K_80000_MSR 0x258
+#define MTRRfix16K_A0000_MSR 0x259
+#define MTRRfix4K_C0000_MSR 0x268
+#define MTRRfix4K_C8000_MSR 0x269
+#define MTRRfix4K_D0000_MSR 0x26a
+#define MTRRfix4K_D8000_MSR 0x26b
+#define MTRRfix4K_E0000_MSR 0x26c
+#define MTRRfix4K_E8000_MSR 0x26d
+#define MTRRfix4K_F0000_MSR 0x26e
+#define MTRRfix4K_F8000_MSR 0x26f
+
+
+#if !defined(__ROMCC__) && !defined(ASSEMBLY)
+
+void x86_setup_mtrrs(void);
+int x86_mtrr_check(void);
+
+#endif /* !__ROMCC__ && !ASSEMBLY */
+
+
+#endif /* CPU_X86_MTRR_H */
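
Sketch of programming one variable MTRR with these MSR numbers, assuming a 36-bit physical address space; the real policy lives in the x86_setup_mtrrs() implementation.

	#include <cpu/x86/msr.h>
	#include <cpu/x86/mtrr.h>

	/* Mark the first 256MB as write-back using variable MTRR 0. */
	static void set_var_mtrr0_example(void)
	{
		msr_t base, mask;

		base.hi = 0;
		base.lo = 0x00000000 | MTRR_TYPE_WRBACK;
		mask.hi = 0x0000000f;				/* address bits 35:32 */
		mask.lo = ~(0x10000000UL - 1) | (1 << 11);	/* 256MB mask, valid bit */

		wrmsr(MTRRphysBase_MSR(0), base);
		wrmsr(MTRRphysMask_MSR(0), mask);
	}
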
diff --git a/src/include/cpu/x86/pae.h b/src/include/cpu/x86/pae.h
new file mode 100644
index 0000000..c1eb022
--- /dev/null
+++ b/src/include/cpu/x86/pae.h
@@ -0,0 +1,7 @@
+#ifndef CPU_X86_PAE_H
+#define CPU_X86_PAE_H 
+
+#define MAPPING_ERROR ((void *)0xffffffffUL)
+void *map_2M_page(unsigned long page);
+
+#endif /* CPU_X86_PAE_H  */
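
map_2M_page() is only declared here; a sketch of a caller, assuming the argument selects which 2MB page of physical memory to map through the PAE window and that MAPPING_ERROR is returned on failure:

	#include <cpu/x86/pae.h>

	static int touch_high_page(unsigned long page_2m)
	{
		void *window = map_2M_page(page_2m);
		if (window == MAPPING_ERROR)
			return -1;
		/* window now gives access to the selected 2MB page */
		return 0;
	}
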
diff --git a/src/include/cpu/x86/tsc.h b/src/include/cpu/x86/tsc.h
new file mode 100644
index 0000000..fdf6d74
--- /dev/null
+++ b/src/include/cpu/x86/tsc.h
@@ -0,0 +1,30 @@
+#ifndef CPU_X86_TSC_H
+#define CPU_X86_TSC_H
+
+struct tsc_struct {
+	unsigned lo;
+	unsigned hi;
+};
+typedef struct tsc_struct tsc_t;
+
+static tsc_t rdtsc(void)
+{
+	tsc_t res;
+	__asm__ __volatile__ (
+		"rdtsc"
+		: "=a" (res.lo), "=d"(res.hi) /* outputs */
+		);
+	return res;
+}
+
+#ifndef __ROMCC__
+static inline unsigned long long rdtscll(void)
+{
+	unsigned long long val;
+	asm volatile ("rdtsc" : "=A" (val));
+	return val;
+}
+#endif
+
+
+#endif /* CPU_X86_TSC_H */
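
Example of using rdtsc() to measure an elapsed cycle count (a sketch that ignores wrap-around of the low 32 bits):

	#include <cpu/x86/tsc.h>

	static unsigned long cycles_spent_example(void)
	{
		tsc_t start, end;

		start = rdtsc();
		/* ... work being timed ... */
		end = rdtsc();
		return end.lo - start.lo;	/* low 32 bits only */
	}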