blob: 2215ec7ee5e28b4d31f67d33b6bcbb82893669f1 [file] [log] [blame]
Eric Biedermanc84c1902004-10-14 20:13:01 +00001#ifndef CPU_X86_LAPIC_H
2#define CPU_X86_LAPIC_H
3
4#include <cpu/x86/lapic_def.h>
5#include <cpu/x86/msr.h>
6#include <arch/hlt.h>
7
/* NEED_LAPIC: non-zero when the local APIC must be initialized.
 * The LAPIC is needed either for multi-processor bring-up (CONFIG_SMP)
 * or when interrupts are routed through an IOAPIC (CONFIG_IOAPIC). */
#if CONFIG_SMP || CONFIG_IOAPIC
# define NEED_LAPIC 1
#else
# define NEED_LAPIC 0
#endif
14
Stefan Reinauer7ce8c542005-12-02 21:52:30 +000015static inline __attribute__((always_inline)) unsigned long lapic_read(unsigned long reg)
Eric Biedermanc84c1902004-10-14 20:13:01 +000016{
17 return *((volatile unsigned long *)(LAPIC_DEFAULT_BASE+reg));
18}
19
Stefan Reinauer7ce8c542005-12-02 21:52:30 +000020static inline __attribute__((always_inline)) void lapic_write(unsigned long reg, unsigned long v)
Eric Biedermanc84c1902004-10-14 20:13:01 +000021{
22 *((volatile unsigned long *)(LAPIC_DEFAULT_BASE+reg)) = v;
23}
24
Stefan Reinauer7ce8c542005-12-02 21:52:30 +000025static inline __attribute__((always_inline)) void lapic_wait_icr_idle(void)
Eric Biedermanc84c1902004-10-14 20:13:01 +000026{
27 do { } while ( lapic_read( LAPIC_ICR ) & LAPIC_ICR_BUSY );
28}
29
Eric Biedermanc84c1902004-10-14 20:13:01 +000030static inline void enable_lapic(void)
31{
32
33 msr_t msr;
34 msr = rdmsr(LAPIC_BASE_MSR);
35 msr.hi &= 0xffffff00;
36 msr.lo &= 0x000007ff;
37 msr.lo |= LAPIC_DEFAULT_BASE | (1 << 11);
38 wrmsr(LAPIC_BASE_MSR, msr);
39}
40
41static inline void disable_lapic(void)
42{
43 msr_t msr;
44 msr = rdmsr(LAPIC_BASE_MSR);
45 msr.lo &= ~(1 << 11);
46 wrmsr(LAPIC_BASE_MSR, msr);
47}
48
Stefan Reinauer7ce8c542005-12-02 21:52:30 +000049static inline __attribute__((always_inline)) unsigned long lapicid(void)
Eric Biedermanc84c1902004-10-14 20:13:01 +000050{
51 return lapic_read(LAPIC_ID) >> 24;
52}
53
Stefan Reinauer00093a82011-11-02 16:12:34 -070054#ifndef __ROMCC__
Stefan Reinauer68524062008-08-02 15:15:23 +000055#if CONFIG_AP_IN_SIPI_WAIT != 1
56/* If we need to go back to sipi wait, we use the long non-inlined version of
57 * this function in lapic_cpu_init.c
58 */
static inline __attribute__((always_inline)) void stop_this_cpu(void)
{
	/* Called by an AP when it is ready to halt and wait for a new task.
	 * Never returns: halt in a loop (hlt resumes on interrupt/INIT). */
	while (1)
		hlt();
}
Stefan Reinauerde3206a2010-02-22 06:09:43 +000066#else
67void stop_this_cpu(void);
Stefan Reinauer68524062008-08-02 15:15:23 +000068#endif
Eric Biedermanc84c1902004-10-14 20:13:01 +000069
Stefan Reinauer35b6bbb2010-03-28 21:26:54 +000070#if !defined(__PRE_RAM__)
Eric Biedermanc84c1902004-10-14 20:13:01 +000071
/* xchg(): atomically exchange *ptr with v, returning the old value.
 * Dispatches to __xchg() with the operand size taken from *ptr. */
#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

/* Oversized dummy type for the "m" constraint below: casting the pointer
 * to it tells the compiler the asm may touch the whole object, preventing
 * it from caching the value in a register (same trick as Linux's __xg). */
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 * but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
		case 1:	/* 1-byte exchange */
			__asm__ __volatile__("xchgb %b0,%1"
				:"=q" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 2:	/* 2-byte exchange */
			__asm__ __volatile__("xchgw %w0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 4:	/* 4-byte exchange */
			__asm__ __volatile__("xchgl %0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		/* NOTE(review): no default case — for any other size, x is
		 * returned unchanged and no exchange happens (i386-only code;
		 * an 8-byte case would need xchgq). */
	}
	return x;
}
106
Stefan Reinauer68524062008-08-02 15:15:23 +0000107static inline void lapic_write_atomic(unsigned long reg, unsigned long v)
Eric Biedermanc84c1902004-10-14 20:13:01 +0000108{
Patrick Georgi1a341652012-03-11 19:42:33 +0100109 (void)xchg((volatile unsigned long *)(LAPIC_DEFAULT_BASE+reg), v);
Eric Biedermanc84c1902004-10-14 20:13:01 +0000110}
111
112
#ifdef X86_GOOD_APIC
/* APIC handles back-to-back register writes correctly: plain writes OK. */
# define FORCE_READ_AROUND_WRITE 0
# define lapic_read_around(x) lapic_read(x)
# define lapic_write_around(x,y) lapic_write((x),(y))
#else
/* NOTE(review): presumably mirrors Linux's handling of APICs that need a
 * serializing (xchg-based) write to avoid losing back-to-back writes —
 * confirm against the targeted CPU errata. */
# define FORCE_READ_AROUND_WRITE 1
# define lapic_read_around(x) lapic_read(x)
# define lapic_write_around(x,y) lapic_write_atomic((x),(y))
#endif
122
123static inline int lapic_remote_read(int apicid, int reg, unsigned long *pvalue)
124{
125 int timeout;
126 unsigned long status;
127 int result;
128 lapic_wait_icr_idle();
129 lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
130 lapic_write_around(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
131 timeout = 0;
132 do {
133#if 0
134 udelay(100);
135#endif
136 status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
137 } while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);
138
139 result = -1;
140 if (status == LAPIC_ICR_RR_VALID) {
141 *pvalue = lapic_read(LAPIC_RRR);
142 result = 0;
143 }
144 return result;
145}
146
147
148void setup_lapic(void);
149
Eric Biedermanc84c1902004-10-14 20:13:01 +0000150#if CONFIG_SMP == 1
151struct device;
152int start_cpu(struct device *cpu);
Eric Biedermanc84c1902004-10-14 20:13:01 +0000153#endif /* CONFIG_SMP */
154
Stefan Reinauer35b6bbb2010-03-28 21:26:54 +0000155#endif /* !__PRE_RAM__ */
Eric Biedermanc84c1902004-10-14 20:13:01 +0000156
Stefan Reinauer00093a82011-11-02 16:12:34 -0700157int boot_cpu(void);
158#endif
159
Eric Biedermanc84c1902004-10-14 20:13:01 +0000160#endif /* CPU_X86_LAPIC_H */