#ifndef CPU_X86_LAPIC_H
#define CPU_X86_LAPIC_H

#include <cpu/x86/lapic_def.h>
#include <cpu/x86/msr.h>
#include <arch/hlt.h>
#include <smp/node.h>

/* See if we need to initialize the local APIC. */
#if CONFIG_SMP || CONFIG_IOAPIC
# define NEED_LAPIC 1
#else
# define NEED_LAPIC 0
#endif

static inline __attribute__((always_inline)) unsigned long lapic_read(unsigned long reg)
{
        return *((volatile unsigned long *)(LAPIC_DEFAULT_BASE + reg));
}

static inline __attribute__((always_inline)) void lapic_write(unsigned long reg, unsigned long v)
{
        *((volatile unsigned long *)(LAPIC_DEFAULT_BASE + reg)) = v;
}
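
/*
 * The two accessors above are plain 32-bit MMIO reads and writes into the
 * local APIC's memory-mapped register page at LAPIC_DEFAULT_BASE.  A minimal
 * usage sketch (mirroring lapicid() further down in this header): the APIC ID
 * sits in bits 31:24 of the LAPIC_ID register, so
 *
 *      unsigned long id = lapic_read(LAPIC_ID) >> 24;
 *
 * fetches the ID of the CPU executing the read.
 */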

static inline __attribute__((always_inline)) void lapic_wait_icr_idle(void)
{
        do { } while (lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY);
}
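
/*
 * enable_lapic()/disable_lapic() program the APIC base MSR (IA32_APIC_BASE,
 * called LAPIC_BASE_MSR here).  Bit 11 of that MSR is the global APIC enable
 * bit and bits 35:12 hold the physical base address of the register page;
 * enable_lapic() clears the old base, forces it to LAPIC_DEFAULT_BASE and
 * sets the enable bit, while preserving the low flag bits (such as the BSP
 * flag in bit 8).
 */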
static inline void enable_lapic(void)
{
        msr_t msr;
        msr = rdmsr(LAPIC_BASE_MSR);
        msr.hi &= 0xffffff00;
        msr.lo &= 0x000007ff;
        msr.lo |= LAPIC_DEFAULT_BASE | (1 << 11);
        wrmsr(LAPIC_BASE_MSR, msr);
}

static inline void disable_lapic(void)
{
        msr_t msr;
        msr = rdmsr(LAPIC_BASE_MSR);
        msr.lo &= ~(1 << 11);
        wrmsr(LAPIC_BASE_MSR, msr);
}

static inline __attribute__((always_inline)) unsigned long lapicid(void)
{
        return lapic_read(LAPIC_ID) >> 24;
}

#if !CONFIG_AP_IN_SIPI_WAIT
/* If we need to go back to SIPI wait, we use the long, non-inlined version of
 * this function in lapic_cpu_init.c
 */
static inline __attribute__((always_inline)) void stop_this_cpu(void)
{
        /* Called by an AP when it is ready to halt and wait for a new task. */
        for (;;) {
                hlt();
        }
}
#else
void stop_this_cpu(void);
#endif

#if !defined(__PRE_RAM__)

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
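
/*
 * __xchg_dummy/__xg() are the usual trick (inherited from Linux) for the
 * inline assembly below: casting the pointer to a pointer to a large dummy
 * struct makes the "m" operand cover the whole object behind it, so the
 * compiler does not assume it knows how much memory the xchg touches.
 */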

/*
 * Note: no "lock" prefix is needed even on SMP: xchg always implies a
 * locked transaction anyway.
 * Note 2: xchg has a side effect, so the asm must be volatile; strictly
 * speaking *ptr is also an output of the primitive, which is only covered
 * here by the "memory" clobber. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
        switch (size) {
        case 1:
                __asm__ __volatile__("xchgb %b0,%1"
                        : "=q" (x)
                        : "m" (*__xg(ptr)), "0" (x)
                        : "memory");
                break;
        case 2:
                __asm__ __volatile__("xchgw %w0,%1"
                        : "=r" (x)
                        : "m" (*__xg(ptr)), "0" (x)
                        : "memory");
                break;
        case 4:
                __asm__ __volatile__("xchgl %0,%1"
                        : "=r" (x)
                        : "m" (*__xg(ptr)), "0" (x)
                        : "memory");
                break;
        }
        return x;
}

static inline void lapic_write_atomic(unsigned long reg, unsigned long v)
{
        (void)xchg((volatile unsigned long *)(LAPIC_DEFAULT_BASE + reg), v);
}
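
/*
 * Some early local APIC implementations reportedly could drop plain
 * back-to-back register writes (the issue behind Linux's X86_GOOD_APIC
 * knob, which this mirrors).  When X86_GOOD_APIC is not defined, the
 * *_around() wrappers below therefore route writes through
 * lapic_write_atomic(), whose locked xchg forces a full read-modify-write
 * cycle on the register.
 */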
#ifdef X86_GOOD_APIC
# define FORCE_READ_AROUND_WRITE 0
# define lapic_read_around(x) lapic_read(x)
# define lapic_write_around(x, y) lapic_write((x), (y))
#else
# define FORCE_READ_AROUND_WRITE 1
# define lapic_read_around(x) lapic_read(x)
# define lapic_write_around(x, y) lapic_write_atomic((x), (y))
#endif
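
/*
 * lapic_remote_read() uses the APIC "remote read" facility: it sends a
 * remote-read IPI (LAPIC_DM_REMRD) to the target APIC, encoding the wanted
 * register as its offset divided by 16, then polls the remote-read status
 * field in the ICR and, on success, picks the value up from the remote read
 * register (LAPIC_RRR).  Not every APIC implementation supports this, hence
 * the timeout and the -1 error return.
 */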
static inline int lapic_remote_read(int apicid, int reg, unsigned long *pvalue)
{
        int timeout;
        unsigned long status;
        int result;

        lapic_wait_icr_idle();
        lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
        lapic_write_around(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
        timeout = 0;
        do {
#if 0
                udelay(100);
#endif
                status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
        } while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);

        result = -1;
        if (status == LAPIC_ICR_RR_VALID) {
                *pvalue = lapic_read(LAPIC_RRR);
                result = 0;
        }
        return result;
}
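
/*
 * A minimal usage sketch (hypothetical caller): read a remote CPU's APIC ID
 * register and only use the value if the remote read actually succeeded:
 *
 *      unsigned long val;
 *      if (lapic_remote_read(apicid, LAPIC_ID, &val) == 0)
 *              printk(BIOS_DEBUG, "remote LAPIC_ID: %08lx\n", val);
 */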

void setup_lapic(void);

#if CONFIG_SMP
struct device;
int start_cpu(struct device *cpu);
#endif /* CONFIG_SMP */

#endif /* !__PRE_RAM__ */

#endif /* CPU_X86_LAPIC_H */