#ifndef CPU_X86_LAPIC_H
#define CPU_X86_LAPIC_H

#include <cpu/x86/lapic_def.h>
#include <cpu/x86/msr.h>
#include <arch/hlt.h>

/* See if we need to initialize the local APIC. */
#if CONFIG_SMP || CONFIG_IOAPIC
# define NEED_LAPIC 1
#else
# define NEED_LAPIC 0
#endif

/* Memory-mapped register accessors for the local APIC. */
static inline __attribute__((always_inline)) unsigned long lapic_read(unsigned long reg)
{
	return *((volatile unsigned long *)(LAPIC_DEFAULT_BASE + reg));
}

static inline __attribute__((always_inline)) void lapic_write(unsigned long reg, unsigned long v)
{
	*((volatile unsigned long *)(LAPIC_DEFAULT_BASE + reg)) = v;
}

/* Spin until the ICR delivery status bit reports idle. */
static inline __attribute__((always_inline)) void lapic_wait_icr_idle(void)
{
	do { } while (lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY);
}
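
/*
 * A minimal usage sketch (assuming LAPIC_LVR, the version register
 * offset, comes from <cpu/x86/lapic_def.h>): the accessors above are
 * plain volatile loads/stores into the memory-mapped register window
 * at LAPIC_DEFAULT_BASE, e.g.
 *
 *	unsigned long version = lapic_read(LAPIC_LVR) & 0xff;
 */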

static inline void enable_lapic(void)
{
	msr_t msr;
	msr = rdmsr(LAPIC_BASE_MSR);
	msr.hi &= 0xffffff00;
	msr.lo &= 0x000007ff;
	msr.lo |= LAPIC_DEFAULT_BASE | (1 << 11);
	wrmsr(LAPIC_BASE_MSR, msr);
}
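
/*
 * A note on the masks above, per the IA32_APIC_BASE MSR layout: the
 * msr.hi and msr.lo masks clear the old APIC base address and the
 * enable bit while preserving the BSP flag (bit 8); the code then
 * installs LAPIC_DEFAULT_BASE and sets bit 11, the APIC global enable
 * bit.
 */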

static inline void disable_lapic(void)
{
	msr_t msr;
	msr = rdmsr(LAPIC_BASE_MSR);
	msr.lo &= ~(1 << 11);
	wrmsr(LAPIC_BASE_MSR, msr);
}

static inline __attribute__((always_inline)) unsigned long lapicid(void)
{
	/* The APIC ID is in the top byte of the LAPIC ID register. */
	return lapic_read(LAPIC_ID) >> 24;
}
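
/*
 * Usage sketch (the printk call is shown as a coreboot convention, an
 * assumption rather than part of this header):
 *
 *	printk(BIOS_DEBUG, "CPU apic id: %02lx\n", lapicid());
 */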

#if CONFIG_AP_IN_SIPI_WAIT != 1
/* If we need to go back to SIPI wait, we use the long non-inlined version of
 * this function in lapic_cpu_init.c.
 */
static inline __attribute__((always_inline)) void stop_this_cpu(void)
{
	/* Called by an AP when it is ready to halt and wait for a new task. */
	for (;;) {
		/* hlt resumes after a serviced NMI or SMI, so keep looping. */
		hlt();
	}
}
#else
void stop_this_cpu(void);
#endif

#if !defined(__PRE_RAM__)

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 * but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
			: "=q" (x)
			: "m" (*__xg(ptr)), "0" (x)
			: "memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
			: "=r" (x)
			: "m" (*__xg(ptr)), "0" (x)
			: "memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %0,%1"
			: "=r" (x)
			: "m" (*__xg(ptr)), "0" (x)
			: "memory");
		break;
	}
	return x;
}
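
/*
 * Usage sketch (with a hypothetical flag variable): xchg() atomically
 * swaps in a new value and returns the old one, so it can arbitrate a
 * one-shot claim between CPUs:
 *
 *	static unsigned long claimed;
 *	if (xchg(&claimed, 1) == 0) {
 *		... first CPU to get here wins ...
 *	}
 */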

/* Write via a locked xchg so the store is a single atomic bus access. */
static inline void lapic_write_atomic(unsigned long reg, unsigned long v)
{
	(void)xchg((volatile unsigned long *)(LAPIC_DEFAULT_BASE + reg), v);
}

#ifdef X86_GOOD_APIC
# define FORCE_READ_AROUND_WRITE 0
# define lapic_read_around(x) lapic_read(x)
# define lapic_write_around(x,y) lapic_write((x),(y))
#else
# define FORCE_READ_AROUND_WRITE 1
# define lapic_read_around(x) lapic_read(x)
# define lapic_write_around(x,y) lapic_write_atomic((x),(y))
#endif
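
/*
 * Background, stated as an assumption from the Linux heritage of this
 * code: some early local APICs had errata that made back-to-back
 * register writes unreliable, so every write had to be preceded by a
 * read or issued atomically. X86_GOOD_APIC selects the plain write path
 * on parts that do not need the workaround.
 */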

/*
 * Ask the local APIC to fetch a register from the CPU whose APIC ID is
 * 'apicid' via a remote-read IPI. Returns 0 and stores the value in
 * *pvalue on success, -1 if the remote read did not complete.
 */
static inline int lapic_remote_read(int apicid, int reg, unsigned long *pvalue)
{
	int timeout;
	unsigned long status;
	int result;
	lapic_wait_icr_idle();
	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
	lapic_write_around(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
	timeout = 0;
	do {
#if 0
		udelay(100);
#endif
		status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
	} while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);

	result = -1;
	if (status == LAPIC_ICR_RR_VALID) {
		*pvalue = lapic_read(LAPIC_RRR);
		result = 0;
	}
	return result;
}
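
/*
 * Usage sketch (a hypothetical debug snippet): fetch another CPU's APIC
 * ID register remotely and pull the ID out of the top byte:
 *
 *	unsigned long v;
 *	if (lapic_remote_read(1, LAPIC_ID, &v) == 0)
 *		printk(BIOS_DEBUG, "CPU 1 reports apic id %02lx\n", v >> 24);
 */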

void setup_lapic(void);

#if CONFIG_SMP == 1
struct device;
int start_cpu(struct device *cpu);
#endif /* CONFIG_SMP */

#endif /* !__PRE_RAM__ */

#endif /* CPU_X86_LAPIC_H */