#ifndef CPU_X86_LAPIC_H
#define CPU_X86_LAPIC_H

#include <cpu/x86/lapic_def.h>
#include <cpu/x86/msr.h>
#include <arch/hlt.h>

/* See if I need to initialize the local apic */
#if CONFIG_SMP || CONFIG_IOAPIC
# define NEED_LAPIC 1
#endif

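/*
 * The local APIC registers are memory mapped at LAPIC_DEFAULT_BASE;
 * each register is accessed as a volatile 32-bit load or store.
 */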
static inline __attribute__((always_inline)) unsigned long lapic_read(unsigned long reg)
{
	return *((volatile unsigned long *)(LAPIC_DEFAULT_BASE + reg));
}

static inline __attribute__((always_inline)) void lapic_write(unsigned long reg, unsigned long v)
{
	*((volatile unsigned long *)(LAPIC_DEFAULT_BASE + reg)) = v;
}

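/* Spin until the delivery status (busy) bit in the ICR clears, i.e. until
 * the previously sent IPI has been accepted. */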
static inline __attribute__((always_inline)) void lapic_wait_icr_idle(void)
{
	do { } while (lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY);
}

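/* Mask off the old APIC base address in the APIC base MSR, point it at
 * LAPIC_DEFAULT_BASE and set the APIC global enable bit (bit 11). */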
static inline void enable_lapic(void)
{
	msr_t msr;
	msr = rdmsr(LAPIC_BASE_MSR);
	msr.hi &= 0xffffff00;
	msr.lo &= 0x000007ff;
	msr.lo |= LAPIC_DEFAULT_BASE | (1 << 11);
	wrmsr(LAPIC_BASE_MSR, msr);
}

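/* Clear the APIC global enable bit (bit 11) in the APIC base MSR. */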
static inline void disable_lapic(void)
{
	msr_t msr;
	msr = rdmsr(LAPIC_BASE_MSR);
	msr.lo &= ~(1 << 11);
	wrmsr(LAPIC_BASE_MSR, msr);
}

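/* The APIC ID lives in bits 31:24 of the LAPIC ID register. */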
static inline __attribute__((always_inline)) unsigned long lapicid(void)
{
	return lapic_read(LAPIC_ID) >> 24;
}

#if CONFIG_AP_IN_SIPI_WAIT != 1
/* If we need to go back to sipi wait, we use the long non-inlined version of
 * this function in lapic_cpu_init.c
 */
static inline __attribute__((always_inline)) void stop_this_cpu(void)
{
	/* Called by an AP when it is ready to halt and wait for a new task */
	for (;;) {
		hlt();
	}
}
#endif

#if !defined(__ROMCC__)

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

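/* The dummy struct is the usual trick for passing a "whole object" memory
 * operand to the asm below: it makes the compiler treat the target of the
 * xchg as an opaque memory region instead of caching it in a register. */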
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 * but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
			:"=q" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	}
	return x;
}

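/* Write a LAPIC register with xchg so the store is a single atomic
 * read-modify-write cycle; used by lapic_write_around() below. */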
static inline void lapic_write_atomic(unsigned long reg, unsigned long v)
{
	xchg((volatile unsigned long *)(LAPIC_DEFAULT_BASE + reg), v);
}

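/* With CONFIG_X86_GOOD_APIC a plain register write is sufficient; otherwise
 * lapic_write_around() goes through the atomic xchg variant as a workaround
 * for APIC parts that are not safe to write directly. */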
#ifdef CONFIG_X86_GOOD_APIC
# define FORCE_READ_AROUND_WRITE 0
# define lapic_read_around(x) lapic_read(x)
# define lapic_write_around(x,y) lapic_write((x),(y))
#else
# define FORCE_READ_AROUND_WRITE 1
# define lapic_read_around(x) lapic_read(x)
# define lapic_write_around(x,y) lapic_write_atomic((x),(y))
#endif

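/* Read a LAPIC register on another CPU: send a remote-read IPI to 'apicid',
 * poll the remote-read status in the ICR, and fetch the result from the
 * remote read register (LAPIC_RRR). Returns 0 on success, -1 if the remote
 * read never became valid. */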
static inline int lapic_remote_read(int apicid, int reg, unsigned long *pvalue)
{
	int timeout;
	unsigned long status;
	int result;
	lapic_wait_icr_idle();
	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
	lapic_write_around(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
	timeout = 0;
	do {
#if 0
		udelay(100);
#endif
		status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
	} while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);

	result = -1;
	if (status == LAPIC_ICR_RR_VALID) {
		*pvalue = lapic_read(LAPIC_RRR);
		result = 0;
	}
	return result;
}

void setup_lapic(void);

#if CONFIG_SMP == 1
struct device;
int start_cpu(struct device *cpu);
#endif /* CONFIG_SMP */

#endif /* !__ROMCC__ */

#endif /* CPU_X86_LAPIC_H */