#ifndef CPU_X86_LAPIC_H
#define CPU_X86_LAPIC_H

#include <cpu/x86/lapic_def.h>
#include <cpu/x86/msr.h>
#include <halt.h>
#include <smp/node.h>

/* Determine whether the local APIC needs to be initialized */
#if CONFIG_SMP || CONFIG_IOAPIC
# define NEED_LAPIC 1
#else
# define NEED_LAPIC 0
#endif

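/* Memory-mapped accessors for the local APIC registers at LAPIC_DEFAULT_BASE. */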
static inline __attribute__((always_inline)) unsigned long lapic_read(unsigned long reg)
{
	return *((volatile unsigned long *)(LAPIC_DEFAULT_BASE + reg));
}

static inline __attribute__((always_inline)) void lapic_write(unsigned long reg, unsigned long v)
{
	*((volatile unsigned long *)(LAPIC_DEFAULT_BASE + reg)) = v;
}

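/* Spin until the local APIC reports that the previously programmed IPI has been sent. */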
static inline __attribute__((always_inline)) void lapic_wait_icr_idle(void)
{
	do { } while (lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY);
}

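/*
 * Enable the local APIC: preserve the reserved bits of the APIC base MSR,
 * program the base address to LAPIC_DEFAULT_BASE and set the global enable
 * bit (bit 11).
 */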
static inline void enable_lapic(void)
{
	msr_t msr;
	msr = rdmsr(LAPIC_BASE_MSR);
	msr.hi &= 0xffffff00;
	msr.lo &= 0x000007ff;
	msr.lo |= LAPIC_DEFAULT_BASE | (1 << 11);
	wrmsr(LAPIC_BASE_MSR, msr);
}

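/* Disable the local APIC by clearing the global enable bit in the APIC base MSR. */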
static inline void disable_lapic(void)
{
	msr_t msr;
	msr = rdmsr(LAPIC_BASE_MSR);
	msr.lo &= ~(1 << 11);
	wrmsr(LAPIC_BASE_MSR, msr);
}

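/* The APIC ID is stored in bits 31:24 of the LAPIC ID register. */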
static inline __attribute__((always_inline)) unsigned long lapicid(void)
{
	return lapic_read(LAPIC_ID) >> 24;
}

#if !CONFIG_AP_IN_SIPI_WAIT
/* If we need to go back to SIPI wait, we use the long non-inlined version of
 * this function in lapic_cpu_init.c
 */
static inline __attribute__((always_inline)) void stop_this_cpu(void)
{
	/* Called by an AP when it is ready to halt and wait for a new task */
	halt();
}
#else
void stop_this_cpu(void);
#endif

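/* The helpers below are only available in stages where __PRE_RAM__ is not defined. */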
#if !defined(__PRE_RAM__)

#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

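/*
 * __xg() casts the pointer to an oversized dummy struct so the "m" asm
 * constraint below refers to the pointed-to memory itself, without the
 * compiler assuming a particular access size.
 */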
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 * but generally the primitive is invalid, *ptr is an output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
			: "=q" (x)
			: "m" (*__xg(ptr)), "0" (x)
			: "memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
			: "=r" (x)
			: "m" (*__xg(ptr)), "0" (x)
			: "memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %0,%1"
			: "=r" (x)
			: "m" (*__xg(ptr)), "0" (x)
			: "memory");
		break;
	}
	return x;
}

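/* Write a local APIC register via xchg(), i.e. as an atomic read-modify-write. */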
static inline void lapic_write_atomic(unsigned long reg, unsigned long v)
{
	(void)xchg((volatile unsigned long *)(LAPIC_DEFAULT_BASE + reg), v);
}

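/*
 * On CPUs with a well-behaved APIC (X86_GOOD_APIC) writes can be issued
 * directly; otherwise every write goes through lapic_write_atomic() so it is
 * paired with a read, working around old read-around-write APIC errata.
 */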
#ifdef X86_GOOD_APIC
# define FORCE_READ_AROUND_WRITE 0
# define lapic_read_around(x) lapic_read(x)
# define lapic_write_around(x, y) lapic_write((x), (y))
#else
# define FORCE_READ_AROUND_WRITE 1
# define lapic_read_around(x) lapic_read(x)
# define lapic_write_around(x, y) lapic_write_atomic((x), (y))
#endif

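/*
 * Read a register of a remote CPU's local APIC: send a remote-read IPI to the
 * target APIC ID and poll the remote-read status in the ICR. Returns 0 and
 * stores the value in *pvalue on success, -1 if the read did not complete.
 */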
static inline int lapic_remote_read(int apicid, int reg, unsigned long *pvalue)
{
	int timeout;
	unsigned long status;
	int result;
	lapic_wait_icr_idle();
	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
	lapic_write_around(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
	timeout = 0;
	do {
#if 0
		udelay(100);
#endif
		status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
	} while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);

	result = -1;
	if (status == LAPIC_ICR_RR_VALID) {
		*pvalue = lapic_read(LAPIC_RRR);
		result = 0;
	}
	return result;
}

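/* Perform full local APIC setup for the calling CPU. */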
void setup_lapic(void);

#if CONFIG_SMP
struct device;
int start_cpu(struct device *cpu);
#endif /* CONFIG_SMP */

#endif /* !__PRE_RAM__ */

#endif /* CPU_X86_LAPIC_H */