/* SPDX-License-Identifier: GPL-2.0-only */

/* NOTE: This handler assumes the SMM window goes from 0xa0000
 * to 0xaffff. In fact, at least on Intel Core CPUs (i945 chipset)
 * the SMM window is 128K big, covering 0xa0000 to 0xbffff.
 * So there is a lot of potential for growth in here. Let's stick
 * to 64k if we can though.
 */

#include <cpu/x86/lapic_def.h>
#include <cpu/x86/msr.h>

/*
 * +--------------------------------+ 0xaffff
 * | Save State Map Node 0          |
 * | Save State Map Node 1          |
 * | Save State Map Node 2          |
 * | Save State Map Node 3          |
 * | ...                            |
 * +--------------------------------+ 0xaf000
 * |                                |
 * |                                |
 * |                                |
 * +--------------------------------+ 0xa8400
 * | SMM Entry Node 0 (+ stack)     |
 * +--------------------------------+ 0xa8000
 * | SMM Entry Node 1 (+ stack)     |
 * | SMM Entry Node 2 (+ stack)     |
 * | SMM Entry Node 3 (+ stack)     |
 * | ...                            |
 * +--------------------------------+ 0xa7400
 * |                                |
 * | SMM Handler                    |
 * |                                |
 * +--------------------------------+ 0xa0000
 *
 */
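
/* Worked example (illustrative, assuming LAPIC IDs 0-3 map to cores 0-3):
 * core N enters at 0xa8000 - N * 0x400 and its stack occupies the 0x3f0
 * bytes right above that entry point (0x10 bytes are spared for the
 * entry jump; see the stack setup code below):
 *   core 0: entry 0xa8000, stack 0xa8010 - 0xa8400
 *   core 1: entry 0xa7c00, stack 0xa7c10 - 0xa8000
 *   core 2: entry 0xa7800, stack 0xa7810 - 0xa7c00
 *   core 3: entry 0xa7400, stack 0xa7410 - 0xa7800
 */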

/* SMM_HANDLER_OFFSET is the 16-bit offset within the ASEG
 * at which smm_handler_start lives. At the moment the handler
 * lives right at 0xa0000, so the offset is 0.
 */

#define SMM_HANDLER_OFFSET 0x0000

#if ENV_X86_64
.bss
ia32efer_backup_eax:
.long 0
ia32efer_backup_edx:
.long 0
#endif

/* Initially SMM runs in a form of real mode. Let gcc know
 * how to treat the SMM handler stub accordingly.
 */

.section ".handler", "a", @progbits

.code16

/**
 * SMM code to enable protected mode and jump to the
 * C-written function void smi_handler(u32 smm_revision)
 *
 * All the bad magic is not all that bad after all.
 */
#define SMM_START 0xa0000
#define SMM_END 0xb0000
#if SMM_END <= SMM_START
#error invalid SMM configuration
#endif
.global smm_handler_start
smm_handler_start:
#if CONFIG(SMM_LAPIC_REMAP_MITIGATION)
	/* Check if the LAPIC register block overlaps with SMM.
	 * This block needs to work without data accesses because they
	 * may be routed into the LAPIC register block.
	 * Code accesses, on the other hand, are never routed to LAPIC,
	 * which is what makes this work in the first place.
	 */
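	/* How the check below works (informational): after masking off the
	 * low 12 bits, (base - SMM_START) compared unsigned against
	 * (SMM_END - SMM_START) falls through to the crash path exactly
	 * when the 4 KiB LAPIC page starts anywhere in
	 * [SMM_START, SMM_END], so `ja` means "no overlap". */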
	mov $LAPIC_BASE_MSR, %ecx
	rdmsr
	and $(~0xfff), %eax
	sub $(SMM_START), %eax
	cmp $(SMM_END - SMM_START), %eax
	ja untampered_lapic
1:
	/* emit "Crash" on serial */
	mov $(CONFIG_TTYS0_BASE), %dx
	mov $'C', %al
	out %al, (%dx)
	mov $'r', %al
	out %al, (%dx)
	mov $'a', %al
	out %al, (%dx)
	mov $'s', %al
	out %al, (%dx)
	mov $'h', %al
	out %al, (%dx)
	/* now crash for real */
	ud2
untampered_lapic:
#endif
	movw $(smm_gdtptr16 - smm_handler_start + SMM_HANDLER_OFFSET), %bx
	lgdtl %cs:(%bx)

	movl %cr0, %eax
	andl $0x7FFAFFD1, %eax /* PG,AM,WP,NE,TS,EM,MP = 0 */
	orl $0x60000001, %eax /* CD, NW, PE = 1 */
	movl %eax, %cr0
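	/* For reference: the and-mask 0x7FFAFFD1 has zeros at CR0 bits
	 * 31 (PG), 18 (AM), 16 (WP), 5 (NE), 3 (TS), 2 (EM) and 1 (MP),
	 * clearing them; the or-mask 0x60000001 sets bits 30 (CD),
	 * 29 (NW) and 0 (PE). */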

	/* Enable protected mode */
	ljmpl $0x08, $1f

.code32
1:
	/* flush the cache after disabling it */
	wbinvd

	/* Use flat data segment */
	movw $0x10, %ax
	movw %ax, %ds
	movw %ax, %es
	movw %ax, %ss
	xor %ax, %ax /* zero out the fs and gs segment index */
	movw %ax, %fs
	movw %ax, %gs /* Will be used for cpu_info */

	/* FIXME: Incompatible with X2APIC_SUPPORT. */
	/* Get this CPU's LAPIC ID */
	movl $(LAPIC_DEFAULT_BASE | LAPIC_ID), %esi
	movl (%esi), %ecx
	shr $24, %ecx
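	/* The shift works because in xAPIC mode the APIC ID lives in
	 * bits 31:24 of the LAPIC ID register (offset 0x20). */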

	/* This is an ugly hack, and we should find a way to read the CPU index
	 * without relying on the LAPIC ID.
	 */

	/* calculate stack offset by multiplying the APIC ID
	 * by 1024 (0x400), and save that offset in ebp.
	 */
	shl $10, %ecx
	movl %ecx, %ebp

	/* We put the stack for each core right above
	 * its SMM entry point. Core 0 starts at 0xa8000,
	 * we spare 0x10 bytes for the jump to be sure.
	 */
	movl $0xa8010, %eax
	subl %ecx, %eax /* subtract offset, see above */
	movl %eax, %ebx /* Save bottom of stack in ebx */

#define SMM_STACK_SIZE (0x400 - 0x10)
	/* clear stack */
	cld
	movl %eax, %edi
	movl $(SMM_STACK_SIZE >> 2), %ecx
	xorl %eax, %eax
	rep stosl
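	/* Arithmetic check (informational): rep stosl stores %ecx
	 * doublewords, and SMM_STACK_SIZE >> 2 = 0xfc stores of 4 bytes
	 * each zero the full 0x3f0-byte stack. */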

	/* set new stack */
	addl $SMM_STACK_SIZE, %ebx
	movl %ebx, %esp

#if ENV_X86_64
	/* Backup IA32_EFER. Preserves ebx. */
	movl $(IA32_EFER), %ecx
	rdmsr
	movl %eax, ia32efer_backup_eax
	movl %edx, ia32efer_backup_edx

	/* Enable long mode. Preserves ebx. */
#include <cpu/x86/64bit/entry64.inc>

#endif
	/* Call C handler */
	call smi_handler

#if ENV_X86_64
	/*
	 * The only reason to go back to protected mode is that RSM doesn't restore
	 * MSR registers and MSR IA32_EFER was modified by entering long mode.
	 * Drop to protected mode to safely operate on the IA32_EFER MSR.
	 */

	/* Disable long mode. */
	#include <cpu/x86/64bit/exit32.inc>

	/* Restore IA32_EFER as RSM doesn't restore MSRs. */
	movl $(IA32_EFER), %ecx
	movl ia32efer_backup_eax, %eax
	movl ia32efer_backup_edx, %edx
	wrmsr
#endif

	/* To return, just do rsm. It will "clean up" protected mode */
	rsm

.code16

.align 4, 0xff

smm_gdtptr16:
	.word smm_gdt_end - smm_gdt - 1
	.long smm_gdt - smm_handler_start + 0xa0000 + SMM_HANDLER_OFFSET

.code32

smm_gdt:
	/* The first GDT entry cannot be used. Keep it zero */
	.long 0x00000000, 0x00000000

	/* gdt selector 0x08, flat code segment */
	.word 0xffff, 0x0000
	.byte 0x00, 0x9b, 0xcf, 0x00 /* G=1 and 0x0f, 4GB limit */

	/* gdt selector 0x10, flat data segment */
	.word 0xffff, 0x0000
	.byte 0x00, 0x93, 0xcf, 0x00

	/* gdt selector 0x18, flat code segment (64-bit) */
	.word 0xffff, 0x0000
	.byte 0x00, 0x9b, 0xaf, 0x00
smm_gdt_end:
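
/* Descriptor decoding (for reference): each 8-byte entry is limit[15:0],
 * base[15:0], base[23:16], access byte, flags | limit[19:16], base[31:24].
 * Access 0x9b = present ring-0 code, 0x93 = present ring-0 data; flag
 * nibble 0xc = 4 KiB granularity, 32-bit default; 0xa = 4 KiB granularity,
 * long mode. All three are flat segments with base 0 and limit 0xfffff. */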


.section ".jumptable", "a", @progbits

/* This is the SMM jump table. All cores use the same SMM handler
 * for simplicity. But SMM entry needs to be different due to the
 * save state area. The jump table makes sure all CPUs jump into the
 * real handler on SMM entry.
 */

/* This code currently supports up to 4 CPU cores. If more than 4 CPU cores
 * shall be used, the table below has to be updated, as well as smm.ld.
 */

/* GNU AS/LD will always generate code that assumes CS is 0xa000. In reality
 * CS will be set to SMM_BASE[19:4] though. Knowing that the SMM handler is the
 * first thing in the ASEG, we do a far jump here, to set CS to 0xa000.
 */
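
/* Layout sketch (illustrative, assuming per-core SMBASEs staggered by
 * 0x400 as the memory map above implies, so that each core's hardware
 * entry point at SMBASE + 0x8000 lands on one of the 1 KiB slots below):
 * core 0 hits the last slot at 0xa8000, core 3 the first at 0xa7400,
 * and every slot far-jumps to the common handler.
 */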

.code16
jumptable:
	/* core 3 */
	ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00
	/* core 2 */
	ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00
	/* core 1 */
	ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00
	/* core 0 */
	ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00