/* SPDX-License-Identifier: GPL-2.0-only */

/* NOTE: This handler assumes the SMM window goes from 0xa0000
 * to 0xaffff. In fact, at least on Intel Core CPUs (i945 chipset)
 * the SMM window is 128K big, covering 0xa0000 to 0xbffff.
 * So there is a lot of potential for growth in here. Let's stick
 * to 64k if we can though.
 */

#include <cpu/x86/lapic_def.h>
#include <cpu/x86/msr.h>

/*
 * +--------------------------------+ 0xaffff
 * | Save State Map Node 0          |
 * | Save State Map Node 1          |
 * | Save State Map Node 2          |
 * | Save State Map Node 3          |
 * | ...                            |
 * +--------------------------------+ 0xaf000
 * |                                |
 * |                                |
 * |                                |
 * +--------------------------------+ 0xa8400
 * | SMM Entry Node 0  (+ stack)    |
 * +--------------------------------+ 0xa8000
 * | SMM Entry Node 1  (+ stack)    |
 * | SMM Entry Node 2  (+ stack)    |
 * | SMM Entry Node 3  (+ stack)    |
 * | ...                            |
 * +--------------------------------+ 0xa7400
 * |                                |
 * | SMM Handler                    |
 * |                                |
 * +--------------------------------+ 0xa0000
 *
 */

/* SMM_HANDLER_OFFSET is the 16bit offset within the ASEG
 * at which smm_handler_start lives. At the moment the handler
 * lives right at 0xa0000, so the offset is 0.
 */

#define SMM_HANDLER_OFFSET 0x0000

#if ENV_X86_64
.bss
ia32efer_backup_eax:
.long 0
ia32efer_backup_edx:
.long 0
#endif

/* Initially, SMM is some sort of real mode. Let gcc know
 * how to treat the SMM handler stub.
 */

.section ".handler", "a", @progbits

.code16

/**
 * SMM code to enable protected mode and jump to the
 * C-written function void smi_handler(u32 smm_revision)
 *
 * All the bad magic is not all that bad after all.
 */
#define SMM_START 0xa0000
#define SMM_END   0xb0000
#if SMM_END <= SMM_START
#error invalid SMM configuration
#endif
.global smm_handler_start
smm_handler_start:
#if CONFIG(SMM_LAPIC_REMAP_MITIGATION)
	/* Check if the LAPIC register block overlaps with SMM.
	 * This block needs to work without data accesses because they
	 * may be routed into the LAPIC register block.
	 * Code accesses, on the other hand, are never routed to LAPIC,
	 * which is what makes this work in the first place.
	 */
	mov $LAPIC_BASE_MSR, %ecx
	rdmsr
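	/* Mask off the low 12 bits of the MSR value to get the LAPIC base
	 * address, then range-check it against the SMM window: execution
	 * only reaches the crash path below if the base falls inside
	 * [SMM_START, SMM_END].
	 */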
	and $(~0xfff), %eax
	sub $(SMM_START), %eax
	cmp $(SMM_END - SMM_START), %eax
	ja untampered_lapic
1:
	/* emit "Crash" on serial */
	mov $(CONFIG_TTYS0_BASE), %dx
	mov $'C', %al
	out %al, (%dx)
	mov $'r', %al
	out %al, (%dx)
	mov $'a', %al
	out %al, (%dx)
	mov $'s', %al
	out %al, (%dx)
	mov $'h', %al
	out %al, (%dx)
	/* now crash for real */
	ud2
untampered_lapic:
#endif
	movw	$(smm_gdtptr16 - smm_handler_start + SMM_HANDLER_OFFSET), %bx
	lgdtl	%cs:(%bx)

	movl	%cr0, %eax
	andl	$0x7FFAFFD1, %eax /* PG,AM,WP,NE,TS,EM,MP = 0 */
	orl	$0x60000001, %eax /* CD, NW, PE = 1 */
	movl	%eax, %cr0

	/* Enable protected mode */
	ljmpl	$0x08, $1f

.code32
1:
	/* flush the cache after disabling it */
	wbinvd

	/* Use flat data segment */
	movw	$0x10, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %ss
	xor	%ax, %ax /* zero out the gs and fs segment index */
	movw	%ax, %fs
	movw	%ax, %gs /* Will be used for cpu_info */

	/* FIXME: Incompatible with X2APIC_SUPPORT. */
	/* Get this CPU's LAPIC ID */
	movl	$(LAPIC_DEFAULT_BASE | LAPIC_ID), %esi
	movl	(%esi), %ecx
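	/* The xAPIC ID is in bits 31:24 of the LAPIC ID register */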
	shr	$24, %ecx

	/* This is an ugly hack, and we should find a way to read the CPU index
	 * without relying on the LAPIC ID.
	 */
#if CONFIG(CPU_AMD_AGESA_FAMILY15_TN)
	/* LAPIC IDs start from 0x10; map that to the proper core index */
	subl	$0x10, %ecx
#endif

	/* calculate stack offset by multiplying the APIC ID
	 * by 1024 (0x400), and save that offset in ebp.
	 */
	shl	$10, %ecx
	movl	%ecx, %ebp

	/* We put the stack for each core right above
	 * its SMM entry point. Core 0 starts at 0xa8000,
	 * and we spare 0x10 bytes for the jump to be sure.
	 */
	movl	$0xa8010, %eax
	subl	%ecx, %eax		/* subtract offset, see above */
	movl	%eax, %ebx		/* Save bottom of stack in ebx */

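/* Each core's slot is 0x400 bytes; 0x10 of it is spared for the entry
 * jump (see above), leaving the rest as stack.
 */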
#define SMM_STACK_SIZE	(0x400 - 0x10)
	/* clear stack */
	cld
	movl	%eax, %edi
	movl	$(SMM_STACK_SIZE >> 2), %ecx
	xorl	%eax, %eax
	rep	stosl

	/* set new stack */
	addl	$SMM_STACK_SIZE, %ebx
	movl	%ebx, %esp

#if ENV_X86_64
	/* Backup IA32_EFER. Preserves ebx. */
	movl	$(IA32_EFER), %ecx
	rdmsr
	movl	%eax, ia32efer_backup_eax
	movl	%edx, ia32efer_backup_edx

	/* Enable long mode. Preserves ebx. */
#include <cpu/x86/64bit/entry64.inc>

#endif
	/* Call C handler */
	call	smi_handler

#if ENV_X86_64
	/*
	 * The only reason to go back to protected mode is that RSM doesn't restore
	 * MSR registers and MSR IA32_EFER was modified by entering long mode.
	 * Drop to protected mode to safely operate on the IA32_EFER MSR.
	 */

	/* Disable long mode. */
	#include <cpu/x86/64bit/exit32.inc>

	/* Restore IA32_EFER as RSM doesn't restore MSRs. */
	movl	$(IA32_EFER), %ecx
	movl	ia32efer_backup_eax, %eax
	movl	ia32efer_backup_edx, %edx
	wrmsr
#endif

	/* To return, just do rsm. It will "clean up" protected mode */
	rsm

.code16

.align	4, 0xff

smm_gdtptr16:
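	/* lgdt operand: 16-bit table limit followed by the 32-bit linear base */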
	.word	smm_gdt_end - smm_gdt - 1
	.long	smm_gdt - smm_handler_start + 0xa0000 + SMM_HANDLER_OFFSET

.code32

smm_gdt:
	/* The first GDT entry cannot be used. Keep it zero */
	.long	0x00000000, 0x00000000

	/* gdt selector 0x08, flat code segment */
	.word	0xffff, 0x0000
	.byte	0x00, 0x9b, 0xcf, 0x00 /* G=1 and 0x0f, 4GB limit */

	/* gdt selector 0x10, flat data segment */
	.word	0xffff, 0x0000
	.byte	0x00, 0x93, 0xcf, 0x00

	/* gdt selector 0x18, flat code segment (64-bit) */
	.word	0xffff, 0x0000
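	/* 0xaf: G=1, L=1 (long mode), limit bits 19:16 = 0xf */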
	.byte	0x00, 0x9b, 0xaf, 0x00
smm_gdt_end:


.section ".jumptable", "a", @progbits

/* This is the SMM jump table. All cores use the same SMM handler
 * for simplicity. But SMM Entry needs to be different due to the
 * save state area. The jump table makes sure all CPUs jump into the
 * real handler on SMM entry.
 */

/* This code currently supports up to 4 CPU cores. If more than 4 CPU cores
 * are to be used, the table below has to be updated, as well as smm.ld
 */

/* GNU AS/LD will always generate code that assumes CS is 0xa000. In reality
 * CS will be set to SMM_BASE[19:4] though. Knowing that the smm handler is the
 * first thing in the ASEG, we do a far jump here, to set CS to 0xa000.
 */

.code16
jumptable:
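	/* Entries are 0x400 bytes apart. With core 0 entering at 0xa8000 (see
	 * the map at the top of this file), core n enters at 0xa8000 - n * 0x400,
	 * hence core 3 comes first and core 0 last.
	 */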
	/* core 3 */
	ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00
	/* core 2 */
	ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00
	/* core 1 */
	ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00
	/* core 0 */
	ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00