/* SPDX-License-Identifier: GPL-2.0-only */

/* NOTE: This handler assumes the SMM window goes from 0xa0000
 * to 0xaffff. In fact, at least on Intel Core CPUs (i945 chipset)
 * the SMM window is 128 KiB in size, covering 0xa0000 to 0xbffff.
 * So there is a lot of potential for growth in here. Let's stick
 * to 64 KiB if we can though.
 */

#include <cpu/x86/lapic_def.h>
#include <cpu/x86/msr.h>

/*
 * +--------------------------------+ 0xaffff
 * |  Save State Map Node 0         |
 * |  Save State Map Node 1         |
 * |  Save State Map Node 2         |
 * |  Save State Map Node 3         |
 * |  ...                           |
 * +--------------------------------+ 0xaf000
 * |                                |
 * |                                |
 * |                                |
 * +--------------------------------+ 0xa8400
 * |  SMM Entry Node 0 (+ stack)    |
 * +--------------------------------+ 0xa8000
 * |  SMM Entry Node 1 (+ stack)    |
 * |  SMM Entry Node 2 (+ stack)    |
 * |  SMM Entry Node 3 (+ stack)    |
 * |  ...                           |
 * +--------------------------------+ 0xa7400
 * |                                |
 * |  SMM Handler                   |
 * |                                |
 * +--------------------------------+ 0xa0000
 *
 */

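/* In other words, each node (core) N is assumed to have its SMBASE at
 * 0xa0000 - N * 0x400, so that its entry point (SMBASE + 0x8000) and
 * its save state map (which ends at SMBASE + 0xffff) interleave exactly
 * as drawn above.
 */
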
/* SMM_HANDLER_OFFSET is the 16-bit offset within the ASEG
 * at which smm_handler_start lives. At the moment the handler
 * lives right at 0xa0000, so the offset is 0.
 */

#define SMM_HANDLER_OFFSET 0x0000

#if ENV_X86_64
.bss
ia32efer_backup_eax:
.long 0
ia32efer_backup_edx:
.long 0
#endif

/* Initially SMM runs in something like real mode. Let the assembler
 * know how to treat the SMM handler stub.
 */

.section ".handler", "a", @progbits

.code16

/**
 * SMM code to enable protected mode and jump to the
 * C-written function void smi_handler(u32 smm_revision)
 *
 * All the bad magic is not all that bad after all.
 */
#define SMM_START 0xa0000
#define SMM_END   0xb0000
#if SMM_END <= SMM_START
#error invalid SMM configuration
#endif
.global smm_handler_start
smm_handler_start:
#if CONFIG(SMM_LAPIC_REMAP_MITIGATION)
        /* Check if the LAPIC register block overlaps with SMM.
         * This block needs to work without data accesses because they
         * may be routed into the LAPIC register block.
         * Code accesses, on the other hand, are never routed to LAPIC,
         * which is what makes this work in the first place.
         */
        mov $LAPIC_BASE_MSR, %ecx
        rdmsr
        and $(~0xfff), %eax
        sub $(SMM_START), %eax
        cmp $(SMM_END - SMM_START), %eax
        ja untampered_lapic
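        /* The sub/cmp/ja pair above is a single unsigned range check:
         * (base - SMM_START) > (SMM_END - SMM_START) holds exactly when
         * the 4 KiB-aligned LAPIC base lies outside the SMM window.
         * E.g. the default base 0xfee00000 gives 0xfee00000 - 0xa0000 =
         * 0xfed60000 > 0x10000, so we jump past the crash path below.
         */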
1:
        /* emit "Crash" on serial */
        mov $(CONFIG_TTYS0_BASE), %dx
        mov $'C', %al
        out %al, (%dx)
        mov $'r', %al
        out %al, (%dx)
        mov $'a', %al
        out %al, (%dx)
        mov $'s', %al
        out %al, (%dx)
        mov $'h', %al
        out %al, (%dx)
        /* now crash for real */
        ud2
untampered_lapic:
#endif
        movw $(smm_gdtptr16 - smm_handler_start + SMM_HANDLER_OFFSET), %bx
        lgdtl %cs:(%bx)
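        /* The %cs: override matters here: %cs is known to be 0xa000 at
         * this point (see the jump table comment at the end of this
         * file), whereas the data segment registers still hold whatever
         * values SMM entry left in them.
         */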

        movl %cr0, %eax
        andl $0x7FFAFFD1, %eax /* PG,AM,WP,NE,TS,EM,MP = 0 */
        orl $0x60000001, %eax /* CD, NW, PE = 1 */
        movl %eax, %cr0

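        /* Decoded: 0x7FFAFFD1 clears PG (bit 31), AM (18), WP (16),
         * NE (5), TS (3), EM (2) and MP (1); 0x60000001 sets CD (30),
         * NW (29) and PE (0), i.e. enter protected mode with the cache
         * disabled. RSM restores the original CR0 on exit.
         */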
        /* Enable protected mode */
        ljmpl $0x08, $1f

.code32
1:
        /* flush the cache after disabling it */
        wbinvd

        /* Use flat data segment */
        movw $0x10, %ax
        movw %ax, %ds
        movw %ax, %es
        movw %ax, %ss
        movw %ax, %fs
        movw %ax, %gs

        /* FIXME: Incompatible with X2APIC_SUPPORT. */
        /* Get this CPU's LAPIC ID */
        movl $(LAPIC_DEFAULT_BASE | LAPIC_ID), %esi
        movl (%esi), %ecx
        shr $24, %ecx
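        /* The LAPIC ID register keeps the ID in its top byte
         * (bits 31:24), hence the shift.
         */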

        /* This is an ugly hack, and we should find a way to read the CPU index
         * without relying on the LAPIC ID.
         */
#if CONFIG(CPU_AMD_AGESA_FAMILY15_TN)
        /* LAPIC IDs start from 0x10; map that to the proper core index */
        subl $0x10, %ecx
#endif

        /* calculate stack offset by multiplying the APIC ID
         * by 1024 (0x400), and save that offset in ebp.
         */
        shl $10, %ecx
        movl %ecx, %ebp

        /* We put the stack for each core right above
         * its SMM entry point. Core 0 starts at 0xa8000,
         * we spare 0x10 bytes for the jump to be sure.
         */
        movl $0xa8010, %eax
        subl %ecx, %eax /* subtract offset, see above */
        movl %eax, %ebx /* Save bottom of stack in ebx */

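        /* Worked example, core 2: offset = 2 * 0x400 = 0x800, so the
         * stack bottom is 0xa8010 - 0x800 = 0xa7810 and the top ends up
         * at 0xa7810 + SMM_STACK_SIZE = 0xa7c00, i.e. at core 1's entry
         * point, growing downwards from there.
         */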
#define SMM_STACK_SIZE (0x400 - 0x10)
        /* clear stack */
        cld
        movl %eax, %edi
        movl $(SMM_STACK_SIZE >> 2), %ecx
        xorl %eax, %eax
        rep stosl

        /* set new stack */
        addl $SMM_STACK_SIZE, %ebx
        movl %ebx, %esp

#if ENV_X86_64
        /* Backup IA32_EFER. Preserves ebx. */
        movl $(IA32_EFER), %ecx
        rdmsr
        movl %eax, ia32efer_backup_eax
        movl %edx, ia32efer_backup_edx

        /* Enable long mode. Preserves ebx. */
#include <cpu/x86/64bit/entry64.inc>

#endif
        /* Call C handler */
        call smi_handler

#if ENV_X86_64
        /*
         * The only reason to go back to protected mode is that RSM doesn't
         * restore MSR registers and MSR IA32_EFER was modified by entering
         * long mode. Drop to protected mode to safely operate on the
         * IA32_EFER MSR.
         */

        /* Disable long mode. */
        #include <cpu/x86/64bit/exit32.inc>

        /* Restore IA32_EFER as RSM doesn't restore MSRs. */
        movl $(IA32_EFER), %ecx
        movl ia32efer_backup_eax, %eax
        movl ia32efer_backup_edx, %edx
        wrmsr
#endif

        /* To return, just do rsm. It will "clean up" protected mode */
        rsm

.code16

.align 4, 0xff

smm_gdtptr16:
        .word smm_gdt_end - smm_gdt - 1
        .long smm_gdt - smm_handler_start + 0xa0000 + SMM_HANDLER_OFFSET

.code32

smm_gdt:
        /* The first GDT entry cannot be used. Keep it zero */
        .long 0x00000000, 0x00000000

        /* gdt selector 0x08, flat code segment */
        .word 0xffff, 0x0000
        .byte 0x00, 0x9b, 0xcf, 0x00 /* G=1 and 0x0f, 4GB limit */

        /* gdt selector 0x10, flat data segment */
        .word 0xffff, 0x0000
        .byte 0x00, 0x93, 0xcf, 0x00

        /* gdt selector 0x18, flat code segment (64-bit) */
        .word 0xffff, 0x0000
        .byte 0x00, 0x9b, 0xaf, 0x00
smm_gdt_end:
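
/* Descriptor decoding for the entries above: all have base 0 and limit
 * 0xfffff with G=1, i.e. a 4 GiB segment. Access byte 0x9b = present,
 * DPL 0, execute/read code; 0x93 = present, DPL 0, read/write data.
 * Flag nibble 0xc = G=1, D=1 (32-bit); 0xa = G=1, L=1 (64-bit code).
 */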
229
230
231.section ".jumptable", "a", @progbits
232
233/* This is the SMM jump table. All cores use the same SMM handler
Stefan Reinauer14e22772010-04-27 06:56:47 +0000234 * for simplicity. But SMM Entry needs to be different due to the
Stefan Reinauerdebb11f2008-10-29 04:46:52 +0000235 * save state area. The jump table makes sure all CPUs jump into the
236 * real handler on SMM entry.
237 */
238
239/* This code currently supports up to 4 CPU cores. If more than 4 CPU cores
240 * shall be used, below table has to be updated, as well as smm.ld
241 */
242
243/* GNU AS/LD will always generate code that assumes CS is 0xa000. In reality
244 * CS will be set to SMM_BASE[19:4] though. Knowing that the smm handler is the
245 * first thing in the ASEG, we do a far jump here, to set CS to 0xa000.
246 */
247
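/* For example, core 1 with SMBASE 0x9fc00 enters with CS = 0x9fc0 and
 * %ip pointing into its slot below; the ljmp then reloads CS with
 * 0xa000 so that all following code runs at the addresses it was
 * linked for.
 */
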
.code16
jumptable:
        /* core 3 */
        ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00
        /* core 2 */
        ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00
        /* core 1 */
        ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00
        /* core 0 */
        ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00