/* SPDX-License-Identifier: GPL-2.0-only */
Stefan Reinauer | debb11f | 2008-10-29 04:46:52 +0000 | [diff] [blame] | 2 | |
/* NOTE: This handler assumes the SMM window goes from 0xa0000
 * to 0xaffff. In fact, at least on Intel Core CPUs (i945 chipset)
 * the SMM window is 128K big, covering 0xa0000 to 0xbffff.
 * So there is a lot of potential for growth in here. Let's stick
 * to 64k if we can though.
 */
| 9 | |
Elyes HAOUAS | a9473ec | 2018-10-24 15:55:53 +0200 | [diff] [blame] | 10 | #include <cpu/x86/lapic_def.h> |
Patrick Rudolph | 03a7952 | 2019-09-29 11:08:33 +0200 | [diff] [blame] | 11 | #include <cpu/x86/msr.h> |
Patrick Georgi | ce2564a | 2015-09-05 20:21:24 +0200 | [diff] [blame] | 12 | |
/*
 * +--------------------------------+ 0xaffff
 * | Save State Map Node 0          |
 * | Save State Map Node 1          |
 * | Save State Map Node 2          |
 * | Save State Map Node 3          |
 * | ...                            |
 * +--------------------------------+ 0xaf000
 * |                                |
 * |                                |
 * |                                |
 * +--------------------------------+ 0xa8400
 * | SMM Entry Node 0 (+ stack)     |
 * +--------------------------------+ 0xa8000
 * | SMM Entry Node 1 (+ stack)     |
 * | SMM Entry Node 2 (+ stack)     |
 * | SMM Entry Node 3 (+ stack)     |
 * | ...                            |
 * +--------------------------------+ 0xa7400
 * |                                |
 * | SMM Handler                    |
 * |                                |
 * +--------------------------------+ 0xa0000
 *
 */
| 38 | |
/* SMM_HANDLER_OFFSET is the 16bit offset within the ASEG
 * at which smm_handler_start lives. At the moment the handler
 * lives right at 0xa0000, so the offset is 0.
 */

#define SMM_HANDLER_OFFSET 0x0000

#if ENV_X86_64
/* Scratch slots used to preserve IA32_EFER across the handler:
 * RSM does not restore MSRs, and entering long mode modifies
 * IA32_EFER, so the entry code saves the rdmsr result (eax/edx)
 * here and writes it back just before rsm.
 * NOTE(review): these slots are shared, not per-core — this appears
 * to assume cores do not execute the 64-bit save/restore path
 * concurrently; confirm against the platform's SMI serialization.
 */
.bss
ia32efer_backup_eax:
	.long 0
ia32efer_backup_edx:
	.long 0
#endif
| 53 | |
/* initially SMM is some sort of real mode. Let gcc know
 * how to treat the SMM handler stub
 */

.section ".handler", "a", @progbits

.code16

/**
 * SMM code to enable protected mode and jump to the
 * C-written function void smi_handler(u32 smm_revision)
 *
 * All the bad magic is not all that bad after all.
 *
 * Flow: load GDT via %cs-relative access (no usable data segment
 * yet) -> enter protected mode with caching disabled -> set flat
 * segments -> derive a per-core stack from the LAPIC ID -> on
 * x86_64 builds, save IA32_EFER and enter long mode -> call the C
 * handler -> (x86_64) drop back to protected mode and restore
 * IA32_EFER -> rsm.
 */
#define SMM_START 0xa0000
#define SMM_END 0xb0000
#if SMM_END <= SMM_START
#error invalid SMM configuration
#endif
.global smm_handler_start
smm_handler_start:
#if CONFIG(SMM_LAPIC_REMAP_MITIGATION)
	/* Check if the LAPIC register block overlaps with SMM.
	 * This block needs to work without data accesses because they
	 * may be routed into the LAPIC register block.
	 * Code accesses, on the other hand, are never routed to LAPIC,
	 * which is what makes this work in the first place.
	 */
	mov $LAPIC_BASE_MSR, %ecx
	rdmsr
	and $(~0xfff), %eax		/* strip low flag bits; eax = LAPIC base */
	sub $(SMM_START), %eax
	/* Unsigned range check: jump taken only when the LAPIC base
	 * lies outside [SMM_START, SMM_END). */
	cmp $(SMM_END - SMM_START), %eax
	ja untampered_lapic
1:
	/* emit "Crash" on serial */
	mov $(CONFIG_TTYS0_BASE), %dx
	mov $'C', %al
	out %al, (%dx)
	mov $'r', %al
	out %al, (%dx)
	mov $'a', %al
	out %al, (%dx)
	mov $'s', %al
	out %al, (%dx)
	mov $'h', %al
	out %al, (%dx)
	/* now crash for real */
	ud2
untampered_lapic:
#endif
	/* Load the GDT pointer %cs-relative: the pointer lives inside
	 * the ASEG right next to this code, and no data segment is set
	 * up yet in SMM "real mode". */
	movw $(smm_gdtptr16 - smm_handler_start + SMM_HANDLER_OFFSET), %bx
	lgdtl %cs:(%bx)

	movl %cr0, %eax
	andl $0x7FFAFFD1, %eax /* PG,AM,WP,NE,TS,EM,MP = 0 */
	orl $0x60000001, %eax /* CD, NW, PE = 1 */
	movl %eax, %cr0

	/* Enable protected mode */
	ljmpl $0x08, $1f

.code32
1:
	/* flush the cache after disabling it */
	wbinvd

	/* Use flat data segment */
	movw $0x10, %ax
	movw %ax, %ds
	movw %ax, %es
	movw %ax, %ss
	xor %ax, %ax /* zero out the gs and fs segment index */
	movw %ax, %fs
	movw %ax, %gs /* Will be used for cpu_info */

	/* FIXME: Incompatible with X2APIC_SUPPORT. */
	/* Get this CPU's LAPIC ID */
	movl $(LAPIC_DEFAULT_BASE | LAPIC_ID), %esi
	movl (%esi), %ecx
	shr $24, %ecx		/* xAPIC ID register keeps the ID in bits 31:24 */

	/* This is an ugly hack, and we should find a way to read the CPU index
	 * without relying on the LAPIC ID.
	 */

	/* calculate stack offset by multiplying the APIC ID
	 * by 1024 (0x400), and save that offset in ebp.
	 */
	shl $10, %ecx
	movl %ecx, %ebp

	/* We put the stack for each core right above
	 * its SMM entry point. Core 0 starts at 0xa8000,
	 * we spare 0x10 bytes for the jump to be sure.
	 */
	movl $0xa8010, %eax
	subl %ecx, %eax /* subtract offset, see above */
	movl %eax, %ebx /* Save bottom of stack in ebx */

#define SMM_STACK_SIZE (0x400 - 0x10)
	/* clear stack */
	cld
	movl %eax, %edi
	movl $(SMM_STACK_SIZE >> 2), %ecx
	xorl %eax, %eax
	rep stosl

	/* set new stack: esp = stack bottom + SMM_STACK_SIZE */
	addl $SMM_STACK_SIZE, %ebx
	movl %ebx, %esp

#if ENV_X86_64
	/* Backup IA32_EFER. Preserves ebx. */
	movl $(IA32_EFER), %ecx
	rdmsr
	movl %eax, ia32efer_backup_eax
	movl %edx, ia32efer_backup_edx

	/* Enable long mode. Preserves ebx. */
#include <cpu/x86/64bit/entry64.inc>

#endif
	/* Call C handler.
	 * NOTE(review): no explicit argument is set up before this call,
	 * despite the "smi_handler(u32 smm_revision)" wording above —
	 * confirm against the C prototype. */
	call smi_handler

#if ENV_X86_64
	/*
	 * The only reason to go back to protected mode is that RSM doesn't restore
	 * MSR registers and MSR IA32_EFER was modified by entering long mode.
	 * Drop to protected mode to safely operate on the IA32_EFER MSR.
	 */

	/* Disable long mode. */
#include <cpu/x86/64bit/exit32.inc>

	/* Restore IA32_EFER as RSM doesn't restore MSRs. */
	movl $(IA32_EFER), %ecx
	movl ia32efer_backup_eax, %eax
	movl ia32efer_backup_edx, %edx
	wrmsr
#endif

	/* To return, just do rsm. It will "clean up" protected mode */
	rsm
| 199 | |
.code16

.align 4, 0xff

/* GDT pseudo-descriptor for lgdtl: 16-bit limit followed by the
 * 32-bit linear address of smm_gdt. The address is computed as the
 * offset of smm_gdt within the handler plus the ASEG base 0xa0000,
 * because the link-time addresses are relative to the handler. */
smm_gdtptr16:
	.word smm_gdt_end - smm_gdt - 1
	.long smm_gdt - smm_handler_start + 0xa0000 + SMM_HANDLER_OFFSET

.code32

smm_gdt:
	/* The first GDT entry can not be used. Keep it zero */
	.long 0x00000000, 0x00000000

	/* gdt selector 0x08, flat code segment */
	.word 0xffff, 0x0000
	.byte 0x00, 0x9b, 0xcf, 0x00 /* G=1 and 0x0f, 4GB limit */

	/* gdt selector 0x10, flat data segment */
	.word 0xffff, 0x0000
	.byte 0x00, 0x93, 0xcf, 0x00

	/* gdt selector 0x18, flat code segment (64-bit, L bit set) */
	.word 0xffff, 0x0000
	.byte 0x00, 0x9b, 0xaf, 0x00
smm_gdt_end:
| 227 | |
.section ".jumptable", "a", @progbits

/* This is the SMM jump table. All cores use the same SMM handler
 * for simplicity. But SMM Entry needs to be different due to the
 * save state area. The jump table makes sure all CPUs jump into the
 * real handler on SMM entry.
 */

/* This code currently supports up to 4 CPU cores. If more than 4 CPU cores
 * shall be used, below table has to be updated, as well as smm.ld
 */

/* GNU AS/LD will always generate code that assumes CS is 0xa000. In reality
 * CS will be set to SMM_BASE[19:4] though. Knowing that the smm handler is the
 * first thing in the ASEG, we do a far jump here, to set CS to 0xa000.
 */

/* Each entry is padded to 1 KiB (.align 1024) so that a core whose
 * SMBASE-derived entry point lands on its slot executes exactly one
 * far jump. Core 0's stub is the LAST entry, matching the layout in
 * the diagram at the top of the file (core 0 entry at 0xa8000). */
.code16
jumptable:
	/* core 3 */
	ljmp $0xa000, $SMM_HANDLER_OFFSET
	.align 1024, 0x00
	/* core 2 */
	ljmp $0xa000, $SMM_HANDLER_OFFSET
	.align 1024, 0x00
	/* core 1 */
	ljmp $0xa000, $SMM_HANDLER_OFFSET
	.align 1024, 0x00
	/* core 0 */
	ljmp $0xa000, $SMM_HANDLER_OFFSET
	.align 1024, 0x00