/* SPDX-License-Identifier: GPL-2.0-only */

#include <arch/io.h>
#include <commonlib/bsd/compiler.h>
#include <commonlib/region.h>
#include <console/cbmem_console.h>
#include <console/console.h>
#include <cpu/cpu.h>
#include <cpu/x86/smm.h>
#include <rmodule.h>
#include <types.h>
#include <security/intel/stm/SmmStm.h>

#if CONFIG(SPI_FLASH_SMM)
#include <spi-generic.h>
#endif

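/* Cleared once SMM-context drivers have been initialized on the first SMI
 * (see smm_handler_start()). */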
static int do_driver_init = 1;

typedef enum { SMI_LOCKED, SMI_UNLOCKED } smi_semaphore;

/* SMI multiprocessing semaphore */
static volatile
__attribute__((aligned(4))) smi_semaphore smi_handler_status = SMI_UNLOCKED;

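/* Parameter block for this handler. It lives in the .module_parameters
 * section so the SMM module loader can fill it in before the first SMI. */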
static const volatile
__attribute((aligned(4), __section__(".module_parameters"))) struct smm_runtime smm_runtime;

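/* Try to take the handler lock with an atomic xchg: write SMI_LOCKED and
 * return non-zero only if the previous value was SMI_UNLOCKED, so exactly
 * one CPU proceeds to run the handler body. */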
static int smi_obtain_lock(void)
{
	u8 ret = SMI_LOCKED;

	asm volatile (
		"movb %2, %%al\n"
		"xchgb %%al, %1\n"
		"movb %%al, %0\n"
		: "=g" (ret), "=m" (smi_handler_status)
		: "g" (SMI_LOCKED)
		: "eax"
	);

	return (ret == SMI_UNLOCKED);
}

static void smi_release_lock(void)
{
	asm volatile (
		"movb %1, %%al\n"
		"xchgb %%al, %0\n"
		: "=m" (smi_handler_status)
		: "g" (SMI_UNLOCKED)
		: "eax"
	);
}

#if CONFIG(RUNTIME_CONFIGURABLE_SMM_LOGLEVEL)
int get_console_loglevel(void)
{
	return smm_runtime.smm_log_level;
}
#endif

void smm_get_smmstore_com_buffer(uintptr_t *base, size_t *size)
{
	*base = smm_runtime.smmstore_com_buffer_base;
	*size = smm_runtime.smmstore_com_buffer_size;
}

void smm_get_cbmemc_buffer(void **buffer_out, size_t *size_out)
{
	*buffer_out = smm_runtime.cbmemc;
	*size_out = smm_runtime.cbmemc_size;
}

void io_trap_handler(int smif)
{
	/* If a handler function handled a given IO trap, it
	 * shall return a non-zero value
	 */
	printk(BIOS_DEBUG, "SMI function trap 0x%x: ", smif);

	if (mainboard_io_trap_handler(smif))
		return;

	printk(BIOS_DEBUG, "Unknown function\n");
}

static u32 pci_orig;

/**
 * @brief Back up the PCI configuration address so we do not mess up the OS
 */
static void smi_backup_pci_address(void)
{
	pci_orig = inl(0xcf8);
}

/**
 * @brief Restore the previously backed-up PCI configuration address
 */
static void smi_restore_pci_address(void)
{
	outl(pci_orig, 0xcf8);
}

struct global_nvs *gnvs;

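/* Return the base of the given CPU's save state area. save_state_top[] holds
 * the top of each area; STM_PSD_SIZE accounts for the space reserved for the
 * STM processor SMM descriptor. */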
void *smm_get_save_state(int cpu)
{
	if (cpu > smm_runtime.num_cpus)
		return NULL;

	return (void *)(smm_runtime.save_state_top[cpu] -
			(smm_runtime.save_state_size - STM_PSD_SIZE));
}

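/* The SMM revision is stored at a fixed offset from the top of the save
 * state; read it from CPU 0's save state area. */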
uint32_t smm_revision(void)
{
	const uintptr_t save_state = (uintptr_t)(smm_get_save_state(0));

	return *(uint32_t *)(save_state + smm_runtime.save_state_size
			     - SMM_REVISION_OFFSET_FROM_TOP);
}

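/* Report whether a region overlaps either the relocated SMRAM used by this
 * handler or the legacy ASEG range at SMM_BASE. */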
bool smm_region_overlaps_handler(const struct region *r)
{
	const struct region r_smm = {smm_runtime.smbase, smm_runtime.smm_size};
	const struct region r_aseg = {SMM_BASE, SMM_DEFAULT_SIZE};

	return region_overlap(&r_smm, r) || region_overlap(&r_aseg, r);
}

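/* C entry point that every CPU reaches on an SMI; the per-CPU SMM stub passes
 * a struct smm_module_params pointer. Registered as the rmodule entry point
 * via RMODULE_ENTRY() below. */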
asmlinkage void smm_handler_start(void *arg)
{
	const struct smm_module_params *p;
	int cpu;
	uintptr_t actual_canary;
	uintptr_t expected_canary;

	p = arg;
	cpu = p->cpu;
	expected_canary = (uintptr_t)p->canary;

	/* Set the global GNVS pointer. It's OK to race as the value
	 * will be the same across CPUs as well as multiple SMIs. */
	gnvs = (void *)(uintptr_t)smm_runtime.gnvs_ptr;

	if (cpu >= CONFIG_MAX_CPUS) {
		/* Do not log messages to console here, it is not thread safe */
		return;
	}

	/* Are we ok to execute the handler? */
	if (!smi_obtain_lock()) {
		/* For security reasons we don't release the other CPUs
		 * until the CPU with the lock is actually done */
		while (smi_handler_status == SMI_LOCKED) {
			asm volatile (
				".byte 0xf3, 0x90\n" /* PAUSE */
			);
		}
		return;
	}

	smi_backup_pci_address();

	smm_soc_early_init();

	console_init();

	printk(BIOS_SPEW, "\nSMI# #%d\n", cpu);

	/* Allow drivers to initialize variables in SMM context. */
	if (do_driver_init) {
#if CONFIG(SPI_FLASH_SMM)
		spi_init();
#endif
		do_driver_init = 0;
	}

	cpu_smi_handler();
	northbridge_smi_handler();
	southbridge_smi_handler();

	smi_restore_pci_address();

	actual_canary = *p->canary;

	if (actual_canary != expected_canary) {
		printk(BIOS_DEBUG, "canary 0x%lx != 0x%lx\n", actual_canary,
		       expected_canary);

		/* Don't die if we can't indicate an error. */
		if (CONFIG(DEBUG_SMI))
			die("SMM Handler caused a stack overflow\n");
	}

	smm_soc_exit();

	smi_release_lock();

	/* De-assert SMI# signal to allow another SMI */
	southbridge_smi_set_eos();
}

#if CONFIG(SMM_PCI_RESOURCE_STORE)
const volatile struct smm_pci_resource_info *smm_get_pci_resource_store(void)
{
	return &smm_runtime.pci_resources[0];
}
#endif

RMODULE_ENTRY(smm_handler_start);

/* Provide a default implementation for all weak handlers so that relocation
 * entries in the modules make sense. Without default implementations the
 * weak relocations w/o a symbol have a 0 address which is where the modules
 * are linked at. */
int __weak mainboard_io_trap_handler(int smif) { return 0; }
void __weak cpu_smi_handler(void) {}
void __weak northbridge_smi_handler(void) {}
void __weak southbridge_smi_handler(void) {}
void __weak mainboard_smi_gpi(u32 gpi_sts) {}
int __weak mainboard_smi_apmc(u8 data) { return 0; }
void __weak mainboard_smi_sleep(u8 slp_typ) {}
void __weak mainboard_smi_finalize(void) {}

void __weak smm_soc_early_init(void) {}
void __weak smm_soc_exit(void) {}