blob: ea51d63e83f5285acdc93e13d30c2b6291200611 [file] [log] [blame]
Rocky Phaguraafb7a812020-07-21 14:48:48 -07001/* SPDX-License-Identifier: GPL-2.0-only */
2
Kyösti Mälkki84935f72021-01-11 20:13:34 +02003#include <acpi/acpi_gnvs.h>
Arthur Heymansaf04f3c2022-04-13 02:10:15 +02004#include <cbmem.h>
5#include <commonlib/helpers.h>
6#include <commonlib/region.h>
7#include <console/console.h>
Arthur Heymansa804f912022-05-30 14:39:45 +02008#include <cpu/cpu.h>
Arthur Heymansaf04f3c2022-04-13 02:10:15 +02009#include <cpu/x86/smm.h>
Arthur Heymansa804f912022-05-30 14:39:45 +020010#include <device/device.h>
Arthur Heymans7a51acf2024-02-02 18:35:00 +010011#include <device/mmio.h>
Arthur Heymansaf04f3c2022-04-13 02:10:15 +020012#include <rmodule.h>
Arthur Heymansaf04f3c2022-04-13 02:10:15 +020013#include <stdio.h>
Rocky Phaguraafb7a812020-07-21 14:48:48 -070014#include <string.h>
Elyes Haouasafc5f9b2022-10-02 10:26:55 +020015#include <types.h>
Rocky Phaguraafb7a812020-07-21 14:48:48 -070016
Rocky Phaguraafb7a812020-07-21 14:48:48 -070017#define SMM_CODE_SEGMENT_SIZE 0x10000
Rocky Phaguraafb7a812020-07-21 14:48:48 -070018
19/*
20 * Components that make up the SMRAM:
21 * 1. Save state - the total save state memory used
22 * 2. Stack - stacks for the CPUs in the SMM handler
23 * 3. Stub - SMM stub code for calling into handler
24 * 4. Handler - C-based SMM handler.
25 *
26 * The components are assumed to consist of one consecutive region.
27 */
28
Rocky Phaguraafb7a812020-07-21 14:48:48 -070029/*
30 * The stub is the entry point that sets up protected mode and stacks for each
31 * CPU. It then calls into the SMM handler module. It is encoded as an rmodule.
32 */
33extern unsigned char _binary_smmstub_start[];
34
35/* Per CPU minimum stack size. */
36#define SMM_MINIMUM_STACK_SIZE 32
37
/*
 * Per-CPU bookkeeping for the staggered SMM entry points. Filled in by
 * smm_create_map() and consumed when placing stub copies and when reporting
 * SMBASE/save-state locations.
 */
struct cpu_smm_info {
	uint8_t active;		/* non-zero once smm_create_map() assigned this slot */
	uintptr_t smbase;	/* SMBASE for this CPU; entry point at smbase + SMM_ENTRY_OFFSET */
	struct region ss;	/* this CPU's save state area (top of its 64K code segment) */
	struct region stub_code;	/* location of this CPU's copy of the SMM stub */
};
struct cpu_smm_info cpus[CONFIG_MAX_CPUS] = { 0 };
45
46/*
47 * This method creates a map of all the CPU entry points, save state locations
48 * and the beginning and end of code segments for each CPU. This map is used
49 * during relocation to properly align as many CPUs that can fit into the SMRAM
50 * region. For more information on how SMRAM works, refer to the latest Intel
51 * developer's manuals (volume 3, chapter 34). SMRAM is divided up into the
52 * following regions:
53 * +-----------------+ Top of SMRAM
Arthur Heymans1efca4d2023-05-17 18:10:47 +020054 * | MSEG |
Rocky Phaguraafb7a812020-07-21 14:48:48 -070055 * +-----------------+
56 * | common |
57 * | smi handler | 64K
58 * | |
59 * +-----------------+
60 * | CPU 0 code seg |
61 * +-----------------+
62 * | CPU 1 code seg |
63 * +-----------------+
64 * | CPU x code seg |
65 * +-----------------+
66 * | |
67 * | |
68 * +-----------------+
69 * | stacks |
70 * +-----------------+ <- START of SMRAM
71 *
72 * The code below checks when a code segment is full and begins placing the remainder
73 * CPUs in the lower segments. The entry point for each CPU is smbase + 0x8000
74 * and save state is smbase + 0x8000 + (0x8000 - state save size). Save state
75 * area grows downward into the CPUs entry point. Therefore staggering too many
76 * CPUs in one 32K block will corrupt CPU0's entry code as the save states move
77 * downward.
78 * input : smbase of first CPU (all other CPUs
79 * will go below this address)
80 * input : num_cpus in the system. The map will
81 * be created from 0 to num_cpus.
82 */
/*
 * Populate the file-scope 'cpus' array: for each of num_cpus, compute the
 * SMBASE, the stub code region and the save state region, staggering CPUs
 * within 64K code segments that extend downward from 'smbase'.
 * Returns 1 on success, 0 on failure.
 */
static int smm_create_map(const uintptr_t smbase, const unsigned int num_cpus,
			  const struct smm_loader_params *params)
{
	struct rmodule smm_stub;

	if (ARRAY_SIZE(cpus) < num_cpus) {
		printk(BIOS_ERR, "%s: increase MAX_CPUS in Kconfig\n", __func__);
		return 0;
	}

	/* Parse the stub rmodule only to learn its in-memory footprint. */
	if (rmodule_parse(&_binary_smmstub_start, &smm_stub)) {
		printk(BIOS_ERR, "%s: unable to get SMM module size\n", __func__);
		return 0;
	}

	/*
	 * How many CPUs can fit into one 64K segment?
	 * Make sure that the first stub does not overlap with the last save state of a segment.
	 */
	const size_t stub_size = rmodule_memory_size(&smm_stub);
	/* Stagger by the larger of save state and stub so neighbors never overlap. */
	const size_t needed_ss_size = MAX(params->cpu_save_state_size, stub_size);
	const size_t cpus_per_segment =
		(SMM_CODE_SEGMENT_SIZE - SMM_ENTRY_OFFSET - stub_size) / needed_ss_size;

	if (cpus_per_segment == 0) {
		printk(BIOS_ERR, "%s: CPUs won't fit in segment. Broken stub or save state size\n",
		       __func__);
		return 0;
	}

	for (unsigned int i = 0; i < num_cpus; i++) {
		/* Whole 64K segments below 'smbase' once a segment is full. */
		const size_t segment_number = i / cpus_per_segment;
		cpus[i].smbase = smbase - SMM_CODE_SEGMENT_SIZE * segment_number
			- needed_ss_size * (i % cpus_per_segment);
		cpus[i].stub_code.offset = cpus[i].smbase + SMM_ENTRY_OFFSET;
		cpus[i].stub_code.size = stub_size;
		/* Save state sits at the very top of this CPU's code segment. */
		cpus[i].ss.offset = cpus[i].smbase + SMM_CODE_SEGMENT_SIZE
			- params->cpu_save_state_size;
		cpus[i].ss.size = params->cpu_save_state_size;
		cpus[i].active = 1;
	}

	return 1;
}
127
128/*
129 * This method expects the smm relocation map to be complete.
130 * This method does not read any HW registers, it simply uses a
131 * map that was created during SMM setup.
132 * input: cpu_num - cpu number which is used as an index into the
133 * map to return the smbase
134 */
135u32 smm_get_cpu_smbase(unsigned int cpu_num)
136{
137 if (cpu_num < CONFIG_MAX_CPUS) {
138 if (cpus[cpu_num].active)
139 return cpus[cpu_num].smbase;
140 }
141 return 0;
142}
143
144/*
145 * This method assumes that at least 1 CPU has been set up from
146 * which it will place other CPUs below its smbase ensuring that
147 * save state does not clobber the first CPUs init code segment. The init
148 * code which is the smm stub code is the same for all CPUs. They enter
149 * smm, setup stacks (based on their apic id), enter protected mode
150 * and then jump to the common smi handler. The stack is allocated
151 * at the beginning of smram (aka tseg base, not smbase). The stack
152 * pointer for each CPU is calculated by using its apic id
153 * (code is in smm_stub.s)
154 * Each entry point will now have the same stub code which, sets up the CPU
155 * stack, enters protected mode and then jumps to the smi handler. It is
156 * important to enter protected mode before the jump because the "jump to
157 * address" might be larger than the 20bit address supported by real mode.
158 * SMI entry right now is in real mode.
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700159 * input: num_cpus - number of cpus that need relocation including
160 * the first CPU (though its code is already loaded)
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700161 */
162
Arthur Heymans346db922022-04-07 21:52:59 +0200163static void smm_place_entry_code(const unsigned int num_cpus)
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700164{
165 unsigned int i;
Elyes Haouas1f077972022-06-15 15:36:03 +0200166 size_t size;
Arthur Heymansdfff5c22021-02-15 23:39:01 +0100167
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700168 /* start at 1, the first CPU stub code is already there */
Arthur Heymans0effeb52022-05-23 22:43:51 +0200169 size = region_sz(&cpus[0].stub_code);
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700170 for (i = 1; i < num_cpus; i++) {
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700171 printk(BIOS_DEBUG,
Elyes Haouas1f077972022-06-15 15:36:03 +0200172 "SMM Module: placing smm entry code at %zx, cpu # 0x%x\n",
Arthur Heymans0effeb52022-05-23 22:43:51 +0200173 region_offset(&cpus[i].stub_code), i);
174 memcpy((void *)region_offset(&cpus[i].stub_code),
175 (void *)region_offset(&cpus[0].stub_code), size);
Elyes Haouas1f077972022-06-15 15:36:03 +0200176 printk(BIOS_SPEW, "%s: copying from %zx to %zx 0x%zx bytes\n",
Arthur Heymans0effeb52022-05-23 22:43:51 +0200177 __func__, region_offset(&cpus[0].stub_code),
178 region_offset(&cpus[i].stub_code), size);
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700179 }
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700180}
181
/* Set by smm_setup_stack(), consumed by smm_module_setup_stub(). */
static uintptr_t stack_top;	/* top of the contiguous per-CPU stack area */
static size_t g_stack_size;	/* stack size per CPU */
184
185int smm_setup_stack(const uintptr_t perm_smbase, const size_t perm_smram_size,
186 const unsigned int total_cpus, const size_t stack_size)
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700187{
Arthur Heymans96451a72021-10-28 15:14:18 +0200188 /* Need a minimum stack size and alignment. */
189 if (stack_size <= SMM_MINIMUM_STACK_SIZE || (stack_size & 3) != 0) {
190 printk(BIOS_ERR, "%s: need minimum stack size\n", __func__);
191 return -1;
192 }
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700193
Arthur Heymans96451a72021-10-28 15:14:18 +0200194 const size_t total_stack_size = total_cpus * stack_size;
195 if (total_stack_size >= perm_smram_size) {
196 printk(BIOS_ERR, "%s: Stack won't fit smram\n", __func__);
197 return -1;
198 }
199 stack_top = perm_smbase + total_stack_size;
200 g_stack_size = stack_size;
201 return 0;
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700202}
203
204/*
205 * Place the staggered entry points for each CPU. The entry points are
206 * staggered by the per CPU SMM save state size extending down from
207 * SMM_ENTRY_OFFSET.
208 */
Arthur Heymans346db922022-04-07 21:52:59 +0200209static void smm_stub_place_staggered_entry_points(const struct smm_loader_params *params)
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700210{
Arthur Heymans346db922022-04-07 21:52:59 +0200211 if (params->num_concurrent_save_states > 1)
212 smm_place_entry_code(params->num_concurrent_save_states);
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700213}
214
215/*
216 * The stub setup code assumes it is completely contained within the
217 * default SMRAM size (0x10000) for the default SMI handler (entry at
218 * 0x30000), but no assumption should be made for the permanent SMI handler.
219 * The placement of CPU entry points for permanent handler are determined
220 * by the number of CPUs in the system and the amount of SMRAM.
Arthur Heymanse6c35232021-02-16 13:19:18 +0100221 * There are potentially 2 regions to place
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700222 * within the default SMRAM size:
223 * 1. Save state areas
224 * 2. Stub code
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700225 *
Arthur Heymanse6c35232021-02-16 13:19:18 +0100226 * The save state always lives at the top of the CPUS smbase (and the entry
227 * point is at offset 0x8000). This allows only a certain number of CPUs with
228 * staggered entry points until the save state area comes down far enough to
229 * overwrite/corrupt the entry code (stub code). Therefore, an SMM map is
230 * created to avoid this corruption, see smm_create_map() above.
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700231 * This module setup code works for the default (0x30000) SMM handler setup and the
232 * permanent SMM handler.
 * The CPU stack is decided at runtime in the stub and is treated as a contiguous
 * region. As this might not fit the default SMRAM region, the same region used
Arthur Heymans96451a72021-10-28 15:14:18 +0200235 * by the permanent handler can be used during relocation.
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700236 */
/*
 * Load the SMM stub rmodule at smbase + SMM_ENTRY_OFFSET, fill in its runtime
 * parameters (stack, C handler, CR3, APIC-ID-to-CPU map) and replicate it to
 * the staggered per-CPU entry points. Returns 0 on success, -1 on failure.
 */
static int smm_module_setup_stub(const uintptr_t smbase, const size_t smm_size,
				 struct smm_loader_params *params)
{
	struct rmodule smm_stub;
	if (rmodule_parse(&_binary_smmstub_start, &smm_stub)) {
		printk(BIOS_ERR, "%s: unable to parse smm stub\n", __func__);
		return -1;
	}
	const size_t stub_size = rmodule_memory_size(&smm_stub);

	/* Some sanity check: the stub must fit below the entry offset. */
	if (stub_size >= SMM_ENTRY_OFFSET) {
		printk(BIOS_ERR, "%s: Stub too large\n", __func__);
		return -1;
	}

	/* The SMI entry point is always at SMBASE + 0x8000 (SMM_ENTRY_OFFSET). */
	const uintptr_t smm_stub_loc = smbase + SMM_ENTRY_OFFSET;
	if (rmodule_load((void *)smm_stub_loc, &smm_stub)) {
		printk(BIOS_ERR, "%s: load module failed\n", __func__);
		return -1;
	}

	/* Hand the stub the stack layout recorded by smm_setup_stack(). */
	struct smm_stub_params *stub_params = rmodule_parameters(&smm_stub);
	stub_params->stack_top = stack_top;
	stub_params->stack_size = g_stack_size;
	stub_params->c_handler = (uintptr_t)params->handler;
	stub_params->cr3 = params->cr3;

	/* This runs on the BSP. All the APs are its siblings */
	struct cpu_info *info = cpu_info();
	if (!info || !info->cpu) {
		printk(BIOS_ERR, "%s: Failed to find BSP struct device\n", __func__);
		return -1;
	}
	/* Map each enabled CPU's initial LAPIC ID to its CPU number. */
	int i = 0;
	for (struct device *dev = info->cpu; dev; dev = dev->sibling)
		if (dev->enabled)
			stub_params->apic_id_to_cpu[i++] = dev->path.apic.initial_lapicid;

	/* Every CPU the loader was told about must have been enumerated. */
	if (i != params->num_cpus) {
		printk(BIOS_ERR, "%s: Failed to set up apic map correctly\n", __func__);
		return -1;
	}

	printk(BIOS_DEBUG, "%s: stack_top = 0x%x\n", __func__, stub_params->stack_top);
	printk(BIOS_DEBUG, "%s: per cpu stack_size = 0x%x\n", __func__,
	       stub_params->stack_size);
	printk(BIOS_DEBUG, "%s: runtime.smm_size = 0x%zx\n", __func__, smm_size);

	smm_stub_place_staggered_entry_points(params);

	printk(BIOS_DEBUG, "SMM Module: stub loaded at %lx. Will call %p\n", smm_stub_loc,
	       params->handler);
	return 0;
}
292
293/*
294 * smm_setup_relocation_handler assumes the callback is already loaded in
295 * memory. i.e. Another SMM module isn't chained to the stub. The other
296 * assumption is that the stub will be entered from the default SMRAM
297 * location: 0x30000 -> 0x40000.
298 */
Arthur Heymans96451a72021-10-28 15:14:18 +0200299int smm_setup_relocation_handler(struct smm_loader_params *params)
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700300{
Arthur Heymanscfd32242021-10-28 13:59:54 +0200301 uintptr_t smram = SMM_DEFAULT_BASE;
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700302 printk(BIOS_SPEW, "%s: enter\n", __func__);
303 /* There can't be more than 1 concurrent save state for the relocation
304 * handler because all CPUs default to 0x30000 as SMBASE. */
305 if (params->num_concurrent_save_states > 1)
306 return -1;
307
308 /* A handler has to be defined to call for relocation. */
309 if (params->handler == NULL)
310 return -1;
311
312 /* Since the relocation handler always uses stack, adjust the number
313 * of concurrent stack users to be CONFIG_MAX_CPUS. */
Arthur Heymans2412c812021-10-28 15:19:39 +0200314 if (params->num_cpus == 0)
315 params->num_cpus = CONFIG_MAX_CPUS;
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700316
John Zhao457c6612021-04-21 10:13:17 -0700317 printk(BIOS_SPEW, "%s: exit\n", __func__);
Arthur Heymans1efca4d2023-05-17 18:10:47 +0200318 return smm_module_setup_stub(smram, SMM_DEFAULT_SIZE, params);
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700319}
320
/*
 * Fill the permanent SMI handler's runtime parameter block: SMRAM extents,
 * save state layout, GNVS pointer, CBMEM console, per-CPU save state tops,
 * log level and (optionally) the PCI resource store.
 */
static void setup_smihandler_params(struct smm_runtime *mod_params,
				    struct smm_loader_params *loader_params)
{
	uintptr_t tseg_base;
	size_t tseg_size;

	smm_region(&tseg_base, &tseg_size);

	mod_params->smbase = tseg_base;
	mod_params->smm_size = tseg_size;
	mod_params->save_state_size = loader_params->cpu_save_state_size;
	mod_params->num_cpus = loader_params->num_cpus;
	/* NOTE(review): truncated to 32 bits — presumably GNVS is below 4G. */
	mod_params->gnvs_ptr = (uint32_t)(uintptr_t)acpi_get_gnvs();
	/* Share the CBMEM console with the SMI handler when one exists. */
	const struct cbmem_entry *cbmemc;
	if (CONFIG(CONSOLE_CBMEM) && (cbmemc = cbmem_entry_find(CBMEM_ID_CONSOLE))) {
		mod_params->cbmemc = cbmem_entry_start(cbmemc);
		mod_params->cbmemc_size = cbmem_entry_size(cbmemc);
	} else {
		mod_params->cbmemc = 0;
		mod_params->cbmemc_size = 0;
	}

	/* Tell the handler where each CPU's save state ends (its top). */
	for (int i = 0; i < loader_params->num_cpus; i++)
		mod_params->save_state_top[i] = region_end(&cpus[i].ss);

	if (CONFIG(RUNTIME_CONFIGURABLE_SMM_LOGLEVEL))
		mod_params->smm_log_level = mainboard_set_smm_log_level();
	else
		mod_params->smm_log_level = 0;

	if (CONFIG(SMM_PCI_RESOURCE_STORE))
		smm_pci_resource_store_init(mod_params);
}
Arthur Heymansb4ba2892021-10-28 16:48:36 +0200354
Arthur Heymansaf04f3c2022-04-13 02:10:15 +0200355static void print_region(const char *name, const struct region region)
356{
Elyes Haouas1f077972022-06-15 15:36:03 +0200357 printk(BIOS_DEBUG, "%-12s [0x%zx-0x%zx]\n", name, region_offset(&region),
Arthur Heymansaf04f3c2022-04-13 02:10:15 +0200358 region_end(&region));
359}
360
Arthur Heymans7a51acf2024-02-02 18:35:00 +0100361/* STM + Handler + (Stub + Save state) * CONFIG_MAX_CPUS + stacks + page tables*/
362#define SMM_REGIONS_ARRAY_SIZE (1 + 1 + CONFIG_MAX_CPUS * 2 + 1 + 1)
Arthur Heymansaf04f3c2022-04-13 02:10:15 +0200363
364static int append_and_check_region(const struct region smram,
365 const struct region region,
366 struct region *region_list,
367 const char *name)
368{
369 unsigned int region_counter = 0;
370 for (; region_counter < SMM_REGIONS_ARRAY_SIZE; region_counter++)
Nico Huberd7612e92024-01-11 18:50:50 +0100371 if (region_sz(&region_list[region_counter]) == 0)
Arthur Heymansaf04f3c2022-04-13 02:10:15 +0200372 break;
373
374 if (region_counter >= SMM_REGIONS_ARRAY_SIZE) {
375 printk(BIOS_ERR, "Array used to check regions too small\n");
376 return 1;
377 }
378
379 if (!region_is_subregion(&smram, &region)) {
380 printk(BIOS_ERR, "%s not in SMM\n", name);
381 return 1;
382 }
383
384 print_region(name, region);
385 for (unsigned int i = 0; i < region_counter; i++) {
386 if (region_overlap(&region_list[i], &region)) {
387 printk(BIOS_ERR, "%s overlaps with a previous region\n", name);
388 return 1;
389 }
390 }
391
392 region_list[region_counter] = region;
393
394 return 0;
395}
396
/* x86 paging-structure entry bits (see Intel SDM Vol. 3, 4-level paging). */
#define _PRES (1ULL << 0)
#define _RW (1ULL << 1)
#define _US (1ULL << 2)
#define _A (1ULL << 5)
#define _D (1ULL << 6)
#define _PS (1ULL << 7)
/* Directory entry (points to a lower-level table at address 'a'). */
#define _GEN_DIR(a) (_PRES + _RW + _US + _A + (a))
/* Leaf large-page entry (PS set) mapping physical address 'a'. */
#define _GEN_PAGE(a) (_PRES + _RW + _US + _PS + _A + _D + (a))
/* Size in bytes of one 64-bit paging-structure entry, not a CPU page. */
#define PAGE_SIZE 8

/* Return the PM4LE */
/*
 * Build identity-mapped page tables for the first 4 GiB directly below
 * 'handler_base' and return the 4K-aligned address of the PML4 table
 * (suitable for CR3). Uses 1G pages when the CPU supports them (CPUID
 * 0x80000001 EDX bit 26), otherwise 2M pages via four page directories.
 */
static uintptr_t install_page_table(const uintptr_t handler_base)
{
	const bool one_g_pages = !!(cpuid_edx(0x80000001) & (1 << 26));
	/* 4 1G pages or 4 PDPE entries with 512 * 2M pages */
	const size_t pages_needed = one_g_pages ? 4 : 2048 + 4;
	const uintptr_t pages_base = ALIGN_DOWN(handler_base - pages_needed * PAGE_SIZE, 4096);
	/* PML4 table sits on its own 4K page just below the other tables. */
	const uintptr_t pm4le = ALIGN_DOWN(pages_base - 8, 4096);

	if (one_g_pages) {
		/* PDPT at pages_base: four 1G leaf entries covering 0-4G. */
		for (size_t i = 0; i < 4; i++)
			write64p(pages_base + i * PAGE_SIZE, _GEN_PAGE(1ull * GiB * i));
		write64p(pm4le, _GEN_DIR(pages_base));
	} else {
		/* Four page directories (2048 entries) of 2M leaf pages covering 0-4G. */
		for (size_t i = 0; i < 2048; i++)
			write64p(pages_base + i * PAGE_SIZE, _GEN_PAGE(2ull * MiB * i));
		/* PDPT follows the directories; PML4 entry points to it. */
		write64p(pm4le, _GEN_DIR(pages_base + 2048 * PAGE_SIZE));
		for (size_t i = 0; i < 4; i++)
			write64p(pages_base + (2048 + i) * PAGE_SIZE, _GEN_DIR(pages_base + 4096 * i));
	}
	return pm4le;
}
429
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700430/*
 * The SMM module is placed within the provided region in the following
432 * manner:
433 * +-----------------+ <- smram + size
434 * | BIOS resource |
435 * | list (STM) |
436 * +-----------------+
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700437 * | smi handler |
438 * | ... |
Arthur Heymans7a51acf2024-02-02 18:35:00 +0100439 * +-----------------+
440 * | page tables |
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700441 * +-----------------+ <- cpu0
442 * | stub code | <- cpu1
443 * | stub code | <- cpu2
444 * | stub code | <- cpu3, etc
445 * | |
446 * | |
447 * | |
448 * | stacks |
449 * +-----------------+ <- smram start
Arthur Heymansaf04f3c2022-04-13 02:10:15 +0200450 *
451 * With CONFIG(SMM_TSEG) the stubs will be placed in the same segment as the
452 * permanent handler and the stacks.
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700453 */
/*
 * Lay out and load everything into SMRAM, top-down: optional STM/MSEG, the
 * C SMI handler, (on x86_64) page tables, the staggered per-CPU stubs and
 * save states, and the stacks at the SMRAM base. Every region is validated
 * against SMRAM bounds and against all previously placed regions.
 * Returns 0 on success, -1 on any layout or load failure.
 */
int smm_load_module(const uintptr_t smram_base, const size_t smram_size,
		    struct smm_loader_params *params)
{
	/*
	 * Place in .bss to reduce stack usage.
	 * TODO: once CPU_INFO_V2 is used everywhere, use smaller stack for APs and move
	 * this back to the BSP stack.
	 */
	static struct region region_list[SMM_REGIONS_ARRAY_SIZE] = {};

	struct rmodule smi_handler;
	if (rmodule_parse(&_binary_smm_start, &smi_handler))
		return -1;

	const struct region smram = { .offset = smram_base, .size = smram_size };
	const uintptr_t smram_top = region_end(&smram);

	/* STM reserves MSEG plus the BIOS resource list at the top of SMRAM. */
	const size_t stm_size =
		CONFIG(STM) ? CONFIG_MSEG_SIZE + CONFIG_BIOS_RESOURCE_LIST_SIZE : 0;

	if (CONFIG(STM)) {
		struct region stm = {};
		stm.offset = smram_top - stm_size;
		stm.size = stm_size;
		if (append_and_check_region(smram, stm, region_list, "STM"))
			return -1;
		printk(BIOS_DEBUG, "MSEG size 0x%x\n", CONFIG_MSEG_SIZE);
		printk(BIOS_DEBUG, "BIOS res list 0x%x\n", CONFIG_BIOS_RESOURCE_LIST_SIZE);
	}

	/* The handler goes directly below the STM area, suitably aligned. */
	const size_t handler_size = rmodule_memory_size(&smi_handler);
	const size_t handler_alignment = rmodule_load_alignment(&smi_handler);
	const uintptr_t handler_base =
		ALIGN_DOWN(smram_top - stm_size - handler_size,
			   handler_alignment);
	struct region handler = {
		.offset = handler_base,
		.size = handler_size
	};
	if (append_and_check_region(smram, handler, region_list, "HANDLER"))
		return -1;

	/* On x86_64 the page tables sit between handler and stub segments. */
	uintptr_t stub_segment_base;
	if (ENV_X86_64) {
		uintptr_t pt_base = install_page_table(handler_base);
		struct region page_tables = {
			.offset = pt_base,
			.size = handler_base - pt_base,
		};
		if (append_and_check_region(smram, page_tables, region_list, "PAGE TABLES"))
			return -1;
		params->cr3 = pt_base;
		stub_segment_base = pt_base - SMM_CODE_SEGMENT_SIZE;
	} else {
		stub_segment_base = handler_base - SMM_CODE_SEGMENT_SIZE;
	}

	/* Compute the staggered per-CPU stub/save-state layout. */
	if (!smm_create_map(stub_segment_base, params->num_concurrent_save_states, params)) {
		printk(BIOS_ERR, "%s: Error creating CPU map\n", __func__);
		return -1;
	}
	/* Validate every CPU's save state and stub region against the map. */
	for (unsigned int i = 0; i < params->num_concurrent_save_states; i++) {
		printk(BIOS_DEBUG, "\nCPU %u\n", i);
		char string[13];
		snprintf(string, sizeof(string), " ss%d", i);
		if (append_and_check_region(smram, cpus[i].ss, region_list, string))
			return -1;
		snprintf(string, sizeof(string), " stub%d", i);
		if (append_and_check_region(smram, cpus[i].stub_code, region_list, string))
			return -1;
	}

	/* Stacks occupy the bottom of SMRAM, one per concurrent save state. */
	struct region stacks = {
		.offset = smram_base,
		.size = params->num_concurrent_save_states * CONFIG_SMM_MODULE_STACK_SIZE
	};
	printk(BIOS_DEBUG, "\n");
	if (append_and_check_region(smram, stacks, region_list, "stacks"))
		return -1;

	/* Layout verified; now actually load the handler and the stubs. */
	if (rmodule_load((void *)handler_base, &smi_handler))
		return -1;

	struct smm_runtime *smihandler_params = rmodule_parameters(&smi_handler);
	params->handler = rmodule_entry(&smi_handler);
	setup_smihandler_params(smihandler_params, params);

	return smm_module_setup_stub(stub_segment_base, smram_size, params);
}