blob: d965593c55d0efb5cf985485656083ff6d21afb4 [file] [log] [blame]
Rocky Phaguraafb7a812020-07-21 14:48:48 -07001/* SPDX-License-Identifier: GPL-2.0-only */
2
Kyösti Mälkki84935f72021-01-11 20:13:34 +02003#include <acpi/acpi_gnvs.h>
Arthur Heymansaf04f3c2022-04-13 02:10:15 +02004#include <cbmem.h>
5#include <commonlib/helpers.h>
6#include <commonlib/region.h>
7#include <console/console.h>
Arthur Heymansa804f912022-05-30 14:39:45 +02008#include <cpu/cpu.h>
Arthur Heymansaf04f3c2022-04-13 02:10:15 +02009#include <cpu/x86/smm.h>
Arthur Heymansa804f912022-05-30 14:39:45 +020010#include <device/device.h>
Arthur Heymansaf04f3c2022-04-13 02:10:15 +020011#include <rmodule.h>
Arthur Heymansaf04f3c2022-04-13 02:10:15 +020012#include <stdio.h>
Rocky Phaguraafb7a812020-07-21 14:48:48 -070013#include <string.h>
Elyes Haouasafc5f9b2022-10-02 10:26:55 +020014#include <types.h>
Rocky Phaguraafb7a812020-07-21 14:48:48 -070015
Rocky Phaguraafb7a812020-07-21 14:48:48 -070016#define SMM_CODE_SEGMENT_SIZE 0x10000
Rocky Phaguraafb7a812020-07-21 14:48:48 -070017
18/*
19 * Components that make up the SMRAM:
20 * 1. Save state - the total save state memory used
21 * 2. Stack - stacks for the CPUs in the SMM handler
22 * 3. Stub - SMM stub code for calling into handler
23 * 4. Handler - C-based SMM handler.
24 *
25 * The components are assumed to consist of one consecutive region.
26 */
27
Rocky Phaguraafb7a812020-07-21 14:48:48 -070028/*
29 * The stub is the entry point that sets up protected mode and stacks for each
30 * CPU. It then calls into the SMM handler module. It is encoded as an rmodule.
31 */
32extern unsigned char _binary_smmstub_start[];
33
34/* Per CPU minimum stack size. */
35#define SMM_MINIMUM_STACK_SIZE 32
36
/* Per-CPU bookkeeping for the staggered SMM entry layout built by smm_create_map(). */
struct cpu_smm_info {
	uint8_t active;		/* Non-zero once this entry has been populated. */
	uintptr_t smbase;	/* This CPU's SMBASE; entry point is smbase + SMM_ENTRY_OFFSET. */
	struct region ss;	/* Save state area at the top of this CPU's 64K code segment. */
	struct region stub_code;	/* Location/size of this CPU's copy of the SMM stub. */
};
struct cpu_smm_info cpus[CONFIG_MAX_CPUS] = { 0 };
44
45/*
46 * This method creates a map of all the CPU entry points, save state locations
47 * and the beginning and end of code segments for each CPU. This map is used
48 * during relocation to properly align as many CPUs that can fit into the SMRAM
49 * region. For more information on how SMRAM works, refer to the latest Intel
50 * developer's manuals (volume 3, chapter 34). SMRAM is divided up into the
51 * following regions:
52 * +-----------------+ Top of SMRAM
Arthur Heymans1efca4d2023-05-17 18:10:47 +020053 * | MSEG |
Rocky Phaguraafb7a812020-07-21 14:48:48 -070054 * +-----------------+
55 * | common |
56 * | smi handler | 64K
57 * | |
58 * +-----------------+
59 * | CPU 0 code seg |
60 * +-----------------+
61 * | CPU 1 code seg |
62 * +-----------------+
63 * | CPU x code seg |
64 * +-----------------+
65 * | |
66 * | |
67 * +-----------------+
68 * | stacks |
69 * +-----------------+ <- START of SMRAM
70 *
71 * The code below checks when a code segment is full and begins placing the remainder
72 * CPUs in the lower segments. The entry point for each CPU is smbase + 0x8000
73 * and save state is smbase + 0x8000 + (0x8000 - state save size). Save state
74 * area grows downward into the CPUs entry point. Therefore staggering too many
75 * CPUs in one 32K block will corrupt CPU0's entry code as the save states move
76 * downward.
77 * input : smbase of first CPU (all other CPUs
78 * will go below this address)
79 * input : num_cpus in the system. The map will
80 * be created from 0 to num_cpus.
81 */
static int smm_create_map(const uintptr_t smbase, const unsigned int num_cpus,
			  const struct smm_loader_params *params)
{
	struct rmodule smm_stub;

	if (ARRAY_SIZE(cpus) < num_cpus) {
		printk(BIOS_ERR, "%s: increase MAX_CPUS in Kconfig\n", __func__);
		return 0;
	}

	/* Parse the stub rmodule only to learn its in-memory size. */
	if (rmodule_parse(&_binary_smmstub_start, &smm_stub)) {
		printk(BIOS_ERR, "%s: unable to get SMM module size\n", __func__);
		return 0;
	}

	/*
	 * How many CPUs can fit into one 64K segment?
	 * Make sure that the first stub does not overlap with the last save state of a segment.
	 */
	const size_t stub_size = rmodule_memory_size(&smm_stub);
	/* Stagger by at least the stub size so consecutive stubs never overlap. */
	const size_t needed_ss_size = MAX(params->cpu_save_state_size, stub_size);
	const size_t cpus_per_segment =
		(SMM_CODE_SEGMENT_SIZE - SMM_ENTRY_OFFSET - stub_size) / needed_ss_size;

	if (cpus_per_segment == 0) {
		printk(BIOS_ERR, "%s: CPUs won't fit in segment. Broken stub or save state size\n",
		       __func__);
		return 0;
	}

	/*
	 * Assign each CPU an SMBASE, walking downward: CPUs within a segment are
	 * staggered by needed_ss_size; a full segment drops down a further 64K.
	 */
	for (unsigned int i = 0; i < num_cpus; i++) {
		const size_t segment_number = i / cpus_per_segment;
		cpus[i].smbase = smbase - SMM_CODE_SEGMENT_SIZE * segment_number
			- needed_ss_size * (i % cpus_per_segment);
		/* Hardware enters SMM at smbase + 0x8000; the stub lives there. */
		cpus[i].stub_code.offset = cpus[i].smbase + SMM_ENTRY_OFFSET;
		cpus[i].stub_code.size = stub_size;
		/* The save state occupies the very top of this CPU's 64K segment. */
		cpus[i].ss.offset = cpus[i].smbase + SMM_CODE_SEGMENT_SIZE
			- params->cpu_save_state_size;
		cpus[i].ss.size = params->cpu_save_state_size;
		cpus[i].active = 1;
	}

	return 1;
}
126
127/*
128 * This method expects the smm relocation map to be complete.
129 * This method does not read any HW registers, it simply uses a
130 * map that was created during SMM setup.
131 * input: cpu_num - cpu number which is used as an index into the
132 * map to return the smbase
133 */
134u32 smm_get_cpu_smbase(unsigned int cpu_num)
135{
136 if (cpu_num < CONFIG_MAX_CPUS) {
137 if (cpus[cpu_num].active)
138 return cpus[cpu_num].smbase;
139 }
140 return 0;
141}
142
143/*
144 * This method assumes that at least 1 CPU has been set up from
145 * which it will place other CPUs below its smbase ensuring that
146 * save state does not clobber the first CPUs init code segment. The init
147 * code which is the smm stub code is the same for all CPUs. They enter
148 * smm, setup stacks (based on their apic id), enter protected mode
149 * and then jump to the common smi handler. The stack is allocated
150 * at the beginning of smram (aka tseg base, not smbase). The stack
151 * pointer for each CPU is calculated by using its apic id
152 * (code is in smm_stub.s)
153 * Each entry point will now have the same stub code which, sets up the CPU
154 * stack, enters protected mode and then jumps to the smi handler. It is
155 * important to enter protected mode before the jump because the "jump to
156 * address" might be larger than the 20bit address supported by real mode.
157 * SMI entry right now is in real mode.
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700158 * input: num_cpus - number of cpus that need relocation including
159 * the first CPU (though its code is already loaded)
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700160 */
161
Arthur Heymans346db922022-04-07 21:52:59 +0200162static void smm_place_entry_code(const unsigned int num_cpus)
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700163{
164 unsigned int i;
Elyes Haouas1f077972022-06-15 15:36:03 +0200165 size_t size;
Arthur Heymansdfff5c22021-02-15 23:39:01 +0100166
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700167 /* start at 1, the first CPU stub code is already there */
Arthur Heymans0effeb52022-05-23 22:43:51 +0200168 size = region_sz(&cpus[0].stub_code);
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700169 for (i = 1; i < num_cpus; i++) {
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700170 printk(BIOS_DEBUG,
Elyes Haouas1f077972022-06-15 15:36:03 +0200171 "SMM Module: placing smm entry code at %zx, cpu # 0x%x\n",
Arthur Heymans0effeb52022-05-23 22:43:51 +0200172 region_offset(&cpus[i].stub_code), i);
173 memcpy((void *)region_offset(&cpus[i].stub_code),
174 (void *)region_offset(&cpus[0].stub_code), size);
Elyes Haouas1f077972022-06-15 15:36:03 +0200175 printk(BIOS_SPEW, "%s: copying from %zx to %zx 0x%zx bytes\n",
Arthur Heymans0effeb52022-05-23 22:43:51 +0200176 __func__, region_offset(&cpus[0].stub_code),
177 region_offset(&cpus[i].stub_code), size);
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700178 }
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700179}
180
Arthur Heymans96451a72021-10-28 15:14:18 +0200181static uintptr_t stack_top;
182static size_t g_stack_size;
183
184int smm_setup_stack(const uintptr_t perm_smbase, const size_t perm_smram_size,
185 const unsigned int total_cpus, const size_t stack_size)
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700186{
Arthur Heymans96451a72021-10-28 15:14:18 +0200187 /* Need a minimum stack size and alignment. */
188 if (stack_size <= SMM_MINIMUM_STACK_SIZE || (stack_size & 3) != 0) {
189 printk(BIOS_ERR, "%s: need minimum stack size\n", __func__);
190 return -1;
191 }
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700192
Arthur Heymans96451a72021-10-28 15:14:18 +0200193 const size_t total_stack_size = total_cpus * stack_size;
194 if (total_stack_size >= perm_smram_size) {
195 printk(BIOS_ERR, "%s: Stack won't fit smram\n", __func__);
196 return -1;
197 }
198 stack_top = perm_smbase + total_stack_size;
199 g_stack_size = stack_size;
200 return 0;
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700201}
202
203/*
204 * Place the staggered entry points for each CPU. The entry points are
205 * staggered by the per CPU SMM save state size extending down from
206 * SMM_ENTRY_OFFSET.
207 */
Arthur Heymans346db922022-04-07 21:52:59 +0200208static void smm_stub_place_staggered_entry_points(const struct smm_loader_params *params)
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700209{
Arthur Heymans346db922022-04-07 21:52:59 +0200210 if (params->num_concurrent_save_states > 1)
211 smm_place_entry_code(params->num_concurrent_save_states);
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700212}
213
214/*
215 * The stub setup code assumes it is completely contained within the
216 * default SMRAM size (0x10000) for the default SMI handler (entry at
217 * 0x30000), but no assumption should be made for the permanent SMI handler.
218 * The placement of CPU entry points for permanent handler are determined
219 * by the number of CPUs in the system and the amount of SMRAM.
Arthur Heymanse6c35232021-02-16 13:19:18 +0100220 * There are potentially 2 regions to place
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700221 * within the default SMRAM size:
222 * 1. Save state areas
223 * 2. Stub code
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700224 *
Arthur Heymanse6c35232021-02-16 13:19:18 +0100225 * The save state always lives at the top of the CPUS smbase (and the entry
226 * point is at offset 0x8000). This allows only a certain number of CPUs with
227 * staggered entry points until the save state area comes down far enough to
228 * overwrite/corrupt the entry code (stub code). Therefore, an SMM map is
229 * created to avoid this corruption, see smm_create_map() above.
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700230 * This module setup code works for the default (0x30000) SMM handler setup and the
231 * permanent SMM handler.
Arthur Heymanse6c35232021-02-16 13:19:18 +0100232 * The CPU stack is decided at runtime in the stub and is treaded as a continuous
233 * region. As this might not fit the default SMRAM region, the same region used
Arthur Heymans96451a72021-10-28 15:14:18 +0200234 * by the permanent handler can be used during relocation.
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700235 */
static int smm_module_setup_stub(const uintptr_t smbase, const size_t smm_size,
				 struct smm_loader_params *params)
{
	struct rmodule smm_stub;
	if (rmodule_parse(&_binary_smmstub_start, &smm_stub)) {
		printk(BIOS_ERR, "%s: unable to parse smm stub\n", __func__);
		return -1;
	}
	const size_t stub_size = rmodule_memory_size(&smm_stub);

	/* Some sanity check: the stub must fit below the 0x8000 entry offset. */
	if (stub_size >= SMM_ENTRY_OFFSET) {
		printk(BIOS_ERR, "%s: Stub too large\n", __func__);
		return -1;
	}

	/* Load the stub at the hardware SMI entry point, smbase + SMM_ENTRY_OFFSET. */
	const uintptr_t smm_stub_loc = smbase + SMM_ENTRY_OFFSET;
	if (rmodule_load((void *)smm_stub_loc, &smm_stub)) {
		printk(BIOS_ERR, "%s: load module failed\n", __func__);
		return -1;
	}

	/* Hand the stack layout (recorded by smm_setup_stack) and handler to the stub. */
	struct smm_stub_params *stub_params = rmodule_parameters(&smm_stub);
	stub_params->stack_top = stack_top;
	stub_params->stack_size = g_stack_size;
	stub_params->c_handler = (uintptr_t)params->handler;

	/* This runs on the BSP. All the APs are its siblings */
	struct cpu_info *info = cpu_info();
	if (!info || !info->cpu) {
		printk(BIOS_ERR, "%s: Failed to find BSP struct device\n", __func__);
		return -1;
	}
	/* Build the APIC-ID -> CPU-index map the stub uses to select a stack. */
	int i = 0;
	for (struct device *dev = info->cpu; dev; dev = dev->sibling)
		if (dev->enabled)
			stub_params->apic_id_to_cpu[i++] = dev->path.apic.initial_lapicid;

	/* Every enabled CPU must be accounted for, no more and no fewer. */
	if (i != params->num_cpus) {
		printk(BIOS_ERR, "%s: Failed to set up apic map correctly\n", __func__);
		return -1;
	}

	printk(BIOS_DEBUG, "%s: stack_top = 0x%x\n", __func__, stub_params->stack_top);
	printk(BIOS_DEBUG, "%s: per cpu stack_size = 0x%x\n", __func__,
	       stub_params->stack_size);
	printk(BIOS_DEBUG, "%s: runtime.smm_size = 0x%zx\n", __func__, smm_size);

	/* Copy the stub to each staggered per-CPU entry point, if more than one. */
	smm_stub_place_staggered_entry_points(params);

	printk(BIOS_DEBUG, "SMM Module: stub loaded at %lx. Will call %p\n", smm_stub_loc,
	       params->handler);
	return 0;
}
290
291/*
292 * smm_setup_relocation_handler assumes the callback is already loaded in
293 * memory. i.e. Another SMM module isn't chained to the stub. The other
294 * assumption is that the stub will be entered from the default SMRAM
295 * location: 0x30000 -> 0x40000.
296 */
Arthur Heymans96451a72021-10-28 15:14:18 +0200297int smm_setup_relocation_handler(struct smm_loader_params *params)
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700298{
Arthur Heymanscfd32242021-10-28 13:59:54 +0200299 uintptr_t smram = SMM_DEFAULT_BASE;
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700300 printk(BIOS_SPEW, "%s: enter\n", __func__);
301 /* There can't be more than 1 concurrent save state for the relocation
302 * handler because all CPUs default to 0x30000 as SMBASE. */
303 if (params->num_concurrent_save_states > 1)
304 return -1;
305
306 /* A handler has to be defined to call for relocation. */
307 if (params->handler == NULL)
308 return -1;
309
310 /* Since the relocation handler always uses stack, adjust the number
311 * of concurrent stack users to be CONFIG_MAX_CPUS. */
Arthur Heymans2412c812021-10-28 15:19:39 +0200312 if (params->num_cpus == 0)
313 params->num_cpus = CONFIG_MAX_CPUS;
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700314
John Zhao457c6612021-04-21 10:13:17 -0700315 printk(BIOS_SPEW, "%s: exit\n", __func__);
Arthur Heymans1efca4d2023-05-17 18:10:47 +0200316 return smm_module_setup_stub(smram, SMM_DEFAULT_SIZE, params);
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700317}
318
/* Fill in the runtime parameter block consumed by the C-based SMI handler. */
static void setup_smihandler_params(struct smm_runtime *mod_params,
				    uintptr_t smram_base,
				    uintptr_t smram_size,
				    struct smm_loader_params *loader_params)
{
	mod_params->smbase = smram_base;
	mod_params->smm_size = smram_size;
	mod_params->save_state_size = loader_params->cpu_save_state_size;
	mod_params->num_cpus = loader_params->num_cpus;
	/* ACPI GNVS pointer for the handler; 0 when GNVS is not set up. */
	mod_params->gnvs_ptr = (uint32_t)(uintptr_t)acpi_get_gnvs();
	/* Forward the CBMEM console buffer so SMM console output lands there too. */
	const struct cbmem_entry *cbmemc;
	if (CONFIG(CONSOLE_CBMEM) && (cbmemc = cbmem_entry_find(CBMEM_ID_CONSOLE))) {
		mod_params->cbmemc = cbmem_entry_start(cbmemc);
		mod_params->cbmemc_size = cbmem_entry_size(cbmemc);
	} else {
		mod_params->cbmemc = 0;
		mod_params->cbmemc_size = 0;
	}

	/* Record each CPU's save state top as laid out by smm_create_map(). */
	for (int i = 0; i < loader_params->num_cpus; i++)
		mod_params->save_state_top[i] = region_end(&cpus[i].ss);

	if (CONFIG(RUNTIME_CONFIGURABLE_SMM_LOGLEVEL))
		mod_params->smm_log_level = mainboard_set_smm_log_level();
	else
		mod_params->smm_log_level = 0;

	if (CONFIG(SMM_PCI_RESOURCE_STORE))
		smm_pci_resource_store_init(mod_params);
}
Arthur Heymansb4ba2892021-10-28 16:48:36 +0200349
Arthur Heymansaf04f3c2022-04-13 02:10:15 +0200350static void print_region(const char *name, const struct region region)
351{
Elyes Haouas1f077972022-06-15 15:36:03 +0200352 printk(BIOS_DEBUG, "%-12s [0x%zx-0x%zx]\n", name, region_offset(&region),
Arthur Heymansaf04f3c2022-04-13 02:10:15 +0200353 region_end(&region));
354}
355
Arthur Heymans1efca4d2023-05-17 18:10:47 +0200356/* STM + Handler + (Stub + Save state) * CONFIG_MAX_CPUS + stacks */
357#define SMM_REGIONS_ARRAY_SIZE (1 + 1 + CONFIG_MAX_CPUS * 2 + 1)
Arthur Heymansaf04f3c2022-04-13 02:10:15 +0200358
359static int append_and_check_region(const struct region smram,
360 const struct region region,
361 struct region *region_list,
362 const char *name)
363{
364 unsigned int region_counter = 0;
365 for (; region_counter < SMM_REGIONS_ARRAY_SIZE; region_counter++)
Nico Huberd7612e92024-01-11 18:50:50 +0100366 if (region_sz(&region_list[region_counter]) == 0)
Arthur Heymansaf04f3c2022-04-13 02:10:15 +0200367 break;
368
369 if (region_counter >= SMM_REGIONS_ARRAY_SIZE) {
370 printk(BIOS_ERR, "Array used to check regions too small\n");
371 return 1;
372 }
373
374 if (!region_is_subregion(&smram, &region)) {
375 printk(BIOS_ERR, "%s not in SMM\n", name);
376 return 1;
377 }
378
379 print_region(name, region);
380 for (unsigned int i = 0; i < region_counter; i++) {
381 if (region_overlap(&region_list[i], &region)) {
382 printk(BIOS_ERR, "%s overlaps with a previous region\n", name);
383 return 1;
384 }
385 }
386
387 region_list[region_counter] = region;
388
389 return 0;
390}
391
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700392/*
393 *The SMM module is placed within the provided region in the following
394 * manner:
395 * +-----------------+ <- smram + size
396 * | BIOS resource |
397 * | list (STM) |
398 * +-----------------+
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700399 * | smi handler |
400 * | ... |
401 * +-----------------+ <- cpu0
402 * | stub code | <- cpu1
403 * | stub code | <- cpu2
404 * | stub code | <- cpu3, etc
405 * | |
406 * | |
407 * | |
408 * | stacks |
409 * +-----------------+ <- smram start
Arthur Heymansaf04f3c2022-04-13 02:10:15 +0200410 *
411 * With CONFIG(SMM_TSEG) the stubs will be placed in the same segment as the
412 * permanent handler and the stacks.
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700413 */
int smm_load_module(const uintptr_t smram_base, const size_t smram_size,
		    struct smm_loader_params *params)
{
	/*
	 * Place in .bss to reduce stack usage.
	 * TODO: once CPU_INFO_V2 is used everywhere, use smaller stack for APs and move
	 * this back to the BSP stack.
	 */
	static struct region region_list[SMM_REGIONS_ARRAY_SIZE] = {};

	struct rmodule smi_handler;
	if (rmodule_parse(&_binary_smm_start, &smi_handler))
		return -1;

	const struct region smram = { .offset = smram_base, .size = smram_size };
	const uintptr_t smram_top = region_end(&smram);

	/* With STM, MSEG plus the BIOS resource list occupy the top of SMRAM. */
	const size_t stm_size =
		CONFIG(STM) ? CONFIG_MSEG_SIZE + CONFIG_BIOS_RESOURCE_LIST_SIZE : 0;

	if (CONFIG(STM)) {
		struct region stm = {};
		stm.offset = smram_top - stm_size;
		stm.size = stm_size;
		if (append_and_check_region(smram, stm, region_list, "STM"))
			return -1;
		printk(BIOS_DEBUG, "MSEG size 0x%x\n", CONFIG_MSEG_SIZE);
		printk(BIOS_DEBUG, "BIOS res list 0x%x\n", CONFIG_BIOS_RESOURCE_LIST_SIZE);
	}

	/* The C-based SMI handler goes directly below the STM region, aligned down. */
	const size_t handler_size = rmodule_memory_size(&smi_handler);
	const size_t handler_alignment = rmodule_load_alignment(&smi_handler);
	const uintptr_t handler_base =
		ALIGN_DOWN(smram_top - stm_size - handler_size,
			   handler_alignment);
	struct region handler = {
		.offset = handler_base,
		.size = handler_size
	};
	if (append_and_check_region(smram, handler, region_list, "HANDLER"))
		return -1;

	/* The staggered CPU entry segment sits one 64K code segment below the handler. */
	uintptr_t stub_segment_base = handler_base - SMM_CODE_SEGMENT_SIZE;

	if (!smm_create_map(stub_segment_base, params->num_concurrent_save_states, params)) {
		printk(BIOS_ERR, "%s: Error creating CPU map\n", __func__);
		return -1;
	}
	/* Validate each CPU's save state and stub region against all prior regions. */
	for (unsigned int i = 0; i < params->num_concurrent_save_states; i++) {
		printk(BIOS_DEBUG, "\nCPU %u\n", i);
		char string[13];
		snprintf(string, sizeof(string), " ss%d", i);
		if (append_and_check_region(smram, cpus[i].ss, region_list, string))
			return -1;
		snprintf(string, sizeof(string), " stub%d", i);
		if (append_and_check_region(smram, cpus[i].stub_code, region_list, string))
			return -1;
	}

	/* Per-CPU stacks occupy the bottom of SMRAM. */
	struct region stacks = {
		.offset = smram_base,
		.size = params->num_concurrent_save_states * CONFIG_SMM_MODULE_STACK_SIZE
	};
	printk(BIOS_DEBUG, "\n");
	if (append_and_check_region(smram, stacks, region_list, "stacks"))
		return -1;

	if (rmodule_load((void *)handler_base, &smi_handler))
		return -1;

	/* Publish the handler entry point and fill its runtime parameters. */
	struct smm_runtime *smihandler_params = rmodule_parameters(&smi_handler);
	params->handler = rmodule_entry(&smi_handler);
	setup_smihandler_params(smihandler_params, smram_base, smram_size, params);

	return smm_module_setup_stub(stub_segment_base, smram_size, params);
}
Arthur Heymansb4ba2892021-10-28 16:48:36 +0200489}