/* SPDX-License-Identifier: GPL-2.0-only */

#include <acpi/acpi_gnvs.h>
#include <stdint.h>
#include <string.h>
#include <rmodule.h>
#include <cpu/x86/smm.h>
#include <commonlib/helpers.h>
#include <console/console.h>
#include <security/intel/stm/SmmStm.h>

#define FXSAVE_SIZE 512
#define SMM_CODE_SEGMENT_SIZE 0x10000
/* FXSAVE area during relocation. While it may not be strictly needed, the
   SMM stub code relies on the FXSAVE area being non-zero to enable SSE
   instructions within SMM mode. */
static uint8_t fxsave_area_relocation[CONFIG_MAX_CPUS][FXSAVE_SIZE]
__attribute__((aligned(16)));

/*
 * Components that make up the SMRAM:
 * 1. Save state - the total save state memory used
 * 2. Stack - stacks for the CPUs in the SMM handler
 * 3. Stub - SMM stub code for calling into handler
 * 4. Handler - C-based SMM handler.
 *
 * The components are assumed to occupy one consecutive region.
 */

/* These parameters are used by the SMM stub code. A pointer to the params
 * is also passed to the C-based handler. */
struct smm_stub_params {
	u32 stack_size;
	u32 stack_top;
	u32 c_handler;
	u32 c_handler_arg;
	u32 fxsave_area;
	u32 fxsave_area_size;
	struct smm_runtime runtime;
} __packed;
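
/*
 * Note: the struct is __packed so field offsets are fixed; the assembly stub
 * presumably reads these fields at hard-coded offsets, and compiler-inserted
 * padding would break that layout contract.
 */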

/*
 * The stub is the entry point that sets up protected mode and stacks for each
 * CPU. It then calls into the SMM handler module. It is encoded as an rmodule.
 */
extern unsigned char _binary_smmstub_start[];

/* Per CPU minimum stack size. */
#define SMM_MINIMUM_STACK_SIZE 32

struct cpu_smm_info {
	uint8_t active;
	uintptr_t smbase;
	uintptr_t entry;
	uintptr_t ss_start;
	uintptr_t code_start;
	uintptr_t code_end;
};
static struct cpu_smm_info cpus[CONFIG_MAX_CPUS] = { 0 };

/*
 * This method creates a map of all the CPU entry points, save state locations
 * and the beginning and end of code segments for each CPU. This map is used
 * during relocation to properly align as many CPUs as can fit into the SMRAM
 * region. For more information on how SMRAM works, refer to the latest Intel
 * developer's manuals (volume 3, chapter 34). SMRAM is divided up into the
 * following regions:
 * +-----------------+ Top of SMRAM
 * |                 | <- MSEG, FXSAVE
 * +-----------------+
 * |     common      |
 * |   smi handler   | 64K
 * |                 |
 * +-----------------+
 * | CPU 0 code seg  |
 * +-----------------+
 * | CPU 1 code seg  |
 * +-----------------+
 * | CPU x code seg  |
 * +-----------------+
 * |                 |
 * |                 |
 * +-----------------+
 * |     stacks      |
 * +-----------------+ <- START of SMRAM
 *
 * The code below checks when a code segment is full and begins placing the
 * remaining CPUs in the lower segments. The entry point for each CPU is
 * smbase + 0x8000 and save state is smbase + 0x8000 + (0x8000 - state save
 * size). The save state area grows downward into the CPU's entry point.
 * Therefore, staggering too many CPUs in one 32K block will corrupt CPU0's
 * entry code as the save states move downward.
 * input : smbase of first CPU (all other CPUs
 *         will go below this address)
 * input : num_cpus in the system. The map will
 *         be created from 0 to num_cpus.
 */
static int smm_create_map(uintptr_t smbase, unsigned int num_cpus,
			const struct smm_loader_params *params)
{
	unsigned int i;
	struct rmodule smm_stub;
	unsigned int ss_size = params->per_cpu_save_state_size, stub_size;
	unsigned int smm_entry_offset = params->smm_main_entry_offset;
	unsigned int seg_count = 0, segments = 0, available;
	unsigned int cpus_in_segment = 0;
	unsigned int base = smbase;

	if (rmodule_parse(&_binary_smmstub_start, &smm_stub)) {
		printk(BIOS_ERR, "%s: unable to get SMM module size\n", __func__);
		return 0;
	}

	stub_size = rmodule_memory_size(&smm_stub);
	/* How many CPUs can fit into one 64K segment? */
	available = 0xFFFF - smm_entry_offset - ss_size - stub_size;
	if (available > 0) {
		cpus_in_segment = available / ss_size;
		/* minimum segments needed will always be 1 */
		segments = num_cpus / cpus_in_segment + 1;
		printk(BIOS_DEBUG,
			"%s: cpus allowed in one segment %d\n", __func__, cpus_in_segment);
		printk(BIOS_DEBUG,
			"%s: min # of segments needed %d\n", __func__, segments);
	} else {
		printk(BIOS_ERR, "%s: not enough space in SMM to set up all CPUs\n", __func__);
		printk(BIOS_ERR, "     save state & stub size need to be reduced\n");
		printk(BIOS_ERR, "     or increase SMRAM size\n");
		return 0;
	}
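	/*
	 * Worked example (illustrative numbers only): with
	 * smm_entry_offset = 0x8000, ss_size = 0x400 and stub_size = 0x700,
	 * available = 0xFFFF - 0x8000 - 0x400 - 0x700 = 0x74FF, so
	 * cpus_in_segment = 0x74FF / 0x400 = 29 CPUs per 64K code segment.
	 */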

	if (ARRAY_SIZE(cpus) < num_cpus) {
		printk(BIOS_ERR,
			"%s: increase MAX_CPUS in Kconfig\n", __func__);
		return 0;
	}

	if (stub_size > ss_size) {
		printk(BIOS_ERR, "%s: SMM stub size larger than save state size\n", __func__);
		printk(BIOS_ERR, "    Decrease stub size or increase the size allocated for the save state\n");
		return 0;
	}

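	/*
	 * Stagger the CPUs downward: each CPU's smbase sits ss_size below the
	 * previous one so the save states tile without overlapping, and once a
	 * segment holds cpus_in_segment CPUs the base drops a further
	 * smm_entry_offset to start a fresh 64K code segment. For illustration,
	 * with smbase = 0x30000 and ss_size = 0x400: CPU 0 is at 0x30000,
	 * CPU 1 at 0x2FC00, CPU 2 at 0x2F800, and so on.
	 */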
	for (i = 0; i < num_cpus; i++) {
		cpus[i].smbase = base;
		cpus[i].entry = base + smm_entry_offset;
		cpus[i].ss_start = cpus[i].entry + (smm_entry_offset - ss_size);
		cpus[i].code_start = cpus[i].entry;
		cpus[i].code_end = cpus[i].entry + stub_size;
		cpus[i].active = 1;
		base -= ss_size;
		seg_count++;
		if (seg_count >= cpus_in_segment) {
			base -= smm_entry_offset;
			seg_count = 0;
		}
	}

	if (CONFIG_DEFAULT_CONSOLE_LOGLEVEL >= BIOS_DEBUG) {
		seg_count = 0;
		for (i = 0; i < num_cpus; i++) {
			printk(BIOS_DEBUG, "CPU 0x%x\n", i);
			printk(BIOS_DEBUG,
				"    smbase %zx  entry %zx\n",
				cpus[i].smbase, cpus[i].entry);
			printk(BIOS_DEBUG,
				"    ss_start %zx  code_end %zx\n",
				cpus[i].ss_start, cpus[i].code_end);
			seg_count++;
			if (seg_count >= cpus_in_segment) {
				printk(BIOS_DEBUG,
					"-------------NEW CODE SEGMENT --------------\n");
				seg_count = 0;
			}
		}
	}
	return 1;
}

/*
 * This method expects the smm relocation map to be complete.
 * It does not read any HW registers; it simply uses the
 * map that was created during SMM setup.
 * input: cpu_num - cpu number which is used as an index into the
 *        map to return the smbase
 */
u32 smm_get_cpu_smbase(unsigned int cpu_num)
{
	if (cpu_num < CONFIG_MAX_CPUS) {
		if (cpus[cpu_num].active)
			return cpus[cpu_num].smbase;
	}
	return 0;
}
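
/*
 * Example use (a sketch, not taken from this file): per-CPU relocation code
 * can look up its target with smm_get_cpu_smbase(cpu_index) and program that
 * value into the save state's SMBASE field; a return value of 0 means the
 * CPU was never entered into the map.
 */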

/*
 * This method assumes that at least 1 CPU has been set up from
 * which it will place other CPUs below its smbase, ensuring that
 * save state does not clobber the first CPU's init code segment. The init
 * code, which is the smm stub code, is the same for all CPUs. They enter
 * smm, set up stacks (based on their apic id), enter protected mode
 * and then jump to the common smi handler. The stack is allocated
 * at the beginning of smram (aka tseg base, not smbase). The stack
 * pointer for each CPU is calculated by using its apic id
 * (code is in smm_stub.s)
 * Each entry point will now have the same stub code which sets up the CPU
 * stack, enters protected mode and then jumps to the smi handler. It is
 * important to enter protected mode before the jump because the "jump to
 * address" might be larger than the 20-bit address supported by real mode.
 * SMI entry right now is in real mode.
 * input: smbase - this is the smbase of the first cpu, not the smbase
 *        where tseg starts (aka smram_start). All CPUs' code segments
 *        and stacks will be below this point except for the common
 *        SMI handler, which is one segment above
 * input: num_cpus - number of cpus that need relocation including
 *        the first CPU (though its code is already loaded)
 * input: top of stack (stacks work downward by default in Intel HW)
 * output: returns 0 if the runtime smi code could not be installed. In
 *         this case SMM will not work and any SMIs generated will
 *         cause a CPU shutdown or general protection fault because
 *         the appropriate smi handling code was not installed
 */

static int smm_place_entry_code(uintptr_t smbase, unsigned int num_cpus,
				uintptr_t stack_top, const struct smm_loader_params *params)
{
	unsigned int i;
	unsigned int size;

	if (smm_create_map(smbase, num_cpus, params)) {
		/*
		 * Ensure there was enough space and the last CPU's smbase
		 * did not encroach upon the stack. Stack top is smram start
		 * + size of stack.
		 */
		if (cpus[num_cpus - 1].active) {
			if (cpus[num_cpus - 1].smbase +
				params->smm_main_entry_offset < stack_top) {
				printk(BIOS_ERR, "%s: stack encroachment\n", __func__);
				printk(BIOS_ERR, "%s: smbase %zx, stack_top %lx\n",
					__func__, cpus[num_cpus - 1].smbase, stack_top);
				return 0;
			}
		}
	} else {
		printk(BIOS_ERR, "%s: unable to place smm entry code\n", __func__);
		return 0;
	}

	printk(BIOS_INFO, "%s: smbase %zx, stack_top %lx\n",
		__func__, cpus[num_cpus - 1].smbase, stack_top);

	/* start at 1, the first CPU stub code is already there */
	size = cpus[0].code_end - cpus[0].code_start;
	for (i = 1; i < num_cpus; i++) {
		memcpy((int *)cpus[i].code_start, (int *)cpus[0].code_start, size);
		printk(BIOS_DEBUG,
			"SMM Module: placing smm entry code at %zx, cpu # 0x%x\n",
			cpus[i].code_start, i);
		printk(BIOS_DEBUG, "%s: copying from %zx to %zx 0x%x bytes\n",
			__func__, cpus[0].code_start, cpus[i].code_start, size);
	}
	return 1;
}

/*
 * Place stacks in base -> base + size region, but ensure the stacks don't
 * overlap the staggered entry points.
 */
static void *smm_stub_place_stacks(char *base, size_t size,
				struct smm_loader_params *params)
{
	size_t total_stack_size;
	char *stacks_top;

	/* If stack space is requested, assume the space lives in the lower
	 * half of SMRAM. */
	total_stack_size = params->per_cpu_stack_size *
		params->num_concurrent_stacks;
	printk(BIOS_DEBUG, "%s: cpus: %zx : stack space: needed -> %zx\n",
		__func__, params->num_concurrent_stacks,
		total_stack_size);
	printk(BIOS_DEBUG, "  available -> %zx : per_cpu_stack_size : %zx\n",
		size, params->per_cpu_stack_size);

	/* There has to be at least one stack user. */
	if (params->num_concurrent_stacks < 1)
		return NULL;

	/* Total stack size cannot fit. */
	if (total_stack_size > size)
		return NULL;

	/* Stacks extend down to SMBASE */
	stacks_top = &base[total_stack_size];
	printk(BIOS_DEBUG, "%s: exit, stack_top %p\n", __func__, stacks_top);

	return stacks_top;
}
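
/*
 * Worked example (illustrative numbers only): with 4 concurrent stacks of
 * 0x800 bytes each, total_stack_size = 0x2000, so stacks_top = base + 0x2000
 * and the per-CPU stacks grow downward from there toward base.
 */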

/*
 * Place the staggered entry points for each CPU. The entry points are
 * staggered by the per CPU SMM save state size extending down from
 * SMM_ENTRY_OFFSET.
 */
static int smm_stub_place_staggered_entry_points(char *base,
	const struct smm_loader_params *params, const struct rmodule *smm_stub)
{
	size_t stub_entry_offset;
	int rc = 1;
	stub_entry_offset = rmodule_entry_offset(smm_stub);
	/* Each CPU now has its own stub code, which enters protected mode,
	 * sets up the stack, and then jumps to the common SMI handler.
	 */
	if (params->num_concurrent_save_states > 1 || stub_entry_offset != 0) {
		rc = smm_place_entry_code((uintptr_t)base,
			params->num_concurrent_save_states,
			(uintptr_t)params->stack_top, params);
	}
	return rc;
}

/*
 * The stub setup code assumes it is completely contained within the
 * default SMRAM size (0x10000) for the default SMI handler (entry at
 * 0x30000), but no assumption should be made for the permanent SMI handler.
 * The placement of CPU entry points for the permanent handler is determined
 * by the number of CPUs in the system and the amount of SMRAM.
 * There are potentially 3 regions to place
 * within the default SMRAM size:
 * 1. Save state areas
 * 2. Stub code
 * 3. Stack areas
 *
 * The save state and smm stack are treated as contiguous for the number of
 * concurrent areas requested. The save state always lives at the top of the
 * CPU's smbase (and the entry point is at offset 0x8000). This allows only a
 * certain number of CPUs with staggered entry points until the save state
 * area comes down far enough to overwrite/corrupt the entry code (stub code).
 * Therefore, an SMM map is created to avoid this corruption; see
 * smm_create_map() above. This module setup code works for the default
 * (0x30000) SMM handler setup and the permanent SMM handler.
 */
static int smm_module_setup_stub(void *smbase, size_t smm_size,
				struct smm_loader_params *params,
				void *fxsave_area)
{
	size_t total_save_state_size;
	size_t smm_stub_size;
	size_t stub_entry_offset;
	char *smm_stub_loc;
	void *stacks_top;
	size_t size;
	char *base;
	size_t i;
	struct smm_stub_params *stub_params;
	struct rmodule smm_stub;
	unsigned int total_size_all;
	base = smbase;
	size = smm_size;

	/* The number of concurrent stacks cannot exceed CONFIG_MAX_CPUS. */
	if (params->num_concurrent_stacks > CONFIG_MAX_CPUS) {
		printk(BIOS_ERR, "%s: not enough stacks\n", __func__);
		return -1;
	}

	/* Fail if can't parse the smm stub rmodule. */
	if (rmodule_parse(&_binary_smmstub_start, &smm_stub)) {
		printk(BIOS_ERR, "%s: unable to parse smm stub\n", __func__);
		return -1;
	}

	/* Adjust remaining size to account for save state. */
	total_save_state_size = params->per_cpu_save_state_size *
		params->num_concurrent_save_states;
	if (total_save_state_size > size) {
		printk(BIOS_ERR,
			"%s: more state save space needed: need -> %zx : available -> %zx\n",
			__func__, total_save_state_size, size);
		return -1;
	}

	size -= total_save_state_size;

	/* The save state size encroached over the first SMM entry point. */
	if (size <= params->smm_main_entry_offset) {
		printk(BIOS_ERR, "%s: encroachment over SMM entry point\n", __func__);
		printk(BIOS_ERR, "%s: state save size: %zx : smm_entry_offset -> %lx\n",
			__func__, size, params->smm_main_entry_offset);
		return -1;
	}

	/* Need a minimum stack size and alignment. */
	if (params->per_cpu_stack_size <= SMM_MINIMUM_STACK_SIZE ||
	    (params->per_cpu_stack_size & 3) != 0) {
		printk(BIOS_ERR, "%s: need minimum stack size\n", __func__);
		return -1;
	}

	smm_stub_loc = NULL;
	smm_stub_size = rmodule_memory_size(&smm_stub);
	stub_entry_offset = rmodule_entry_offset(&smm_stub);

	/* Put the stub at the main entry point */
	smm_stub_loc = &base[params->smm_main_entry_offset];

	/* Stub is too big to fit. */
	if (smm_stub_size > (size - params->smm_main_entry_offset)) {
		printk(BIOS_ERR, "%s: stub is too big to fit\n", __func__);
		return -1;
	}

	/* The stacks, if requested, live in the lower half of SMRAM space
	 * for the default handler; for the relocated handler they live at
	 * the beginning of SMRAM, which is the TSEG base.
	 */
	const size_t total_stack_size = params->num_concurrent_stacks *
		params->per_cpu_stack_size;
	stacks_top = smm_stub_place_stacks((char *)params->smram_start, total_stack_size,
		params);
	if (stacks_top == NULL) {
		printk(BIOS_ERR, "%s: not enough space for stacks\n", __func__);
		printk(BIOS_ERR, "%s: ....need -> %p : available -> %zx\n", __func__,
			base, total_stack_size);
		return -1;
	}
	params->stack_top = stacks_top;
	/* Load the stub. */
	if (rmodule_load(smm_stub_loc, &smm_stub)) {
		printk(BIOS_ERR, "%s: load module failed\n", __func__);
		return -1;
	}

	if (!smm_stub_place_staggered_entry_points(base, params, &smm_stub)) {
		printk(BIOS_ERR, "%s: staggered entry points failed\n", __func__);
		return -1;
	}

	/* Set up the parameters for the stub code. */
	stub_params = rmodule_parameters(&smm_stub);
	stub_params->stack_top = (uintptr_t)stacks_top;
	stub_params->stack_size = params->per_cpu_stack_size;
	stub_params->c_handler = (uintptr_t)params->handler;
	stub_params->c_handler_arg = (uintptr_t)params->handler_arg;
	stub_params->fxsave_area = (uintptr_t)fxsave_area;
	stub_params->fxsave_area_size = FXSAVE_SIZE;
	stub_params->runtime.smbase = (uintptr_t)smbase;
	stub_params->runtime.smm_size = smm_size;
	stub_params->runtime.save_state_size = params->per_cpu_save_state_size;
	stub_params->runtime.num_cpus = params->num_concurrent_stacks;
	stub_params->runtime.gnvs_ptr = (uintptr_t)acpi_get_gnvs();

	printk(BIOS_DEBUG, "%s: stack_end = 0x%lx\n",
		__func__, stub_params->stack_top - total_stack_size);
	printk(BIOS_DEBUG,
		"%s: stack_top = 0x%x\n", __func__, stub_params->stack_top);
	printk(BIOS_DEBUG, "%s: stack_size = 0x%x\n",
		__func__, stub_params->stack_size);
	printk(BIOS_DEBUG, "%s: runtime.smbase = 0x%x\n",
		__func__, stub_params->runtime.smbase);
	printk(BIOS_DEBUG, "%s: runtime.start32_offset = 0x%x\n", __func__,
		stub_params->runtime.start32_offset);
	printk(BIOS_DEBUG, "%s: runtime.smm_size = 0x%zx\n",
		__func__, smm_size);
	printk(BIOS_DEBUG, "%s: per_cpu_save_state_size = 0x%x\n",
		__func__, stub_params->runtime.save_state_size);
	printk(BIOS_DEBUG, "%s: num_cpus = 0x%x\n", __func__,
		stub_params->runtime.num_cpus);
	printk(BIOS_DEBUG, "%s: total_save_state_size = 0x%x\n",
		__func__, (stub_params->runtime.save_state_size *
		stub_params->runtime.num_cpus));
	total_size_all = stub_params->stack_size +
		(stub_params->runtime.save_state_size *
		stub_params->runtime.num_cpus);
	printk(BIOS_DEBUG, "%s: total_size_all = 0x%x\n", __func__,
		total_size_all);

	/* Initialize the APIC id to CPU number table to be 1:1 */
	for (i = 0; i < params->num_concurrent_stacks; i++)
		stub_params->runtime.apic_id_to_cpu[i] = i;

	/* Allow the initiator to manipulate SMM stub parameters. */
	params->runtime = &stub_params->runtime;

	printk(BIOS_DEBUG, "SMM Module: stub loaded at %p. Will call %p(%p)\n",
		smm_stub_loc, params->handler, params->handler_arg);
	return 0;
}
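
/*
 * How the stub is assumed to consume these parameters (the authoritative
 * code is in smm_stub.S, not shown here): on SMI entry each CPU maps its
 * APIC ID to a CPU number n via runtime.apic_id_to_cpu, switches to the
 * stack at stack_top - n * stack_size, enables protected mode, and calls
 * c_handler with c_handler_arg.
 */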

/*
 * smm_setup_relocation_handler assumes the callback is already loaded in
 * memory, i.e. that another SMM module isn't chained to the stub. The other
 * assumption is that the stub will be entered from the default SMRAM
 * location: 0x30000 -> 0x40000.
 */
int smm_setup_relocation_handler(struct smm_loader_params *params)
{
	void *smram = (void *)(SMM_DEFAULT_BASE);
	printk(BIOS_SPEW, "%s: enter\n", __func__);
	/* There can't be more than 1 concurrent save state for the relocation
	 * handler because all CPUs default to 0x30000 as SMBASE. */
	if (params->num_concurrent_save_states > 1)
		return -1;

	/* A handler has to be defined to call for relocation. */
	if (params->handler == NULL)
		return -1;

	/* Since the relocation handler always uses stack, adjust the number
	 * of concurrent stack users to be CONFIG_MAX_CPUS. */
	if (params->num_concurrent_stacks == 0)
		params->num_concurrent_stacks = CONFIG_MAX_CPUS;

	params->smm_main_entry_offset = SMM_ENTRY_OFFSET;
	params->smram_start = SMM_DEFAULT_BASE;
	params->smram_end = SMM_DEFAULT_BASE + SMM_DEFAULT_SIZE;
	return smm_module_setup_stub(smram, SMM_DEFAULT_SIZE,
		params, fxsave_area_relocation);
}

/*
 * The SMM module is placed within the provided region in the following
 * manner:
 * +-----------------+ <- smram + size
 * |  BIOS resource  |
 * |   list (STM)    |
 * +-----------------+
 * |  fxsave area    |
 * +-----------------+
 * |  smi handler    |
 * |      ...        |
 * +-----------------+ <- cpu0
 * |    stub code    | <- cpu1
 * |    stub code    | <- cpu2
 * |    stub code    | <- cpu3, etc
 * |                 |
 * |                 |
 * |                 |
 * |     stacks      |
 * +-----------------+ <- smram start
 *
 * It should be noted that this algorithm will not work for
 * SMM_DEFAULT_SIZE SMRAM regions such as the A segment. This algorithm
 * expects a region large enough to encompass the handler and stacks
 * as well as the SMM_DEFAULT_SIZE.
 */
int smm_load_module(void *smram, size_t size, struct smm_loader_params *params)
{
	struct rmodule smm_mod;
	size_t total_stack_size;
	size_t handler_size;
	size_t module_alignment;
	size_t alignment_size;
	size_t fxsave_size;
	void *fxsave_area;
	size_t total_size = 0;
	char *base;

	if (size <= SMM_DEFAULT_SIZE)
		return -1;

	/* Load the main SMI handler at the top of SMRAM;
	 * everything else will go below.
	 */
	base = smram;
	base += size;
	params->smram_start = (uintptr_t)smram;
	params->smram_end = params->smram_start + size;
	params->smm_main_entry_offset = SMM_ENTRY_OFFSET;

	/* Fail if can't parse the smm rmodule. */
	if (rmodule_parse(&_binary_smm_start, &smm_mod))
		return -1;

	/* Fill the SMM region with a poison pattern to aid debugging */
	if (CONFIG(DEBUG_SMI))
		memset(smram, 0xcd, size);

	total_stack_size = params->per_cpu_stack_size *
		params->num_concurrent_stacks;
	total_size += total_stack_size;
	/* Stacks are the base of SMRAM */
	params->stack_top = smram + total_stack_size;

	/* MSEG starts at the top of SMRAM and works down */
	if (CONFIG(STM)) {
		base -= CONFIG_MSEG_SIZE + CONFIG_BIOS_RESOURCE_LIST_SIZE;
		total_size += CONFIG_MSEG_SIZE + CONFIG_BIOS_RESOURCE_LIST_SIZE;
	}

	/* FXSAVE goes below MSEG */
	if (CONFIG(SSE)) {
		fxsave_size = FXSAVE_SIZE * params->num_concurrent_stacks;
		fxsave_area = base - fxsave_size;
		base -= fxsave_size;
		total_size += fxsave_size;
	} else {
		fxsave_size = 0;
		fxsave_area = NULL;
	}

	handler_size = rmodule_memory_size(&smm_mod);
	base -= handler_size;
	total_size += handler_size;
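	/*
	 * Round the handler's load address up to the rmodule's required
	 * alignment; the alignment slack is counted as part of handler_size
	 * for the code segment size check further down.
	 */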
	module_alignment = rmodule_load_alignment(&smm_mod);
	alignment_size = module_alignment -
		((uintptr_t)base % module_alignment);
	if (alignment_size != module_alignment) {
		handler_size += alignment_size;
		base += alignment_size;
	}

	printk(BIOS_DEBUG,
		"%s: total_smm_space_needed %zx, available -> %zx\n",
		__func__, total_size, size);

	/* Does the required amount of memory exceed the SMRAM region size? */
	if (total_size > size) {
		printk(BIOS_ERR, "%s: need more SMRAM\n", __func__);
		return -1;
	}
	if (handler_size > SMM_CODE_SEGMENT_SIZE) {
		printk(BIOS_ERR, "%s: increase SMM_CODE_SEGMENT_SIZE: handler_size = %zx\n",
			__func__, handler_size);
		return -1;
	}

	if (rmodule_load(base, &smm_mod))
		return -1;

	params->handler = rmodule_entry(&smm_mod);
	params->handler_arg = rmodule_parameters(&smm_mod);

	printk(BIOS_DEBUG, "%s: smram_start: %p\n",
		__func__, smram);
	printk(BIOS_DEBUG, "%s: smram_end: %p\n",
		__func__, smram + size);
	printk(BIOS_DEBUG, "%s: stack_top: %p\n",
		__func__, params->stack_top);
	printk(BIOS_DEBUG, "%s: handler start %p\n",
		__func__, params->handler);
	printk(BIOS_DEBUG, "%s: handler_size %zx\n",
		__func__, handler_size);
	printk(BIOS_DEBUG, "%s: handler_arg %p\n",
		__func__, params->handler_arg);
	printk(BIOS_DEBUG, "%s: fxsave_area %p\n",
		__func__, fxsave_area);
	printk(BIOS_DEBUG, "%s: fxsave_size %zx\n",
		__func__, fxsave_size);
	printk(BIOS_DEBUG, "%s: CONFIG_MSEG_SIZE 0x%x\n",
		__func__, CONFIG_MSEG_SIZE);
	printk(BIOS_DEBUG, "%s: CONFIG_BIOS_RESOURCE_LIST_SIZE 0x%x\n",
		__func__, CONFIG_BIOS_RESOURCE_LIST_SIZE);

	/* CPU 0 smbase goes first, all other CPUs
	 * will be staggered below.
	 */
	base -= SMM_CODE_SEGMENT_SIZE;
	printk(BIOS_DEBUG, "%s: cpu0 entry: %p\n",
		__func__, base);
	params->smm_entry = (uintptr_t)base + params->smm_main_entry_offset;
	return smm_module_setup_stub(base, size, params, fxsave_area);
}
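
/*
 * Example call sequence (a sketch; the field values and surrounding driver
 * code are illustrative, not taken from this file):
 *
 *	struct smm_loader_params params = {
 *		.per_cpu_stack_size = stack_size,
 *		.num_concurrent_stacks = CONFIG_MAX_CPUS,
 *		.per_cpu_save_state_size = save_state_size,
 *		.num_concurrent_save_states = 1,
 *	};
 *	if (smm_load_module((void *)smram_base, smram_size, &params))
 *		printk(BIOS_ERR, "SMM module loading failed\n");
 *
 * followed by per-CPU SMBASE relocation using smm_get_cpu_smbase().
 */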