blob: e3e9c28b7bc0d1236716ff3013e8cb83421652fe [file] [log] [blame]
Rocky Phaguraafb7a812020-07-21 14:48:48 -07001/* SPDX-License-Identifier: GPL-2.0-only */
2
3#include <stdint.h>
4#include <string.h>
5#include <rmodule.h>
6#include <cpu/x86/smm.h>
7#include <commonlib/helpers.h>
8#include <console/console.h>
9#include <security/intel/stm/SmmStm.h>
10
#define FXSAVE_SIZE 512			/* Size of one FXSAVE area in bytes. */
#define SMM_CODE_SEGMENT_SIZE 0x10000	/* One staggered SMM code segment: 64KiB. */
/* FXSAVE area during relocation. While it may not be strictly needed the
   SMM stub code relies on the FXSAVE area being non-zero to enable SSE
   instructions within SMM mode. */
static uint8_t fxsave_area_relocation[CONFIG_MAX_CPUS][FXSAVE_SIZE]
__attribute__((aligned(16)));
18
19/*
20 * Components that make up the SMRAM:
21 * 1. Save state - the total save state memory used
22 * 2. Stack - stacks for the CPUs in the SMM handler
23 * 3. Stub - SMM stub code for calling into handler
24 * 4. Handler - C-based SMM handler.
25 *
26 * The components are assumed to consist of one consecutive region.
27 */
28
/* These parameters are used by the SMM stub code. A pointer to the params
 * is also passed to the C-based handler. */
struct smm_stub_params {
	u32 stack_size;		/* Per-CPU stack size in bytes. */
	u32 stack_top;		/* Top of the downward-growing stack region. */
	u32 c_handler;		/* Entry address of the C-based SMI handler. */
	u32 c_handler_arg;	/* Argument handed to the C handler (params->handler_arg). */
	u32 fxsave_area;	/* Base of the per-CPU FXSAVE areas; 0 when SSE is unused. */
	u32 fxsave_area_size;	/* Size of one FXSAVE area (FXSAVE_SIZE). */
	struct smm_runtime runtime;	/* Runtime data shared between loader and stub. */
} __packed;
40
41/*
42 * The stub is the entry point that sets up protected mode and stacks for each
43 * CPU. It then calls into the SMM handler module. It is encoded as an rmodule.
44 */
/* SMM stub rmodule blob, linked into this stage by the build system. */
extern unsigned char _binary_smmstub_start[];

/* Per CPU minimum stack size. */
#define SMM_MINIMUM_STACK_SIZE 32
49
/* Per-CPU relocation map entry; populated by smm_create_map(). */
struct cpu_smm_info {
	uint8_t active;		/* Non-zero once this entry has been filled in. */
	uintptr_t smbase;	/* SMBASE assigned to this CPU. */
	uintptr_t entry;	/* SMI entry point: smbase + entry offset. */
	uintptr_t ss_start;	/* Start of this CPU's save state area. */
	uintptr_t code_start;	/* Start of this CPU's stub code (== entry). */
	uintptr_t code_end;	/* End of the stub code (entry + stub size). */
};
struct cpu_smm_info cpus[CONFIG_MAX_CPUS] = { 0 };
59
60/*
61 * This method creates a map of all the CPU entry points, save state locations
62 * and the beginning and end of code segments for each CPU. This map is used
63 * during relocation to properly align as many CPUs that can fit into the SMRAM
64 * region. For more information on how SMRAM works, refer to the latest Intel
65 * developer's manuals (volume 3, chapter 34). SMRAM is divided up into the
66 * following regions:
67 * +-----------------+ Top of SMRAM
68 * | | <- MSEG, FXSAVE
69 * +-----------------+
70 * | common |
71 * | smi handler | 64K
72 * | |
73 * +-----------------+
74 * | CPU 0 code seg |
75 * +-----------------+
76 * | CPU 1 code seg |
77 * +-----------------+
78 * | CPU x code seg |
79 * +-----------------+
80 * | |
81 * | |
82 * +-----------------+
83 * | stacks |
84 * +-----------------+ <- START of SMRAM
85 *
86 * The code below checks when a code segment is full and begins placing the remainder
87 * CPUs in the lower segments. The entry point for each CPU is smbase + 0x8000
88 * and save state is smbase + 0x8000 + (0x8000 - state save size). Save state
89 * area grows downward into the CPUs entry point. Therefore staggering too many
90 * CPUs in one 32K block will corrupt CPU0's entry code as the save states move
91 * downward.
92 * input : smbase of first CPU (all other CPUs
93 * will go below this address)
94 * input : num_cpus in the system. The map will
95 * be created from 0 to num_cpus.
96 */
97static int smm_create_map(uintptr_t smbase, unsigned int num_cpus,
98 const struct smm_loader_params *params)
99{
100 unsigned int i;
101 struct rmodule smm_stub;
102 unsigned int ss_size = params->per_cpu_save_state_size, stub_size;
103 unsigned int smm_entry_offset = params->smm_main_entry_offset;
104 unsigned int seg_count = 0, segments = 0, available;
105 unsigned int cpus_in_segment = 0;
106 unsigned int base = smbase;
107
108 if (rmodule_parse(&_binary_smmstub_start, &smm_stub)) {
109 printk(BIOS_ERR, "%s: unable to get SMM module size\n", __func__);
110 return 0;
111 }
112
113 stub_size = rmodule_memory_size(&smm_stub);
114 /* How many CPUs can fit into one 64K segment? */
115 available = 0xFFFF - smm_entry_offset - ss_size - stub_size;
116 if (available > 0) {
117 cpus_in_segment = available / ss_size;
118 /* minimum segments needed will always be 1 */
119 segments = num_cpus / cpus_in_segment + 1;
120 printk(BIOS_DEBUG,
121 "%s: cpus allowed in one segment %d\n", __func__, cpus_in_segment);
122 printk(BIOS_DEBUG,
123 "%s: min # of segments needed %d\n", __func__, segments);
124 } else {
125 printk(BIOS_ERR, "%s: not enough space in SMM to setup all CPUs\n", __func__);
126 printk(BIOS_ERR, " save state & stub size need to be reduced\n");
127 printk(BIOS_ERR, " or increase SMRAM size\n");
128 return 0;
129 }
130
131 if (sizeof(cpus) / sizeof(struct cpu_smm_info) < num_cpus) {
132 printk(BIOS_ERR,
133 "%s: increase MAX_CPUS in Kconfig\n", __func__);
134 return 0;
135 }
136
Arthur Heymansfd8619e2020-11-01 12:37:40 +0100137 if (stub_size > ss_size) {
138 printk(BIOS_ERR, "%s: Save state larger than SMM stub size\n", __func__);
139 printk(BIOS_ERR, " Decrease stub size or increase the size allocated for the save state\n");
140 return 0;
141 }
142
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700143 for (i = 0; i < num_cpus; i++) {
144 cpus[i].smbase = base;
145 cpus[i].entry = base + smm_entry_offset;
146 cpus[i].ss_start = cpus[i].entry + (smm_entry_offset - ss_size);
147 cpus[i].code_start = cpus[i].entry;
148 cpus[i].code_end = cpus[i].entry + stub_size;
149 cpus[i].active = 1;
150 base -= ss_size;
151 seg_count++;
152 if (seg_count >= cpus_in_segment) {
153 base -= smm_entry_offset;
154 seg_count = 0;
155 }
156 }
157
158 if (CONFIG_DEFAULT_CONSOLE_LOGLEVEL >= BIOS_DEBUG) {
159 seg_count = 0;
160 for (i = 0; i < num_cpus; i++) {
161 printk(BIOS_DEBUG, "CPU 0x%x\n", i);
162 printk(BIOS_DEBUG,
163 " smbase %zx entry %zx\n",
164 cpus[i].smbase, cpus[i].entry);
165 printk(BIOS_DEBUG,
166 " ss_start %zx code_end %zx\n",
167 cpus[i].ss_start, cpus[i].code_end);
168 seg_count++;
169 if (seg_count >= cpus_in_segment) {
170 printk(BIOS_DEBUG,
171 "-------------NEW CODE SEGMENT --------------\n");
172 seg_count = 0;
173 }
174 }
175 }
176 return 1;
177}
178
179/*
180 * This method expects the smm relocation map to be complete.
181 * This method does not read any HW registers, it simply uses a
182 * map that was created during SMM setup.
183 * input: cpu_num - cpu number which is used as an index into the
184 * map to return the smbase
185 */
186u32 smm_get_cpu_smbase(unsigned int cpu_num)
187{
188 if (cpu_num < CONFIG_MAX_CPUS) {
189 if (cpus[cpu_num].active)
190 return cpus[cpu_num].smbase;
191 }
192 return 0;
193}
194
195/*
196 * This method assumes that at least 1 CPU has been set up from
197 * which it will place other CPUs below its smbase ensuring that
198 * save state does not clobber the first CPUs init code segment. The init
199 * code which is the smm stub code is the same for all CPUs. They enter
200 * smm, setup stacks (based on their apic id), enter protected mode
201 * and then jump to the common smi handler. The stack is allocated
202 * at the beginning of smram (aka tseg base, not smbase). The stack
203 * pointer for each CPU is calculated by using its apic id
204 * (code is in smm_stub.s)
205 * Each entry point will now have the same stub code which, sets up the CPU
206 * stack, enters protected mode and then jumps to the smi handler. It is
207 * important to enter protected mode before the jump because the "jump to
208 * address" might be larger than the 20bit address supported by real mode.
209 * SMI entry right now is in real mode.
210 * input: smbase - this is the smbase of the first cpu not the smbase
211 * where tseg starts (aka smram_start). All CPUs code segment
212 * and stack will be below this point except for the common
213 * SMI handler which is one segment above
214 * input: num_cpus - number of cpus that need relocation including
215 * the first CPU (though its code is already loaded)
216 * input: top of stack (stacks work downward by default in Intel HW)
217 * output: return -1, if runtime smi code could not be installed. In
218 * this case SMM will not work and any SMI's generated will
219 * cause a CPU shutdown or general protection fault because
220 * the appropriate smi handling code was not installed
221 */
222
223static int smm_place_entry_code(uintptr_t smbase, unsigned int num_cpus,
Arthur Heymans9ddd9002020-12-03 11:02:42 +0100224 uintptr_t stack_top, const struct smm_loader_params *params)
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700225{
226 unsigned int i;
227 unsigned int size;
228 if (smm_create_map(smbase, num_cpus, params)) {
229 /*
230 * Ensure there was enough space and the last CPUs smbase
231 * did not encroach upon the stack. Stack top is smram start
232 * + size of stack.
233 */
234 if (cpus[num_cpus].active) {
235 if (cpus[num_cpus - 1].smbase +
236 params->smm_main_entry_offset < stack_top) {
237 printk(BIOS_ERR, "%s: stack encroachment\n", __func__);
Arthur Heymans9ddd9002020-12-03 11:02:42 +0100238 printk(BIOS_ERR, "%s: smbase %zx, stack_top %lx\n",
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700239 __func__, cpus[num_cpus].smbase, stack_top);
240 return 0;
241 }
242 }
243 } else {
244 printk(BIOS_ERR, "%s: unable to place smm entry code\n", __func__);
245 return 0;
246 }
247
Arthur Heymans9ddd9002020-12-03 11:02:42 +0100248 printk(BIOS_INFO, "%s: smbase %zx, stack_top %lx\n",
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700249 __func__, cpus[num_cpus-1].smbase, stack_top);
250
251 /* start at 1, the first CPU stub code is already there */
252 size = cpus[0].code_end - cpus[0].code_start;
253 for (i = 1; i < num_cpus; i++) {
254 memcpy((int *)cpus[i].code_start, (int *)cpus[0].code_start, size);
255 printk(BIOS_DEBUG,
256 "SMM Module: placing smm entry code at %zx, cpu # 0x%x\n",
257 cpus[i].code_start, i);
258 printk(BIOS_DEBUG, "%s: copying from %zx to %zx 0x%x bytes\n",
259 __func__, cpus[0].code_start, cpus[i].code_start, size);
260 }
261 return 1;
262}
263
264/*
265 * Place stacks in base -> base + size region, but ensure the stacks don't
266 * overlap the staggered entry points.
267 */
268static void *smm_stub_place_stacks(char *base, size_t size,
269 struct smm_loader_params *params)
270{
271 size_t total_stack_size;
272 char *stacks_top;
273
274 /* If stack space is requested assume the space lives in the lower
275 * half of SMRAM. */
276 total_stack_size = params->per_cpu_stack_size *
277 params->num_concurrent_stacks;
278 printk(BIOS_DEBUG, "%s: cpus: %zx : stack space: needed -> %zx\n",
279 __func__, params->num_concurrent_stacks,
280 total_stack_size);
281 printk(BIOS_DEBUG, " available -> %zx : per_cpu_stack_size : %zx\n",
282 size, params->per_cpu_stack_size);
283
284 /* There has to be at least one stack user. */
285 if (params->num_concurrent_stacks < 1)
286 return NULL;
287
288 /* Total stack size cannot fit. */
289 if (total_stack_size > size)
290 return NULL;
291
292 /* Stacks extend down to SMBASE */
293 stacks_top = &base[total_stack_size];
294 printk(BIOS_DEBUG, "%s: exit, stack_top %p\n", __func__, stacks_top);
295
296 return stacks_top;
297}
298
299/*
300 * Place the staggered entry points for each CPU. The entry points are
301 * staggered by the per CPU SMM save state size extending down from
302 * SMM_ENTRY_OFFSET.
303 */
304static int smm_stub_place_staggered_entry_points(char *base,
305 const struct smm_loader_params *params, const struct rmodule *smm_stub)
306{
307 size_t stub_entry_offset;
308 int rc = 1;
309 stub_entry_offset = rmodule_entry_offset(smm_stub);
310 /* Each CPU now has its own stub code, which enters protected mode,
311 * sets up the stack, and then jumps to common SMI handler
312 */
313 if (params->num_concurrent_save_states > 1 || stub_entry_offset != 0) {
Arthur Heymans9ddd9002020-12-03 11:02:42 +0100314 rc = smm_place_entry_code((uintptr_t)base,
315 params->num_concurrent_save_states,
316 (uintptr_t)params->stack_top, params);
Rocky Phaguraafb7a812020-07-21 14:48:48 -0700317 }
318 return rc;
319}
320
321/*
322 * The stub setup code assumes it is completely contained within the
323 * default SMRAM size (0x10000) for the default SMI handler (entry at
324 * 0x30000), but no assumption should be made for the permanent SMI handler.
325 * The placement of CPU entry points for permanent handler are determined
326 * by the number of CPUs in the system and the amount of SMRAM.
327 * There are potentially 3 regions to place
328 * within the default SMRAM size:
329 * 1. Save state areas
330 * 2. Stub code
331 * 3. Stack areas
332 *
333 * The save state and smm stack are treated as contiguous for the number of
 * concurrent areas requested. The save state always lives at the top of
 * the CPU's smbase (and the entry point is at offset 0x8000). This allows only a certain
336 * number of CPUs with staggered entry points until the save state area comes
337 * down far enough to overwrite/corrupt the entry code (stub code). Therefore,
338 * an SMM map is created to avoid this corruption, see smm_create_map() above.
339 * This module setup code works for the default (0x30000) SMM handler setup and the
340 * permanent SMM handler.
341 */
/*
 * Load the SMM stub rmodule at the main entry offset inside [smbase,
 * smbase + smm_size), place the per-CPU stacks and staggered entry points,
 * and fill in the stub's parameter block (stack, handler, FXSAVE, runtime
 * info). Works for both the default (0x30000) and the permanent handler.
 * Returns 0 on success, -1 on any sizing/parse/load failure.
 */
static int smm_module_setup_stub(void *smbase, size_t smm_size,
				 struct smm_loader_params *params,
				 void *fxsave_area)
{
	size_t total_save_state_size;
	size_t smm_stub_size;
	size_t stub_entry_offset;
	char *smm_stub_loc;
	void *stacks_top;
	size_t size;
	char *base;
	size_t i;
	struct smm_stub_params *stub_params;
	struct rmodule smm_stub;
	unsigned int total_size_all;
	base = smbase;
	size = smm_size;

	/* The number of concurrent stacks cannot exceed CONFIG_MAX_CPUS. */
	if (params->num_concurrent_stacks > CONFIG_MAX_CPUS) {
		printk(BIOS_ERR, "%s: not enough stacks\n", __func__);
		return -1;
	}

	/* Fail if can't parse the smm stub rmodule. */
	if (rmodule_parse(&_binary_smmstub_start, &smm_stub)) {
		printk(BIOS_ERR, "%s: unable to parse smm stub\n", __func__);
		return -1;
	}

	/* Adjust remaining size to account for save state. */
	total_save_state_size = params->per_cpu_save_state_size *
				params->num_concurrent_save_states;
	if (total_save_state_size > size) {
		printk(BIOS_ERR,
		       "%s: more state save space needed:need -> %zx:available->%zx\n",
		       __func__, total_save_state_size, size);
		return -1;
	}

	size -= total_save_state_size;

	/* The save state size encroached over the first SMM entry point. */
	if (size <= params->smm_main_entry_offset) {
		printk(BIOS_ERR, "%s: encroachment over SMM entry point\n", __func__);
		printk(BIOS_ERR, "%s: state save size: %zx : smm_entry_offset -> %lx\n",
		       __func__, size, params->smm_main_entry_offset);
		return -1;
	}

	/* Need a minimum stack size and alignment (stack must be 4-byte aligned). */
	if (params->per_cpu_stack_size <= SMM_MINIMUM_STACK_SIZE ||
	    (params->per_cpu_stack_size & 3) != 0) {
		printk(BIOS_ERR, "%s: need minimum stack size\n", __func__);
		return -1;
	}

	smm_stub_loc = NULL;
	smm_stub_size = rmodule_memory_size(&smm_stub);
	/* NOTE(review): stub_entry_offset is assigned but never read in this
	 * function; the staggered-entry helper queries it on its own. */
	stub_entry_offset = rmodule_entry_offset(&smm_stub);

	/* Put the stub at the main entry point */
	smm_stub_loc = &base[params->smm_main_entry_offset];

	/* Stub is too big to fit. */
	if (smm_stub_size > (size - params->smm_main_entry_offset)) {
		printk(BIOS_ERR, "%s: stub is too big to fit\n", __func__);
		return -1;
	}

	/* The stacks, if requested, live in the lower half of SMRAM space
	 * for default handler, but for relocated handler it lives at the beginning
	 * of SMRAM which is TSEG base
	 */
	const size_t total_stack_size = params->num_concurrent_stacks *
					params->per_cpu_stack_size;
	stacks_top = smm_stub_place_stacks((char *)params->smram_start, total_stack_size,
					   params);
	if (stacks_top == NULL) {
		printk(BIOS_ERR, "%s: not enough space for stacks\n", __func__);
		printk(BIOS_ERR, "%s: ....need -> %p : available -> %zx\n", __func__,
		       base, total_stack_size);
		return -1;
	}
	params->stack_top = stacks_top;
	/* Load the stub. */
	if (rmodule_load(smm_stub_loc, &smm_stub)) {
		printk(BIOS_ERR, "%s: load module failed\n", __func__);
		return -1;
	}

	/* Copy the stub to every CPU's staggered entry point. */
	if (!smm_stub_place_staggered_entry_points(base, params, &smm_stub)) {
		printk(BIOS_ERR, "%s: staggered entry points failed\n", __func__);
		return -1;
	}

	/* Setup the parameters for the stub code. */
	stub_params = rmodule_parameters(&smm_stub);
	stub_params->stack_top = (uintptr_t)stacks_top;
	stub_params->stack_size = params->per_cpu_stack_size;
	stub_params->c_handler = (uintptr_t)params->handler;
	stub_params->c_handler_arg = (uintptr_t)params->handler_arg;
	stub_params->fxsave_area = (uintptr_t)fxsave_area;
	stub_params->fxsave_area_size = FXSAVE_SIZE;
	stub_params->runtime.smbase = (uintptr_t)smbase;
	stub_params->runtime.smm_size = smm_size;
	stub_params->runtime.save_state_size = params->per_cpu_save_state_size;
	stub_params->runtime.num_cpus = params->num_concurrent_stacks;

	printk(BIOS_DEBUG, "%s: stack_end = 0x%lx\n",
	       __func__, stub_params->stack_top - total_stack_size);
	printk(BIOS_DEBUG,
	       "%s: stack_top = 0x%x\n", __func__, stub_params->stack_top);
	printk(BIOS_DEBUG, "%s: stack_size = 0x%x\n",
	       __func__, stub_params->stack_size);
	printk(BIOS_DEBUG, "%s: runtime.smbase = 0x%x\n",
	       __func__, stub_params->runtime.smbase);
	printk(BIOS_DEBUG, "%s: runtime.start32_offset = 0x%x\n", __func__,
	       stub_params->runtime.start32_offset);
	printk(BIOS_DEBUG, "%s: runtime.smm_size = 0x%zx\n",
	       __func__, smm_size);
	printk(BIOS_DEBUG, "%s: per_cpu_save_state_size = 0x%x\n",
	       __func__, stub_params->runtime.save_state_size);
	printk(BIOS_DEBUG, "%s: num_cpus = 0x%x\n", __func__,
	       stub_params->runtime.num_cpus);
	printk(BIOS_DEBUG, "%s: total_save_state_size = 0x%x\n",
	       __func__, (stub_params->runtime.save_state_size *
	       stub_params->runtime.num_cpus));
	total_size_all = stub_params->stack_size +
			 (stub_params->runtime.save_state_size *
			 stub_params->runtime.num_cpus);
	printk(BIOS_DEBUG, "%s: total_size_all = 0x%x\n", __func__,
	       total_size_all);

	/* Initialize the APIC id to CPU number table to be 1:1 */
	for (i = 0; i < params->num_concurrent_stacks; i++)
		stub_params->runtime.apic_id_to_cpu[i] = i;

	/* Allow the initiator to manipulate SMM stub parameters. */
	params->runtime = &stub_params->runtime;

	printk(BIOS_DEBUG, "SMM Module: stub loaded at %p. Will call %p(%p)\n",
	       smm_stub_loc, params->handler, params->handler_arg);
	return 0;
}
487
488/*
489 * smm_setup_relocation_handler assumes the callback is already loaded in
490 * memory. i.e. Another SMM module isn't chained to the stub. The other
491 * assumption is that the stub will be entered from the default SMRAM
492 * location: 0x30000 -> 0x40000.
493 */
494int smm_setup_relocation_handler(struct smm_loader_params *params)
495{
496 void *smram = (void *)(SMM_DEFAULT_BASE);
497 printk(BIOS_SPEW, "%s: enter\n", __func__);
498 /* There can't be more than 1 concurrent save state for the relocation
499 * handler because all CPUs default to 0x30000 as SMBASE. */
500 if (params->num_concurrent_save_states > 1)
501 return -1;
502
503 /* A handler has to be defined to call for relocation. */
504 if (params->handler == NULL)
505 return -1;
506
507 /* Since the relocation handler always uses stack, adjust the number
508 * of concurrent stack users to be CONFIG_MAX_CPUS. */
509 if (params->num_concurrent_stacks == 0)
510 params->num_concurrent_stacks = CONFIG_MAX_CPUS;
511
512 params->smm_main_entry_offset = SMM_ENTRY_OFFSET;
513 params->smram_start = SMM_DEFAULT_BASE;
514 params->smram_end = SMM_DEFAULT_BASE + SMM_DEFAULT_SIZE;
515 return smm_module_setup_stub(smram, SMM_DEFAULT_SIZE,
516 params, fxsave_area_relocation);
517 printk(BIOS_SPEW, "%s: exit\n", __func__);
518}
519
520/*
521 *The SMM module is placed within the provided region in the following
522 * manner:
523 * +-----------------+ <- smram + size
524 * | BIOS resource |
525 * | list (STM) |
526 * +-----------------+
527 * | fxsave area |
528 * +-----------------+
529 * | smi handler |
530 * | ... |
531 * +-----------------+ <- cpu0
532 * | stub code | <- cpu1
533 * | stub code | <- cpu2
534 * | stub code | <- cpu3, etc
535 * | |
536 * | |
537 * | |
538 * | stacks |
539 * +-----------------+ <- smram start
540
541 * It should be noted that this algorithm will not work for
542 * SMM_DEFAULT_SIZE SMRAM regions such as the A segment. This algorithm
543 * expects a region large enough to encompass the handler and stacks
544 * as well as the SMM_DEFAULT_SIZE.
545 */
/*
 * Load the permanent SMM handler and supporting pieces into the given
 * SMRAM region (layout diagram above): the handler goes at the top of
 * SMRAM and everything else — MSEG/BIOS resource list (STM), FXSAVE
 * areas (SSE), stub code and stacks — is placed working downward.
 * Returns 0 on success, -1 on failure. Requires size > SMM_DEFAULT_SIZE.
 */
int smm_load_module(void *smram, size_t size, struct smm_loader_params *params)
{
	struct rmodule smm_mod;
	size_t total_stack_size;
	size_t handler_size;
	size_t module_alignment;
	size_t alignment_size;
	size_t fxsave_size;
	void *fxsave_area;
	size_t total_size = 0;
	char *base;

	/* This algorithm does not work for regions as small as the default
	 * 64KiB SMM window (e.g. the A segment). */
	if (size <= SMM_DEFAULT_SIZE)
		return -1;

	/* Load main SMI handler at the top of SMRAM
	 * everything else will go below
	 */
	base = smram;
	base += size;
	params->smram_start = (uintptr_t)smram;
	params->smram_end = params->smram_start + size;
	params->smm_main_entry_offset = SMM_ENTRY_OFFSET;

	/* Fail if can't parse the smm rmodule. */
	if (rmodule_parse(&_binary_smm_start, &smm_mod))
		return -1;

	/* Clear SMM region (poison pattern 0xcd aids debugging). */
	if (CONFIG(DEBUG_SMI))
		memset(smram, 0xcd, size);

	total_stack_size = params->per_cpu_stack_size *
			   params->num_concurrent_stacks;
	total_size += total_stack_size;
	/* Stacks are the base of SMRAM */
	params->stack_top = smram + total_stack_size;

	/* MSEG starts at the top of SMRAM and works down */
	if (CONFIG(STM)) {
		base -= CONFIG_MSEG_SIZE + CONFIG_BIOS_RESOURCE_LIST_SIZE;
		total_size += CONFIG_MSEG_SIZE + CONFIG_BIOS_RESOURCE_LIST_SIZE;
	}

	/* FXSAVE goes below MSEG */
	if (CONFIG(SSE)) {
		fxsave_size = FXSAVE_SIZE * params->num_concurrent_stacks;
		fxsave_area = base - fxsave_size;
		base -= fxsave_size;
		total_size += fxsave_size;
	} else {
		fxsave_size = 0;
		fxsave_area = NULL;
	}

	handler_size = rmodule_memory_size(&smm_mod);
	base -= handler_size;
	total_size += handler_size;
	module_alignment = rmodule_load_alignment(&smm_mod);
	alignment_size = module_alignment -
			 ((uintptr_t)base % module_alignment);
	if (alignment_size != module_alignment) {
		/* Bump the load address up to the next aligned boundary. */
		handler_size += alignment_size;
		base += alignment_size;
	}

	printk(BIOS_DEBUG,
	       "%s: total_smm_space_needed %zx, available -> %zx\n",
	       __func__, total_size, size);

	/* Does the required amount of memory exceed the SMRAM region size? */
	if (total_size > size) {
		printk(BIOS_ERR, "%s: need more SMRAM\n", __func__);
		return -1;
	}
	if (handler_size > SMM_CODE_SEGMENT_SIZE) {
		printk(BIOS_ERR, "%s: increase SMM_CODE_SEGMENT_SIZE: handler_size = %zx\n",
		       __func__, handler_size);
		return -1;
	}

	if (rmodule_load(base, &smm_mod))
		return -1;

	params->handler = rmodule_entry(&smm_mod);
	params->handler_arg = rmodule_parameters(&smm_mod);

	printk(BIOS_DEBUG, "%s: smram_start: 0x%p\n",
	       __func__, smram);
	printk(BIOS_DEBUG, "%s: smram_end: %p\n",
	       __func__, smram + size);
	printk(BIOS_DEBUG, "%s: stack_top: %p\n",
	       __func__, params->stack_top);
	printk(BIOS_DEBUG, "%s: handler start %p\n",
	       __func__, params->handler);
	printk(BIOS_DEBUG, "%s: handler_size %zx\n",
	       __func__, handler_size);
	printk(BIOS_DEBUG, "%s: handler_arg %p\n",
	       __func__, params->handler_arg);
	printk(BIOS_DEBUG, "%s: fxsave_area %p\n",
	       __func__, fxsave_area);
	printk(BIOS_DEBUG, "%s: fxsave_size %zx\n",
	       __func__, fxsave_size);
	printk(BIOS_DEBUG, "%s: CONFIG_MSEG_SIZE 0x%x\n",
	       __func__, CONFIG_MSEG_SIZE);
	printk(BIOS_DEBUG, "%s: CONFIG_BIOS_RESOURCE_LIST_SIZE 0x%x\n",
	       __func__, CONFIG_BIOS_RESOURCE_LIST_SIZE);

	/* CPU 0 smbase goes first, all other CPUs
	 * will be staggered below
	 */
	base -= SMM_CODE_SEGMENT_SIZE;
	printk(BIOS_DEBUG, "%s: cpu0 entry: %p\n",
	       __func__, base);
	params->smm_entry = (uintptr_t)base + params->smm_main_entry_offset;
	return smm_module_setup_stub(base, size, params, fxsave_area);
}