/* SPDX-License-Identifier: GPL-2.0-only */

#include <stdint.h>
#include <string.h>
#include <rmodule.h>
#include <cpu/x86/smm.h>
#include <commonlib/helpers.h>
#include <console/console.h>
#include <security/intel/stm/SmmStm.h>

#define FXSAVE_SIZE 512
#define SMM_CODE_SEGMENT_SIZE 0x10000
/* FXSAVE area during relocation. While it may not be strictly needed, the
   SMM stub code relies on the FXSAVE area being non-zero to enable SSE
   instructions within SMM mode. */
static uint8_t fxsave_area_relocation[CONFIG_MAX_CPUS][FXSAVE_SIZE]
__attribute__((aligned(16)));

/*
 * Components that make up the SMRAM:
 * 1. Save state - the total save state memory used
 * 2. Stack - stacks for the CPUs in the SMM handler
 * 3. Stub - SMM stub code for calling into handler
 * 4. Handler - C-based SMM handler.
 *
 * The components are assumed to occupy one contiguous region.
 */

/* These parameters are used by the SMM stub code. A pointer to the params
 * is also passed to the C-based handler. */
struct smm_stub_params {
	u32 stack_size;
	u32 stack_top;
	u32 c_handler;
	u32 c_handler_arg;
	u32 fxsave_area;
	u32 fxsave_area_size;
	struct smm_runtime runtime;
} __packed;

/*
 * The stub is the entry point that sets up protected mode and stacks for each
 * CPU. It then calls into the SMM handler module. It is encoded as an rmodule.
 */
extern unsigned char _binary_smmstub_start[];

/* Per CPU minimum stack size. */
#define SMM_MINIMUM_STACK_SIZE 32

struct cpu_smm_info {
	uint8_t active;
	uintptr_t smbase;
	uintptr_t entry;
	uintptr_t ss_start;
	uintptr_t code_start;
	uintptr_t code_end;
};
struct cpu_smm_info cpus[CONFIG_MAX_CPUS] = { 0 };

/*
 * This method creates a map of all the CPU entry points, save state locations
 * and the beginning and end of code segments for each CPU. This map is used
 * during relocation to properly align as many CPUs as can fit into the SMRAM
 * region. For more information on how SMRAM works, refer to the latest Intel
 * developer's manuals (volume 3, chapter 34). SMRAM is divided up into the
 * following regions:
 * +-----------------+ Top of SMRAM
 * |                 | <- MSEG, FXSAVE
 * +-----------------+
 * |    common       |
 * |  smi handler    | 64K
 * |                 |
 * +-----------------+
 * | CPU 0 code seg  |
 * +-----------------+
 * | CPU 1 code seg  |
 * +-----------------+
 * | CPU x code seg  |
 * +-----------------+
 * |                 |
 * |                 |
 * +-----------------+
 * |     stacks      |
 * +-----------------+ <- START of SMRAM
 *
 * The code below checks when a code segment is full and begins placing the
 * remaining CPUs in the lower segments. The entry point for each CPU is
 * smbase + 0x8000 and save state is smbase + 0x8000 + (0x8000 - state save
 * size). The save state area grows downward into the CPU's entry point.
 * Therefore staggering too many CPUs in one 32K block will corrupt CPU0's
 * entry code as the save states move downward.
 * input : smbase of first CPU (all other CPUs will go below this address)
 * input : num_cpus in the system. The map will be created from 0 to num_cpus.
 */
static int smm_create_map(uintptr_t smbase, unsigned int num_cpus,
			  const struct smm_loader_params *params)
{
	unsigned int i;
	struct rmodule smm_stub;
	unsigned int ss_size = params->per_cpu_save_state_size, stub_size;
	unsigned int smm_entry_offset = params->smm_main_entry_offset;
	unsigned int seg_count = 0, segments = 0, available = 0;
	unsigned int cpus_in_segment = 0;
	uintptr_t base = smbase;

	if (rmodule_parse(&_binary_smmstub_start, &smm_stub)) {
		printk(BIOS_ERR, "%s: unable to get SMM module size\n", __func__);
		return 0;
	}

	stub_size = rmodule_memory_size(&smm_stub);
	/* How many CPUs can fit into one 64K segment? Check the sum before
	   subtracting so the unsigned math cannot wrap around. */
	if (smm_entry_offset + ss_size + stub_size < 0xFFFF) {
		available = 0xFFFF - smm_entry_offset - ss_size - stub_size;
		cpus_in_segment = available / ss_size;
	}

	if (cpus_in_segment > 0) {
		/* minimum segments needed will always be 1 */
		segments = num_cpus / cpus_in_segment + 1;
		printk(BIOS_DEBUG,
		       "%s: cpus allowed in one segment %d\n", __func__, cpus_in_segment);
		printk(BIOS_DEBUG,
		       "%s: min # of segments needed %d\n", __func__, segments);
	} else {
		printk(BIOS_ERR, "%s: not enough space in SMM to setup all CPUs\n", __func__);
		printk(BIOS_ERR, " save state & stub size need to be reduced\n");
		printk(BIOS_ERR, " or increase SMRAM size\n");
		return 0;
	}

	if (ARRAY_SIZE(cpus) < num_cpus) {
		printk(BIOS_ERR,
		       "%s: increase MAX_CPUS in Kconfig\n", __func__);
		return 0;
	}
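
	/*
	 * Walk the CPUs: each CPU's smbase drops one save state size below
	 * the previous CPU's, and when a code segment fills up the base
	 * skips down a full entry offset to the next segment.
	 */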
	for (i = 0; i < num_cpus; i++) {
		cpus[i].smbase = base;
		cpus[i].entry = base + smm_entry_offset;
		cpus[i].ss_start = cpus[i].entry + (smm_entry_offset - ss_size);
		cpus[i].code_start = cpus[i].entry;
		cpus[i].code_end = cpus[i].entry + stub_size;
		cpus[i].active = 1;
		base -= ss_size;
		seg_count++;
		if (seg_count >= cpus_in_segment) {
			base -= smm_entry_offset;
			seg_count = 0;
		}
	}

	if (CONFIG_DEFAULT_CONSOLE_LOGLEVEL >= BIOS_DEBUG) {
		seg_count = 0;
		for (i = 0; i < num_cpus; i++) {
			printk(BIOS_DEBUG, "CPU 0x%x\n", i);
			printk(BIOS_DEBUG,
			       " smbase %zx entry %zx\n",
			       cpus[i].smbase, cpus[i].entry);
			printk(BIOS_DEBUG,
			       " ss_start %zx code_end %zx\n",
			       cpus[i].ss_start, cpus[i].code_end);
			seg_count++;
			if (seg_count >= cpus_in_segment) {
				printk(BIOS_DEBUG,
				       "-------------NEW CODE SEGMENT --------------\n");
				seg_count = 0;
			}
		}
	}
	return 1;
}
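
/*
 * Worked example of the staggering math above, with illustrative numbers
 * (not taken from any particular platform): given a save state of 0x400
 * bytes, a 0x200 byte stub and the usual 0x8000 entry offset, one 64KiB
 * segment leaves 0xFFFF - 0x8000 - 0x400 - 0x200 = 0x79FF bytes of stagger
 * room, so 0x79FF / 0x400 = 30 CPUs fit per code segment before the lowest
 * save state would reach the stub code.
 */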

/*
 * This method expects the SMM relocation map to be complete.
 * This method does not read any HW registers; it simply uses a
 * map that was created during SMM setup.
 * input: cpu_num - CPU number which is used as an index into the
 *        map to return the smbase
 */
u32 smm_get_cpu_smbase(unsigned int cpu_num)
{
	if (cpu_num < CONFIG_MAX_CPUS) {
		if (cpus[cpu_num].active)
			return cpus[cpu_num].smbase;
	}
	return 0;
}
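
/*
 * Usage sketch (hypothetical caller): a per-CPU relocation routine could
 * use the map to find where its SMBASE should move, e.g.:
 *
 *	uintptr_t dest = smm_get_cpu_smbase(cpu_index);
 *	if (!dest)
 *		return;	// no map entry for this CPU
 *	// write dest into the save state's SMBASE field
 */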

/*
 * This method assumes that at least 1 CPU has been set up from
 * which it will place other CPUs below its smbase, ensuring that
 * save state does not clobber the first CPU's init code segment. The init
 * code, which is the SMM stub code, is the same for all CPUs. They enter
 * SMM, set up stacks (based on their APIC id), enter protected mode
 * and then jump to the common SMI handler. The stack is allocated
 * at the beginning of SMRAM (aka TSEG base, not smbase). The stack
 * pointer for each CPU is calculated by using its APIC id
 * (code is in smm_stub.S).
 * Each entry point will now have the same stub code which sets up the CPU
 * stack, enters protected mode and then jumps to the SMI handler. It is
 * important to enter protected mode before the jump because the "jump to
 * address" might be larger than the 20-bit address supported by real mode.
 * SMI entry right now is in real mode.
 * input: smbase - this is the smbase of the first CPU, not the smbase
 *        where TSEG starts (aka smram_start). All CPUs' code segments
 *        and stacks will be below this point, except for the common
 *        SMI handler, which is one segment above.
 * input: num_cpus - number of CPUs that need relocation, including
 *        the first CPU (though its code is already loaded)
 * input: top of stack (stacks work downward by default in Intel HW)
 * output: returns 0 if the runtime SMI code could not be installed. In
 *         this case SMM will not work and any SMI generated will
 *         cause a CPU shutdown or general protection fault because
 *         the appropriate SMI handling code was not installed.
 */

static int smm_place_entry_code(uintptr_t smbase, unsigned int num_cpus,
				unsigned int stack_top, const struct smm_loader_params *params)
{
	unsigned int i;
	unsigned int size;

	if (smm_create_map(smbase, num_cpus, params)) {
		/*
		 * Ensure there was enough space and the last CPU's smbase
		 * did not encroach upon the stack. Stack top is SMRAM start
		 * + size of stack.
		 */
		if (cpus[num_cpus - 1].active) {
			if (cpus[num_cpus - 1].smbase +
			    params->smm_main_entry_offset < stack_top) {
				printk(BIOS_ERR, "%s: stack encroachment\n", __func__);
				printk(BIOS_ERR, "%s: smbase %zx, stack_top %x\n",
				       __func__, cpus[num_cpus - 1].smbase, stack_top);
				return 0;
			}
		}
	} else {
		printk(BIOS_ERR, "%s: unable to place smm entry code\n", __func__);
		return 0;
	}

	printk(BIOS_INFO, "%s: smbase %zx, stack_top %x\n",
	       __func__, cpus[num_cpus - 1].smbase, stack_top);

	/* start at 1, the first CPU stub code is already there */
	size = cpus[0].code_end - cpus[0].code_start;
	for (i = 1; i < num_cpus; i++) {
		memcpy((void *)cpus[i].code_start, (void *)cpus[0].code_start, size);
		printk(BIOS_DEBUG,
		       "SMM Module: placing smm entry code at %zx, cpu # 0x%x\n",
		       cpus[i].code_start, i);
		printk(BIOS_DEBUG, "%s: copying from %zx to %zx 0x%x bytes\n",
		       __func__, cpus[0].code_start, cpus[i].code_start, size);
	}
	return 1;
}

/*
 * Place stacks in the base -> base + size region, but ensure the stacks
 * don't overlap the staggered entry points.
 */
static void *smm_stub_place_stacks(char *base, size_t size,
				   struct smm_loader_params *params)
{
	size_t total_stack_size;
	char *stacks_top;

	/* If stack space is requested, assume the space lives in the lower
	 * half of SMRAM. */
	total_stack_size = params->per_cpu_stack_size *
			   params->num_concurrent_stacks;
	printk(BIOS_DEBUG, "%s: cpus: %zx : stack space: needed -> %zx\n",
	       __func__, params->num_concurrent_stacks,
	       total_stack_size);
	printk(BIOS_DEBUG, " available -> %zx : per_cpu_stack_size : %zx\n",
	       size, params->per_cpu_stack_size);

	/* There has to be at least one stack user. */
	if (params->num_concurrent_stacks < 1)
		return NULL;

	/* Total stack size cannot fit. */
	if (total_stack_size > size)
		return NULL;

	/* Stacks extend down to SMBASE. */
	stacks_top = &base[total_stack_size];
	printk(BIOS_DEBUG, "%s: exit, stack_top %p\n", __func__, stacks_top);

	return stacks_top;
}
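
/*
 * Example (illustrative values): with num_concurrent_stacks = 4 and
 * per_cpu_stack_size = 0x400, the stacks occupy base .. base + 0x1000 and
 * stacks_top = base + 0x1000 is returned; the stub then carves per-CPU
 * slices downward from stacks_top by APIC id (see smm_stub.S).
 */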

/*
 * Place the staggered entry points for each CPU. The entry points are
 * staggered by the per CPU SMM save state size extending down from
 * SMM_ENTRY_OFFSET.
 */
static int smm_stub_place_staggered_entry_points(char *base,
	const struct smm_loader_params *params, const struct rmodule *smm_stub)
{
	size_t stub_entry_offset;
	int rc = 1;

	stub_entry_offset = rmodule_entry_offset(smm_stub);
	/* Each CPU now has its own stub code, which enters protected mode,
	 * sets up the stack, and then jumps to the common SMI handler.
	 */
	if (params->num_concurrent_save_states > 1 || stub_entry_offset != 0) {
		rc = smm_place_entry_code((uintptr_t)base,
					  params->num_concurrent_save_states,
					  (uintptr_t)params->stack_top, params);
	}
	return rc;
}

/*
 * The stub setup code assumes it is completely contained within the
 * default SMRAM size (0x10000) for the default SMI handler (entry at
 * 0x30000), but no assumption should be made for the permanent SMI handler.
 * The placement of CPU entry points for the permanent handler is determined
 * by the number of CPUs in the system and the amount of SMRAM.
 * There are potentially 3 regions to place
 * within the default SMRAM size:
 * 1. Save state areas
 * 2. Stub code
 * 3. Stack areas
 *
 * The save state and SMM stack are treated as contiguous for the number of
 * concurrent areas requested. The save state always lives at the top of
 * each CPU's smbase segment (and the entry point is at offset 0x8000). This
 * allows only a certain number of CPUs with staggered entry points until the
 * save state area comes down far enough to overwrite/corrupt the entry code
 * (stub code). Therefore, an SMM map is created to avoid this corruption;
 * see smm_create_map() above.
 * This module setup code works for the default (0x30000) SMM handler setup
 * and the permanent SMM handler.
 */
static int smm_module_setup_stub(void *smbase, size_t smm_size,
				 struct smm_loader_params *params,
				 void *fxsave_area)
{
	size_t total_save_state_size;
	size_t smm_stub_size;
	size_t stub_entry_offset;
	char *smm_stub_loc;
	void *stacks_top;
	size_t size;
	char *base;
	size_t i;
	struct smm_stub_params *stub_params;
	struct rmodule smm_stub;
	unsigned int total_size_all;

	base = smbase;
	size = smm_size;

	/* The number of concurrent stacks cannot exceed CONFIG_MAX_CPUS. */
	if (params->num_concurrent_stacks > CONFIG_MAX_CPUS) {
		printk(BIOS_ERR, "%s: not enough stacks\n", __func__);
		return -1;
	}

	/* Fail if we can't parse the smm stub rmodule. */
	if (rmodule_parse(&_binary_smmstub_start, &smm_stub)) {
		printk(BIOS_ERR, "%s: unable to parse smm stub\n", __func__);
		return -1;
	}

	/* Adjust remaining size to account for save state. */
	total_save_state_size = params->per_cpu_save_state_size *
				params->num_concurrent_save_states;
	if (total_save_state_size > size) {
		printk(BIOS_ERR,
		       "%s: more state save space needed: need -> %zx, available -> %zx\n",
		       __func__, total_save_state_size, size);
		return -1;
	}

	size -= total_save_state_size;

	/* The save state size encroached over the first SMM entry point. */
	if (size <= params->smm_main_entry_offset) {
		printk(BIOS_ERR, "%s: encroachment over SMM entry point\n", __func__);
		printk(BIOS_ERR, "%s: state save size: %zx : smm_entry_offset -> %x\n",
		       __func__, size, params->smm_main_entry_offset);
		return -1;
	}

	/* Need a minimum stack size and alignment. */
	if (params->per_cpu_stack_size <= SMM_MINIMUM_STACK_SIZE ||
	    (params->per_cpu_stack_size & 3) != 0) {
		printk(BIOS_ERR, "%s: need minimum stack size\n", __func__);
		return -1;
	}

	smm_stub_loc = NULL;
	smm_stub_size = rmodule_memory_size(&smm_stub);
	stub_entry_offset = rmodule_entry_offset(&smm_stub);

	/* Put the stub at the main entry point. */
	smm_stub_loc = &base[params->smm_main_entry_offset];

	/* Fail if the stub is too big to fit. */
	if (smm_stub_size > (size - params->smm_main_entry_offset)) {
		printk(BIOS_ERR, "%s: stub is too big to fit\n", __func__);
		return -1;
	}

	/* The stacks, if requested, live in the lower half of SMRAM space
	 * for the default handler, but for the relocated handler they live
	 * at the beginning of SMRAM, which is the TSEG base.
	 */
	size = params->num_concurrent_stacks * params->per_cpu_stack_size;
	stacks_top = smm_stub_place_stacks((char *)params->smram_start, size, params);
	if (stacks_top == NULL) {
		printk(BIOS_ERR, "%s: not enough space for stacks\n", __func__);
		printk(BIOS_ERR, "%s: ....need -> %zx\n", __func__, size);
		return -1;
	}
	params->stack_top = stacks_top;

	/* Load the stub. */
	if (rmodule_load(smm_stub_loc, &smm_stub)) {
		printk(BIOS_ERR, "%s: load module failed\n", __func__);
		return -1;
	}

	if (!smm_stub_place_staggered_entry_points(base, params, &smm_stub)) {
		printk(BIOS_ERR, "%s: staggered entry points failed\n", __func__);
		return -1;
	}

	/* Setup the parameters for the stub code. */
	stub_params = rmodule_parameters(&smm_stub);
	stub_params->stack_top = (uintptr_t)stacks_top;
	stub_params->stack_size = params->per_cpu_stack_size;
	stub_params->c_handler = (uintptr_t)params->handler;
	stub_params->c_handler_arg = (uintptr_t)params->handler_arg;
	stub_params->fxsave_area = (uintptr_t)fxsave_area;
	stub_params->fxsave_area_size = FXSAVE_SIZE;
	stub_params->runtime.smbase = (uintptr_t)smbase;
	stub_params->runtime.smm_size = smm_size;
	stub_params->runtime.save_state_size = params->per_cpu_save_state_size;
	stub_params->runtime.num_cpus = params->num_concurrent_stacks;

	printk(BIOS_DEBUG,
	       "%s: stack_top = 0x%x\n", __func__, stub_params->stack_top);
	printk(BIOS_DEBUG, "%s: stack_size = 0x%x\n",
	       __func__, stub_params->stack_size);
	printk(BIOS_DEBUG, "%s: runtime.smbase = 0x%x\n",
	       __func__, stub_params->runtime.smbase);
	printk(BIOS_DEBUG, "%s: runtime.start32_offset = 0x%x\n", __func__,
	       stub_params->runtime.start32_offset);
	printk(BIOS_DEBUG, "%s: runtime.smm_size = 0x%zx\n",
	       __func__, smm_size);
	printk(BIOS_DEBUG, "%s: per_cpu_save_state_size = 0x%x\n",
	       __func__, stub_params->runtime.save_state_size);
	printk(BIOS_DEBUG, "%s: num_cpus = 0x%x\n", __func__,
	       stub_params->runtime.num_cpus);
	printk(BIOS_DEBUG, "%s: total_save_state_size = 0x%x\n",
	       __func__, (stub_params->runtime.save_state_size *
	       stub_params->runtime.num_cpus));
	total_size_all = stub_params->stack_size +
			 (stub_params->runtime.save_state_size *
			  stub_params->runtime.num_cpus);
	printk(BIOS_DEBUG, "%s: total_size_all = 0x%x\n", __func__,
	       total_size_all);

	/* Initialize the APIC id to CPU number table to be 1:1 */
	for (i = 0; i < params->num_concurrent_stacks; i++)
		stub_params->runtime.apic_id_to_cpu[i] = i;

	/* Allow the initiator to manipulate SMM stub parameters. */
	params->runtime = &stub_params->runtime;

	printk(BIOS_DEBUG, "SMM Module: stub loaded at %p. Will call %p(%p)\n",
	       smm_stub_loc, params->handler, params->handler_arg);
	return 0;
}

/*
 * smm_setup_relocation_handler assumes the callback is already loaded in
 * memory. i.e. Another SMM module isn't chained to the stub. The other
 * assumption is that the stub will be entered from the default SMRAM
 * location: 0x30000 -> 0x40000.
 */
int smm_setup_relocation_handler(struct smm_loader_params *params)
{
	void *smram = (void *)(SMM_DEFAULT_BASE);

	printk(BIOS_SPEW, "%s: enter\n", __func__);
	/* There can't be more than 1 concurrent save state for the relocation
	 * handler because all CPUs default to 0x30000 as SMBASE. */
	if (params->num_concurrent_save_states > 1)
		return -1;

	/* A handler has to be defined to call for relocation. */
	if (params->handler == NULL)
		return -1;

	/* Since the relocation handler always uses stack, adjust the number
	 * of concurrent stack users to be CONFIG_MAX_CPUS. */
	if (params->num_concurrent_stacks == 0)
		params->num_concurrent_stacks = CONFIG_MAX_CPUS;

	params->smm_main_entry_offset = SMM_ENTRY_OFFSET;
	params->smram_start = SMM_DEFAULT_BASE;
	params->smram_end = SMM_DEFAULT_BASE + SMM_DEFAULT_SIZE;
	return smm_module_setup_stub(smram, SMM_DEFAULT_SIZE,
				     params, fxsave_area_relocation);
}
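
/*
 * Caller sketch for the relocation handler (field values and the callback
 * name are hypothetical; only fields used by this file are shown):
 *
 *	struct smm_loader_params params = {
 *		.per_cpu_stack_size = stack_size,
 *		.num_concurrent_stacks = num_cpus,
 *		.per_cpu_save_state_size = save_state_size,
 *		.num_concurrent_save_states = 1,
 *		.handler = smm_relocation_handler,	// hypothetical callback
 *	};
 *	if (smm_setup_relocation_handler(&params))
 *		printk(BIOS_ERR, "unable to install relocation handler\n");
 */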

/*
 * The SMM module is placed within the provided region in the following
 * manner:
 * +-----------------+ <- smram + size
 * |  BIOS resource  |
 * |   list (STM)    |
 * +-----------------+
 * |  fxsave area    |
 * +-----------------+
 * |  smi handler    |
 * |      ...        |
 * +-----------------+ <- cpu0
 * |    stub code    | <- cpu1
 * |    stub code    | <- cpu2
 * |    stub code    | <- cpu3, etc
 * |                 |
 * |                 |
 * |                 |
 * |    stacks       |
 * +-----------------+ <- smram start
 *
 * It should be noted that this algorithm will not work for
 * SMM_DEFAULT_SIZE SMRAM regions such as the A segment. This algorithm
 * expects a region large enough to encompass the handler and stacks
 * as well as the SMM_DEFAULT_SIZE.
 */
int smm_load_module(void *smram, size_t size, struct smm_loader_params *params)
{
	struct rmodule smm_mod;
	size_t total_stack_size;
	size_t handler_size;
	size_t module_alignment;
	size_t alignment_size;
	size_t fxsave_size;
	void *fxsave_area;
	size_t total_size = 0;
	char *base;

	if (size <= SMM_DEFAULT_SIZE)
		return -1;

	/* Load the main SMI handler at the top of SMRAM;
	 * everything else goes below.
	 */
	base = smram;
	base += size;
	params->smram_start = (uintptr_t)smram;
	params->smram_end = params->smram_start + size;
	params->smm_main_entry_offset = SMM_ENTRY_OFFSET;

	/* Fail if we can't parse the smm rmodule. */
	if (rmodule_parse(&_binary_smm_start, &smm_mod))
		return -1;

	/* Fill the SMM region with a debug pattern when SMI debugging is
	 * enabled. */
	if (CONFIG(DEBUG_SMI))
		memset(smram, 0xcd, size);

	total_stack_size = params->per_cpu_stack_size *
			   params->num_concurrent_stacks;
	total_size += total_stack_size;
	/* Stacks are at the base of SMRAM. */
	params->stack_top = smram + total_stack_size;

	/* MSEG starts at the top of SMRAM and works down. */
	if (CONFIG(STM)) {
		base -= CONFIG_MSEG_SIZE + CONFIG_BIOS_RESOURCE_LIST_SIZE;
		total_size += CONFIG_MSEG_SIZE + CONFIG_BIOS_RESOURCE_LIST_SIZE;
	}

	/* FXSAVE goes below MSEG. */
	if (CONFIG(SSE)) {
		fxsave_size = FXSAVE_SIZE * params->num_concurrent_stacks;
		fxsave_area = base - fxsave_size;
		base -= fxsave_size;
		total_size += fxsave_size;
	} else {
		fxsave_size = 0;
		fxsave_area = NULL;
	}

	handler_size = rmodule_memory_size(&smm_mod);
	base -= handler_size;
	total_size += handler_size;
	module_alignment = rmodule_load_alignment(&smm_mod);
	alignment_size = module_alignment -
			 ((uintptr_t)base % module_alignment);
	if (alignment_size != module_alignment) {
		handler_size += alignment_size;
		base += alignment_size;
	}
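
	/* Worked example for the alignment fixup above (illustrative
	 * numbers): with a 4KiB rmodule alignment and base ending in 0x400,
	 * alignment_size = 0x1000 - 0x400 = 0xc00, so base moves up to the
	 * next 4KiB boundary and handler_size grows by the same amount. */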

	printk(BIOS_DEBUG,
	       "%s: total_smm_space_needed %zx, available -> %zx\n",
	       __func__, total_size, size);

	/* Does the required amount of memory exceed the SMRAM region size? */
	if (total_size > size) {
		printk(BIOS_ERR, "%s: need more SMRAM\n", __func__);
		return -1;
	}
	if (handler_size > SMM_CODE_SEGMENT_SIZE) {
		printk(BIOS_ERR, "%s: increase SMM_CODE_SEGMENT_SIZE: handler_size = %zx\n",
		       __func__, handler_size);
		return -1;
	}

	if (rmodule_load(base, &smm_mod))
		return -1;

	params->handler = rmodule_entry(&smm_mod);
	params->handler_arg = rmodule_parameters(&smm_mod);

	printk(BIOS_DEBUG, "%s: smram_start: %p\n",
	       __func__, smram);
	printk(BIOS_DEBUG, "%s: smram_end: %p\n",
	       __func__, smram + size);
	printk(BIOS_DEBUG, "%s: stack_top: %p\n",
	       __func__, params->stack_top);
	printk(BIOS_DEBUG, "%s: handler start %p\n",
	       __func__, params->handler);
	printk(BIOS_DEBUG, "%s: handler_size %zx\n",
	       __func__, handler_size);
	printk(BIOS_DEBUG, "%s: handler_arg %p\n",
	       __func__, params->handler_arg);
	printk(BIOS_DEBUG, "%s: fxsave_area %p\n",
	       __func__, fxsave_area);
	printk(BIOS_DEBUG, "%s: fxsave_size %zx\n",
	       __func__, fxsave_size);
	printk(BIOS_DEBUG, "%s: CONFIG_MSEG_SIZE 0x%x\n",
	       __func__, CONFIG_MSEG_SIZE);
	printk(BIOS_DEBUG, "%s: CONFIG_BIOS_RESOURCE_LIST_SIZE 0x%x\n",
	       __func__, CONFIG_BIOS_RESOURCE_LIST_SIZE);

	/* CPU 0's smbase goes first; all other CPUs
	 * will be staggered below.
	 */
	base -= SMM_CODE_SEGMENT_SIZE;
	printk(BIOS_DEBUG, "%s: cpu0 entry: %p\n",
	       __func__, base);
	params->smm_entry = (uintptr_t)base + params->smm_main_entry_offset;
	return smm_module_setup_stub(base, size, params, fxsave_area);
}
655}