/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2012 ChromeOS Authors
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <string.h>
#include <rmodule.h>
#include <cpu/x86/smm.h>
#include <cpu/x86/cache.h>
#include <console/console.h>

/*
 * Components that make up the SMRAM:
 * 1. Save state - the total save state memory used
 * 2. Stack - stacks for the CPUs in the SMM handler
 * 3. Stub - SMM stub code for calling into handler
 * 4. Handler - C-based SMM handler.
 *
 * The components are assumed to consist of one consecutive region.
 */

/* These parameters are used by the SMM stub code. A pointer to the params
 * is also passed to the C-based handler. */
struct smm_stub_params {
	u32 stack_size;
	u32 stack_top;
	u32 c_handler;
	u32 c_handler_arg;
	struct smm_runtime runtime;
} __attribute__ ((packed));

/*
 * The stub is the entry point that sets up protected mode and stacks for each
 * cpu. It then calls into the SMM handler module. It is encoded as an rmodule.
 */
extern unsigned char _binary_smmstub_start[];

/* This is the SMM handler that the stub calls. It is encoded as an rmodule. */
extern unsigned char _binary_smm_start[];

/* Per cpu minimum stack size. */
#define SMM_MINIMUM_STACK_SIZE 32

/*
 * The smm_entry_ins consists of 3 bytes. It is used when staggering SMRAM entry
 * addresses across CPUs.
 *
 * 0xe9 <16-bit relative target> ; jmp <relative-offset>
 */
struct smm_entry_ins {
	char jmp_rel;
	uint16_t rel16;
} __attribute__ ((packed));

/*
 * Place the entry instructions for num entries beginning at entry_start with
 * a given stride. The entry_start is the highest entry point's address. All
 * other entry points are stride size below the previous.
 */
static void smm_place_jmp_instructions(void *entry_start, int stride, int num,
				       void *jmp_target)
{
	int i;
	char *cur;
	struct smm_entry_ins entry = { .jmp_rel = 0xe9 };

	/* Each entry point has an IP value of 0x8000. The SMBASE for each
	 * cpu is different so the effective address of the entry instruction
	 * is different. Therefore, the relative displacement for each entry
	 * instruction needs to be updated to reflect the current effective
	 * IP. Additionally, the IP result from the jmp instruction is
	 * calculated using the next instruction's address so the size of
	 * the jmp instruction needs to be taken into account. */
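	/*
	 * Worked example (hypothetical addresses): if an entry point sits
	 * at cur == 0x7c00 and the stub entry is at jmp_target == 0x8010,
	 * then disp = 0x8010 - (0x7c00 + sizeof(entry)) = 0x040d and the
	 * bytes e9 0d 04 are written at 0x7c00. The 32-bit disp is
	 * truncated to 16 bits when assigned to rel16, which works here
	 * because the entry points and the stub entry are close together
	 * within the 64KiB SMRAM segment.
	 */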
	cur = entry_start;
	for (i = 0; i < num; i++) {
		uint32_t disp = (uint32_t)jmp_target;

		disp -= sizeof(entry) + (uint32_t)cur;
		printk(BIOS_DEBUG,
		       "SMM Module: placing jmp sequence at %p rel16 0x%04x\n",
		       cur, disp);
		entry.rel16 = disp;
		memcpy(cur, &entry, sizeof(entry));
		cur -= stride;
	}
}

/* Place stacks in the base -> base + size region, but ensure the stacks don't
 * overlap the staggered entry points. */
static void *smm_stub_place_stacks(char *base, int size,
				   struct smm_loader_params *params)
{
	int total_stack_size;
	char *stacks_top;

	if (params->stack_top != NULL)
		return params->stack_top;

	/* If stack space is requested, assume the space lives in the lower
	 * half of SMRAM. */
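	/*
	 * Illustrative sizing (values are hypothetical, not taken from the
	 * loader): with num_concurrent_stacks == 4 and
	 * per_cpu_stack_size == 0x400, total_stack_size == 0x1000, the
	 * stacks occupy [base, base + 0x1000) and base + 0x1000 is
	 * returned as stacks_top.
	 */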
	total_stack_size = params->per_cpu_stack_size *
			   params->num_concurrent_stacks;

	/* There has to be at least one stack user. */
	if (params->num_concurrent_stacks < 1)
		return NULL;

	/* Total stack size cannot fit. */
	if (total_stack_size > size)
		return NULL;

	/* Stacks extend down to SMBASE. */
	stacks_top = &base[total_stack_size];

	return stacks_top;
}

/* Place the staggered entry points for each CPU. The entry points are
 * staggered by the per cpu SMM save state size extending down from
 * SMM_ENTRY_OFFSET. */
static void smm_stub_place_staggered_entry_points(char *base,
	const struct smm_loader_params *params, const struct rmodule *smm_stub)
{
	int stub_entry_offset;

	stub_entry_offset = rmodule_entry_offset(smm_stub);

	/* If there are staggered entry points or the stub is not located
	 * at the SMM entry point then jmp instructions need to be placed. */
	if (params->num_concurrent_save_states > 1 || stub_entry_offset != 0) {
		int num_entries;

		base += SMM_ENTRY_OFFSET;
		num_entries = params->num_concurrent_save_states;
		/* Adjust beginning entry and number of entries down since
		 * the initial entry point doesn't need a jump sequence. */
		if (stub_entry_offset == 0) {
			base -= params->per_cpu_save_state_size;
			num_entries--;
		}
		smm_place_jmp_instructions(base,
					   params->per_cpu_save_state_size,
					   num_entries,
					   rmodule_entry(smm_stub));
	}
}

/*
 * The stub setup code assumes it is completely contained within the
 * default SMRAM size (0x10000). There are potentially 3 regions to place
 * within the default SMRAM size:
 * 1. Save state areas
 * 2. Stub code
 * 3. Stack areas
 *
 * The save state and stack areas are treated as contiguous for the number of
 * concurrent areas requested. The save state always lives at the top of SMRAM
 * space, and the entry point is at offset 0x8000.
 */
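/*
 * Illustrative layout of the default 64KiB SMRAM region (offsets are
 * relative to smbase; exact sizes depend on the loader params, so this is
 * only a sketch):
 *
 * +---------------------------+ <- smbase + 0x10000
 * | save state areas          |
 * +---------------------------+ <- smbase + 0x10000 - total_save_state_size
 * | (unused)                  |
 * +---------------------------+
 * | stub code                 |
 * | (and jmp entry sequence)  |
 * +---------------------------+ <- smbase + SMM_ENTRY_OFFSET (0x8000)
 * | staggered entry points,   |
 * | one per additional        |
 * | concurrent save state     |
 * +---------------------------+
 * | (unused)                  |
 * +---------------------------+ <- stacks_top
 * | per-cpu stacks            |
 * +---------------------------+ <- smbase
 */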
static int smm_module_setup_stub(void *smbase, struct smm_loader_params *params)
{
	int total_save_state_size;
	int smm_stub_size;
	int stub_entry_offset;
	char *smm_stub_loc;
	void *stacks_top;
	int size;
	char *base;
	int i;
	struct smm_stub_params *stub_params;
	struct rmodule smm_stub;

	base = smbase;
	size = SMM_DEFAULT_SIZE;

	/* The number of concurrent stacks cannot exceed CONFIG_MAX_CPUS. */
	if (params->num_concurrent_stacks > CONFIG_MAX_CPUS)
		return -1;

	/* Fail if the smm stub rmodule cannot be parsed. */
	if (rmodule_parse(&_binary_smmstub_start, &smm_stub))
		return -1;

	/* Adjust remaining size to account for save state. */
	total_save_state_size = params->per_cpu_save_state_size *
				params->num_concurrent_save_states;
	size -= total_save_state_size;

	/* Fail if the save state size encroaches on the first SMM entry
	 * point. */
	if (size <= SMM_ENTRY_OFFSET)
		return -1;

	/* Need a minimum stack size and alignment. */
	if (params->per_cpu_stack_size <= SMM_MINIMUM_STACK_SIZE ||
	    (params->per_cpu_stack_size & 3) != 0)
		return -1;

	smm_stub_loc = NULL;
	smm_stub_size = rmodule_memory_size(&smm_stub);
	stub_entry_offset = rmodule_entry_offset(&smm_stub);

	/* Assume the stub is always small enough to live within the upper half
	 * of the SMRAM region after the save state space has been allocated. */
	smm_stub_loc = &base[SMM_ENTRY_OFFSET];

	/* Adjust for jmp instruction sequence. */
	if (stub_entry_offset != 0) {
		int entry_sequence_size = sizeof(struct smm_entry_ins);
		/* Align up to 16 bytes. */
		entry_sequence_size += 15;
		entry_sequence_size &= ~15;
		smm_stub_loc += entry_sequence_size;
		smm_stub_size += entry_sequence_size;
	}
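	/*
	 * Explanatory note: sizeof(struct smm_entry_ins) is 3, so when the
	 * stub's entry point is not at its load address the entry sequence
	 * reserves 16 bytes at SMM_ENTRY_OFFSET and the stub is loaded just
	 * above it; the CPU entering at offset 0x8000 then reaches the stub
	 * through the jmp placed by smm_stub_place_staggered_entry_points().
	 */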

	/* Stub is too big to fit. */
	if (smm_stub_size > (size - SMM_ENTRY_OFFSET))
		return -1;

	/* The stacks, if requested, live in the lower half of SMRAM space. */
	size = SMM_ENTRY_OFFSET;

	/* Ensure the stacks don't encroach onto the staggered SMM entry
	 * points. The staggered entry points extend below SMM_ENTRY_OFFSET
	 * by (number of concurrent save states - 1) times the per-cpu save
	 * state size. */
	if (params->num_concurrent_save_states > 1) {
		size -= total_save_state_size;
		size += params->per_cpu_save_state_size;
	}
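	/*
	 * Illustrative example (hypothetical values): with 4 concurrent
	 * save states of 0x400 bytes each, the staggered entry points sit
	 * at offsets 0x8000, 0x7c00, 0x7800 and 0x7400, so the space
	 * available for stacks shrinks from 0x8000 to 0x7400 bytes.
	 */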

	/* Place the stacks in the lower half of SMRAM. */
	stacks_top = smm_stub_place_stacks(base, size, params);
	if (stacks_top == NULL)
		return -1;

	/* Load the stub. */
	if (rmodule_load(smm_stub_loc, &smm_stub))
		return -1;

	/* Place staggered entry points. */
	smm_stub_place_staggered_entry_points(base, params, &smm_stub);

	/* Set up the parameters for the stub code. */
	stub_params = rmodule_parameters(&smm_stub);
	stub_params->stack_top = (u32)stacks_top;
	stub_params->stack_size = params->per_cpu_stack_size;
	stub_params->c_handler = (u32)params->handler;
	stub_params->c_handler_arg = (u32)params->handler_arg;
	stub_params->runtime.smbase = (u32)smbase;
	stub_params->runtime.save_state_size = params->per_cpu_save_state_size;

	/* Initialize the APIC id to cpu number table to be 1:1. */
	for (i = 0; i < params->num_concurrent_stacks; i++)
		stub_params->runtime.apic_id_to_cpu[i] = i;

	/* Allow the initiator to manipulate SMM stub parameters. */
	params->runtime = &stub_params->runtime;

	printk(BIOS_DEBUG, "SMM Module: stub loaded at %p. Will call %p(%p)\n",
	       smm_stub_loc, params->handler, params->handler_arg);

	return 0;
}

/*
 * smm_setup_relocation_handler assumes the callback is already loaded in
 * memory, i.e. another SMM module isn't chained to the stub. The other
 * assumption is that the stub will be entered from the default SMRAM
 * location: 0x30000 -> 0x40000.
 */
int smm_setup_relocation_handler(struct smm_loader_params *params)
{
	void *smram = (void *)SMM_DEFAULT_BASE;

	/* There can't be more than 1 concurrent save state for the relocation
	 * handler because all CPUs default to 0x30000 as SMBASE. */
	if (params->num_concurrent_save_states > 1)
		return -1;

	/* A handler has to be defined to call for relocation. */
	if (params->handler == NULL)
		return -1;

	/* Since the relocation handler always uses a stack, adjust the number
	 * of concurrent stack users to be CONFIG_MAX_CPUS. */
	if (params->num_concurrent_stacks == 0)
		params->num_concurrent_stacks = CONFIG_MAX_CPUS;

	return smm_module_setup_stub(smram, params);
}

/* The SMM module is placed within the provided region in the following
 * manner:
 * +-----------------+ <- smram + size
 * |     stacks      |
 * +-----------------+ <- smram + size - total_stack_size
 * |       ...       |
 * +-----------------+ <- smram + handler_size + SMM_DEFAULT_SIZE
 * |     handler     |
 * +-----------------+ <- smram + SMM_DEFAULT_SIZE
 * |    stub code    |
 * +-----------------+ <- smram
 *
 * It should be noted that this algorithm will not work for
 * SMM_DEFAULT_SIZE SMRAM regions such as the A segment. This algorithm
 * expects a region large enough to encompass the handler and stacks
 * as well as the SMM_DEFAULT_SIZE.
 */
int smm_load_module(void *smram, int size, struct smm_loader_params *params)
{
	struct rmodule smm_mod;
	int total_stack_size;
	int handler_size;
	int module_alignment;
	int alignment_size;
	char *base;

	if (size <= SMM_DEFAULT_SIZE)
		return -1;

	/* Fail if the smm rmodule cannot be parsed. */
	if (rmodule_parse(&_binary_smm_start, &smm_mod))
		return -1;

	total_stack_size = params->per_cpu_stack_size *
			   params->num_concurrent_stacks;

	/* Stacks start at the top of the region. */
	base = smram;
	base += size;
	params->stack_top = base;

	/* The SMM module starts at offset SMM_DEFAULT_SIZE with the load
	 * alignment taken into account. */
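	/*
	 * Illustrative example (a hypothetical alignment value): if
	 * module_alignment is 4096 and smram + SMM_DEFAULT_SIZE is already
	 * 4KiB aligned, alignment_size computes to module_alignment and no
	 * padding is added below; otherwise base and handler_size grow by
	 * the padding needed to reach the next alignment boundary.
	 */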
	base = smram;
	base += SMM_DEFAULT_SIZE;
	handler_size = rmodule_memory_size(&smm_mod);
	module_alignment = rmodule_load_alignment(&smm_mod);
	alignment_size = module_alignment - ((u32)base % module_alignment);
	if (alignment_size != module_alignment) {
		handler_size += alignment_size;
		base += alignment_size;
	}

	/* Does the required amount of memory exceed the SMRAM region size? */
	if ((total_stack_size + handler_size + SMM_DEFAULT_SIZE) > size)
		return -1;

	if (rmodule_load(base, &smm_mod))
		return -1;

	params->handler = rmodule_entry(&smm_mod);
	params->handler_arg = rmodule_parameters(&smm_mod);

	return smm_module_setup_stub(smram, params);
}