blob: 4b824a57a5a3e96baeae06fb3bf22aeca0fe6ba7 [file] [log] [blame]
/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google LLC
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* SMM relocation with intention to work for i945-ivybridge.
   Right now used for sandybridge and ivybridge. */
18
Arthur Heymanscf2941a2018-08-06 15:00:25 +020019#include <assert.h>
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +020020#include <types.h>
21#include <string.h>
22#include <device/device.h>
23#include <device/pci.h>
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +020024#include <cpu/x86/cache.h>
Arthur Heymansedbf5d92018-01-25 20:03:42 +010025#include <cpu/x86/mp.h>
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +020026#include <cpu/x86/msr.h>
27#include <cpu/x86/mtrr.h>
28#include <cpu/x86/smm.h>
29#include <console/console.h>
Arthur Heymansedbf5d92018-01-25 20:03:42 +010030#include <smp/node.h>
Vladimir Serbinenkoc16e9dfa2015-05-29 16:18:01 +020031#include "smi.h"
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +020032
/* MTRRcap MSR bit: CPU implements the SMRR range registers. */
#define SMRR_SUPPORTED (1 << 11)

/* Bits of the northbridge SMRAM control register
   (written via northbridge_write_smram() below). */
#define D_OPEN (1 << 6)		/* SMRAM accessible outside SMM */
#define D_CLS (1 << 5)
#define D_LCK (1 << 4)		/* lock register contents until reset */
#define G_SMRAME (1 << 3)	/* global SMRAM enable */
/* Compatible base segment code 010 -- NOTE(review): selects the legacy
   A0000h SMRAM segment per chipset datasheet; confirm for each target. */
#define C_BASE_SEG ((0 << 2) | (1 << 1) | (0 << 0))
/* Header placed at IEDBASE to identify the Intel Enhanced Debug region.
 * The layout (10 + 4 + 34 = 48 bytes, packed) is a fixed convention
 * consumed outside coreboot; do not change it. */
struct ied_header {
	char signature[10];	/* "INTEL RSVD" (see setup_ied_area()) */
	u32 size;		/* size of the IED region in bytes */
	u8 reserved[34];
} __packed;
Vladimir Serbinenkoc16e9dfa2015-05-29 16:18:01 +020046
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +020047
/* Parameters computed once by fill_in_relocation_params() and consumed
 * by the relocation handlers running in SMM context. */
struct smm_relocation_params {
	u32 smram_base;		/* TSEG base; SMBASE of the permanent handler */
	u32 smram_size;		/* usable SMRAM (TSEG minus IED/reserved areas) */
	u32 ied_base;		/* base of the IED region at the top of TSEG */
	u32 ied_size;
	msr_t smrr_base;
	msr_t smrr_mask;	/* .lo == 0 means "do not program SMRR" */
};
56
/* This gets filled in and used during relocation. */
static struct smm_relocation_params smm_reloc_params;
/* Backup of the default SMM area, taken before installing the relocation
 * handler and restored later in smm_init_completion(). */
static void *default_smm_area = NULL;
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +020060
Arthur Heymansd30894b2019-01-12 00:27:18 +010061/* On model_6fx, model_1067x and model_106cx SMRR functions slightly
62 differently. The MSR are at different location from the rest
63 and need to be explicitly enabled in IA32_FEATURE_CONTROL MSR. */
64bool cpu_has_alternative_smrr(void)
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +020065{
Arthur Heymans06f818c2018-07-20 23:41:54 +020066 struct cpuinfo_x86 c;
Arthur Heymansd30894b2019-01-12 00:27:18 +010067 get_fms(&c, cpuid_eax(1));
68 if (c.x86 != 6)
69 return false;
70 switch (c.x86_model) {
71 case 0xf:
72 case 0x17: /* core2 */
73 case 0x1c: /* Bonnell */
74 return true;
75 default:
76 return false;
77 }
78}
Arthur Heymans06f818c2018-07-20 23:41:54 +020079
Arthur Heymansd30894b2019-01-12 00:27:18 +010080static void write_smrr(struct smm_relocation_params *relo_params)
81{
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +020082 printk(BIOS_DEBUG, "Writing SMRR. base = 0x%08x, mask=0x%08x\n",
83 relo_params->smrr_base.lo, relo_params->smrr_mask.lo);
Arthur Heymansd30894b2019-01-12 00:27:18 +010084
85 if (cpu_has_alternative_smrr()) {
Arthur Heymans06f818c2018-07-20 23:41:54 +020086 msr_t msr;
87 msr = rdmsr(IA32_FEATURE_CONTROL);
88 /* SMRR enabled and feature locked */
89 if (!((msr.lo & SMRR_ENABLE)
90 && (msr.lo & FEATURE_CONTROL_LOCK_BIT))) {
91 printk(BIOS_WARNING,
92 "SMRR not enabled, skip writing SMRR...\n");
93 return;
94 }
95 wrmsr(MSR_SMRR_PHYS_BASE, relo_params->smrr_base);
96 wrmsr(MSR_SMRR_PHYS_MASK, relo_params->smrr_mask);
97 } else {
98 wrmsr(IA32_SMRR_PHYS_BASE, relo_params->smrr_base);
99 wrmsr(IA32_SMRR_PHYS_MASK, relo_params->smrr_mask);
100 }
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +0200101}
102
/* The relocation work is actually performed in SMM context, but the code
 * resides in the ramstage module. This occurs by trampolining from the default
 * SMRAM entry point to here. */
static void asmlinkage cpu_smm_do_relocation(void *arg)
{
	em64t101_smm_state_save_area_t *save_state;
	msr_t mtrr_cap;
	struct smm_relocation_params *relo_params;
	const struct smm_module_params *p;
	const struct smm_runtime *runtime;
	int cpu;

	/* arg is the smm_module_params prepared by the SMM stub. */
	p = arg;
	runtime = p->runtime;
	relo_params = p->arg;
	cpu = p->cpu;

	if (cpu >= CONFIG_MAX_CPUS) {
		printk(BIOS_CRIT,
		       "Invalid CPU number assigned in SMM stub: %d\n", cpu);
		return;
	}

	printk(BIOS_DEBUG, "In relocation handler: cpu %d\n", cpu);

	/* All threads need to set IEDBASE and SMBASE in the save state area.
	 * Since one thread runs at a time during the relocation the save state
	 * is the same for all cpus. */
	save_state = (void *)(runtime->smbase + SMM_DEFAULT_SIZE -
			      runtime->save_state_size);

	/* The relocated handler runs with all CPUs concurrently. Therefore
	 * stagger the entry points adjusting SMBASE downwards by save state
	 * size * CPU num. */
	save_state->smbase = relo_params->smram_base -
		cpu * runtime->save_state_size;
	if (CONFIG_IED_REGION_SIZE != 0) {
		save_state->iedbase = relo_params->ied_base;

		printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x @ %p\n",
		       save_state->smbase, save_state->iedbase, save_state);
	} else {
		printk(BIOS_DEBUG, "New SMBASE=0x%08x @ %p\n",
		       save_state->smbase, save_state);
	}

	/* Write SMRR MSRs based on indicated support. A zero mask means
	 * fill_in_relocation_params() declined to configure SMRR (e.g.
	 * unaligned TSEG), so skip programming in that case too. */
	mtrr_cap = rdmsr(MTRR_CAP_MSR);
	if (mtrr_cap.lo & SMRR_SUPPORTED && relo_params->smrr_mask.lo != 0)
		write_smrr(relo_params);

	southbridge_clear_smi_status();
}
156
Vladimir Serbinenkoc16e9dfa2015-05-29 16:18:01 +0200157static void fill_in_relocation_params(struct smm_relocation_params *params)
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +0200158{
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +0200159 /* All range registers are aligned to 4KiB */
160 const u32 rmask = ~((1 << 12) - 1);
161
Nico Huber6f8b7df2016-10-08 18:42:46 +0200162 const u32 tsegmb = northbridge_get_tseg_base();
163 /* TSEG base is usually aligned down (to 8MiB). So we can't
164 derive the TSEG size from the distance to GTT but use the
165 configuration value instead. */
Arthur Heymansaade90e2018-01-25 00:33:45 +0100166 const u32 tseg_size = northbridge_get_tseg_size();
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +0200167
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +0200168 params->smram_base = tsegmb;
Arthur Heymanscf2941a2018-08-06 15:00:25 +0200169 params->smram_size = tseg_size;
170 if (CONFIG_IED_REGION_SIZE != 0) {
171 ASSERT(params->smram_size > CONFIG_IED_REGION_SIZE);
172 params->smram_size -= CONFIG_IED_REGION_SIZE;
173 params->ied_base = tsegmb + tseg_size - CONFIG_IED_REGION_SIZE;
174 params->ied_size = CONFIG_IED_REGION_SIZE;
175 }
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +0200176
Arthur Heymans67031a52018-02-05 19:08:03 +0100177 /* Adjust available SMM handler memory size. */
Arthur Heymanscf2941a2018-08-06 15:00:25 +0200178 if (IS_ENABLED(CONFIG_CACHE_RELOCATED_RAMSTAGE_OUTSIDE_CBMEM)) {
179 ASSERT(params->smram_size > CONFIG_SMM_RESERVED_SIZE);
Arthur Heymans67031a52018-02-05 19:08:03 +0100180 params->smram_size -= CONFIG_SMM_RESERVED_SIZE;
Arthur Heymanscf2941a2018-08-06 15:00:25 +0200181 }
Arthur Heymans67031a52018-02-05 19:08:03 +0100182
Arthur Heymanscb5304b2018-08-06 12:10:10 +0200183 if (IS_ALIGNED(tsegmb, tseg_size)) {
184 /* SMRR has 32-bits of valid address aligned to 4KiB. */
185 struct cpuinfo_x86 c;
Arthur Heymans06f818c2018-07-20 23:41:54 +0200186
Arthur Heymanscb5304b2018-08-06 12:10:10 +0200187 /* On model_6fx and model_1067x bits [0:11] on smrr_base
188 are reserved */
189 get_fms(&c, cpuid_eax(1));
Arthur Heymansd30894b2019-01-12 00:27:18 +0100190 if (cpu_has_alternative_smrr())
Arthur Heymanscb5304b2018-08-06 12:10:10 +0200191 params->smrr_base.lo = (params->smram_base & rmask);
192 else
193 params->smrr_base.lo = (params->smram_base & rmask)
194 | MTRR_TYPE_WRBACK;
195 params->smrr_base.hi = 0;
196 params->smrr_mask.lo = (~(tseg_size - 1) & rmask)
197 | MTRR_PHYS_MASK_VALID;
198 params->smrr_mask.hi = 0;
199 } else {
200 printk(BIOS_WARNING,
201 "TSEG base not aligned with TSEG SIZE! Not setting SMRR\n");
202 }
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +0200203}
204
Vladimir Serbinenkoc16e9dfa2015-05-29 16:18:01 +0200205static int install_relocation_handler(int *apic_id_map, int num_cpus,
Lee Leahy7b5f12b92017-03-15 17:16:59 -0700206 struct smm_relocation_params *relo_params)
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +0200207{
208 /* The default SMM entry happens serially at the default location.
209 * Therefore, there is only 1 concurrent save state area. Set the
210 * stack size to the save state size, and call into the
211 * do_relocation handler. */
212 int save_state_size = sizeof(em64t101_smm_state_save_area_t);
213 struct smm_loader_params smm_params = {
214 .per_cpu_stack_size = save_state_size,
215 .num_concurrent_stacks = num_cpus,
216 .per_cpu_save_state_size = save_state_size,
217 .num_concurrent_save_states = 1,
218 .handler = &cpu_smm_do_relocation,
219 .handler_arg = (void *)relo_params,
220 };
221
Kyösti Mälkkia9450082017-08-18 22:19:03 +0300222 default_smm_area = backup_default_smm_area();
223
Vladimir Serbinenkoc16e9dfa2015-05-29 16:18:01 +0200224 if (smm_setup_relocation_handler(&smm_params))
225 return -1;
226 int i;
Lee Leahy26eeb0f2017-03-15 18:08:50 -0700227 for (i = 0; i < num_cpus; i++)
Vladimir Serbinenkoc16e9dfa2015-05-29 16:18:01 +0200228 smm_params.runtime->apic_id_to_cpu[i] = apic_id_map[i];
Vladimir Serbinenkoc16e9dfa2015-05-29 16:18:01 +0200229 return 0;
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +0200230}
231
232static void setup_ied_area(struct smm_relocation_params *params)
233{
234 char *ied_base;
235
236 struct ied_header ied = {
237 .signature = "INTEL RSVD",
238 .size = params->ied_size,
239 .reserved = {0},
240 };
241
242 ied_base = (void *)params->ied_base;
243
244 /* Place IED header at IEDBASE. */
245 memcpy(ied_base, &ied, sizeof(ied));
246
247 /* Zero out 32KiB at IEDBASE + 1MiB */
248 memset(ied_base + (1 << 20), 0, (32 << 10));
249}
250
Vladimir Serbinenkoc16e9dfa2015-05-29 16:18:01 +0200251static int install_permanent_handler(int *apic_id_map, int num_cpus,
Lee Leahy7b5f12b92017-03-15 17:16:59 -0700252 struct smm_relocation_params *relo_params)
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +0200253{
254 /* There are num_cpus concurrent stacks and num_cpus concurrent save
255 * state areas. Lastly, set the stack size to the save state size. */
256 int save_state_size = sizeof(em64t101_smm_state_save_area_t);
257 struct smm_loader_params smm_params = {
258 .per_cpu_stack_size = save_state_size,
259 .num_concurrent_stacks = num_cpus,
260 .per_cpu_save_state_size = save_state_size,
261 .num_concurrent_save_states = num_cpus,
262 };
263
264 printk(BIOS_DEBUG, "Installing SMM handler to 0x%08x\n",
265 relo_params->smram_base);
Vladimir Serbinenkoc16e9dfa2015-05-29 16:18:01 +0200266 if (smm_load_module((void *)relo_params->smram_base,
267 relo_params->smram_size, &smm_params))
268 return -1;
269 int i;
Lee Leahy26eeb0f2017-03-15 18:08:50 -0700270 for (i = 0; i < num_cpus; i++)
Vladimir Serbinenkoc16e9dfa2015-05-29 16:18:01 +0200271 smm_params.runtime->apic_id_to_cpu[i] = apic_id_map[i];
Vladimir Serbinenkoc16e9dfa2015-05-29 16:18:01 +0200272 return 0;
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +0200273}
274
275static int cpu_smm_setup(void)
276{
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +0200277 int num_cpus;
Vladimir Serbinenkoc16e9dfa2015-05-29 16:18:01 +0200278 int apic_id_map[CONFIG_MAX_CPUS];
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +0200279
280 printk(BIOS_DEBUG, "Setting up SMI for CPU\n");
281
Vladimir Serbinenkoc16e9dfa2015-05-29 16:18:01 +0200282 fill_in_relocation_params(&smm_reloc_params);
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +0200283
Vladimir Serbinenkoc16e9dfa2015-05-29 16:18:01 +0200284 /* enable the SMM memory window */
285 northbridge_write_smram(D_OPEN | G_SMRAME | C_BASE_SEG);
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +0200286
Arthur Heymanscf2941a2018-08-06 15:00:25 +0200287 if (CONFIG_IED_REGION_SIZE != 0)
288 setup_ied_area(&smm_reloc_params);
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +0200289
Vladimir Serbinenkoc16e9dfa2015-05-29 16:18:01 +0200290 num_cpus = cpu_get_apic_id_map(apic_id_map);
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +0200291 if (num_cpus > CONFIG_MAX_CPUS) {
292 printk(BIOS_CRIT,
293 "Error: Hardware CPUs (%d) > MAX_CPUS (%d)\n",
294 num_cpus, CONFIG_MAX_CPUS);
295 }
296
Lee Leahycdc50482017-03-15 18:26:18 -0700297 if (install_relocation_handler(apic_id_map, num_cpus,
298 &smm_reloc_params)) {
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +0200299 printk(BIOS_CRIT, "SMM Relocation handler install failed.\n");
300 return -1;
301 }
302
Lee Leahycdc50482017-03-15 18:26:18 -0700303 if (install_permanent_handler(apic_id_map, num_cpus,
304 &smm_reloc_params)) {
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +0200305 printk(BIOS_CRIT, "SMM Permanent handler install failed.\n");
306 return -1;
307 }
308
309 /* Ensure the SMM handlers hit DRAM before performing first SMI. */
310 /* TODO(adurbin): Is this really needed? */
311 wbinvd();
312
Vladimir Serbinenkoc16e9dfa2015-05-29 16:18:01 +0200313 /* close the SMM memory window and enable normal SMM */
314 northbridge_write_smram(G_SMRAME | C_BASE_SEG);
315
Vladimir Serbinenkoa3e41c02015-05-28 16:04:17 +0200316 return 0;
317}
318
/* Entry point for legacy (non-MP-ops) SMM initialization: set up the
 * handlers, then trigger the first SMI so every CPU relocates its
 * SMBASE, and finally lock SMRAM. */
void smm_init(void)
{
	/* Return early if CPU SMM setup failed. */
	if (cpu_smm_setup())
		return;

	southbridge_smm_init();

	/* Initiate first SMI to kick off SMM-context relocation. Note: this
	 * SMI being triggered here queues up an SMI in the APs which are in
	 * wait-for-SIPI state. Once an AP gets an SIPI it will service the SMI
	 * at the SMM_DEFAULT_BASE before jumping to startup vector. */
	southbridge_trigger_smi();

	printk(BIOS_DEBUG, "Relocation complete.\n");

	/* Lock down the SMRAM space. */
	smm_lock();
}
338
/* Called after relocation is done on all CPUs: put back the memory that
 * install_relocation_handler() saved from the default SMM area. */
void smm_init_completion(void)
{
	restore_default_smm_area(default_smm_area);
}
343
void smm_lock(void)
{
	/* LOCK the SMM memory window and enable normal SMM.
	 * After running this function, only a full reset can
	 * make the SMM registers writable again.
	 */
	printk(BIOS_DEBUG, "Locking SMM.\n");

	/* D_LCK makes the northbridge SMRAM register read-only. */
	northbridge_write_smram(D_LCK | G_SMRAME | C_BASE_SEG);
}
Arthur Heymansedbf5d92018-01-25 20:03:42 +0100354
/* MP-ops path: compute the relocation parameters and report where the
 * permanent SMM handler goes (base/size) and the per-CPU save state
 * size, for the generic MP init code to use. */
void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
		size_t *smm_save_state_size)
{
	printk(BIOS_DEBUG, "Setting up SMI for CPU\n");

	fill_in_relocation_params(&smm_reloc_params);

	if (CONFIG_IED_REGION_SIZE != 0)
		setup_ied_area(&smm_reloc_params);

	*perm_smbase = smm_reloc_params.smram_base;
	*perm_smsize = smm_reloc_params.smram_size;
	*smm_save_state_size = sizeof(em64t101_smm_state_save_area_t);
}
369
/* MP-ops path: prepare for relocation and run the BSP's relocation. */
void smm_initialize(void)
{
	/* Clear the SMM state in the southbridge. */
	southbridge_smm_clear_state();

	/*
	 * Run the relocation handler for on the BSP to check and set up
	 * parallel SMM relocation.
	 */
	smm_initiate_relocation();
}
381
/* The relocation work is actually performed in SMM context, but the code
 * resides in the ramstage module. This occurs by trampolining from the default
 * SMRAM entry point to here. (MP-ops variant of cpu_smm_do_relocation.) */
void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
			uintptr_t staggered_smbase)
{
	msr_t mtrr_cap;
	struct smm_relocation_params *relo_params = &smm_reloc_params;
	em64t101_smm_state_save_area_t *save_state;
	/* staggered_smbase is already adjusted per-CPU by the caller. */
	u32 smbase = staggered_smbase;
	u32 iedbase = relo_params->ied_base;

	printk(BIOS_DEBUG, "In relocation handler: cpu %d\n", cpu);

	/* Make appropriate changes to the save state map. */
	if (CONFIG_IED_REGION_SIZE != 0)
		printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n",
		       smbase, iedbase);
	else
		printk(BIOS_DEBUG, "New SMBASE=0x%08x\n",
		       smbase);

	/* The save state area sits at the top of the default SMRAM window. */
	save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE -
			      sizeof(*save_state));
	save_state->smbase = smbase;
	save_state->iedbase = iedbase;

	/* Write EMRR and SMRR MSRs based on indicated support. A zero
	 * smrr_mask means SMRR was deliberately not configured. */
	mtrr_cap = rdmsr(MTRR_CAP_MSR);
	if (mtrr_cap.lo & SMRR_SUPPORTED && relo_params->smrr_mask.lo != 0)
		write_smrr(relo_params);
}
414
/*
 * The default SMM entry can happen in parallel or serially. If the
 * default SMM entry is done in parallel the BSP has already setup
 * the saving state to each CPU's MSRs. At least one save state size
 * is required for the initial SMM entry for the BSP to determine if
 * parallel SMM relocation is even feasible.
 */
void smm_relocate(void)
{
	/*
	 * The BSP has already run its relocation in smm_initialize(), so
	 * only the APs initiate relocation here; running it again on the
	 * BSP would relocate its SMBASE a second time.
	 */
	if (!boot_cpu())
		smm_initiate_relocation();
}