/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2014 Google Inc.
 * Copyright (C) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

17#include <types.h>
18#include <string.h>
19#include <device/device.h>
20#include <device/pci.h>
21#include <cpu/cpu.h>
22#include <cpu/x86/cache.h>
23#include <cpu/x86/lapic.h>
24#include <cpu/x86/mp.h>
25#include <cpu/x86/msr.h>
26#include <cpu/x86/mtrr.h>
27#include <cpu/x86/smm.h>
28#include <console/console.h>
29#include <soc/cpu.h>
30#include <soc/msr.h>
31#include <soc/pci_devs.h>
32#include <soc/smm.h>
33#include <soc/systemagent.h>
Lee Leahy1d14b3e2015-05-12 18:23:27 -070034#include "chip.h"
Lee Leahyb0005132015-05-12 18:19:47 -070035
36/* This gets filled in and used during relocation. */
37static struct smm_relocation_params smm_reloc_params;
38
39static inline void write_smrr(struct smm_relocation_params *relo_params)
40{
41 printk(BIOS_DEBUG, "Writing SMRR. base = 0x%08x, mask=0x%08x\n",
42 relo_params->smrr_base.lo, relo_params->smrr_mask.lo);
Alexandru Gagniuc86091f92015-09-30 20:23:09 -070043 wrmsr(SMRR_PHYS_BASE, relo_params->smrr_base);
44 wrmsr(SMRR_PHYS_MASK, relo_params->smrr_mask);
Lee Leahyb0005132015-05-12 18:19:47 -070045}
46
Lee Leahyb0005132015-05-12 18:19:47 -070047static inline void write_uncore_emrr(struct smm_relocation_params *relo_params)
48{
49 printk(BIOS_DEBUG,
50 "Writing UNCORE_EMRR. base = 0x%08x, mask=0x%08x\n",
51 relo_params->uncore_emrr_base.lo,
52 relo_params->uncore_emrr_mask.lo);
Rizwan Qureshi188e3702015-07-23 17:40:32 +053053 wrmsr(UNCORE_PRMRR_PHYS_BASE_MSR, relo_params->uncore_emrr_base);
54 wrmsr(UNCORE_PRMRR_PHYS_MASK_MSR, relo_params->uncore_emrr_mask);
Lee Leahyb0005132015-05-12 18:19:47 -070055}
56
Aaron Durbin58225822016-05-03 17:45:59 -050057static void update_save_state(int cpu, uintptr_t curr_smbase,
58 uintptr_t staggered_smbase,
59 struct smm_relocation_params *relo_params)
Lee Leahyb0005132015-05-12 18:19:47 -070060{
61 u32 smbase;
62 u32 iedbase;
63
Lee Leahy1d14b3e2015-05-12 18:23:27 -070064 /*
65 * The relocated handler runs with all CPUs concurrently. Therefore
Lee Leahyb0005132015-05-12 18:19:47 -070066 * stagger the entry points adjusting SMBASE downwards by save state
Lee Leahy1d14b3e2015-05-12 18:23:27 -070067 * size * CPU num.
68 */
Aaron Durbin58225822016-05-03 17:45:59 -050069 smbase = staggered_smbase;
Lee Leahyb0005132015-05-12 18:19:47 -070070 iedbase = relo_params->ied_base;
71
72 printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n",
73 smbase, iedbase);
74
Lee Leahy1d14b3e2015-05-12 18:23:27 -070075 /*
76 * All threads need to set IEDBASE and SMBASE to the relocated
Lee Leahyb0005132015-05-12 18:19:47 -070077 * handler region. However, the save state location depends on the
78 * smm_save_state_in_msrs field in the relocation parameters. If
79 * smm_save_state_in_msrs is non-zero then the CPUs are relocating
80 * the SMM handler in parallel, and each CPUs save state area is
81 * located in their respective MSR space. If smm_save_state_in_msrs
82 * is zero then the SMM relocation is happening serially so the
Lee Leahy1d14b3e2015-05-12 18:23:27 -070083 * save state is at the same default location for all CPUs.
84 */
Lee Leahyb0005132015-05-12 18:19:47 -070085 if (relo_params->smm_save_state_in_msrs) {
86 msr_t smbase_msr;
87 msr_t iedbase_msr;
88
89 smbase_msr.lo = smbase;
90 smbase_msr.hi = 0;
91
Lee Leahy1d14b3e2015-05-12 18:23:27 -070092 /*
93 * According the BWG the IEDBASE MSR is in bits 63:32. It's
94 * not clear why it differs from the SMBASE MSR.
95 */
Lee Leahyb0005132015-05-12 18:19:47 -070096 iedbase_msr.lo = 0;
97 iedbase_msr.hi = iedbase;
98
99 wrmsr(SMBASE_MSR, smbase_msr);
100 wrmsr(IEDBASE_MSR, iedbase_msr);
101 } else {
102 em64t101_smm_state_save_area_t *save_state;
103
Aaron Durbin58225822016-05-03 17:45:59 -0500104 save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE -
105 sizeof(*save_state));
Lee Leahyb0005132015-05-12 18:19:47 -0700106
107 save_state->smbase = smbase;
108 save_state->iedbase = iedbase;
109 }
110}
111
112/* Returns 1 if SMM MSR save state was set. */
113static int bsp_setup_msr_save_state(struct smm_relocation_params *relo_params)
114{
115 msr_t smm_mca_cap;
116
117 smm_mca_cap = rdmsr(SMM_MCA_CAP_MSR);
118 if (smm_mca_cap.hi & SMM_CPU_SVRSTR_MASK) {
119 msr_t smm_feature_control;
120
121 smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
122 smm_feature_control.hi = 0;
123 smm_feature_control.lo |= SMM_CPU_SAVE_EN;
124 wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
125 relo_params->smm_save_state_in_msrs = 1;
126 }
127 return relo_params->smm_save_state_in_msrs;
128}
129
Lee Leahy1d14b3e2015-05-12 18:23:27 -0700130/*
131 * The relocation work is actually performed in SMM context, but the code
Lee Leahyb0005132015-05-12 18:19:47 -0700132 * resides in the ramstage module. This occurs by trampolining from the default
Lee Leahy1d14b3e2015-05-12 18:23:27 -0700133 * SMRAM entry point to here.
134 */
Aaron Durbin58225822016-05-03 17:45:59 -0500135void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
136 uintptr_t staggered_smbase)
Lee Leahyb0005132015-05-12 18:19:47 -0700137{
138 msr_t mtrr_cap;
Aaron Durbin58225822016-05-03 17:45:59 -0500139 struct smm_relocation_params *relo_params = &smm_reloc_params;
Lee Leahyb0005132015-05-12 18:19:47 -0700140
Elyes HAOUAS038e7242016-07-29 18:31:16 +0200141 printk(BIOS_DEBUG, "In relocation handler: CPU %d\n", cpu);
Lee Leahyb0005132015-05-12 18:19:47 -0700142
Lee Leahy1d14b3e2015-05-12 18:23:27 -0700143 /*
144 * Determine if the processor supports saving state in MSRs. If so,
Lee Leahyb0005132015-05-12 18:19:47 -0700145 * enable it before the non-BSPs run so that SMM relocation can occur
Lee Leahy1d14b3e2015-05-12 18:23:27 -0700146 * in parallel in the non-BSP CPUs.
147 */
Lee Leahyb0005132015-05-12 18:19:47 -0700148 if (cpu == 0) {
Lee Leahy1d14b3e2015-05-12 18:23:27 -0700149 /*
150 * If smm_save_state_in_msrs is 1 then that means this is the
Lee Leahyb0005132015-05-12 18:19:47 -0700151 * 2nd time through the relocation handler for the BSP.
152 * Parallel SMM handler relocation is taking place. However,
153 * it is desired to access other CPUs save state in the real
154 * SMM handler. Therefore, disable the SMM save state in MSRs
Lee Leahy1d14b3e2015-05-12 18:23:27 -0700155 * feature.
156 */
Lee Leahyb0005132015-05-12 18:19:47 -0700157 if (relo_params->smm_save_state_in_msrs) {
158 msr_t smm_feature_control;
159
160 smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
161 smm_feature_control.lo &= ~SMM_CPU_SAVE_EN;
162 wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
163 } else if (bsp_setup_msr_save_state(relo_params))
Lee Leahy1d14b3e2015-05-12 18:23:27 -0700164 /*
165 * Just return from relocation handler if MSR save
Lee Leahyb0005132015-05-12 18:19:47 -0700166 * state is enabled. In that case the BSP will come
167 * back into the relocation handler to setup the new
Lee Leahy1d14b3e2015-05-12 18:23:27 -0700168 * SMBASE as well disabling SMM save state in MSRs.
169 */
Lee Leahyb0005132015-05-12 18:19:47 -0700170 return;
171 }
172
173 /* Make appropriate changes to the save state map. */
Aaron Durbin58225822016-05-03 17:45:59 -0500174 update_save_state(cpu, curr_smbase, staggered_smbase, relo_params);
Lee Leahyb0005132015-05-12 18:19:47 -0700175
176 /* Write EMRR and SMRR MSRs based on indicated support. */
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700177 mtrr_cap = rdmsr(MTRR_CAP_MSR);
Lee Leahyb0005132015-05-12 18:19:47 -0700178 if (mtrr_cap.lo & SMRR_SUPPORTED)
179 write_smrr(relo_params);
Lee Leahyb0005132015-05-12 18:19:47 -0700180}
181
Lee Leahyb0005132015-05-12 18:19:47 -0700182static void fill_in_relocation_params(device_t dev,
Lee Leahy1d14b3e2015-05-12 18:23:27 -0700183 struct smm_relocation_params *params)
Lee Leahyb0005132015-05-12 18:19:47 -0700184{
Aaron Durbind452b6e2015-08-05 17:28:50 -0500185 void *handler_base;
186 size_t handler_size;
187 void *ied_base;
188 size_t ied_size;
189 void *tseg_base;
190 size_t tseg_size;
Lee Leahyb0005132015-05-12 18:19:47 -0700191 u32 emrr_base;
192 u32 emrr_size;
193 int phys_bits;
194 /* All range registers are aligned to 4KiB */
195 const u32 rmask = ~((1 << 12) - 1);
196
Lee Leahy1d14b3e2015-05-12 18:23:27 -0700197 /*
198 * Some of the range registers are dependent on the number of physical
199 * address bits supported.
200 */
Lee Leahyb0005132015-05-12 18:19:47 -0700201 phys_bits = cpuid_eax(0x80000008) & 0xff;
202
Aaron Durbind452b6e2015-08-05 17:28:50 -0500203 smm_region(&tseg_base, &tseg_size);
204 smm_subregion(SMM_SUBREGION_HANDLER, &handler_base, &handler_size);
205 smm_subregion(SMM_SUBREGION_CHIPSET, &ied_base, &ied_size);
Lee Leahyb0005132015-05-12 18:19:47 -0700206
Aaron Durbind452b6e2015-08-05 17:28:50 -0500207 params->smram_size = handler_size;
208 params->smram_base = (uintptr_t)handler_base;
Lee Leahyb0005132015-05-12 18:19:47 -0700209
Aaron Durbind452b6e2015-08-05 17:28:50 -0500210 params->ied_base = (uintptr_t)ied_base;
211 params->ied_size = ied_size;
Lee Leahyb0005132015-05-12 18:19:47 -0700212
213 /* SMRR has 32-bits of valid address aligned to 4KiB. */
214 params->smrr_base.lo = (params->smram_base & rmask) | MTRR_TYPE_WRBACK;
215 params->smrr_base.hi = 0;
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700216 params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
Lee Leahyb0005132015-05-12 18:19:47 -0700217 params->smrr_mask.hi = 0;
218
219 /* The EMRR and UNCORE_EMRR are at IEDBASE + 2MiB */
220 emrr_base = (params->ied_base + (2 << 20)) & rmask;
221 emrr_size = params->ied_size - (2 << 20);
222
Lee Leahy1d14b3e2015-05-12 18:23:27 -0700223 /*
224 * EMRR has 46 bits of valid address aligned to 4KiB. It's dependent
225 * on the number of physical address bits supported.
226 */
Lee Leahyb0005132015-05-12 18:19:47 -0700227 params->emrr_base.lo = emrr_base | MTRR_TYPE_WRBACK;
228 params->emrr_base.hi = 0;
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700229 params->emrr_mask.lo = (~(emrr_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
Lee Leahyb0005132015-05-12 18:19:47 -0700230 params->emrr_mask.hi = (1 << (phys_bits - 32)) - 1;
231
232 /* UNCORE_EMRR has 39 bits of valid address aligned to 4KiB. */
233 params->uncore_emrr_base.lo = emrr_base;
234 params->uncore_emrr_base.hi = 0;
235 params->uncore_emrr_mask.lo = (~(emrr_size - 1) & rmask) |
Alexandru Gagniuc86091f92015-09-30 20:23:09 -0700236 MTRR_PHYS_MASK_VALID;
Lee Leahyb0005132015-05-12 18:19:47 -0700237 params->uncore_emrr_mask.hi = (1 << (39 - 32)) - 1;
238}
239
Lee Leahyb0005132015-05-12 18:19:47 -0700240static void setup_ied_area(struct smm_relocation_params *params)
241{
242 char *ied_base;
243
244 struct ied_header ied = {
245 .signature = "INTEL RSVD",
246 .size = params->ied_size,
247 .reserved = {0},
248 };
249
250 ied_base = (void *)params->ied_base;
251
Lee Leahy1d14b3e2015-05-12 18:23:27 -0700252 printk(BIOS_DEBUG, "IED base = 0x%08x\n", params->ied_base);
253 printk(BIOS_DEBUG, "IED size = 0x%08x\n", params->ied_size);
254
Lee Leahyb0005132015-05-12 18:19:47 -0700255 /* Place IED header at IEDBASE. */
256 memcpy(ied_base, &ied, sizeof(ied));
257
258 /* Zero out 32KiB at IEDBASE + 1MiB */
259 memset(ied_base + (1 << 20), 0, (32 << 10));
260}
261
Aaron Durbin58225822016-05-03 17:45:59 -0500262void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
263 size_t *smm_save_state_size)
Lee Leahyb0005132015-05-12 18:19:47 -0700264{
265 device_t dev = SA_DEV_ROOT;
Lee Leahyb0005132015-05-12 18:19:47 -0700266
267 printk(BIOS_DEBUG, "Setting up SMI for CPU\n");
268
269 fill_in_relocation_params(dev, &smm_reloc_params);
270
Lee Leahy1d14b3e2015-05-12 18:23:27 -0700271 if (smm_reloc_params.ied_size)
272 setup_ied_area(&smm_reloc_params);
Lee Leahyb0005132015-05-12 18:19:47 -0700273
Aaron Durbin58225822016-05-03 17:45:59 -0500274 *perm_smbase = smm_reloc_params.smram_base;
275 *perm_smsize = smm_reloc_params.smram_size;
276 *smm_save_state_size = sizeof(em64t101_smm_state_save_area_t);
Lee Leahyb0005132015-05-12 18:19:47 -0700277}
278
Aaron Durbin58225822016-05-03 17:45:59 -0500279void smm_initialize(void)
Lee Leahyb0005132015-05-12 18:19:47 -0700280{
Lee Leahyb0005132015-05-12 18:19:47 -0700281 /* Clear the SMM state in the southbridge. */
282 southbridge_smm_clear_state();
283
Aaron Durbin58225822016-05-03 17:45:59 -0500284 /*
285 * Run the relocation handler for on the BSP to check and set up
286 * parallel SMM relocation.
287 */
Lee Leahyb0005132015-05-12 18:19:47 -0700288 smm_initiate_relocation();
289
Lee Leahy1d14b3e2015-05-12 18:23:27 -0700290 if (smm_reloc_params.smm_save_state_in_msrs)
Lee Leahyb0005132015-05-12 18:19:47 -0700291 printk(BIOS_DEBUG, "Doing parallel SMM relocation.\n");
Lee Leahyb0005132015-05-12 18:19:47 -0700292}
293
294void smm_relocate(void)
295{
296 /*
297 * If smm_save_state_in_msrs is non-zero then parallel SMM relocation
298 * shall take place. Run the relocation handler a second time on the
299 * BSP to do * the final move. For APs, a relocation handler always
300 * needs to be run.
301 */
302 if (smm_reloc_params.smm_save_state_in_msrs)
303 smm_initiate_relocation_parallel();
304 else if (!boot_cpu())
305 smm_initiate_relocation();
306}
307
Lee Leahyb0005132015-05-12 18:19:47 -0700308void smm_lock(void)
309{
Lee Leahy1d14b3e2015-05-12 18:23:27 -0700310 /*
311 * LOCK the SMM memory window and enable normal SMM.
Lee Leahyb0005132015-05-12 18:19:47 -0700312 * After running this function, only a full reset can
313 * make the SMM registers writable again.
314 */
315 printk(BIOS_DEBUG, "Locking SMM.\n");
316 pci_write_config8(SA_DEV_ROOT, SMRAM, D_LCK | G_SMRAME | C_BASE_SEG);
317}