blob: 04c83847af38a0f069ed468928dd3c609302ff90 [file] [log] [blame]
/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google Inc.
 * Copyright (C) 2015 Intel Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
16
Lee Leahy77ff0b12015-05-05 15:07:29 -070017#include <console/console.h>
18#include <cpu/cpu.h>
19#include <cpu/intel/microcode.h>
20#include <cpu/intel/turbo.h>
21#include <cpu/x86/cache.h>
22#include <cpu/x86/lapic.h>
23#include <cpu/x86/mp.h>
24#include <cpu/x86/msr.h>
25#include <cpu/x86/mtrr.h>
26#include <cpu/x86/smm.h>
Lee Leahy94b856e2015-10-15 12:07:03 -070027#include <fsp/memmap.h>
Lee Leahyacb9c0b2015-07-02 11:55:18 -070028#include <reg_script.h>
Chiranjeevi Rapolufd016a42015-08-11 14:09:46 -070029#include <soc/iosf.h>
Lee Leahy77ff0b12015-05-05 15:07:29 -070030#include <soc/msr.h>
31#include <soc/pattrs.h>
32#include <soc/ramstage.h>
33#include <soc/smm.h>
Lee Leahy32471722015-04-20 15:20:28 -070034#include <stdlib.h>
Lee Leahy77ff0b12015-05-05 15:07:29 -070035
/* Flight-plan callbacks, defined later in this file. */
static void smm_relocate(void *unused);
static void enable_smis(void *unused);
static void pre_smm_relocation(void *unused);

/*
 * MP initialization flight plan, executed in order on the BSP and APs:
 * 1. Ensure microcode is loaded on every CPU (needed before relocation).
 * 2. Relocate each CPU's SMBASE out of the default SMM area.
 * 3. Run the generic per-CPU initialization.
 * 4. Barrier: once every AP is done, the BSP enables SMIs.
 */
static struct mp_flight_record mp_steps[] = {
	MP_FR_BLOCK_APS(pre_smm_relocation, NULL, pre_smm_relocation, NULL),
	MP_FR_BLOCK_APS(smm_relocate, NULL, smm_relocate, NULL),
	MP_FR_BLOCK_APS(mp_initialize_cpu, NULL, mp_initialize_cpu, NULL),
	/* Wait for APs to finish initialization before proceeding. */
	MP_FR_BLOCK_APS(NULL, NULL, enable_smis, NULL),
};
47
/*
 * The APIC id space is sparse: logical CPU n owns APIC id 2 * n.
 * The apic_id value supplied by the MP framework is intentionally ignored.
 */
static int adjust_apic_id(int index, int apic_id)
{
	int sparse_id = index << 1;

	return sparse_id;
}
53
/* Package level MSRs, applied once by the BSP in soc_init_cpus(). */
const struct reg_script package_msr_script[] = {
	/* Set Package TDP to ~7W */
	REG_MSR_WRITE(MSR_PKG_POWER_LIMIT, 0x3880fa),
	/* Clear the PP1 power-limit field (bits 23:17). */
	REG_MSR_RMW(MSR_PP1_POWER_LIMIT, ~(0x7f << 17), 0),
	/* Turbo and thermal configuration values (platform-tuned magic). */
	REG_MSR_WRITE(MSR_PKG_TURBO_CFG1, 0x702),
	REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG1, 0x200b),
	REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG2, 0),
	REG_MSR_WRITE(MSR_CPU_THERM_CFG1, 0x00000305),
	REG_MSR_WRITE(MSR_CPU_THERM_CFG2, 0x0405500d),
	REG_MSR_WRITE(MSR_CPU_THERM_SENS_CFG, 0x27),
	REG_SCRIPT_END
};
67
/* Core level MSRs, applied on every core by soc_core_init(). */
const struct reg_script core_msr_script[] = {
	/* Dynamic L2 shrink enable and threshold, clear SINGLE_PCTL bit 11 */
	REG_MSR_RMW(MSR_PMG_CST_CONFIG_CONTROL, ~0x3f080f, 0xe0008),
	/* Clear the ULFM/INDP AUTOCM enable bits. */
	REG_MSR_RMW(MSR_POWER_MISC,
		~(ENABLE_ULFM_AUTOCM_MASK | ENABLE_INDP_AUTOCM_MASK), 0),
	/* Disable C1E */
	REG_MSR_RMW(MSR_POWER_CTL, ~0x2, 0),
	REG_MSR_OR(MSR_POWER_MISC, 0x44),
	REG_SCRIPT_END
};
Lee Leahy77ff0b12015-05-05 15:07:29 -070079
Lee Leahy32471722015-04-20 15:20:28 -070080void soc_init_cpus(device_t dev)
Lee Leahy77ff0b12015-05-05 15:07:29 -070081{
82 struct bus *cpu_bus = dev->link_list;
83 const struct pattrs *pattrs = pattrs_get();
84 struct mp_params mp_params;
Lee Leahy77ff0b12015-05-05 15:07:29 -070085 void *default_smm_area;
Chiranjeevi Rapolufd016a42015-08-11 14:09:46 -070086 uint32_t bsmrwac;
Lee Leahy77ff0b12015-05-05 15:07:29 -070087
Lee Leahy32471722015-04-20 15:20:28 -070088 printk(BIOS_SPEW, "%s/%s ( %s )\n",
89 __FILE__, __func__, dev_name(dev));
90
Lee Leahy77ff0b12015-05-05 15:07:29 -070091 /* Set up MTRRs based on physical address size. */
92 x86_setup_fixed_mtrrs();
93 x86_setup_var_mtrrs(pattrs->address_bits, 2);
94 x86_mtrr_check();
95
96 mp_params.num_cpus = pattrs->num_cpus,
97 mp_params.parallel_microcode_load = 1,
98 mp_params.adjust_apic_id = adjust_apic_id;
99 mp_params.flight_plan = &mp_steps[0];
100 mp_params.num_records = ARRAY_SIZE(mp_steps);
101 mp_params.microcode_pointer = pattrs->microcode_patch;
102
103 default_smm_area = backup_default_smm_area();
104
Chiranjeevi Rapolufd016a42015-08-11 14:09:46 -0700105 /*
106 * Configure the BUNIT to allow dirty cache line evictions in non-SMM
107 * mode for the lines that were dirtied while in SMM mode. Otherwise
108 * the writes would be silently dropped.
109 */
110 bsmrwac = iosf_bunit_read(BUNIT_SMRWAC) | SAI_IA_UNTRUSTED;
111 iosf_bunit_write(BUNIT_SMRWAC, bsmrwac);
112
Lee Leahyacb9c0b2015-07-02 11:55:18 -0700113 /* Set package MSRs */
114 reg_script_run(package_msr_script);
115
116 /* Enable Turbo Mode on BSP and siblings of the BSP's building block. */
Lee Leahy77ff0b12015-05-05 15:07:29 -0700117 enable_turbo();
118
Lee Leahy32471722015-04-20 15:20:28 -0700119 if (mp_init(cpu_bus, &mp_params))
Lee Leahy77ff0b12015-05-05 15:07:29 -0700120 printk(BIOS_ERR, "MP initialization failure.\n");
Lee Leahy77ff0b12015-05-05 15:07:29 -0700121
122 restore_default_smm_area(default_smm_area);
123}
124
/* Per-core initialization hook, invoked via cpu_dev_ops for each CPU. */
static void soc_core_init(device_t cpu)
{
	printk(BIOS_SPEW, "%s/%s ( %s )\n",
		__FILE__, __func__, dev_name(cpu));
	printk(BIOS_DEBUG, "Init Braswell core.\n");

	/*
	 * The turbo disable bit is actually scoped at building
	 * block level -- not package. For non-bsp cores that are within a
	 * building block enable turbo. The cores within the BSP's building
	 * block will just see it already enabled and move on.
	 */
	if (lapicid())
		enable_turbo();

	/* Set core MSRs */
	reg_script_run(core_msr_script);

	/* Set this core to max frequency ratio */
	set_max_freq();
}
Lee Leahy77ff0b12015-05-05 15:07:29 -0700146
/* CPU device operations: only per-core init is needed. */
static struct device_operations cpu_dev_ops = {
	.init = soc_core_init,
};

/* CPUID signatures this driver binds to (zero-terminated). */
static struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_INTEL, 0x406C3 },
	{ X86_VENDOR_INTEL, 0x406C2 },
	{ 0, 0 },
};

static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};
161
162
/*
 * SMM loading and initialization.
 */

/*
 * SMRAM geometry shared between the install path and the per-CPU
 * relocation routine; filled in once by smm_load_handlers().
 */
struct smm_relocation_attrs {
	uint32_t smbase;	/* SMRAM base (new SMBASE for CPU 0). */
	uint32_t smrr_base;	/* SMRR_PHYS_BASE value (base | cache type). */
	uint32_t smrr_mask;	/* SMRR_PHYS_MASK value (mask | valid bit). */
};

static struct smm_relocation_attrs relo_attrs;
174
175static void adjust_apic_id_map(struct smm_loader_params *smm_params)
176{
177 int i;
178 struct smm_runtime *runtime = smm_params->runtime;
179
180 for (i = 0; i < CONFIG_MAX_CPUS; i++)
181 runtime->apic_id_to_cpu[i] = mp_get_apic_id(i);
182}
183
/*
 * Per-CPU relocation routine, executed in SMM by every CPU. Programs the
 * SMRR pair from relo_attrs and writes this CPU's staggered SMBASE into
 * its save state so the next SMI enters the relocated handler.
 */
static void asmlinkage cpu_smm_do_relocation(void *arg)
{
	msr_t smrr;
	em64t100_smm_state_save_area_t *smm_state;
	const struct smm_module_params *p;
	const struct smm_runtime *runtime;
	int cpu;

	p = arg;
	runtime = p->runtime;
	cpu = p->cpu;

	/* The APIC-id map only has CONFIG_MAX_CPUS entries; refuse beyond. */
	if (cpu >= CONFIG_MAX_CPUS) {
		printk(BIOS_CRIT,
			"Invalid CPU number assigned in SMM stub: %d\n", cpu);
		return;
	}

	/* Set up SMRR. */
	smrr.lo = relo_attrs.smrr_base;
	smrr.hi = 0;
	wrmsr(SMRR_PHYS_BASE, smrr);
	smrr.lo = relo_attrs.smrr_mask;
	smrr.hi = 0;
	wrmsr(SMRR_PHYS_MASK, smrr);

	/*
	 * The relocated handler runs with all CPUs concurrently. Therefore
	 * stagger the entry points adjusting SMBASE downwards by save state
	 * size * CPU num.
	 */
	smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + runtime->smbase);
	smm_state->smbase = relo_attrs.smbase - cpu * runtime->save_state_size;
	printk(BIOS_DEBUG, "New SMBASE 0x%08x\n", smm_state->smbase);
}
219
220static int install_relocation_handler(int num_cpus)
221{
222 const int save_state_size = sizeof(em64t100_smm_state_save_area_t);
223
224 struct smm_loader_params smm_params = {
225 .per_cpu_stack_size = save_state_size,
226 .num_concurrent_stacks = num_cpus,
227 .per_cpu_save_state_size = save_state_size,
228 .num_concurrent_save_states = 1,
229 .handler = (smm_handler_t)&cpu_smm_do_relocation,
230 };
231
232 if (smm_setup_relocation_handler(&smm_params))
233 return -1;
234
235 adjust_apic_id_map(&smm_params);
236
237 return 0;
238}
239
240static int install_permanent_handler(int num_cpus)
241{
Lee Leahy32471722015-04-20 15:20:28 -0700242 /*
243 * There are num_cpus concurrent stacks and num_cpus concurrent save
244 * state areas. Lastly, set the stack size to the save state size.
245 */
Lee Leahy77ff0b12015-05-05 15:07:29 -0700246 int save_state_size = sizeof(em64t100_smm_state_save_area_t);
247 struct smm_loader_params smm_params = {
248 .per_cpu_stack_size = save_state_size,
249 .num_concurrent_stacks = num_cpus,
250 .per_cpu_save_state_size = save_state_size,
251 .num_concurrent_save_states = num_cpus,
252 };
Lee Leahy32471722015-04-20 15:20:28 -0700253 void *smm_base;
254 size_t smm_size;
255 int tseg_size;
Lee Leahy77ff0b12015-05-05 15:07:29 -0700256
257 printk(BIOS_DEBUG, "Installing SMM handler to 0x%08x\n",
258 relo_attrs.smbase);
259
Lee Leahy32471722015-04-20 15:20:28 -0700260 smm_region(&smm_base, &smm_size);
261 tseg_size = smm_size - CONFIG_SMM_RESERVED_SIZE;
Lee Leahy77ff0b12015-05-05 15:07:29 -0700262 if (smm_load_module((void *)relo_attrs.smbase, tseg_size, &smm_params))
263 return -1;
264
265 adjust_apic_id_map(&smm_params);
266
267 return 0;
268}
269
/*
 * Compute SMRAM/SMRR geometry and install both the relocation handler
 * and the permanent handler. Runs on the BSP (called from smm_relocate()
 * under boot_cpu()) before any relocation SMI is triggered.
 * Returns 0 on success, -1 on failure.
 */
static int smm_load_handlers(void)
{
	/* All range registers are aligned to 4KiB */
	const uint32_t rmask = ~((1 << 12) - 1);
	const struct pattrs *pattrs = pattrs_get();
	void *smm_base;
	size_t smm_size;

	/* Initialize global tracking state. */
	smm_region(&smm_base, &smm_size);
	relo_attrs.smbase = (uint32_t)smm_base;
	/* SMRR base: SMRAM base ORed with the write-back cache type. */
	relo_attrs.smrr_base = relo_attrs.smbase | MTRR_TYPE_WRBACK;
	/* SMRR mask covers smm_size (assumed power of two); mark valid. */
	relo_attrs.smrr_mask = ~(smm_size - 1) & rmask;
	relo_attrs.smrr_mask |= MTRR_PHYS_MASK_VALID;

	/* Install handlers. */
	if (install_relocation_handler(pattrs->num_cpus) < 0) {
		printk(BIOS_ERR, "Unable to install SMM relocation handler.\n");
		return -1;
	}

	if (install_permanent_handler(pattrs->num_cpus) < 0) {
		printk(BIOS_ERR, "Unable to install SMM permanent handler.\n");
		return -1;
	}

	/* Ensure the SMM handlers hit DRAM before performing first SMI. */
	wbinvd();

	return 0;
}
301
/*
 * Flight-plan step run on every CPU before SMM relocation: load microcode
 * if this core does not already have it.
 */
static void pre_smm_relocation(void *unused)
{
	const struct pattrs *pattrs = pattrs_get();
	msr_t msr_value;

	/* Need to make sure that all cores have microcode loaded. */
	/* IA32_BIOS_SIGN_ID reports the loaded revision in its high half;
	 * zero means no microcode update has been applied yet. */
	msr_value = rdmsr(MSR_IA32_BIOS_SIGN_ID);
	if (msr_value.hi == 0)
		intel_microcode_load_unlocked(pattrs->microcode_patch);
}
312
/*
 * Flight-plan step run on every CPU: the BSP first installs the SMM
 * handlers and clears southcluster SMM state, then each CPU triggers its
 * own relocation SMI and reloads microcode afterwards.
 */
static void smm_relocate(void *unused)
{
	const struct pattrs *pattrs = pattrs_get();

	/* Load relocation and permanent handler. */
	if (boot_cpu()) {
		if (smm_load_handlers() < 0) {
			printk(BIOS_ERR, "Error loading SMM handlers.\n");
			return;
		}
		southcluster_smm_clear_state();
	}

	/* Relocate SMM space. */
	smm_initiate_relocation();

	/* Load microcode after SMM relocation. */
	intel_microcode_load_unlocked(pattrs->microcode_patch);
}
332
/* Final flight-plan step on the BSP: turn on SMIs in the southcluster. */
static void enable_smis(void *unused)
{
	southcluster_smm_enable_smi();
}