/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google Inc.
 * Copyright (C) 2014 Sage Electronic Engineering, LLC.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <console/console.h>
#include <cpu/cpu.h>
#include <cpu/intel/microcode.h>
#include <cpu/intel/turbo.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <reg_script.h>

#include <baytrail/msr.h>
#include <baytrail/pattrs.h>
#include <baytrail/ramstage.h>
#if IS_ENABLED(CONFIG_HAVE_SMI_HANDLER)
#include <baytrail/smm.h>

static void smm_relocate(void *unused);
static void enable_smis(void *unused);

static struct mp_flight_record mp_steps[] = {
        MP_FR_BLOCK_APS(smm_relocate, NULL, smm_relocate, NULL),
        MP_FR_BLOCK_APS(mp_initialize_cpu, NULL, mp_initialize_cpu, NULL),
        /* Wait for APs to finish initialization before proceeding. */
        MP_FR_BLOCK_APS(NULL, NULL, enable_smis, NULL),
};
#else /* CONFIG_HAVE_SMI_HANDLER */
static struct mp_flight_record mp_steps[] = {
        MP_FR_BLOCK_APS(mp_initialize_cpu, NULL, mp_initialize_cpu, NULL),
};
#endif

/* The APIC id space on Bay Trail is sparse. Each id is separated by 2. */
static int adjust_apic_id(int index, int apic_id)
{
        return 2 * index;
}

/* Core level MSRs */
const struct reg_script core_msr_script[] = {
        /* Dynamic L2 shrink enable and threshold */
        REG_MSR_RMW(MSR_PMG_CST_CONFIG_CONTROL, ~0x3f000f, 0xe0008),
        /* Disable C1E */
        REG_MSR_RMW(MSR_POWER_CTL, ~0x2, 0),
        REG_MSR_OR(MSR_POWER_MISC, 0x44),
        REG_SCRIPT_END
};

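/* Perform MP init: check MTRRs, enable the local APIC, fill in mp_params and
 * run the flight plan above on all CPUs via mp_init(). */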
void baytrail_init_cpus(device_t dev)
{
        struct bus *cpu_bus = dev->link_list;
        const struct pattrs *pattrs = pattrs_get();
        struct mp_params mp_params;

        x86_mtrr_check();

        /* Enable the local CPU APICs. */
        setup_lapic();

        mp_params.num_cpus = pattrs->num_cpus;
        mp_params.parallel_microcode_load = 0;
        mp_params.adjust_apic_id = adjust_apic_id;
        mp_params.flight_plan = &mp_steps[0];
        mp_params.num_records = ARRAY_SIZE(mp_steps);
        mp_params.microcode_pointer = 0;

        if (mp_init(cpu_bus, &mp_params)) {
                printk(BIOS_ERR, "MP initialization failure.\n");
        }
}

static void baytrail_core_init(device_t cpu)
{
        printk(BIOS_DEBUG, "Init BayTrail core.\n");

        /* On Bay Trail the turbo disable bit is actually scoped at the
         * building block level -- not the package. For non-BSP cores within a
         * building block, enable turbo. The cores within the BSP's building
         * block will just see it already enabled and move on. */
        if (lapicid())
                enable_turbo();

        /* Set core MSRs */
        reg_script_run(core_msr_script);

        /* Set this core to max frequency ratio */
        set_max_freq();
}

static struct device_operations cpu_dev_ops = {
        .init = baytrail_core_init,
};

static struct cpu_device_id cpu_table[] = {
        { X86_VENDOR_INTEL, 0x30671 },
        { X86_VENDOR_INTEL, 0x30672 },
        { X86_VENDOR_INTEL, 0x30673 },
        { X86_VENDOR_INTEL, 0x30678 },
        { 0, 0 },
};

static const struct cpu_driver driver __cpu_driver = {
        .ops = &cpu_dev_ops,
        .id_table = cpu_table,
};

#if IS_ENABLED(CONFIG_HAVE_SMI_HANDLER)
/*
 * SMM loading and initialization.
 */

struct smm_relocation_attrs {
        uint32_t smbase;
        uint32_t smrr_base;
        uint32_t smrr_mask;
};

static struct smm_relocation_attrs relo_attrs;

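/* Fill the SMM runtime's apic_id_to_cpu[] map with the APIC id of each CPU. */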
static void adjust_apic_id_map(struct smm_loader_params *smm_params)
{
        int i;
        struct smm_runtime *runtime = smm_params->runtime;

        for (i = 0; i < CONFIG_MAX_CPUS; i++)
                runtime->apic_id_to_cpu[i] = mp_get_apic_id(i);
}

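/* Per-CPU relocation routine run from the SMM stub: program the SMRR and
 * write this CPU's new, staggered SMBASE into its save state. */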
static void asmlinkage
cpu_smm_do_relocation(void *arg, int cpu, const struct smm_runtime *runtime)
{
#ifndef CONFIG_MAX_CPUS
#error CONFIG_MAX_CPUS must be set.
#endif
        msr_t smrr;
        em64t100_smm_state_save_area_t *smm_state;

        if (cpu >= CONFIG_MAX_CPUS) {
                printk(BIOS_CRIT,
                       "Invalid CPU number assigned in SMM stub: %d\n", cpu);
                return;
        }

        /* Set up SMRR. */
        smrr.lo = relo_attrs.smrr_base;
        smrr.hi = 0;
        wrmsr(SMRRphysBase_MSR, smrr);
        smrr.lo = relo_attrs.smrr_mask;
        smrr.hi = 0;
        wrmsr(SMRRphysMask_MSR, smrr);

        /* The relocated handler runs with all CPUs concurrently. Therefore
         * stagger the entry points adjusting SMBASE downwards by save state
         * size * CPU num. */
        smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + runtime->smbase);
        smm_state->smbase = relo_attrs.smbase - cpu * runtime->save_state_size;
        printk(BIOS_DEBUG, "New SMBASE 0x%08x\n", smm_state->smbase);
}

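/* Install the temporary handler used to relocate each CPU's SMBASE. */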
static int install_relocation_handler(int num_cpus)
{
        const int save_state_size = sizeof(em64t100_smm_state_save_area_t);

        struct smm_loader_params smm_params = {
                .per_cpu_stack_size = save_state_size,
                .num_concurrent_stacks = num_cpus,
                .per_cpu_save_state_size = save_state_size,
                .num_concurrent_save_states = 1,
                .handler = (smm_handler_t)&cpu_smm_do_relocation,
        };

        if (smm_setup_relocation_handler(&smm_params))
                return -1;

        adjust_apic_id_map(&smm_params);

        return 0;
}

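/* Load the permanent SMM handler at the base of TSEG, leaving
 * CONFIG_SMM_RESERVED_SIZE untouched at the top of the region. */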
static int install_permanent_handler(int num_cpus)
{
#ifndef CONFIG_SMM_RESERVED_SIZE
#error CONFIG_SMM_RESERVED_SIZE must be set.
#endif
        /* There are num_cpus concurrent stacks and num_cpus concurrent save
         * state areas. Lastly, set the stack size to the save state size. */
        int save_state_size = sizeof(em64t100_smm_state_save_area_t);
        struct smm_loader_params smm_params = {
                .per_cpu_stack_size = save_state_size,
                .num_concurrent_stacks = num_cpus,
                .per_cpu_save_state_size = save_state_size,
                .num_concurrent_save_states = num_cpus,
        };
        const int tseg_size = smm_region_size() - CONFIG_SMM_RESERVED_SIZE;

        printk(BIOS_DEBUG, "Installing SMM handler to 0x%08x\n",
               relo_attrs.smbase);

        if (smm_load_module((void *)relo_attrs.smbase, tseg_size, &smm_params))
                return -1;

        adjust_apic_id_map(&smm_params);

        return 0;
}

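/* Compute SMBASE/SMRR values for the SMM region and install both the
 * relocation handler and the permanent handler. */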
static int smm_load_handlers(void)
{
        /* All range registers are aligned to 4KiB. */
        const uint32_t rmask = ~((1 << 12) - 1);
        const struct pattrs *pattrs = pattrs_get();

        /* Initialize global tracking state. */
        relo_attrs.smbase = (uint32_t)smm_region_start();
        relo_attrs.smrr_base = relo_attrs.smbase | MTRR_TYPE_WRBACK;
        relo_attrs.smrr_mask = ~(smm_region_size() - 1) & rmask;
        relo_attrs.smrr_mask |= MTRRphysMaskValid;

        /* Install handlers. */
        if (install_relocation_handler(pattrs->num_cpus) < 0) {
                printk(BIOS_ERR, "Unable to install SMM relocation handler.\n");
                return -1;
        }

        if (install_permanent_handler(pattrs->num_cpus) < 0) {
                printk(BIOS_ERR, "Unable to install SMM permanent handler.\n");
                return -1;
        }

        /* Ensure the SMM handlers hit DRAM before performing the first SMI. */
        wbinvd();

        return 0;
}

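/* Flight plan step: the BSP loads the SMM handlers, then every CPU (BSP and
 * APs) triggers its own SMBASE relocation. */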
static void smm_relocate(void *unused)
{
        /* Load relocation and permanent handlers. */
        if (boot_cpu()) {
                if (smm_load_handlers() < 0) {
                        printk(BIOS_ERR, "Error loading SMM handlers.\n");
                        return;
                }
                southcluster_smm_clear_state();
        }

        /* Relocate SMM space. */
        smm_initiate_relocation();
}

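/* Final flight plan step: turn on SMI generation in the south cluster. */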
static void enable_smis(void *unused)
{
        southcluster_smm_enable_smi();
}
#endif /* CONFIG_HAVE_SMI_HANDLER */