/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of
 * the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <console/console.h>
#include <stdint.h>
#include <compiler.h>
#include <rmodule.h>
#include <arch/cpu.h>
#include <cpu/cpu.h>
#include <cpu/intel/microcode.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/gdt.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/name.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <cpu/x86/mp.h>
#include <delay.h>
#include <device/device.h>
#include <device/path.h>
#include <lib.h>
#include <smp/atomic.h>
#include <smp/spinlock.h>
#include <symbols.h>
#include <thread.h>

#define MAX_APIC_IDS 256

typedef void (*mp_callback_t)(void);

/*
 * An mp_flight_record details a sequence of calls for the APs to perform
 * along with the BSP coordinating the sequencing. Each flight record
 * either provides a barrier that each AP waits on before invoking its
 * callback, or allows the APs to perform the callback without waiting.
 * Either way, each AP increments the record's cpus_entered field. Once
 * the BSP observes that cpus_entered matches the number of APs, it
 * invokes bsp_call and, upon returning, releases the barrier, allowing
 * the APs to make further progress.
 *
 * Note that ap_call() and bsp_call() can be NULL. In the NULL case the
 * callback is simply not invoked.
 */
struct mp_flight_record {
	atomic_t barrier;
	atomic_t cpus_entered;
	mp_callback_t ap_call;
	mp_callback_t bsp_call;
} __aligned(CACHELINE_SIZE);

#define _MP_FLIGHT_RECORD(barrier_, ap_func_, bsp_func_) \
	{						\
		.barrier = ATOMIC_INIT(barrier_),	\
		.cpus_entered = ATOMIC_INIT(0),		\
		.ap_call = ap_func_,			\
		.bsp_call = bsp_func_,			\
	}

#define MP_FR_BLOCK_APS(ap_func_, bsp_func_) \
	_MP_FLIGHT_RECORD(0, ap_func_, bsp_func_)

#define MP_FR_NOBLOCK_APS(ap_func_, bsp_func_) \
	_MP_FLIGHT_RECORD(1, ap_func_, bsp_func_)

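/*
 * For illustration only: a hypothetical flight plan built with the
 * macros above (the actual plan used by this file is mp_steps[] near
 * the bottom). bsp_setup, ap_work and bsp_work are made-up callbacks.
 *
 *	static struct mp_flight_record plan[] = {
 *		MP_FR_BLOCK_APS(NULL, bsp_setup),      <- APs wait on BSP
 *		MP_FR_NOBLOCK_APS(ap_work, bsp_work),  <- run concurrently
 *	};
 */
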
/* The mp_params structure provides the arguments to the mp subsystem
 * for bringing up APs. */
struct mp_params {
	int num_cpus; /* Total number of CPUs, including the BSP. */
	int parallel_microcode_load;
	const void *microcode_pointer;
	/* Flight plan for APs and BSP. */
	struct mp_flight_record *flight_plan;
	int num_records;
};

/* This needs to match the layout in the .module_parameters section. */
struct sipi_params {
	uint16_t gdtlimit;
	uint32_t gdt;
	uint16_t unused;
	uint32_t idt_ptr;
	uint32_t stack_top;
	uint32_t stack_size;
	uint32_t microcode_lock; /* 0xffffffff means parallel loading. */
	uint32_t microcode_ptr;
	uint32_t msr_table_ptr;
	uint32_t msr_count;
	uint32_t c_handler;
	atomic_t ap_count;
} __packed;

/* This also needs to match the assembly code for saved MSR encoding. */
struct saved_msr {
	uint32_t index;
	uint32_t lo;
	uint32_t hi;
} __packed;

/* The sipi vector rmodule is included in the ramstage using 'objdump -B'. */
extern char _binary_sipi_vector_start[];

/* The SIPI vector is loaded at the SMM_DEFAULT_BASE. The reason is that
 * this memory range is already reserved so the OS cannot use it. That
 * region is free to use for AP bringup before SMM is initialized. */
static const uint32_t sipi_vector_location = SMM_DEFAULT_BASE;
static const int sipi_vector_location_size = SMM_DEFAULT_SIZE;

struct mp_flight_plan {
	int num_records;
	struct mp_flight_record *records;
};

static int global_num_aps;
static struct mp_flight_plan mp_info;

struct cpu_map {
	struct device *dev;
	/* Keep track of default APIC ids for SMM. */
	int default_apic_id;
};

/* Keep track of the APIC and device structure for each CPU. */
static struct cpu_map cpus[CONFIG_MAX_CPUS];

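/* CPUID leaf 1 reports the processor's initial local APIC id in
 * EBX[31:24]; that default id is what the SMM code looks up later. */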
static inline void add_cpu_map_entry(const struct cpu_info *info)
{
	cpus[info->index].dev = info->cpu;
	cpus[info->index].default_apic_id = cpuid_ebx(1) >> 24;
}

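/* Spin until the barrier is released. The PAUSE hint reduces power and
 * pipeline contention while spinning; the mfence keeps the waiter's
 * subsequent memory accesses ordered after the observed release. */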
static inline void barrier_wait(atomic_t *b)
{
	while (atomic_read(b) == 0)
		asm ("pause");
	mfence();
}

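/* Release a barrier: the mfence makes all prior stores visible before
 * waiters are allowed through. */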
static inline void release_barrier(atomic_t *b)
{
	mfence();
	atomic_set(b, 1);
}

/* Returns 1 on timeout waiting for APs, 0 once the target count checks in. */
static int wait_for_aps(atomic_t *val, int target, int total_delay,
			int delay_step)
{
	int timeout = 0;
	int delayed = 0;
	while (atomic_read(val) != target) {
		udelay(delay_step);
		delayed += delay_step;
		if (delayed >= total_delay) {
			timeout = 1;
			break;
		}
	}

	return timeout;
}

static void ap_do_flight_plan(void)
{
	int i;

	for (i = 0; i < mp_info.num_records; i++) {
		struct mp_flight_record *rec = &mp_info.records[i];

		atomic_inc(&rec->cpus_entered);
		barrier_wait(&rec->barrier);

		if (rec->ap_call != NULL)
			rec->ap_call();
	}
}

static void park_this_cpu(void)
{
	stop_this_cpu();
}

/* By the time APs call ap_init() caching has been set up and microcode has
 * been loaded. */
static void asmlinkage ap_init(unsigned int cpu)
{
	struct cpu_info *info;

	/* Ensure the local APIC is enabled. */
	enable_lapic();

	info = cpu_info();
	info->index = cpu;
	info->cpu = cpus[cpu].dev;

	add_cpu_map_entry(info);
	thread_init_cpu_info_non_bsp(info);

	/* Fix up the APIC id with reality. */
	info->cpu->path.apic.apic_id = lapicid();

	printk(BIOS_INFO, "AP: slot %d apic_id %x.\n", cpu,
		info->cpu->path.apic.apic_id);

	/* Walk the flight plan. */
	ap_do_flight_plan();

	/* Park the AP. */
	park_this_cpu();
}

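/* Populate the SIPI parameter block with the runtime GDT/IDT and stack
 * layout. struct cpu_info lives at the top of each CPU's stack, so the
 * usable stack top is lowered by sizeof(struct cpu_info). */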
static void setup_default_sipi_vector_params(struct sipi_params *sp)
{
	sp->gdt = (uint32_t)&gdt;
	sp->gdtlimit = (uint32_t)&gdt_end - (u32)&gdt - 1;
	sp->idt_ptr = (uint32_t)&idtarg;
	sp->stack_size = CONFIG_STACK_SIZE;
	sp->stack_top = (uint32_t)&_estack;
	/* Adjust the stack top to take into account cpu_info. */
	sp->stack_top -= sizeof(struct cpu_info);
}

#define NUM_FIXED_MTRRS 11
static const unsigned int fixed_mtrrs[NUM_FIXED_MTRRS] = {
	MTRR_FIX_64K_00000, MTRR_FIX_16K_80000, MTRR_FIX_16K_A0000,
	MTRR_FIX_4K_C0000, MTRR_FIX_4K_C8000, MTRR_FIX_4K_D0000,
	MTRR_FIX_4K_D8000, MTRR_FIX_4K_E0000, MTRR_FIX_4K_E8000,
	MTRR_FIX_4K_F0000, MTRR_FIX_4K_F8000,
};

static inline struct saved_msr *save_msr(int index, struct saved_msr *entry)
{
	msr_t msr;

	msr = rdmsr(index);
	entry->index = index;
	entry->lo = msr.lo;
	entry->hi = msr.hi;

	/* Return the next entry. */
	entry++;
	return entry;
}

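/* Mirror the BSP's MTRR configuration into the space following the SIPI
 * module: every fixed MTRR, a base/mask pair per variable MTRR, and
 * MTRR_DEF_TYPE, each stored as a struct saved_msr for the SIPI assembly
 * stub to replay. Returns the entry count, or -1 if the area is too
 * small. */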
static int save_bsp_msrs(char *start, int size)
{
	int msr_count;
	int num_var_mtrrs;
	struct saved_msr *msr_entry;
	int i;
	msr_t msr;

	/* Determine the number of MTRRs that need to be saved. */
	msr = rdmsr(MTRR_CAP_MSR);
	num_var_mtrrs = msr.lo & 0xff;

	/* 2 * num_var_mtrrs for base and mask. +1 for IA32_MTRR_DEF_TYPE. */
	msr_count = 2 * num_var_mtrrs + NUM_FIXED_MTRRS + 1;

	if ((msr_count * sizeof(struct saved_msr)) > size) {
		printk(BIOS_CRIT, "Cannot mirror all %d msrs.\n", msr_count);
		return -1;
	}

	fixed_mtrrs_expose_amd_rwdram();

	msr_entry = (void *)start;
	for (i = 0; i < NUM_FIXED_MTRRS; i++)
		msr_entry = save_msr(fixed_mtrrs[i], msr_entry);

	for (i = 0; i < num_var_mtrrs; i++) {
		msr_entry = save_msr(MTRR_PHYS_BASE(i), msr_entry);
		msr_entry = save_msr(MTRR_PHYS_MASK(i), msr_entry);
	}

	msr_entry = save_msr(MTRR_DEF_TYPE_MSR, msr_entry);

	fixed_mtrrs_hide_amd_rwdram();

	return msr_count;
}

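/* Parse, validate and load the SIPI rmodule at SMM_DEFAULT_BASE, append
 * the mirrored BSP MSRs right after it, and fill in the module's
 * parameter block. Returns a pointer to the module's ap_count field, or
 * NULL on failure. */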
static atomic_t *load_sipi_vector(struct mp_params *mp_params)
{
	struct rmodule sipi_mod;
	int module_size;
	int num_msrs;
	struct sipi_params *sp;
	char *mod_loc = (void *)sipi_vector_location;
	const int loc_size = sipi_vector_location_size;
	atomic_t *ap_count = NULL;

	if (rmodule_parse(&_binary_sipi_vector_start, &sipi_mod)) {
		printk(BIOS_CRIT, "Unable to parse sipi module.\n");
		return ap_count;
	}

	if (rmodule_entry_offset(&sipi_mod) != 0) {
		printk(BIOS_CRIT, "SIPI module entry offset is not 0!\n");
		return ap_count;
	}

	if (rmodule_load_alignment(&sipi_mod) != 4096) {
		printk(BIOS_CRIT, "SIPI module load alignment(%d) != 4096.\n",
			rmodule_load_alignment(&sipi_mod));
		return ap_count;
	}

	module_size = rmodule_memory_size(&sipi_mod);

	/* Align to 4 bytes. */
	module_size = ALIGN(module_size, 4);

	if (module_size > loc_size) {
		printk(BIOS_CRIT, "SIPI module size (%d) > region size (%d).\n",
			module_size, loc_size);
		return ap_count;
	}

	num_msrs = save_bsp_msrs(&mod_loc[module_size], loc_size - module_size);

	if (num_msrs < 0) {
		printk(BIOS_CRIT, "Error mirroring BSP's msrs.\n");
		return ap_count;
	}

	if (rmodule_load(mod_loc, &sipi_mod)) {
		printk(BIOS_CRIT, "Unable to load SIPI module.\n");
		return ap_count;
	}

	sp = rmodule_parameters(&sipi_mod);

	if (sp == NULL) {
		printk(BIOS_CRIT, "SIPI module has no parameters.\n");
		return ap_count;
	}

	setup_default_sipi_vector_params(sp);
	/* Set up the MSR table. */
	sp->msr_table_ptr = (uint32_t)&mod_loc[module_size];
	sp->msr_count = num_msrs;
	/* Provide pointer to microcode patch. */
	sp->microcode_ptr = (uint32_t)mp_params->microcode_pointer;
	/* Pass on the ability to load microcode in parallel. */
	if (mp_params->parallel_microcode_load)
		sp->microcode_lock = 0;
	else
		sp->microcode_lock = ~0;
	sp->c_handler = (uint32_t)&ap_init;
	ap_count = &sp->ap_count;
	atomic_set(ap_count, 0);

	return ap_count;
}

static int allocate_cpu_devices(struct bus *cpu_bus, struct mp_params *p)
{
	int i;
	int max_cpus;
	struct cpu_info *info;

	max_cpus = p->num_cpus;
	if (max_cpus > CONFIG_MAX_CPUS) {
		printk(BIOS_CRIT, "CPU count(%d) exceeds CONFIG_MAX_CPUS(%d)\n",
			max_cpus, CONFIG_MAX_CPUS);
		max_cpus = CONFIG_MAX_CPUS;
	}

	info = cpu_info();
	for (i = 1; i < max_cpus; i++) {
		struct device_path cpu_path;
		struct device *new;

		/* Build the CPU device path. */
		cpu_path.type = DEVICE_PATH_APIC;

		/* Assuming linear APIC space allocation. The AP will set its
		   own APIC id in the ap_init() path above. */
		cpu_path.apic.apic_id = info->cpu->path.apic.apic_id + i;

		/* Allocate the new CPU device structure. */
		new = alloc_find_dev(cpu_bus, &cpu_path);
		if (new == NULL) {
			printk(BIOS_CRIT, "Could not allocate CPU device\n");
			max_cpus--;
		}
		cpus[i].dev = new;
	}

	return max_cpus;
}

/* Returns 1 for timeout, 0 on success. */
static int apic_wait_timeout(int total_delay, int delay_step)
{
	int total = 0;
	int timeout = 0;

	while (lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY) {
		udelay(delay_step);
		total += delay_step;
		if (total >= total_delay) {
			timeout = 1;
			break;
		}
	}

	return timeout;
}

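/* Bring up the APs with the canonical INIT/SIPI/SIPI sequence: assert an
 * INIT IPI to all-but-self, wait 10 ms, then deliver two STARTUP IPIs
 * whose vector byte encodes the 4 KiB-aligned real-mode entry address. */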
static int start_aps(struct bus *cpu_bus, int ap_count, atomic_t *num_aps)
{
	int sipi_vector;
	/* Max location is 4KiB below 1MiB */
	const int max_vector_loc = ((1 << 20) - (1 << 12)) >> 12;

	if (ap_count == 0)
		return 0;

	/* The vector is sent as a 4KiB-aligned address encoded in one byte. */
	sipi_vector = sipi_vector_location >> 12;

	if (sipi_vector > max_vector_loc) {
		printk(BIOS_CRIT, "SIPI vector too large! 0x%08x\n",
			sipi_vector);
		return -1;
	}

	printk(BIOS_DEBUG, "Attempting to start %d APs\n", ap_count);

	if ((lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY)) {
		printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
		if (apic_wait_timeout(1000 /* 1 ms */, 50)) {
			printk(BIOS_DEBUG, "timed out. Aborting.\n");
			return -1;
		}
		printk(BIOS_DEBUG, "done.\n");
	}

	/* Send INIT IPI to all but self. */
	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
	lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
		LAPIC_DM_INIT);
	printk(BIOS_DEBUG, "Waiting for 10ms after sending INIT.\n");
	mdelay(10);

	/* Send 1st SIPI */
	if ((lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY)) {
		printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
		if (apic_wait_timeout(1000 /* 1 ms */, 50)) {
			printk(BIOS_DEBUG, "timed out. Aborting.\n");
			return -1;
		}
		printk(BIOS_DEBUG, "done.\n");
	}

	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
	lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
		LAPIC_DM_STARTUP | sipi_vector);
	printk(BIOS_DEBUG, "Waiting for 1st SIPI to complete...");
	if (apic_wait_timeout(10000 /* 10 ms */, 50 /* us */)) {
		printk(BIOS_DEBUG, "timed out.\n");
		return -1;
	}
	printk(BIOS_DEBUG, "done.\n");

	/* Wait for CPUs to check in up to 200 us. */
	wait_for_aps(num_aps, ap_count, 200 /* us */, 15 /* us */);

	/* Send 2nd SIPI */
	if ((lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY)) {
		printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
		if (apic_wait_timeout(1000 /* 1 ms */, 50)) {
			printk(BIOS_DEBUG, "timed out. Aborting.\n");
			return -1;
		}
		printk(BIOS_DEBUG, "done.\n");
	}

	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
	lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
		LAPIC_DM_STARTUP | sipi_vector);
	printk(BIOS_DEBUG, "Waiting for 2nd SIPI to complete...");
	if (apic_wait_timeout(10000 /* 10 ms */, 50 /* us */)) {
		printk(BIOS_DEBUG, "timed out.\n");
		return -1;
	}
	printk(BIOS_DEBUG, "done.\n");

	/* Wait for CPUs to check in. */
	if (wait_for_aps(num_aps, ap_count, 10000 /* 10 ms */, 50 /* us */)) {
		printk(BIOS_DEBUG, "Not all APs checked in: %d/%d.\n",
			atomic_read(num_aps), ap_count);
		return -1;
	}

	return 0;
}

static int bsp_do_flight_plan(struct mp_params *mp_params)
{
	int i;
	int ret = 0;
	/*
	 * Set the timeout for APs to check in to a large value (1 second),
	 * since check-in can take longer as the number of APs increases
	 * (contention for resources like the UART also increases).
	 */
	const int timeout_us = 1000000;
	const int step_us = 100;
	int num_aps = mp_params->num_cpus - 1;
	struct stopwatch sw;

	stopwatch_init(&sw);

	for (i = 0; i < mp_params->num_records; i++) {
		struct mp_flight_record *rec = &mp_params->flight_plan[i];

		/* Wait for APs if the record is not released. */
		if (atomic_read(&rec->barrier) == 0) {
			/* Wait for the APs to check in. */
			if (wait_for_aps(&rec->cpus_entered, num_aps,
					timeout_us, step_us)) {
				printk(BIOS_ERR, "MP record %d timeout.\n", i);
				ret = -1;
			}
		}

		if (rec->bsp_call != NULL)
			rec->bsp_call();

		release_barrier(&rec->barrier);
	}

	printk(BIOS_INFO, "%s done after %ld msecs.\n", __func__,
		stopwatch_duration_msecs(&sw));
	return ret;
}

static void init_bsp(struct bus *cpu_bus)
{
	struct device_path cpu_path;
	struct cpu_info *info;
	char processor_name[49];

	/* Print processor name. */
	fill_processor_name(processor_name);
	printk(BIOS_INFO, "CPU: %s.\n", processor_name);

	/* Ensure the local APIC is enabled. */
	enable_lapic();

	/* Set the device path of the boot CPU. */
	cpu_path.type = DEVICE_PATH_APIC;
	cpu_path.apic.apic_id = lapicid();

	/* Find the device structure for the boot CPU. */
	info = cpu_info();
	info->cpu = alloc_find_dev(cpu_bus, &cpu_path);

	if (info->index != 0)
		printk(BIOS_CRIT, "BSP index(%d) != 0!\n", info->index);

	/* Track the BSP in the cpu_map structures. */
	add_cpu_map_entry(info);
}

/*
 * mp_init() will set up the SIPI vector and bring up the APs according to
 * mp_params. Each flight record will be executed according to the plan. Note
 * that the MP infrastructure uses the SMM default area without saving it.
 * It's up to the chipset or mainboard to either e820-reserve this area or
 * save the region prior to calling mp_init() and restore it after mp_init()
 * returns.
 *
 * At the time mp_init() is called the BSP's MTRR MSRs are mirrored into the
 * APs and caching is enabled before running the flight plan.
 *
 * The MP initialization has the following properties:
 * 1. APs are brought up in parallel.
 * 2. The ordering of coreboot CPU number and APIC ids is not deterministic.
 *    Therefore, one cannot rely on this property or the order of devices in
 *    the device tree unless the chipset or mainboard knows the APIC ids
 *    a priori.
 *
 * mp_init() returns < 0 on error, 0 on success.
 */
static int mp_init(struct bus *cpu_bus, struct mp_params *p)
{
	int num_cpus;
	atomic_t *ap_count;

	init_bsp(cpu_bus);

	if (p == NULL || p->flight_plan == NULL || p->num_records < 1) {
		printk(BIOS_CRIT, "Invalid MP parameters\n");
		return -1;
	}

	/* Default to currently running CPU. */
	num_cpus = allocate_cpu_devices(cpu_bus, p);

	if (num_cpus < p->num_cpus) {
		printk(BIOS_CRIT,
			"ERROR: More cpus requested (%d) than supported (%d).\n",
			p->num_cpus, num_cpus);
		return -1;
	}

	/* Copy needed parameters so that APs have a reference to the plan. */
	mp_info.num_records = p->num_records;
	mp_info.records = p->flight_plan;

	/* Load the SIPI vector. */
	ap_count = load_sipi_vector(p);
	if (ap_count == NULL)
		return -1;

	/* Make sure SIPI data hits RAM so the APs that come up will see
	 * the startup code even if the caches are disabled. */
	wbinvd();

	/* Start the APs providing number of APs and the cpus_entered field. */
	global_num_aps = p->num_cpus - 1;
	if (start_aps(cpu_bus, global_num_aps, ap_count) < 0) {
		mdelay(1000);
		printk(BIOS_DEBUG, "%d/%d eventually checked in?\n",
			atomic_read(ap_count), global_num_aps);
		return -1;
	}

	/* Walk the flight plan for the BSP. */
	return bsp_do_flight_plan(p);
}

/* Calls cpu_initialize(info->index), which calls the coreboot CPU drivers. */
static void mp_initialize_cpu(void)
{
	/* Call back into driver infrastructure for the AP initialization. */
	struct cpu_info *info = cpu_info();
	cpu_initialize(info->index);
}

/* Returns the APIC id for the coreboot CPU number or < 0 on failure. */
static int mp_get_apic_id(int cpu_slot)
{
	if (cpu_slot >= CONFIG_MAX_CPUS || cpu_slot < 0)
		return -1;

	return cpus[cpu_slot].default_apic_id;
}

void smm_initiate_relocation_parallel(void)
{
	if ((lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY)) {
		printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
		if (apic_wait_timeout(1000 /* 1 ms */, 50)) {
			printk(BIOS_DEBUG, "timed out. Aborting.\n");
			return;
		}
		printk(BIOS_DEBUG, "done.\n");
	}

	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(lapicid()));
	lapic_write_around(LAPIC_ICR, LAPIC_INT_ASSERT | LAPIC_DM_SMI);
	if (apic_wait_timeout(1000 /* 1 ms */, 100 /* us */))
		printk(BIOS_DEBUG, "SMI Relocation timed out.\n");
	else
		printk(BIOS_DEBUG, "Relocation complete.\n");
}

DECLARE_SPIN_LOCK(smm_relocation_lock);

/* Send SMI to self with single user serialization. */
void smm_initiate_relocation(void)
{
	spin_lock(&smm_relocation_lock);
	smm_initiate_relocation_parallel();
	spin_unlock(&smm_relocation_lock);
}

struct mp_state {
	struct mp_ops ops;
	int cpu_count;
	uintptr_t perm_smbase;
	size_t perm_smsize;
	size_t smm_save_state_size;
	int do_smm;
} mp_state;

static int is_smm_enabled(void)
{
	return IS_ENABLED(CONFIG_HAVE_SMI_HANDLER) && mp_state.do_smm;
}

static void smm_disable(void)
{
	mp_state.do_smm = 0;
}

static void smm_enable(void)
{
	if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER))
		mp_state.do_smm = 1;
}

static void asmlinkage smm_do_relocation(void *arg)
{
	const struct smm_module_params *p;
	const struct smm_runtime *runtime;
	int cpu;
	uintptr_t curr_smbase;
	uintptr_t perm_smbase;

	p = arg;
	runtime = p->runtime;
	cpu = p->cpu;
	curr_smbase = runtime->smbase;

	if (cpu >= CONFIG_MAX_CPUS) {
		printk(BIOS_CRIT,
			"Invalid CPU number assigned in SMM stub: %d\n", cpu);
		return;
	}

	/*
	 * The permanent handler runs with all cpus concurrently. Precalculate
	 * the location of the new SMBASE. If using SMM modules then this
	 * calculation needs to match that of the module loader.
	 */
	perm_smbase = mp_state.perm_smbase;
	perm_smbase -= cpu * runtime->save_state_size;

	printk(BIOS_DEBUG, "New SMBASE 0x%08lx\n", perm_smbase);

	/* Setup code checks this callback for validity. */
	mp_state.ops.relocation_handler(cpu, curr_smbase, perm_smbase);
}

static void adjust_smm_apic_id_map(struct smm_loader_params *smm_params)
{
	int i;
	struct smm_runtime *runtime = smm_params->runtime;

	for (i = 0; i < CONFIG_MAX_CPUS; i++)
		runtime->apic_id_to_cpu[i] = mp_get_apic_id(i);
}

static int install_relocation_handler(int num_cpus, size_t save_state_size)
{
	struct smm_loader_params smm_params = {
		.per_cpu_stack_size = save_state_size,
		.num_concurrent_stacks = num_cpus,
		.per_cpu_save_state_size = save_state_size,
		.num_concurrent_save_states = 1,
		.handler = smm_do_relocation,
	};

	/* Allow callback to override parameters. */
	if (mp_state.ops.adjust_smm_params != NULL)
		mp_state.ops.adjust_smm_params(&smm_params, 0);

	if (smm_setup_relocation_handler(&smm_params))
		return -1;

	adjust_smm_apic_id_map(&smm_params);

	return 0;
}

static int install_permanent_handler(int num_cpus, uintptr_t smbase,
					size_t smsize, size_t save_state_size)
{
	/* There are num_cpus concurrent stacks and num_cpus concurrent save
	 * state areas. Lastly, set the stack size to 1KiB. */
	struct smm_loader_params smm_params = {
		.per_cpu_stack_size = 1 * KiB,
		.num_concurrent_stacks = num_cpus,
		.per_cpu_save_state_size = save_state_size,
		.num_concurrent_save_states = num_cpus,
	};

	/* Allow callback to override parameters. */
	if (mp_state.ops.adjust_smm_params != NULL)
		mp_state.ops.adjust_smm_params(&smm_params, 1);

	printk(BIOS_DEBUG, "Installing SMM handler to 0x%08lx\n", smbase);

	if (smm_load_module((void *)smbase, smsize, &smm_params))
		return -1;

	adjust_smm_apic_id_map(&smm_params);

	return 0;
}

/* Load SMM handlers as part of MP flight record. */
static void load_smm_handlers(void)
{
	size_t smm_save_state_size = mp_state.smm_save_state_size;

	/* Do nothing if SMM is disabled. */
	if (!is_smm_enabled())
		return;

	/* Install handlers. */
	if (install_relocation_handler(mp_state.cpu_count,
		smm_save_state_size) < 0) {
		printk(BIOS_ERR, "Unable to install SMM relocation handler.\n");
		smm_disable();
	}

	if (install_permanent_handler(mp_state.cpu_count, mp_state.perm_smbase,
		mp_state.perm_smsize, smm_save_state_size) < 0) {
		printk(BIOS_ERR, "Unable to install SMM permanent handler.\n");
		smm_disable();
	}

	/* Ensure the SMM handlers hit DRAM before performing first SMI. */
	wbinvd();

	/*
	 * Indicate that the SMM handlers have been loaded and MP
	 * initialization is about to start.
	 */
	if (is_smm_enabled() && mp_state.ops.pre_mp_smm_init != NULL)
		mp_state.ops.pre_mp_smm_init();
}

/* Trigger SMM as part of MP flight record. */
static void trigger_smm_relocation(void)
{
	/* Do nothing if SMM is disabled. */
	if (!is_smm_enabled() || mp_state.ops.per_cpu_smm_trigger == NULL)
		return;
	/* Trigger SMM mode for the currently running processor. */
	mp_state.ops.per_cpu_smm_trigger();
}

static mp_callback_t ap_callbacks[CONFIG_MAX_CPUS];

static mp_callback_t read_callback(mp_callback_t *slot)
{
	return *(volatile mp_callback_t *)slot;
}

static void store_callback(mp_callback_t *slot, mp_callback_t value)
{
	*(volatile mp_callback_t *)slot = value;
}

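/* Each AP polls its per-CPU slot in ap_callbacks[]; the BSP posts func
 * to every other slot and waits for each AP to acknowledge by storing
 * NULL back. An expire_us <= 0 means wait indefinitely. */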
static int run_ap_work(mp_callback_t func, long expire_us)
{
	int i;
	int cpus_accepted;
	struct stopwatch sw;
	int cur_cpu = cpu_index();

	if (!IS_ENABLED(CONFIG_PARALLEL_MP_AP_WORK)) {
		printk(BIOS_ERR,
			"APs already parked. PARALLEL_MP_AP_WORK not selected.\n");
		return -1;
	}

	/* Signal to all the APs to run the func. */
	for (i = 0; i < ARRAY_SIZE(ap_callbacks); i++) {
		if (cur_cpu == i)
			continue;
		store_callback(&ap_callbacks[i], func);
	}
	mfence();

	/* Wait for all the APs to signal back that the call was accepted. */
	if (expire_us > 0)
		stopwatch_init_usecs_expire(&sw, expire_us);

	do {
		cpus_accepted = 0;

		for (i = 0; i < ARRAY_SIZE(ap_callbacks); i++) {
			if (cur_cpu == i)
				continue;
			if (read_callback(&ap_callbacks[i]) == NULL)
				cpus_accepted++;
		}

		if (cpus_accepted == global_num_aps)
			return 0;
	} while (expire_us <= 0 || !stopwatch_expired(&sw));

	printk(BIOS_ERR, "AP call expired. %d/%d CPUs accepted.\n",
		cpus_accepted, global_num_aps);
	return -1;
}

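/* Final flight-plan stage for the APs when PARALLEL_MP_AP_WORK is
 * enabled: loop forever, accepting one callback at a time. The slot is
 * cleared before func is invoked so the BSP can observe acceptance. */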
static void ap_wait_for_instruction(void)
{
	int cur_cpu = cpu_index();

	if (!IS_ENABLED(CONFIG_PARALLEL_MP_AP_WORK))
		return;

	while (1) {
		mp_callback_t func = read_callback(&ap_callbacks[cur_cpu]);

		if (func == NULL) {
			asm ("pause");
			continue;
		}

		store_callback(&ap_callbacks[cur_cpu], NULL);
		mfence();
		func();
	}
}

int mp_run_on_aps(void (*func)(void), long expire_us)
{
	return run_ap_work(func, expire_us);
}

int mp_run_on_all_cpus(void (*func)(void), long expire_us)
{
	/* Run on BSP first. */
	func();
	return mp_run_on_aps(func, expire_us);
}

int mp_park_aps(void)
{
	struct stopwatch sw;
	int ret;
	long duration_msecs;

	stopwatch_init(&sw);

	ret = mp_run_on_aps(park_this_cpu, 250 * USECS_PER_MSEC);

	duration_msecs = stopwatch_duration_msecs(&sw);

	if (!ret)
		printk(BIOS_DEBUG, "%s done after %ld msecs.\n", __func__,
			duration_msecs);
	else
		printk(BIOS_ERR, "%s failed after %ld msecs.\n", __func__,
			duration_msecs);

	return ret;
}

static struct mp_flight_record mp_steps[] = {
	/* Once the APs are up load the SMM handlers. */
	MP_FR_BLOCK_APS(NULL, load_smm_handlers),
	/* Perform SMM relocation. */
	MP_FR_NOBLOCK_APS(trigger_smm_relocation, trigger_smm_relocation),
	/* Initialize each CPU through the driver framework. */
	MP_FR_BLOCK_APS(mp_initialize_cpu, mp_initialize_cpu),
	/* Wait for APs to finish then optionally start looking for work. */
	MP_FR_BLOCK_APS(ap_wait_for_instruction, NULL),
};

static void fill_mp_state(struct mp_state *state, const struct mp_ops *ops)
{
	/*
	 * Make a copy of the ops so that defaults can be set in the non-const
	 * structure if needed.
	 */
	memcpy(&state->ops, ops, sizeof(*ops));

	if (ops->get_cpu_count != NULL)
		state->cpu_count = ops->get_cpu_count();

	if (ops->get_smm_info != NULL)
		ops->get_smm_info(&state->perm_smbase, &state->perm_smsize,
			&state->smm_save_state_size);

	/*
	 * Default to smm_initiate_relocation() if the trigger callback isn't
	 * provided.
	 */
	if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER) &&
		ops->per_cpu_smm_trigger == NULL)
		mp_state.ops.per_cpu_smm_trigger = smm_initiate_relocation;
}

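/*
 * For illustration only: a minimal, hypothetical mp_ops that a platform
 * without SMM might pass in. get_cpu_count is the only callback required
 * by the sanity checks below; the SMM-related callbacks may stay NULL.
 *
 *	static const struct mp_ops mp_ops_no_smm = {
 *		.get_cpu_count = get_cpu_count_cb,  <- hypothetical callback
 *	};
 *	...
 *	if (mp_init_with_smm(cpu_bus, &mp_ops_no_smm) < 0)
 *		printk(BIOS_ERR, "MP initialization failure.\n");
 */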
int mp_init_with_smm(struct bus *cpu_bus, const struct mp_ops *mp_ops)
{
	int ret;
	void *default_smm_area;
	struct mp_params mp_params;

	if (mp_ops->pre_mp_init != NULL)
		mp_ops->pre_mp_init();

	fill_mp_state(&mp_state, mp_ops);

	memset(&mp_params, 0, sizeof(mp_params));

	if (mp_state.cpu_count <= 0) {
		printk(BIOS_ERR, "Invalid cpu_count: %d\n", mp_state.cpu_count);
		return -1;
	}

	/* Sanity check SMM state. */
	if (mp_state.perm_smsize != 0 && mp_state.smm_save_state_size != 0 &&
		mp_state.ops.relocation_handler != NULL)
		smm_enable();

	if (is_smm_enabled())
		printk(BIOS_INFO, "Will perform SMM setup.\n");

	mp_params.num_cpus = mp_state.cpu_count;
	/* Gather microcode information. */
	if (mp_state.ops.get_microcode_info != NULL)
		mp_state.ops.get_microcode_info(&mp_params.microcode_pointer,
			&mp_params.parallel_microcode_load);
	mp_params.flight_plan = &mp_steps[0];
	mp_params.num_records = ARRAY_SIZE(mp_steps);

	/* Perform backup of default SMM area. */
	default_smm_area = backup_default_smm_area();

	ret = mp_init(cpu_bus, &mp_params);

	restore_default_smm_area(default_smm_area);

	/* Signal callback on success if it's provided. */
	if (ret == 0 && mp_state.ops.post_mp_init != NULL)
		mp_state.ops.post_mp_init();

	return ret;
}