/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of
 * the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <console/console.h>
#include <stdint.h>
#include <string.h>
#include <rmodule.h>
#include <arch/cpu.h>
#include <commonlib/helpers.h>
#include <cpu/cpu.h>
#include <cpu/intel/microcode.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/gdt.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/name.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <cpu/x86/mp.h>
#include <delay.h>
#include <device/device.h>
#include <device/path.h>
#include <smp/atomic.h>
#include <smp/spinlock.h>
#include <symbols.h>
#include <thread.h>

#define MAX_APIC_IDS 256

struct mp_callback {
	void (*func)(void *);
	void *arg;
	int logical_cpu_number;
};

static char processor_name[49];

/*
 * A mp_flight_record details a sequence of calls for the APs to perform
 * along with the BSP to coordinate sequencing. Each flight record either
 * provides a barrier for each AP before calling the callback or the APs
 * are allowed to perform the callback without waiting. Regardless, each
 * AP increments the cpus_entered field as it enters the record. When
 * the BSP observes that cpus_entered matches the number of APs,
 * bsp_call is invoked and, upon returning, releases the barrier
 * allowing the APs to make further progress.
 *
 * Note that ap_call() and bsp_call() can be NULL. In the NULL case the
 * callback will just not be called.
 */
struct mp_flight_record {
	atomic_t barrier;
	atomic_t cpus_entered;
	void (*ap_call)(void);
	void (*bsp_call)(void);
} __aligned(CACHELINE_SIZE);

#define _MP_FLIGHT_RECORD(barrier_, ap_func_, bsp_func_) \
	{						\
		.barrier = ATOMIC_INIT(barrier_),	\
		.cpus_entered = ATOMIC_INIT(0),		\
		.ap_call = ap_func_,			\
		.bsp_call = bsp_func_,			\
	}

#define MP_FR_BLOCK_APS(ap_func_, bsp_func_) \
	_MP_FLIGHT_RECORD(0, ap_func_, bsp_func_)

#define MP_FR_NOBLOCK_APS(ap_func_, bsp_func_) \
	_MP_FLIGHT_RECORD(1, ap_func_, bsp_func_)

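/*
 * For illustration only: a hypothetical two-stage flight plan built with
 * the macros above might look like the following (callback names are made
 * up; the real plan used by this file is mp_steps[] further down).
 *
 *	static struct mp_flight_record example_plan[] = {
 *		// APs block on the barrier; the BSP runs bsp_work() once
 *		// all APs have incremented cpus_entered, then releases them.
 *		MP_FR_BLOCK_APS(NULL, bsp_work),
 *		// Barrier starts released; APs run ap_work() immediately.
 *		MP_FR_NOBLOCK_APS(ap_work, NULL),
 *	};
 */
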
/* The mp_params structure provides the arguments to the mp subsystem
 * for bringing up APs. */
struct mp_params {
	int num_cpus;		/* Total number of CPUs including BSP */
	int parallel_microcode_load;
	const void *microcode_pointer;
	/* Flight plan for APs and BSP. */
	struct mp_flight_record *flight_plan;
	int num_records;
};

/* This needs to match the layout in the .module_parameters section. */
struct sipi_params {
	uint16_t gdtlimit;
	uint32_t gdt;
	uint16_t unused;
	uint32_t idt_ptr;
	uint32_t stack_top;
	uint32_t stack_size;
	uint32_t microcode_lock; /* 0xffffffff means parallel loading. */
	uint32_t microcode_ptr;
	uint32_t msr_table_ptr;
	uint32_t msr_count;
	uint32_t c_handler;
	atomic_t ap_count;
} __packed;

/* This also needs to match the assembly code for saved MSR encoding. */
struct saved_msr {
	uint32_t index;
	uint32_t lo;
	uint32_t hi;
} __packed;

/* The SIPI vector rmodule is included in the ramstage using 'objcopy -B'. */
extern char _binary_sipi_vector_start[];

/* The SIPI vector is loaded at the SMM_DEFAULT_BASE. The reason is that the
 * memory range is already reserved so the OS cannot use it. That region is
 * free to use for AP bringup before SMM is initialized. */
static const uint32_t sipi_vector_location = SMM_DEFAULT_BASE;
static const int sipi_vector_location_size = SMM_DEFAULT_SIZE;

struct mp_flight_plan {
	int num_records;
	struct mp_flight_record *records;
};

static int global_num_aps;
static struct mp_flight_plan mp_info;

struct cpu_map {
	struct device *dev;
	/* Keep track of default APIC ids for SMM. */
	int default_apic_id;
};

/* Keep track of APIC and device structure for each CPU. */
static struct cpu_map cpus[CONFIG_MAX_CPUS];

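/* Record the device and the initial APIC id reported by CPUID leaf 1
   (EBX bits 31:24) for the CPU slot being brought up. */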
static inline void add_cpu_map_entry(const struct cpu_info *info)
{
	cpus[info->index].dev = info->cpu;
	cpus[info->index].default_apic_id = cpuid_ebx(1) >> 24;
}

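/*
 * Simple one-shot barrier: APs spin in barrier_wait() until the BSP
 * flips the flag with release_barrier(). The mfence() on each side
 * orders the flag access against surrounding memory operations.
 */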
static inline void barrier_wait(atomic_t *b)
{
	while (atomic_read(b) == 0)
		asm ("pause");
	mfence();
}

static inline void release_barrier(atomic_t *b)
{
	mfence();
	atomic_set(b, 1);
}

/* Returns 1 if timeout waiting for APs. 0 if target APs found. */
static int wait_for_aps(atomic_t *val, int target, int total_delay,
			int delay_step)
{
	int timeout = 0;
	int delayed = 0;
	while (atomic_read(val) != target) {
		udelay(delay_step);
		delayed += delay_step;
		if (delayed >= total_delay) {
			timeout = 1;
			break;
		}
	}

	return timeout;
}

static void ap_do_flight_plan(void)
{
	int i;

	for (i = 0; i < mp_info.num_records; i++) {
		struct mp_flight_record *rec = &mp_info.records[i];

		atomic_inc(&rec->cpus_entered);
		barrier_wait(&rec->barrier);

		if (rec->ap_call != NULL)
			rec->ap_call();
	}
}

static void park_this_cpu(void *unused)
{
	stop_this_cpu();
}

/* By the time APs call ap_init() caching has been set up, and microcode has
 * been loaded. */
static void asmlinkage ap_init(unsigned int cpu)
{
	struct cpu_info *info;

	/* Ensure the local APIC is enabled */
	enable_lapic();

	info = cpu_info();
	info->index = cpu;
	info->cpu = cpus[cpu].dev;

	add_cpu_map_entry(info);
	thread_init_cpu_info_non_bsp(info);

	/* Fix up APIC id with reality. */
	info->cpu->path.apic.apic_id = lapicid();

	printk(BIOS_INFO, "AP: slot %d apic_id %x.\n", cpu,
	       info->cpu->path.apic.apic_id);

	/* Walk the flight plan */
	ap_do_flight_plan();

	/* Park the AP. */
	park_this_cpu(NULL);
}

static void setup_default_sipi_vector_params(struct sipi_params *sp)
{
	sp->gdt = (uintptr_t)&gdt;
	sp->gdtlimit = (uintptr_t)&gdt_end - (uintptr_t)&gdt - 1;
	sp->idt_ptr = (uintptr_t)&idtarg;
	sp->stack_size = CONFIG_STACK_SIZE;
	sp->stack_top = ALIGN_DOWN((uintptr_t)&_estack, CONFIG_STACK_SIZE);
	/* Adjust the stack top to take into account cpu_info. */
	sp->stack_top -= sizeof(struct cpu_info);
}
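
/*
 * A sketch of how the SIPI stub is expected to carve per-AP stacks out of
 * the region below stack_top (assuming the stub assigns each AP a
 * cpu_number as it checks in):
 *
 *	ap_stack_top = stack_top - cpu_number * stack_size;
 *
 * The sizeof(struct cpu_info) adjustment above leaves room for the
 * struct cpu_info that lives at the top of each CPU's stack.
 */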

#define NUM_FIXED_MTRRS 11
static const unsigned int fixed_mtrrs[NUM_FIXED_MTRRS] = {
	MTRR_FIX_64K_00000, MTRR_FIX_16K_80000, MTRR_FIX_16K_A0000,
	MTRR_FIX_4K_C0000, MTRR_FIX_4K_C8000, MTRR_FIX_4K_D0000,
	MTRR_FIX_4K_D8000, MTRR_FIX_4K_E0000, MTRR_FIX_4K_E8000,
	MTRR_FIX_4K_F0000, MTRR_FIX_4K_F8000,
};

static inline struct saved_msr *save_msr(int index, struct saved_msr *entry)
{
	msr_t msr;

	msr = rdmsr(index);
	entry->index = index;
	entry->lo = msr.lo;
	entry->hi = msr.hi;

	/* Return the next entry. */
	entry++;
	return entry;
}

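/*
 * Mirror the BSP's MTRR configuration into a flat table of saved_msr
 * entries at 'start'. The resulting layout, consumed by the SIPI stub,
 * is: the fixed-range MTRRs, then a {PHYS_BASE, PHYS_MASK} pair for each
 * variable-range MTRR, then MTRR_DEF_TYPE_MSR.
 */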
static int save_bsp_msrs(char *start, int size)
{
	int msr_count;
	int num_var_mtrrs;
	struct saved_msr *msr_entry;
	int i;
	msr_t msr;

	/* Determine the number of MTRRs that need to be saved. */
	msr = rdmsr(MTRR_CAP_MSR);
	num_var_mtrrs = msr.lo & 0xff;

	/* 2 * num_var_mtrrs for base and mask. +1 for IA32_MTRR_DEF_TYPE. */
	msr_count = 2 * num_var_mtrrs + NUM_FIXED_MTRRS + 1;

	if ((msr_count * sizeof(struct saved_msr)) > size) {
		printk(BIOS_CRIT, "Cannot mirror all %d msrs.\n", msr_count);
		return -1;
	}

	fixed_mtrrs_expose_amd_rwdram();

	msr_entry = (void *)start;
	for (i = 0; i < NUM_FIXED_MTRRS; i++)
		msr_entry = save_msr(fixed_mtrrs[i], msr_entry);

	for (i = 0; i < num_var_mtrrs; i++) {
		msr_entry = save_msr(MTRR_PHYS_BASE(i), msr_entry);
		msr_entry = save_msr(MTRR_PHYS_MASK(i), msr_entry);
	}

	msr_entry = save_msr(MTRR_DEF_TYPE_MSR, msr_entry);

	fixed_mtrrs_hide_amd_rwdram();

	/* Tell static analysis we know the value is left unused. */
	(void)msr_entry;

	return msr_count;
}

static atomic_t *load_sipi_vector(struct mp_params *mp_params)
{
	struct rmodule sipi_mod;
	int module_size;
	int num_msrs;
	struct sipi_params *sp;
	char *mod_loc = (void *)sipi_vector_location;
	const int loc_size = sipi_vector_location_size;
	atomic_t *ap_count = NULL;

	if (rmodule_parse(&_binary_sipi_vector_start, &sipi_mod)) {
		printk(BIOS_CRIT, "Unable to parse sipi module.\n");
		return ap_count;
	}

	if (rmodule_entry_offset(&sipi_mod) != 0) {
		printk(BIOS_CRIT, "SIPI module entry offset is not 0!\n");
		return ap_count;
	}

	if (rmodule_load_alignment(&sipi_mod) != 4096) {
		printk(BIOS_CRIT, "SIPI module load alignment(%d) != 4096.\n",
		       rmodule_load_alignment(&sipi_mod));
		return ap_count;
	}

	module_size = rmodule_memory_size(&sipi_mod);

	/* Align to 4 bytes. */
	module_size = ALIGN(module_size, 4);

	if (module_size > loc_size) {
		printk(BIOS_CRIT, "SIPI module size (%d) > region size (%d).\n",
		       module_size, loc_size);
		return ap_count;
	}

	num_msrs = save_bsp_msrs(&mod_loc[module_size], loc_size - module_size);

	if (num_msrs < 0) {
		printk(BIOS_CRIT, "Error mirroring BSP's msrs.\n");
		return ap_count;
	}

	if (rmodule_load(mod_loc, &sipi_mod)) {
		printk(BIOS_CRIT, "Unable to load SIPI module.\n");
		return ap_count;
	}

	sp = rmodule_parameters(&sipi_mod);

	if (sp == NULL) {
		printk(BIOS_CRIT, "SIPI module has no parameters.\n");
		return ap_count;
	}

	setup_default_sipi_vector_params(sp);
	/* Set up the MSR table. */
	sp->msr_table_ptr = (uint32_t)&mod_loc[module_size];
	sp->msr_count = num_msrs;
	/* Provide pointer to microcode patch. */
	sp->microcode_ptr = (uint32_t)mp_params->microcode_pointer;
	/* Pass on ability to load microcode in parallel. */
	if (mp_params->parallel_microcode_load)
		sp->microcode_lock = 0;
	else
		sp->microcode_lock = ~0;
	sp->c_handler = (uint32_t)&ap_init;
	ap_count = &sp->ap_count;
	atomic_set(ap_count, 0);

	return ap_count;
}

static int allocate_cpu_devices(struct bus *cpu_bus, struct mp_params *p)
{
	int i;
	int max_cpus;
	struct cpu_info *info;

	max_cpus = p->num_cpus;
	if (max_cpus > CONFIG_MAX_CPUS) {
		printk(BIOS_CRIT, "CPU count(%d) exceeds CONFIG_MAX_CPUS(%d)\n",
		       max_cpus, CONFIG_MAX_CPUS);
		max_cpus = CONFIG_MAX_CPUS;
	}

	info = cpu_info();
	for (i = 1; i < max_cpus; i++) {
		struct device_path cpu_path;
		struct device *new;

		/* Build the CPU device path */
		cpu_path.type = DEVICE_PATH_APIC;

		/* Assuming linear APIC space allocation. AP will set its own
		   APIC id in the ap_init() path above. */
		cpu_path.apic.apic_id = info->cpu->path.apic.apic_id + i;

		/* Allocate the new CPU device structure */
		new = alloc_find_dev(cpu_bus, &cpu_path);
		if (new == NULL) {
			printk(BIOS_CRIT, "Could not allocate CPU device\n");
			max_cpus--;
			continue;
		}
		new->name = processor_name;
		cpus[i].dev = new;
	}

	return max_cpus;
}

/* Returns 1 for timeout. 0 on success. */
static int apic_wait_timeout(int total_delay, int delay_step)
{
	int total = 0;
	int timeout = 0;

	while (lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY) {
		udelay(delay_step);
		total += delay_step;
		if (total >= total_delay) {
			timeout = 1;
			break;
		}
	}

	return timeout;
}

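/*
 * Bring up the APs with the classic INIT-SIPI-SIPI sequence: assert INIT
 * to all-but-self, wait 10 ms, then send two STARTUP IPIs carrying the
 * 4KiB-aligned SIPI vector page number, polling the ICR and the AP
 * check-in count in between.
 */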
static int start_aps(struct bus *cpu_bus, int ap_count, atomic_t *num_aps)
{
	int sipi_vector;
	/* Max location is 4KiB below 1MiB */
	const int max_vector_loc = ((1 << 20) - (1 << 12)) >> 12;

	if (ap_count == 0)
		return 0;

	/* The vector is sent as a 4k aligned address in one byte. */
	sipi_vector = sipi_vector_location >> 12;

	if (sipi_vector > max_vector_loc) {
		printk(BIOS_CRIT, "SIPI vector too large! 0x%08x\n",
		       sipi_vector);
		return -1;
	}

	printk(BIOS_DEBUG, "Attempting to start %d APs\n", ap_count);

	if ((lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY)) {
		printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
		if (apic_wait_timeout(1000 /* 1 ms */, 50)) {
			printk(BIOS_DEBUG, "timed out. Aborting.\n");
			return -1;
		}
		printk(BIOS_DEBUG, "done.\n");
	}

	/* Send INIT IPI to all but self. */
	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
	lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
			   LAPIC_DM_INIT);
	printk(BIOS_DEBUG, "Waiting for 10ms after sending INIT.\n");
	mdelay(10);

	/* Send 1st SIPI */
	if ((lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY)) {
		printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
		if (apic_wait_timeout(1000 /* 1 ms */, 50)) {
			printk(BIOS_DEBUG, "timed out. Aborting.\n");
			return -1;
		}
		printk(BIOS_DEBUG, "done.\n");
	}

	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
	lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
			   LAPIC_DM_STARTUP | sipi_vector);
	printk(BIOS_DEBUG, "Waiting for 1st SIPI to complete...");
	if (apic_wait_timeout(10000 /* 10 ms */, 50 /* us */)) {
		printk(BIOS_DEBUG, "timed out.\n");
		return -1;
	}
	printk(BIOS_DEBUG, "done.\n");

	/* Wait for CPUs to check in up to 200 us. */
	wait_for_aps(num_aps, ap_count, 200 /* us */, 15 /* us */);

	/* Send 2nd SIPI */
	if ((lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY)) {
		printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
		if (apic_wait_timeout(1000 /* 1 ms */, 50)) {
			printk(BIOS_DEBUG, "timed out. Aborting.\n");
			return -1;
		}
		printk(BIOS_DEBUG, "done.\n");
	}

	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
	lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
			   LAPIC_DM_STARTUP | sipi_vector);
	printk(BIOS_DEBUG, "Waiting for 2nd SIPI to complete...");
	if (apic_wait_timeout(10000 /* 10 ms */, 50 /* us */)) {
		printk(BIOS_DEBUG, "timed out.\n");
		return -1;
	}
	printk(BIOS_DEBUG, "done.\n");

	/* Wait for CPUs to check in. */
	if (wait_for_aps(num_aps, ap_count, 10000 /* 10 ms */, 50 /* us */)) {
		printk(BIOS_DEBUG, "Not all APs checked in: %d/%d.\n",
		       atomic_read(num_aps), ap_count);
		return -1;
	}

	return 0;
}

static int bsp_do_flight_plan(struct mp_params *mp_params)
{
	int i;
	int ret = 0;
	/*
	 * Set the timeout for APs to check in to a large value (1 second)
	 * since it can take longer for APs to check in as the number of APs
	 * increases (contention for resources like the UART also increases).
	 */
	const int timeout_us = 1000000;
	const int step_us = 100;
	int num_aps = mp_params->num_cpus - 1;
	struct stopwatch sw;

	stopwatch_init(&sw);

	for (i = 0; i < mp_params->num_records; i++) {
		struct mp_flight_record *rec = &mp_params->flight_plan[i];

		/* Wait for APs if the record is not released. */
		if (atomic_read(&rec->barrier) == 0) {
			/* Wait for the APs to check in. */
			if (wait_for_aps(&rec->cpus_entered, num_aps,
					 timeout_us, step_us)) {
				printk(BIOS_ERR, "MP record %d timeout.\n", i);
				ret = -1;
			}
		}

		if (rec->bsp_call != NULL)
			rec->bsp_call();

		release_barrier(&rec->barrier);
	}

	printk(BIOS_INFO, "%s done after %ld msecs.\n", __func__,
	       stopwatch_duration_msecs(&sw));
	return ret;
}

static void init_bsp(struct bus *cpu_bus)
{
	struct device_path cpu_path;
	struct cpu_info *info;

	/* Print processor name */
	fill_processor_name(processor_name);
	printk(BIOS_INFO, "CPU: %s.\n", processor_name);

	/* Ensure the local APIC is enabled */
	enable_lapic();

	/* Set the device path of the boot CPU. */
	cpu_path.type = DEVICE_PATH_APIC;
	cpu_path.apic.apic_id = lapicid();

	/* Find the device structure for the boot CPU. */
	info = cpu_info();
	info->cpu = alloc_find_dev(cpu_bus, &cpu_path);
	info->cpu->name = processor_name;

	if (info->index != 0)
		printk(BIOS_CRIT, "BSP index(%d) != 0!\n", info->index);

	/* Track BSP in cpu_map structures. */
	add_cpu_map_entry(info);
}

/*
 * mp_init() will set up the SIPI vector and bring up the APs according to
 * mp_params. Each flight record will be executed according to the plan. Note
 * that the MP infrastructure uses the SMM default area without saving it.
 * It's up to the chipset or mainboard to either e820 reserve this area or
 * save this region prior to calling mp_init() and restore it after
 * mp_init() returns.
 *
 * At the time mp_init() is called the BSP's MTRR MSRs are mirrored into the
 * APs and caching is enabled before running the flight plan.
 *
 * The MP initialization has the following properties:
 * 1. APs are brought up in parallel.
 * 2. The ordering of coreboot CPU number and APIC ids is not deterministic.
 *    Therefore, one cannot rely on this property or the order of devices in
 *    the device tree unless the chipset or mainboard know the APIC ids
 *    a priori.
 *
 * mp_init() returns < 0 on error, 0 on success.
 */
static int mp_init(struct bus *cpu_bus, struct mp_params *p)
{
	int num_cpus;
	atomic_t *ap_count;

	init_bsp(cpu_bus);

	if (p == NULL || p->flight_plan == NULL || p->num_records < 1) {
		printk(BIOS_CRIT, "Invalid MP parameters\n");
		return -1;
	}

	/* Default to currently running CPU. */
	num_cpus = allocate_cpu_devices(cpu_bus, p);

	if (num_cpus < p->num_cpus) {
		printk(BIOS_CRIT,
		       "ERROR: More cpus requested (%d) than supported (%d).\n",
		       p->num_cpus, num_cpus);
		return -1;
	}

	/* Copy needed parameters so that APs have a reference to the plan. */
	mp_info.num_records = p->num_records;
	mp_info.records = p->flight_plan;

	/* Load the SIPI vector. */
	ap_count = load_sipi_vector(p);
	if (ap_count == NULL)
		return -1;

	/* Make sure SIPI data hits RAM so the APs that come up will see
	 * the startup code even if the caches are disabled. */
	wbinvd();

	/* Start the APs providing number of APs and the cpus_entered field. */
	global_num_aps = p->num_cpus - 1;
	if (start_aps(cpu_bus, global_num_aps, ap_count) < 0) {
		mdelay(1000);
		printk(BIOS_DEBUG, "%d/%d eventually checked in?\n",
		       atomic_read(ap_count), global_num_aps);
		return -1;
	}

	/* Walk the flight plan for the BSP. */
	return bsp_do_flight_plan(p);
}

/* Calls cpu_initialize(info->index) which calls the coreboot CPU drivers. */
static void mp_initialize_cpu(void)
{
	/* Call back into driver infrastructure for the AP initialization. */
	struct cpu_info *info = cpu_info();
	cpu_initialize(info->index);
}

/* Returns APIC id for coreboot CPU number or < 0 on failure. */
int mp_get_apic_id(int logical_cpu)
{
	if (logical_cpu >= CONFIG_MAX_CPUS || logical_cpu < 0)
		return -1;

	return cpus[logical_cpu].default_apic_id;
}

void smm_initiate_relocation_parallel(void)
{
	if ((lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY)) {
		printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
		if (apic_wait_timeout(1000 /* 1 ms */, 50)) {
			printk(BIOS_DEBUG, "timed out. Aborting.\n");
			return;
		}
		printk(BIOS_DEBUG, "done.\n");
	}

	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(lapicid()));
	lapic_write_around(LAPIC_ICR, LAPIC_INT_ASSERT | LAPIC_DM_SMI);
	if (apic_wait_timeout(1000 /* 1 ms */, 100 /* us */))
		printk(BIOS_DEBUG, "SMI Relocation timed out.\n");
	else
		printk(BIOS_DEBUG, "Relocation complete.\n");
}

DECLARE_SPIN_LOCK(smm_relocation_lock);

/* Send SMI to self with single user serialization. */
void smm_initiate_relocation(void)
{
	spin_lock(&smm_relocation_lock);
	smm_initiate_relocation_parallel();
	spin_unlock(&smm_relocation_lock);
}

struct mp_state {
	struct mp_ops ops;
	int cpu_count;
	uintptr_t perm_smbase;
	size_t perm_smsize;
	size_t smm_save_state_size;
	int do_smm;
} mp_state;

static int is_smm_enabled(void)
{
	return IS_ENABLED(CONFIG_HAVE_SMI_HANDLER) && mp_state.do_smm;
}

static void smm_disable(void)
{
	mp_state.do_smm = 0;
}

static void smm_enable(void)
{
	if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER))
		mp_state.do_smm = 1;
}

static void asmlinkage smm_do_relocation(void *arg)
{
	const struct smm_module_params *p;
	const struct smm_runtime *runtime;
	int cpu;
	uintptr_t curr_smbase;
	uintptr_t perm_smbase;

	p = arg;
	runtime = p->runtime;
	cpu = p->cpu;
	curr_smbase = runtime->smbase;

	if (cpu >= CONFIG_MAX_CPUS) {
		printk(BIOS_CRIT,
		       "Invalid CPU number assigned in SMM stub: %d\n", cpu);
		return;
	}

	/*
	 * The permanent handler runs with all cpus concurrently. Precalculate
	 * the location of the new SMBASE. If using SMM modules then this
	 * calculation needs to match that of the module loader.
	 */
	perm_smbase = mp_state.perm_smbase;
	perm_smbase -= cpu * runtime->save_state_size;

	printk(BIOS_DEBUG, "New SMBASE 0x%08lx\n", perm_smbase);

	/* Setup code checks this callback for validity. */
	mp_state.ops.relocation_handler(cpu, curr_smbase, perm_smbase);
}

static void adjust_smm_apic_id_map(struct smm_loader_params *smm_params)
{
	int i;
	struct smm_runtime *runtime = smm_params->runtime;

	for (i = 0; i < CONFIG_MAX_CPUS; i++)
		runtime->apic_id_to_cpu[i] = mp_get_apic_id(i);
}

static int install_relocation_handler(int num_cpus, size_t save_state_size)
{
	struct smm_loader_params smm_params = {
		.per_cpu_stack_size = CONFIG_SMM_STUB_STACK_SIZE,
		.num_concurrent_stacks = num_cpus,
		.per_cpu_save_state_size = save_state_size,
		.num_concurrent_save_states = 1,
		.handler = smm_do_relocation,
	};

	/* Allow callback to override parameters. */
	if (mp_state.ops.adjust_smm_params != NULL)
		mp_state.ops.adjust_smm_params(&smm_params, 0);

	if (smm_setup_relocation_handler(&smm_params))
		return -1;

	adjust_smm_apic_id_map(&smm_params);

	return 0;
}

static int install_permanent_handler(int num_cpus, uintptr_t smbase,
				     size_t smsize, size_t save_state_size)
{
	/* There are num_cpus concurrent stacks and num_cpus concurrent save
	 * state areas. Lastly, the stack size for each CPU is
	 * CONFIG_SMM_MODULE_STACK_SIZE. */
	struct smm_loader_params smm_params = {
		.per_cpu_stack_size = CONFIG_SMM_MODULE_STACK_SIZE,
		.num_concurrent_stacks = num_cpus,
		.per_cpu_save_state_size = save_state_size,
		.num_concurrent_save_states = num_cpus,
	};

	/* Allow callback to override parameters. */
	if (mp_state.ops.adjust_smm_params != NULL)
		mp_state.ops.adjust_smm_params(&smm_params, 1);

	printk(BIOS_DEBUG, "Installing SMM handler to 0x%08lx\n", smbase);

	if (smm_load_module((void *)smbase, smsize, &smm_params))
		return -1;

	adjust_smm_apic_id_map(&smm_params);

	return 0;
}

/* Load SMM handlers as part of MP flight record. */
static void load_smm_handlers(void)
{
	size_t smm_save_state_size = mp_state.smm_save_state_size;

	/* Do nothing if SMM is disabled. */
	if (!is_smm_enabled())
		return;

	/* Install handlers. */
	if (install_relocation_handler(mp_state.cpu_count,
				       smm_save_state_size) < 0) {
		printk(BIOS_ERR, "Unable to install SMM relocation handler.\n");
		smm_disable();
	}

	if (install_permanent_handler(mp_state.cpu_count, mp_state.perm_smbase,
				      mp_state.perm_smsize,
				      smm_save_state_size) < 0) {
		printk(BIOS_ERR, "Unable to install SMM permanent handler.\n");
		smm_disable();
	}

	/* Ensure the SMM handlers hit DRAM before performing first SMI. */
	wbinvd();

	/*
	 * Indicate that the SMM handlers have been loaded and MP
	 * initialization is about to start.
	 */
	if (is_smm_enabled() && mp_state.ops.pre_mp_smm_init != NULL)
		mp_state.ops.pre_mp_smm_init();
}

/* Trigger SMM as part of MP flight record. */
static void trigger_smm_relocation(void)
{
	/* Do nothing if SMM is disabled. */
	if (!is_smm_enabled() || mp_state.ops.per_cpu_smm_trigger == NULL)
		return;
	/* Trigger SMM mode for the currently running processor. */
	mp_state.ops.per_cpu_smm_trigger();
}

static struct mp_callback *ap_callbacks[CONFIG_MAX_CPUS];

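/*
 * The callback slots are accessed through inline asm mov instructions
 * rather than plain C dereferences so the compiler cannot elide, tear,
 * or reorder the single aligned pointer-sized load/store used to pass
 * work between the BSP and the APs.
 */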
static struct mp_callback *read_callback(struct mp_callback **slot)
{
	struct mp_callback *ret;

	asm volatile ("mov %1, %0\n"
		: "=r" (ret)
		: "m" (*slot)
		: "memory"
	);
	return ret;
}

static void store_callback(struct mp_callback **slot, struct mp_callback *val)
{
	asm volatile ("mov %1, %0\n"
		: "=m" (*slot)
		: "r" (val)
		: "memory"
	);
}

static int run_ap_work(struct mp_callback *val, long expire_us)
{
	int i;
	int cpus_accepted;
	struct stopwatch sw;
	int cur_cpu = cpu_index();

	if (!IS_ENABLED(CONFIG_PARALLEL_MP_AP_WORK)) {
		printk(BIOS_ERR, "APs already parked. PARALLEL_MP_AP_WORK not selected.\n");
		return -1;
	}

	/* Signal to all the APs to run the func. */
	for (i = 0; i < ARRAY_SIZE(ap_callbacks); i++) {
		if (cur_cpu == i)
			continue;
		store_callback(&ap_callbacks[i], val);
	}
	mfence();

	/* Wait for all the APs to signal back that the call has been accepted. */
	if (expire_us > 0)
		stopwatch_init_usecs_expire(&sw, expire_us);

	do {
		cpus_accepted = 0;

		for (i = 0; i < ARRAY_SIZE(ap_callbacks); i++) {
			if (cur_cpu == i)
				continue;
			if (read_callback(&ap_callbacks[i]) == NULL)
				cpus_accepted++;
		}

		if (cpus_accepted == global_num_aps)
			return 0;
	} while (expire_us <= 0 || !stopwatch_expired(&sw));

	printk(BIOS_ERR, "AP call expired. %d/%d CPUs accepted.\n",
	       cpus_accepted, global_num_aps);
	return -1;
}

static void ap_wait_for_instruction(void)
{
	struct mp_callback lcb;
	struct mp_callback **per_cpu_slot;
	int cur_cpu;

	if (!IS_ENABLED(CONFIG_PARALLEL_MP_AP_WORK))
		return;

	cur_cpu = cpu_index();
	per_cpu_slot = &ap_callbacks[cur_cpu];

	while (1) {
		struct mp_callback *cb = read_callback(per_cpu_slot);

		if (cb == NULL) {
			asm ("pause");
			continue;
		}

		/* Copy to local variable before signaling consumption. */
		memcpy(&lcb, cb, sizeof(lcb));
		mfence();
		store_callback(per_cpu_slot, NULL);
		if (lcb.logical_cpu_number && (cur_cpu !=
				lcb.logical_cpu_number))
			continue;
		else
			lcb.func(lcb.arg);
	}
}

int mp_run_on_aps(void (*func)(void *), void *arg, int logical_cpu_num,
		  long expire_us)
{
	struct mp_callback lcb = { .func = func, .arg = arg,
				   .logical_cpu_number = logical_cpu_num };
	return run_ap_work(&lcb, expire_us);
}

int mp_run_on_all_cpus(void (*func)(void *), void *arg, long expire_us)
{
	/* Run on BSP first. */
	func(arg);

	return mp_run_on_aps(func, arg, MP_RUN_ON_ALL_CPUS, expire_us);
}
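
/*
 * Example of a hypothetical caller: run some per-CPU work everywhere,
 * giving the APs 100 ms to pick it up:
 *
 *	static void flush_mtrrs(void *unused) { ... }
 *
 *	if (mp_run_on_all_cpus(flush_mtrrs, NULL, 100 * USECS_PER_MSEC) < 0)
 *		printk(BIOS_ERR, "AP work did not complete.\n");
 */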

int mp_park_aps(void)
{
	struct stopwatch sw;
	int ret;
	long duration_msecs;

	stopwatch_init(&sw);

	ret = mp_run_on_aps(park_this_cpu, NULL, MP_RUN_ON_ALL_CPUS,
			    250 * USECS_PER_MSEC);

	duration_msecs = stopwatch_duration_msecs(&sw);

	if (!ret)
		printk(BIOS_DEBUG, "%s done after %ld msecs.\n", __func__,
		       duration_msecs);
	else
		printk(BIOS_ERR, "%s failed after %ld msecs.\n", __func__,
		       duration_msecs);

	return ret;
}

static struct mp_flight_record mp_steps[] = {
	/* Once the APs are up load the SMM handlers. */
	MP_FR_BLOCK_APS(NULL, load_smm_handlers),
	/* Perform SMM relocation. */
	MP_FR_NOBLOCK_APS(trigger_smm_relocation, trigger_smm_relocation),
	/* Initialize each CPU through the driver framework. */
	MP_FR_BLOCK_APS(mp_initialize_cpu, mp_initialize_cpu),
	/* Wait for APs to finish then optionally start looking for work. */
	MP_FR_BLOCK_APS(ap_wait_for_instruction, NULL),
};

static void fill_mp_state(struct mp_state *state, const struct mp_ops *ops)
{
	/*
	 * Make a copy of the ops so that defaults can be set in the non-const
	 * structure if needed.
	 */
	memcpy(&state->ops, ops, sizeof(*ops));

	if (ops->get_cpu_count != NULL)
		state->cpu_count = ops->get_cpu_count();

	if (ops->get_smm_info != NULL)
		ops->get_smm_info(&state->perm_smbase, &state->perm_smsize,
				  &state->smm_save_state_size);

	/*
	 * Default to smm_initiate_relocation() if trigger callback isn't
	 * provided.
	 */
	if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER) &&
	    ops->per_cpu_smm_trigger == NULL)
		mp_state.ops.per_cpu_smm_trigger = smm_initiate_relocation;
}

int mp_init_with_smm(struct bus *cpu_bus, const struct mp_ops *mp_ops)
{
	int ret;
	void *default_smm_area;
	struct mp_params mp_params;

	if (mp_ops->pre_mp_init != NULL)
		mp_ops->pre_mp_init();

	fill_mp_state(&mp_state, mp_ops);

	memset(&mp_params, 0, sizeof(mp_params));

	if (mp_state.cpu_count <= 0) {
		printk(BIOS_ERR, "Invalid cpu_count: %d\n", mp_state.cpu_count);
		return -1;
	}

	/* Sanity check SMM state. */
	if (mp_state.perm_smsize != 0 && mp_state.smm_save_state_size != 0 &&
	    mp_state.ops.relocation_handler != NULL)
		smm_enable();

	if (is_smm_enabled())
		printk(BIOS_INFO, "Will perform SMM setup.\n");

	mp_params.num_cpus = mp_state.cpu_count;
	/* Gather microcode information. */
	if (mp_state.ops.get_microcode_info != NULL)
		mp_state.ops.get_microcode_info(&mp_params.microcode_pointer,
						&mp_params.parallel_microcode_load);
	mp_params.flight_plan = &mp_steps[0];
	mp_params.num_records = ARRAY_SIZE(mp_steps);

	/* Perform backup of the default SMM area. */
	default_smm_area = backup_default_smm_area();

	ret = mp_init(cpu_bus, &mp_params);

	restore_default_smm_area(default_smm_area);

	/* Signal callback on success if it's provided. */
	if (ret == 0 && mp_state.ops.post_mp_init != NULL)
		mp_state.ops.post_mp_init();

	return ret;
}
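
/*
 * A minimal sketch of a caller, assuming a hypothetical chipset that does
 * not use SMM (all SMM-related callbacks left NULL, so only CPU init runs):
 *
 *	static int get_cpu_count(void) { return CONFIG_MAX_CPUS; }
 *
 *	static const struct mp_ops mp_ops_no_smm = {
 *		.get_cpu_count = get_cpu_count,
 *	};
 *
 *	void mp_init_cpus(struct bus *cpu_bus)
 *	{
 *		if (mp_init_with_smm(cpu_bus, &mp_ops_no_smm))
 *			printk(BIOS_ERR, "MP initialization failure.\n");
 *	}
 */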