/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of
 * the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <console/console.h>
#include <stdint.h>
#include <string.h>
#include <compiler.h>
#include <rmodule.h>
#include <arch/cpu.h>
#include <cpu/cpu.h>
#include <cpu/intel/microcode.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/gdt.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/name.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <cpu/x86/mp.h>
#include <delay.h>
#include <device/device.h>
#include <device/path.h>
#include <lib.h>
#include <smp/atomic.h>
#include <smp/spinlock.h>
#include <symbols.h>
#include <timer.h>
#include <thread.h>

#define MAX_APIC_IDS 256

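/*
 * Work item handed to an AP through its ap_callbacks[] slot: the function
 * to run, its argument, and which logical CPU should run it
 * (MP_RUN_ON_ALL_CPUS selects every AP).
 */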
struct mp_callback {
	void (*func)(void *);
	void *arg;
	int logical_cpu_number;
};

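/* CPUID brand string is 48 bytes plus the terminating NUL. */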
static char processor_name[49];

/*
 * A mp_flight_record details a sequence of calls for the APs to perform
 * along with the BSP to coordinate sequencing. Each flight record either
 * provides a barrier for each AP before calling the callback or the APs
 * are allowed to perform the callback without waiting. Regardless, each
 * AP increments the record's cpus_entered field. When the BSP observes
 * that cpus_entered matches the number of APs, bsp_call is invoked and,
 * upon returning, the barrier is released, allowing the APs to make
 * further progress.
 *
 * Note that ap_call() and bsp_call() can be NULL. In the NULL case the
 * callback is simply not called.
 */
struct mp_flight_record {
	atomic_t barrier;
	atomic_t cpus_entered;
	void (*ap_call)(void);
	void (*bsp_call)(void);
} __aligned(CACHELINE_SIZE);

#define _MP_FLIGHT_RECORD(barrier_, ap_func_, bsp_func_) \
	{						\
		.barrier = ATOMIC_INIT(barrier_),	\
		.cpus_entered = ATOMIC_INIT(0),		\
		.ap_call = ap_func_,			\
		.bsp_call = bsp_func_,			\
	}

#define MP_FR_BLOCK_APS(ap_func_, bsp_func_) \
	_MP_FLIGHT_RECORD(0, ap_func_, bsp_func_)

#define MP_FR_NOBLOCK_APS(ap_func_, bsp_func_) \
	_MP_FLIGHT_RECORD(1, ap_func_, bsp_func_)

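/*
 * Illustrative only: a flight plan is an array of these records, executed
 * in order. ap_work() and bsp_work() are hypothetical placeholders, not
 * functions in this file. The first record lets the APs run ap_work()
 * without waiting; the second makes the APs wait until the BSP has run
 * bsp_work():
 *
 *	static struct mp_flight_record plan[] = {
 *		MP_FR_NOBLOCK_APS(ap_work, NULL),
 *		MP_FR_BLOCK_APS(NULL, bsp_work),
 *	};
 */
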
/* The mp_params structure provides the arguments to the mp subsystem
 * for bringing up APs. */
struct mp_params {
	int num_cpus; /* Total CPUs, including the BSP */
	int parallel_microcode_load;
	const void *microcode_pointer;
	/* Flight plan for APs and BSP. */
	struct mp_flight_record *flight_plan;
	int num_records;
};

/* This needs to match the layout in the .module_parameters section. */
struct sipi_params {
	uint16_t gdtlimit;
	uint32_t gdt;
	uint16_t unused;
	uint32_t idt_ptr;
	uint32_t stack_top;
	uint32_t stack_size;
	uint32_t microcode_lock; /* 0xffffffff means parallel loading. */
	uint32_t microcode_ptr;
	uint32_t msr_table_ptr;
	uint32_t msr_count;
	uint32_t c_handler;
	atomic_t ap_count;
} __packed;

/* This also needs to match the assembly code for saved MSR encoding. */
struct saved_msr {
	uint32_t index;
	uint32_t lo;
	uint32_t hi;
} __packed;

/* The SIPI vector rmodule is included in the ramstage using 'objdump -B'. */
extern char _binary_sipi_vector_start[];

/* The SIPI vector is loaded at the SMM_DEFAULT_BASE. The reason is that the
 * memory range is already reserved so the OS cannot use it. That region is
 * free to use for AP bringup before SMM is initialized. */
static const uint32_t sipi_vector_location = SMM_DEFAULT_BASE;
static const int sipi_vector_location_size = SMM_DEFAULT_SIZE;

struct mp_flight_plan {
	int num_records;
	struct mp_flight_record *records;
};

static int global_num_aps;
static struct mp_flight_plan mp_info;

struct cpu_map {
	struct device *dev;
	/* Keep track of default APIC ids for SMM. */
	int default_apic_id;
};

/* Keep track of APIC and device structure for each CPU. */
static struct cpu_map cpus[CONFIG_MAX_CPUS];

static inline void add_cpu_map_entry(const struct cpu_info *info)
{
	cpus[info->index].dev = info->cpu;
	cpus[info->index].default_apic_id = cpuid_ebx(1) >> 24;
}

static inline void barrier_wait(atomic_t *b)
{
	while (atomic_read(b) == 0)
		asm ("pause");
	mfence();
}

static inline void release_barrier(atomic_t *b)
{
	mfence();
	atomic_set(b, 1);
}

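/*
 * The pair above backs the flight plan hand-off: each AP increments
 * rec->cpus_entered and blocks in barrier_wait(&rec->barrier), while the
 * BSP, once cpus_entered matches the AP count, runs rec->bsp_call() and
 * then release_barrier(&rec->barrier) to let the APs proceed.
 */
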
/* Returns 1 if timeout waiting for APs. 0 if target APs found. */
static int wait_for_aps(atomic_t *val, int target, int total_delay,
			int delay_step)
{
	int timeout = 0;
	int delayed = 0;
	while (atomic_read(val) != target) {
		udelay(delay_step);
		delayed += delay_step;
		if (delayed >= total_delay) {
			timeout = 1;
			break;
		}
	}

	return timeout;
}

static void ap_do_flight_plan(void)
{
	int i;

	for (i = 0; i < mp_info.num_records; i++) {
		struct mp_flight_record *rec = &mp_info.records[i];

		atomic_inc(&rec->cpus_entered);
		barrier_wait(&rec->barrier);

		if (rec->ap_call != NULL)
			rec->ap_call();
	}
}

static void park_this_cpu(void *unused)
{
	stop_this_cpu();
}

/* By the time APs call ap_init() caching has been set up, and microcode has
 * been loaded. */
static void asmlinkage ap_init(unsigned int cpu)
{
	struct cpu_info *info;

	/* Ensure the local APIC is enabled */
	enable_lapic();

	info = cpu_info();
	info->index = cpu;
	info->cpu = cpus[cpu].dev;

	add_cpu_map_entry(info);
	thread_init_cpu_info_non_bsp(info);

	/* Fix up the APIC id with reality. */
	info->cpu->path.apic.apic_id = lapicid();

	printk(BIOS_INFO, "AP: slot %d apic_id %x.\n", cpu,
	       info->cpu->path.apic.apic_id);

	/* Walk the flight plan. */
	ap_do_flight_plan();

	/* Park the AP. */
	park_this_cpu(NULL);
}

static void setup_default_sipi_vector_params(struct sipi_params *sp)
{
	sp->gdt = (uint32_t)&gdt;
	sp->gdtlimit = (uint32_t)&gdt_end - (u32)&gdt - 1;
	sp->idt_ptr = (uint32_t)&idtarg;
	sp->stack_size = CONFIG_STACK_SIZE;
	sp->stack_top = (uint32_t)&_estack;
	/* Adjust the stack top to take into account cpu_info. */
	sp->stack_top -= sizeof(struct cpu_info);
}

#define NUM_FIXED_MTRRS 11
static const unsigned int fixed_mtrrs[NUM_FIXED_MTRRS] = {
	MTRR_FIX_64K_00000, MTRR_FIX_16K_80000, MTRR_FIX_16K_A0000,
	MTRR_FIX_4K_C0000, MTRR_FIX_4K_C8000, MTRR_FIX_4K_D0000,
	MTRR_FIX_4K_D8000, MTRR_FIX_4K_E0000, MTRR_FIX_4K_E8000,
	MTRR_FIX_4K_F0000, MTRR_FIX_4K_F8000,
};

static inline struct saved_msr *save_msr(int index, struct saved_msr *entry)
{
	msr_t msr;

	msr = rdmsr(index);
	entry->index = index;
	entry->lo = msr.lo;
	entry->hi = msr.hi;

	/* Return the next entry. */
	entry++;
	return entry;
}

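/*
 * save_bsp_msrs() below fills the region at 'start' with a packed array of
 * struct saved_msr entries, one {index, lo, hi} triple per MSR. The SIPI
 * assembly walks this table (via sipi_params.msr_table_ptr/msr_count) and
 * wrmsr()s each entry so every AP inherits the BSP's MTRR setup.
 */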
static int save_bsp_msrs(char *start, int size)
{
	int msr_count;
	int num_var_mtrrs;
	struct saved_msr *msr_entry;
	int i;
	msr_t msr;

	/* Determine the number of MTRRs that need to be saved. */
	msr = rdmsr(MTRR_CAP_MSR);
	num_var_mtrrs = msr.lo & 0xff;

	/* 2 * num_var_mtrrs for base and mask. +1 for IA32_MTRR_DEF_TYPE. */
	msr_count = 2 * num_var_mtrrs + NUM_FIXED_MTRRS + 1;

	if ((msr_count * sizeof(struct saved_msr)) > size) {
		printk(BIOS_CRIT, "Cannot mirror all %d msrs.\n", msr_count);
		return -1;
	}

	fixed_mtrrs_expose_amd_rwdram();

	msr_entry = (void *)start;
	for (i = 0; i < NUM_FIXED_MTRRS; i++)
		msr_entry = save_msr(fixed_mtrrs[i], msr_entry);

	for (i = 0; i < num_var_mtrrs; i++) {
		msr_entry = save_msr(MTRR_PHYS_BASE(i), msr_entry);
		msr_entry = save_msr(MTRR_PHYS_MASK(i), msr_entry);
	}

	msr_entry = save_msr(MTRR_DEF_TYPE_MSR, msr_entry);

	fixed_mtrrs_hide_amd_rwdram();

	return msr_count;
}

static atomic_t *load_sipi_vector(struct mp_params *mp_params)
{
	struct rmodule sipi_mod;
	int module_size;
	int num_msrs;
	struct sipi_params *sp;
	char *mod_loc = (void *)sipi_vector_location;
	const int loc_size = sipi_vector_location_size;
	atomic_t *ap_count = NULL;

	if (rmodule_parse(&_binary_sipi_vector_start, &sipi_mod)) {
		printk(BIOS_CRIT, "Unable to parse sipi module.\n");
		return ap_count;
	}

	if (rmodule_entry_offset(&sipi_mod) != 0) {
		printk(BIOS_CRIT, "SIPI module entry offset is not 0!\n");
		return ap_count;
	}

	if (rmodule_load_alignment(&sipi_mod) != 4096) {
		printk(BIOS_CRIT, "SIPI module load alignment(%d) != 4096.\n",
		       rmodule_load_alignment(&sipi_mod));
		return ap_count;
	}

	module_size = rmodule_memory_size(&sipi_mod);

	/* Align to 4 bytes. */
	module_size = ALIGN(module_size, 4);

	if (module_size > loc_size) {
		printk(BIOS_CRIT, "SIPI module size (%d) > region size (%d).\n",
		       module_size, loc_size);
		return ap_count;
	}

	num_msrs = save_bsp_msrs(&mod_loc[module_size], loc_size - module_size);

	if (num_msrs < 0) {
		printk(BIOS_CRIT, "Error mirroring BSP's msrs.\n");
		return ap_count;
	}

	if (rmodule_load(mod_loc, &sipi_mod)) {
		printk(BIOS_CRIT, "Unable to load SIPI module.\n");
		return ap_count;
	}

	sp = rmodule_parameters(&sipi_mod);

	if (sp == NULL) {
		printk(BIOS_CRIT, "SIPI module has no parameters.\n");
		return ap_count;
	}

	setup_default_sipi_vector_params(sp);
	/* Set up the MSR table. */
	sp->msr_table_ptr = (uint32_t)&mod_loc[module_size];
	sp->msr_count = num_msrs;
	/* Provide pointer to microcode patch. */
	sp->microcode_ptr = (uint32_t)mp_params->microcode_pointer;
	/* Pass on the ability to load microcode in parallel. */
	if (mp_params->parallel_microcode_load)
		sp->microcode_lock = 0;
	else
		sp->microcode_lock = ~0;
	sp->c_handler = (uint32_t)&ap_init;
	ap_count = &sp->ap_count;
	atomic_set(ap_count, 0);

	return ap_count;
}

static int allocate_cpu_devices(struct bus *cpu_bus, struct mp_params *p)
{
	int i;
	int max_cpus;
	struct cpu_info *info;

	max_cpus = p->num_cpus;
	if (max_cpus > CONFIG_MAX_CPUS) {
		printk(BIOS_CRIT, "CPU count(%d) exceeds CONFIG_MAX_CPUS(%d)\n",
		       max_cpus, CONFIG_MAX_CPUS);
		max_cpus = CONFIG_MAX_CPUS;
	}

	info = cpu_info();
	for (i = 1; i < max_cpus; i++) {
		struct device_path cpu_path;
		struct device *new;

		/* Build the CPU device path. */
		cpu_path.type = DEVICE_PATH_APIC;

		/* Assuming linear APIC space allocation. AP will set its own
		   APIC id in the ap_init() path above. */
		cpu_path.apic.apic_id = info->cpu->path.apic.apic_id + i;

		/* Allocate the new CPU device structure. */
		new = alloc_find_dev(cpu_bus, &cpu_path);
		if (new == NULL) {
			printk(BIOS_CRIT, "Could not allocate CPU device\n");
			max_cpus--;
			continue;	/* Do not dereference a NULL device. */
		}
		new->name = processor_name;
		cpus[i].dev = new;
	}

	return max_cpus;
}

/* Returns 1 for timeout. 0 on success. */
static int apic_wait_timeout(int total_delay, int delay_step)
{
	int total = 0;
	int timeout = 0;

	while (lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY) {
		udelay(delay_step);
		total += delay_step;
		if (total >= total_delay) {
			timeout = 1;
			break;
		}
	}

	return timeout;
}

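/*
 * Start the APs with the standard INIT-SIPI-SIPI sequence: broadcast an
 * INIT IPI to all-but-self, wait 10 ms, then send two STARTUP IPIs
 * carrying the 4 KiB-aligned SIPI vector, polling num_aps to see the APs
 * check in.
 */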
static int start_aps(struct bus *cpu_bus, int ap_count, atomic_t *num_aps)
{
	int sipi_vector;
	/* Max location is 4KiB below 1MiB */
	const int max_vector_loc = ((1 << 20) - (1 << 12)) >> 12;

	if (ap_count == 0)
		return 0;

	/* The vector is sent as a 4k aligned address in one byte. */
	sipi_vector = sipi_vector_location >> 12;

	if (sipi_vector > max_vector_loc) {
		printk(BIOS_CRIT, "SIPI vector too large! 0x%08x\n",
		       sipi_vector);
		return -1;
	}

	printk(BIOS_DEBUG, "Attempting to start %d APs\n", ap_count);

	if ((lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY)) {
		printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
		if (apic_wait_timeout(1000 /* 1 ms */, 50 /* us */)) {
			printk(BIOS_DEBUG, "timed out. Aborting.\n");
			return -1;
		}
		printk(BIOS_DEBUG, "done.\n");
	}

	/* Send INIT IPI to all but self. */
	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
	lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
			   LAPIC_DM_INIT);
	printk(BIOS_DEBUG, "Waiting for 10ms after sending INIT.\n");
	mdelay(10);

	/* Send 1st SIPI */
	if ((lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY)) {
		printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
		if (apic_wait_timeout(1000 /* 1 ms */, 50 /* us */)) {
			printk(BIOS_DEBUG, "timed out. Aborting.\n");
			return -1;
		}
		printk(BIOS_DEBUG, "done.\n");
	}

	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
	lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
			   LAPIC_DM_STARTUP | sipi_vector);
	printk(BIOS_DEBUG, "Waiting for 1st SIPI to complete...");
	if (apic_wait_timeout(10000 /* 10 ms */, 50 /* us */)) {
		printk(BIOS_DEBUG, "timed out.\n");
		return -1;
	}
	printk(BIOS_DEBUG, "done.\n");

	/* Wait for CPUs to check in up to 200 us. */
	wait_for_aps(num_aps, ap_count, 200 /* us */, 15 /* us */);

	/* Send 2nd SIPI */
	if ((lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY)) {
		printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
		if (apic_wait_timeout(1000 /* 1 ms */, 50 /* us */)) {
			printk(BIOS_DEBUG, "timed out. Aborting.\n");
			return -1;
		}
		printk(BIOS_DEBUG, "done.\n");
	}

	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
	lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
			   LAPIC_DM_STARTUP | sipi_vector);
	printk(BIOS_DEBUG, "Waiting for 2nd SIPI to complete...");
	if (apic_wait_timeout(10000 /* 10 ms */, 50 /* us */)) {
		printk(BIOS_DEBUG, "timed out.\n");
		return -1;
	}
	printk(BIOS_DEBUG, "done.\n");

	/* Wait for CPUs to check in. */
	if (wait_for_aps(num_aps, ap_count, 10000 /* 10 ms */, 50 /* us */)) {
		printk(BIOS_DEBUG, "Not all APs checked in: %d/%d.\n",
		       atomic_read(num_aps), ap_count);
		return -1;
	}

	return 0;
}

static int bsp_do_flight_plan(struct mp_params *mp_params)
{
	int i;
	int ret = 0;
	/*
	 * Set the timeout for APs to check in to a large value (1 second)
	 * since check-in can take longer as the number of APs increases
	 * (contention for resources like the UART also increases).
	 */
	const int timeout_us = 1000000;
	const int step_us = 100;
	int num_aps = mp_params->num_cpus - 1;
	struct stopwatch sw;

	stopwatch_init(&sw);

	for (i = 0; i < mp_params->num_records; i++) {
		struct mp_flight_record *rec = &mp_params->flight_plan[i];

		/* Wait for APs if the record is not released. */
		if (atomic_read(&rec->barrier) == 0) {
			/* Wait for the APs to check in. */
			if (wait_for_aps(&rec->cpus_entered, num_aps,
					 timeout_us, step_us)) {
				printk(BIOS_ERR, "MP record %d timeout.\n", i);
				ret = -1;
			}
		}

		if (rec->bsp_call != NULL)
			rec->bsp_call();

		release_barrier(&rec->barrier);
	}

	printk(BIOS_INFO, "%s done after %ld msecs.\n", __func__,
	       stopwatch_duration_msecs(&sw));
	return ret;
}

static void init_bsp(struct bus *cpu_bus)
{
	struct device_path cpu_path;
	struct cpu_info *info;

	/* Print processor name. */
	fill_processor_name(processor_name);
	printk(BIOS_INFO, "CPU: %s.\n", processor_name);

	/* Ensure the local APIC is enabled. */
	enable_lapic();

	/* Set the device path of the boot CPU. */
	cpu_path.type = DEVICE_PATH_APIC;
	cpu_path.apic.apic_id = lapicid();

	/* Find the device structure for the boot CPU. */
	info = cpu_info();
	info->cpu = alloc_find_dev(cpu_bus, &cpu_path);
	info->cpu->name = processor_name;

	if (info->index != 0)
		printk(BIOS_CRIT, "BSP index(%d) != 0!\n", info->index);

	/* Track BSP in cpu_map structures. */
	add_cpu_map_entry(info);
}

/*
 * mp_init() will set up the SIPI vector and bring up the APs according to
 * mp_params. Each flight record will be executed according to the plan. Note
 * that the MP infrastructure uses the SMM default area without saving it.
 * It's up to the chipset or mainboard to either e820-reserve this area or
 * save this region prior to calling mp_init() and restore it after mp_init()
 * returns.
 *
 * When mp_init() is called, the BSP's MTRR MSRs are mirrored into the APs
 * and caching is enabled before running the flight plan.
 *
 * The MP initialization has the following properties:
 * 1. APs are brought up in parallel.
 * 2. The ordering of coreboot CPU number and APIC ids is not deterministic.
 *    Therefore, one cannot rely on this property or the order of devices in
 *    the device tree unless the chipset or mainboard know the APIC ids
 *    a priori.
 *
 * mp_init() returns < 0 on error, 0 on success.
 */
static int mp_init(struct bus *cpu_bus, struct mp_params *p)
{
	int num_cpus;
	atomic_t *ap_count;

	init_bsp(cpu_bus);

	if (p == NULL || p->flight_plan == NULL || p->num_records < 1) {
		printk(BIOS_CRIT, "Invalid MP parameters\n");
		return -1;
	}

	/* Default to currently running CPU. */
	num_cpus = allocate_cpu_devices(cpu_bus, p);

	if (num_cpus < p->num_cpus) {
		printk(BIOS_CRIT,
		       "ERROR: More cpus requested (%d) than supported (%d).\n",
		       p->num_cpus, num_cpus);
		return -1;
	}

	/* Copy needed parameters so that APs have a reference to the plan. */
	mp_info.num_records = p->num_records;
	mp_info.records = p->flight_plan;

	/* Load the SIPI vector. */
	ap_count = load_sipi_vector(p);
	if (ap_count == NULL)
		return -1;

	/* Make sure SIPI data hits RAM so the APs that come up will see
	 * the startup code even if the caches are disabled. */
	wbinvd();

	/* Start the APs; they check in by incrementing ap_count. */
	global_num_aps = p->num_cpus - 1;
	if (start_aps(cpu_bus, global_num_aps, ap_count) < 0) {
		mdelay(1000);
		printk(BIOS_DEBUG, "%d/%d eventually checked in?\n",
		       atomic_read(ap_count), global_num_aps);
		return -1;
	}

	/* Walk the flight plan for the BSP. */
	return bsp_do_flight_plan(p);
}

/* Calls cpu_initialize(info->index) which calls the coreboot CPU drivers. */
static void mp_initialize_cpu(void)
{
	/* Call back into driver infrastructure for the AP initialization. */
	struct cpu_info *info = cpu_info();
	cpu_initialize(info->index);
}

/* Returns APIC id for coreboot CPU number or < 0 on failure. */
static int mp_get_apic_id(int cpu_slot)
{
	if (cpu_slot >= CONFIG_MAX_CPUS || cpu_slot < 0)
		return -1;

	return cpus[cpu_slot].default_apic_id;
}

void smm_initiate_relocation_parallel(void)
{
	if ((lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY)) {
		printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
		if (apic_wait_timeout(1000 /* 1 ms */, 50 /* us */)) {
			printk(BIOS_DEBUG, "timed out. Aborting.\n");
			return;
		}
		printk(BIOS_DEBUG, "done.\n");
	}

	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(lapicid()));
	lapic_write_around(LAPIC_ICR, LAPIC_INT_ASSERT | LAPIC_DM_SMI);
	if (apic_wait_timeout(1000 /* 1 ms */, 100 /* us */))
		printk(BIOS_DEBUG, "SMI Relocation timed out.\n");
	else
		printk(BIOS_DEBUG, "Relocation complete.\n");
}

DECLARE_SPIN_LOCK(smm_relocation_lock);

/* Send SMI to self with single user serialization. */
void smm_initiate_relocation(void)
{
	spin_lock(&smm_relocation_lock);
	smm_initiate_relocation_parallel();
	spin_unlock(&smm_relocation_lock);
}

struct mp_state {
	struct mp_ops ops;
	int cpu_count;
	uintptr_t perm_smbase;
	size_t perm_smsize;
	size_t smm_save_state_size;
	int do_smm;
} mp_state;

static int is_smm_enabled(void)
{
	return IS_ENABLED(CONFIG_HAVE_SMI_HANDLER) && mp_state.do_smm;
}

static void smm_disable(void)
{
	mp_state.do_smm = 0;
}

static void smm_enable(void)
{
	if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER))
		mp_state.do_smm = 1;
}

static void asmlinkage smm_do_relocation(void *arg)
{
	const struct smm_module_params *p;
	const struct smm_runtime *runtime;
	int cpu;
	uintptr_t curr_smbase;
	uintptr_t perm_smbase;

	p = arg;
	runtime = p->runtime;
	cpu = p->cpu;
	curr_smbase = runtime->smbase;

	if (cpu >= CONFIG_MAX_CPUS) {
		printk(BIOS_CRIT,
		       "Invalid CPU number assigned in SMM stub: %d\n", cpu);
		return;
	}

	/*
	 * The permanent handler runs with all cpus concurrently. Precalculate
	 * the location of the new SMBASE. If using SMM modules then this
	 * calculation needs to match that of the module loader.
	 */
	perm_smbase = mp_state.perm_smbase;
	perm_smbase -= cpu * runtime->save_state_size;

	printk(BIOS_DEBUG, "New SMBASE 0x%08lx\n", perm_smbase);

	/* Setup code checks this callback for validity. */
	mp_state.ops.relocation_handler(cpu, curr_smbase, perm_smbase);
}

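/*
 * The SMM runtime locates a CPU's save state and stack by APIC ID, so seed
 * its apic_id_to_cpu[] map from the default APIC IDs recorded at bring-up.
 */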
static void adjust_smm_apic_id_map(struct smm_loader_params *smm_params)
{
	int i;
	struct smm_runtime *runtime = smm_params->runtime;

	for (i = 0; i < CONFIG_MAX_CPUS; i++)
		runtime->apic_id_to_cpu[i] = mp_get_apic_id(i);
}

static int install_relocation_handler(int num_cpus, size_t save_state_size)
{
	struct smm_loader_params smm_params = {
		.per_cpu_stack_size = save_state_size,
		.num_concurrent_stacks = num_cpus,
		.per_cpu_save_state_size = save_state_size,
		.num_concurrent_save_states = 1,
		.handler = smm_do_relocation,
	};

	/* Allow callback to override parameters. */
	if (mp_state.ops.adjust_smm_params != NULL)
		mp_state.ops.adjust_smm_params(&smm_params, 0);

	if (smm_setup_relocation_handler(&smm_params))
		return -1;

	adjust_smm_apic_id_map(&smm_params);

	return 0;
}

static int install_permanent_handler(int num_cpus, uintptr_t smbase,
				     size_t smsize, size_t save_state_size)
{
	/* There are num_cpus concurrent stacks and num_cpus concurrent save
	 * state areas. Lastly, set the stack size to 1KiB. */
	struct smm_loader_params smm_params = {
		.per_cpu_stack_size = 1 * KiB,
		.num_concurrent_stacks = num_cpus,
		.per_cpu_save_state_size = save_state_size,
		.num_concurrent_save_states = num_cpus,
	};

	/* Allow callback to override parameters. */
	if (mp_state.ops.adjust_smm_params != NULL)
		mp_state.ops.adjust_smm_params(&smm_params, 1);

	printk(BIOS_DEBUG, "Installing SMM handler to 0x%08lx\n", smbase);

	if (smm_load_module((void *)smbase, smsize, &smm_params))
		return -1;

	adjust_smm_apic_id_map(&smm_params);

	return 0;
}

/* Load SMM handlers as part of MP flight record. */
static void load_smm_handlers(void)
{
	size_t smm_save_state_size = mp_state.smm_save_state_size;

	/* Do nothing if SMM is disabled. */
	if (!is_smm_enabled())
		return;

	/* Install handlers. */
	if (install_relocation_handler(mp_state.cpu_count,
				       smm_save_state_size) < 0) {
		printk(BIOS_ERR, "Unable to install SMM relocation handler.\n");
		smm_disable();
	}

	if (install_permanent_handler(mp_state.cpu_count, mp_state.perm_smbase,
				      mp_state.perm_smsize,
				      smm_save_state_size) < 0) {
		printk(BIOS_ERR, "Unable to install SMM permanent handler.\n");
		smm_disable();
	}

	/* Ensure the SMM handlers hit DRAM before performing first SMI. */
	wbinvd();

	/*
	 * Indicate that the SMM handlers have been loaded and MP
	 * initialization is about to start.
	 */
	if (is_smm_enabled() && mp_state.ops.pre_mp_smm_init != NULL)
		mp_state.ops.pre_mp_smm_init();
}

/* Trigger SMM as part of MP flight record. */
static void trigger_smm_relocation(void)
{
	/* Do nothing if SMM is disabled. */
	if (!is_smm_enabled() || mp_state.ops.per_cpu_smm_trigger == NULL)
		return;
	/* Trigger SMM mode for the currently running processor. */
	mp_state.ops.per_cpu_smm_trigger();
}

static struct mp_callback *ap_callbacks[CONFIG_MAX_CPUS];

static struct mp_callback *read_callback(struct mp_callback **slot)
{
	struct mp_callback *ret;

	asm volatile ("mov %1, %0\n"
		      : "=r" (ret)
		      : "m" (*slot)
		      : "memory"
	);
	return ret;
}

static void store_callback(struct mp_callback **slot, struct mp_callback *val)
{
	asm volatile ("mov %1, %0\n"
		      : "=m" (*slot)
		      : "r" (val)
		      : "memory"
	);
}

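/*
 * read_callback()/store_callback() above form a lock-free, single-slot
 * mailbox per AP. The asm mov forces exactly one aligned pointer-sized
 * access, so the compiler cannot tear, cache, or reorder it; combined with
 * mfence() this suffices for the producer/consumer hand-off below.
 */
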
static int run_ap_work(struct mp_callback *val, long expire_us)
{
	int i;
	int cpus_accepted;
	struct stopwatch sw;
	int cur_cpu = cpu_index();

	if (!IS_ENABLED(CONFIG_PARALLEL_MP_AP_WORK)) {
		printk(BIOS_ERR, "APs already parked. PARALLEL_MP_AP_WORK not selected.\n");
		return -1;
	}

	/* Signal to all the APs to run the func. */
	for (i = 0; i < ARRAY_SIZE(ap_callbacks); i++) {
		if (cur_cpu == i)
			continue;
		store_callback(&ap_callbacks[i], val);
	}
	mfence();

	/* Wait for all APs to signal back that the call has been accepted. */
	if (expire_us > 0)
		stopwatch_init_usecs_expire(&sw, expire_us);

	do {
		cpus_accepted = 0;

		for (i = 0; i < ARRAY_SIZE(ap_callbacks); i++) {
			if (cur_cpu == i)
				continue;
			if (read_callback(&ap_callbacks[i]) == NULL)
				cpus_accepted++;
		}

		if (cpus_accepted == global_num_aps)
			return 0;
	} while (expire_us <= 0 || !stopwatch_expired(&sw));

	printk(BIOS_ERR, "AP call expired. %d/%d CPUs accepted.\n",
	       cpus_accepted, global_num_aps);
	return -1;
}

static void ap_wait_for_instruction(void)
{
	struct mp_callback lcb;
	struct mp_callback **per_cpu_slot;
	int cur_cpu;

	if (!IS_ENABLED(CONFIG_PARALLEL_MP_AP_WORK))
		return;

	cur_cpu = cpu_index();
	per_cpu_slot = &ap_callbacks[cur_cpu];

	while (1) {
		struct mp_callback *cb = read_callback(per_cpu_slot);

		if (cb == NULL) {
			asm ("pause");
			continue;
		}

		/* Copy to a local variable before signalling consumption. */
		memcpy(&lcb, cb, sizeof(lcb));
		mfence();
		store_callback(per_cpu_slot, NULL);
		if (lcb.logical_cpu_number &&
		    (cur_cpu != lcb.logical_cpu_number))
			continue;
		lcb.func(lcb.arg);
	}
}

int mp_run_on_aps(void (*func)(void *), void *arg, int logical_cpu_num,
		  long expire_us)
{
	struct mp_callback lcb = { .func = func, .arg = arg,
				   .logical_cpu_number = logical_cpu_num };
	return run_ap_work(&lcb, expire_us);
}

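/*
 * Example caller (hypothetical): ask every AP to run set_feature_msr()
 * and give the job 1 ms to be picked up:
 *
 *	if (mp_run_on_aps(set_feature_msr, NULL, MP_RUN_ON_ALL_CPUS,
 *			  1 * USECS_PER_MSEC) < 0)
 *		printk(BIOS_ERR, "AP work timed out\n");
 */
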
int mp_run_on_all_cpus(void (*func)(void *), void *arg, long expire_us)
{
	/* Run on BSP first. */
	func(arg);

	return mp_run_on_aps(func, arg, MP_RUN_ON_ALL_CPUS, expire_us);
}

int mp_park_aps(void)
{
	struct stopwatch sw;
	int ret;
	long duration_msecs;

	stopwatch_init(&sw);

	ret = mp_run_on_aps(park_this_cpu, NULL, MP_RUN_ON_ALL_CPUS,
			    250 * USECS_PER_MSEC);

	duration_msecs = stopwatch_duration_msecs(&sw);

	if (!ret)
		printk(BIOS_DEBUG, "%s done after %ld msecs.\n", __func__,
		       duration_msecs);
	else
		printk(BIOS_ERR, "%s failed after %ld msecs.\n", __func__,
		       duration_msecs);

	return ret;
}

static struct mp_flight_record mp_steps[] = {
	/* Once the APs are up load the SMM handlers. */
	MP_FR_BLOCK_APS(NULL, load_smm_handlers),
	/* Perform SMM relocation. */
	MP_FR_NOBLOCK_APS(trigger_smm_relocation, trigger_smm_relocation),
	/* Initialize each CPU through the driver framework. */
	MP_FR_BLOCK_APS(mp_initialize_cpu, mp_initialize_cpu),
	/* Wait for APs to finish then optionally start looking for work. */
	MP_FR_BLOCK_APS(ap_wait_for_instruction, NULL),
};

static void fill_mp_state(struct mp_state *state, const struct mp_ops *ops)
{
	/*
	 * Make a copy of the ops so that defaults can be set in the non-const
	 * structure if needed.
	 */
	memcpy(&state->ops, ops, sizeof(*ops));

	if (ops->get_cpu_count != NULL)
		state->cpu_count = ops->get_cpu_count();

	if (ops->get_smm_info != NULL)
		ops->get_smm_info(&state->perm_smbase, &state->perm_smsize,
				  &state->smm_save_state_size);

	/*
	 * Default to smm_initiate_relocation() if trigger callback isn't
	 * provided.
	 */
	if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER) &&
	    ops->per_cpu_smm_trigger == NULL)
		mp_state.ops.per_cpu_smm_trigger = smm_initiate_relocation;
}

int mp_init_with_smm(struct bus *cpu_bus, const struct mp_ops *mp_ops)
{
	int ret;
	void *default_smm_area;
	struct mp_params mp_params;

	if (mp_ops->pre_mp_init != NULL)
		mp_ops->pre_mp_init();

	fill_mp_state(&mp_state, mp_ops);

	memset(&mp_params, 0, sizeof(mp_params));

	if (mp_state.cpu_count <= 0) {
		printk(BIOS_ERR, "Invalid cpu_count: %d\n", mp_state.cpu_count);
		return -1;
	}

	/* Sanity check SMM state. */
	if (mp_state.perm_smsize != 0 && mp_state.smm_save_state_size != 0 &&
	    mp_state.ops.relocation_handler != NULL)
		smm_enable();

	if (is_smm_enabled())
		printk(BIOS_INFO, "Will perform SMM setup.\n");

	mp_params.num_cpus = mp_state.cpu_count;
	/* Gather microcode information. */
	if (mp_state.ops.get_microcode_info != NULL)
		mp_state.ops.get_microcode_info(&mp_params.microcode_pointer,
					&mp_params.parallel_microcode_load);
	mp_params.flight_plan = &mp_steps[0];
	mp_params.num_records = ARRAY_SIZE(mp_steps);

	/* Perform backup of the default SMM area. */
	default_smm_area = backup_default_smm_area();

	ret = mp_init(cpu_bus, &mp_params);

	restore_default_smm_area(default_smm_area);

	/* Signal callback on success if it's provided. */
	if (ret == 0 && mp_state.ops.post_mp_init != NULL)
		mp_state.ops.post_mp_init();

	return ret;
}
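
/*
 * A minimal usage sketch, assuming a chipset that does not set up SMM;
 * get_cpu_count_for_soc() and the call site are hypothetical:
 *
 *	static const struct mp_ops mp_ops_no_smm = {
 *		.get_cpu_count = get_cpu_count_for_soc,
 *	};
 *
 *	void soc_init_cpus(struct bus *cpu_bus)
 *	{
 *		if (mp_init_with_smm(cpu_bus, &mp_ops_no_smm) < 0)
 *			printk(BIOS_ERR, "MP initialization failure.\n");
 *	}
 */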