/* SPDX-License-Identifier: GPL-2.0-only */

#include <console/console.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <rmodule.h>
#include <arch/cpu.h>
#include <commonlib/helpers.h>
#include <cpu/cpu.h>
#include <cpu/intel/microcode.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/gdt.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/name.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <cpu/x86/mp.h>
#include <delay.h>
#include <device/device.h>
#include <device/path.h>
#include <smp/atomic.h>
#include <smp/spinlock.h>
#include <symbols.h>
#include <timer.h>
#include <thread.h>

#include <security/intel/stm/SmmStm.h>

#define MAX_APIC_IDS 256

struct mp_callback {
	void (*func)(void *);
	void *arg;
	int logical_cpu_number;
};

static char processor_name[49];

/*
 * A mp_flight_record details a sequence of calls for the APs to perform
 * along with the BSP to coordinate sequencing. Each flight record either
 * provides a barrier for each AP before calling the callback or the APs
 * are allowed to perform the callback without waiting. Regardless, each
 * AP increments the record's cpus_entered field. When the BSP observes
 * that cpus_entered matches the number of APs, bsp_call is invoked and,
 * upon returning, the barrier is released, allowing the APs to make
 * further progress.
 *
 * Note that ap_call() and bsp_call() can be NULL. In the NULL case the
 * callback will just not be called.
 */
struct mp_flight_record {
	atomic_t barrier;
	atomic_t cpus_entered;
	void (*ap_call)(void);
	void (*bsp_call)(void);
} __aligned(CACHELINE_SIZE);

#define _MP_FLIGHT_RECORD(barrier_, ap_func_, bsp_func_) \
	{						\
		.barrier = ATOMIC_INIT(barrier_),	\
		.cpus_entered = ATOMIC_INIT(0),		\
		.ap_call = ap_func_,			\
		.bsp_call = bsp_func_,			\
	}

#define MP_FR_BLOCK_APS(ap_func_, bsp_func_) \
	_MP_FLIGHT_RECORD(0, ap_func_, bsp_func_)

#define MP_FR_NOBLOCK_APS(ap_func_, bsp_func_) \
	_MP_FLIGHT_RECORD(1, ap_func_, bsp_func_)

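/*
 * Illustrative sketch (not part of the upstream file): a minimal flight
 * plan built with the macros above. The callback names are hypothetical;
 * the real default plan is mp_steps[] near the end of this file.
 *
 *	static struct mp_flight_record example_plan[] = {
 *		// Barrier starts held: the BSP waits for all APs to enter,
 *		// runs bsp_only_work(), then releases the barrier.
 *		MP_FR_BLOCK_APS(NULL, bsp_only_work),
 *		// Barrier starts released: APs run ap_work() immediately.
 *		MP_FR_NOBLOCK_APS(ap_work, NULL),
 *	};
 */
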
/* The mp_params structure provides the arguments to the mp subsystem
 * for bringing up APs. */
struct mp_params {
	int num_cpus; /* Total number of CPUs, including the BSP. */
	int parallel_microcode_load;
	const void *microcode_pointer;
	/* Flight plan for APs and BSP. */
	struct mp_flight_record *flight_plan;
	int num_records;
};

/* This needs to match the layout in the .module_parameters section. */
struct sipi_params {
	uint16_t gdtlimit;
	uint32_t gdt;
	uint16_t unused;
	uint32_t idt_ptr;
	uint32_t stack_top;
	uint32_t stack_size;
	uint32_t microcode_lock; /* 0xffffffff means parallel loading. */
	uint32_t microcode_ptr;
	uint32_t msr_table_ptr;
	uint32_t msr_count;
	uint32_t c_handler;
	atomic_t ap_count;
} __packed;

/* This also needs to match the assembly code for saved MSR encoding. */
struct saved_msr {
	uint32_t index;
	uint32_t lo;
	uint32_t hi;
} __packed;

/* The sipi vector rmodule is included in the ramstage using 'objcopy -B'. */
extern char _binary_sipi_vector_start[];

/* The SIPI vector is loaded at SMM_DEFAULT_BASE because that memory
 * range is already reserved so the OS cannot use it. That region is
 * free to use for AP bringup before SMM is initialized. */
static const uintptr_t sipi_vector_location = SMM_DEFAULT_BASE;
static const int sipi_vector_location_size = SMM_DEFAULT_SIZE;

struct mp_flight_plan {
	int num_records;
	struct mp_flight_record *records;
};

static int global_num_aps;
static struct mp_flight_plan mp_info;

/* Keep track of device structure for each CPU. */
static struct device *cpus_dev[CONFIG_MAX_CPUS];

static inline void barrier_wait(atomic_t *b)
{
	while (atomic_read(b) == 0)
		asm ("pause");
	mfence();
}

static inline void release_barrier(atomic_t *b)
{
	mfence();
	atomic_set(b, 1);
}

/* Returns 1 on timeout waiting for APs; 0 if the target number of APs checked in. */
static int wait_for_aps(atomic_t *val, int target, int total_delay,
			int delay_step)
{
	int timeout = 0;
	int delayed = 0;
	while (atomic_read(val) != target) {
		udelay(delay_step);
		delayed += delay_step;
		if (delayed >= total_delay) {
			timeout = 1;
			break;
		}
	}

	return timeout;
}

static void ap_do_flight_plan(void)
{
	int i;

	for (i = 0; i < mp_info.num_records; i++) {
		struct mp_flight_record *rec = &mp_info.records[i];

		atomic_inc(&rec->cpus_entered);
		barrier_wait(&rec->barrier);

		if (rec->ap_call != NULL)
			rec->ap_call();
	}
}

static void park_this_cpu(void *unused)
{
	stop_this_cpu();
}

/* By the time APs call ap_init() caching has been set up, and microcode has
 * been loaded. */
static void asmlinkage ap_init(unsigned int cpu)
{
	struct cpu_info *info;

	/* Ensure the local APIC is enabled */
	enable_lapic();

	info = cpu_info();
	info->index = cpu;
	info->cpu = cpus_dev[cpu];

	cpu_add_map_entry(info->index);
	thread_init_cpu_info_non_bsp(info);

	/* Fix up APIC id with reality. */
	info->cpu->path.apic.apic_id = lapicid();

	if (cpu_is_intel())
		printk(BIOS_INFO, "AP: slot %d apic_id %x, MCU rev: 0x%08x\n", cpu,
		       info->cpu->path.apic.apic_id, get_current_microcode_rev());
	else
		printk(BIOS_INFO, "AP: slot %d apic_id %x\n", cpu,
		       info->cpu->path.apic.apic_id);

	/* Walk the flight plan */
	ap_do_flight_plan();

	/* Park the AP. */
	park_this_cpu(NULL);
}

static void setup_default_sipi_vector_params(struct sipi_params *sp)
{
	sp->gdt = (uintptr_t)&gdt;
	sp->gdtlimit = (uintptr_t)&gdt_end - (uintptr_t)&gdt - 1;
	sp->idt_ptr = (uintptr_t)&idtarg;
	sp->stack_size = CONFIG_STACK_SIZE;
	sp->stack_top = ALIGN_DOWN((uintptr_t)&_estack, CONFIG_STACK_SIZE);
	/* Adjust the stack top to take into account cpu_info. */
	sp->stack_top -= sizeof(struct cpu_info);
}

#define NUM_FIXED_MTRRS 11
static const unsigned int fixed_mtrrs[NUM_FIXED_MTRRS] = {
	MTRR_FIX_64K_00000, MTRR_FIX_16K_80000, MTRR_FIX_16K_A0000,
	MTRR_FIX_4K_C0000, MTRR_FIX_4K_C8000, MTRR_FIX_4K_D0000,
	MTRR_FIX_4K_D8000, MTRR_FIX_4K_E0000, MTRR_FIX_4K_E8000,
	MTRR_FIX_4K_F0000, MTRR_FIX_4K_F8000,
};

static inline struct saved_msr *save_msr(int index, struct saved_msr *entry)
{
	msr_t msr;

	msr = rdmsr(index);
	entry->index = index;
	entry->lo = msr.lo;
	entry->hi = msr.hi;

	/* Return the next entry. */
	entry++;
	return entry;
}

static int save_bsp_msrs(char *start, int size)
{
	int msr_count;
	int num_var_mtrrs;
	struct saved_msr *msr_entry;
	int i;
	msr_t msr;

	/* Determine the number of MTRRs that need to be saved. */
	msr = rdmsr(MTRR_CAP_MSR);
	num_var_mtrrs = msr.lo & 0xff;

	/* 2 * num_var_mtrrs for base and mask. +1 for IA32_MTRR_DEF_TYPE. */
	msr_count = 2 * num_var_mtrrs + NUM_FIXED_MTRRS + 1;

	if ((msr_count * sizeof(struct saved_msr)) > size) {
		printk(BIOS_CRIT, "Cannot mirror all %d msrs.\n", msr_count);
		return -1;
	}

	fixed_mtrrs_expose_amd_rwdram();

	msr_entry = (void *)start;
	for (i = 0; i < NUM_FIXED_MTRRS; i++)
		msr_entry = save_msr(fixed_mtrrs[i], msr_entry);

	for (i = 0; i < num_var_mtrrs; i++) {
		msr_entry = save_msr(MTRR_PHYS_BASE(i), msr_entry);
		msr_entry = save_msr(MTRR_PHYS_MASK(i), msr_entry);
	}

	msr_entry = save_msr(MTRR_DEF_TYPE_MSR, msr_entry);

	fixed_mtrrs_hide_amd_rwdram();

	/* Tell static analysis we know the value is left unused. */
	(void)msr_entry;

	return msr_count;
}

static atomic_t *load_sipi_vector(struct mp_params *mp_params)
{
	struct rmodule sipi_mod;
	int module_size;
	int num_msrs;
	struct sipi_params *sp;
	char *mod_loc = (void *)sipi_vector_location;
	const int loc_size = sipi_vector_location_size;
	atomic_t *ap_count = NULL;

	if (rmodule_parse(&_binary_sipi_vector_start, &sipi_mod)) {
		printk(BIOS_CRIT, "Unable to parse sipi module.\n");
		return ap_count;
	}

	if (rmodule_entry_offset(&sipi_mod) != 0) {
		printk(BIOS_CRIT, "SIPI module entry offset is not 0!\n");
		return ap_count;
	}

	if (rmodule_load_alignment(&sipi_mod) != 4096) {
		printk(BIOS_CRIT, "SIPI module load alignment(%d) != 4096.\n",
		       rmodule_load_alignment(&sipi_mod));
		return ap_count;
	}

	module_size = rmodule_memory_size(&sipi_mod);

	/* Align to 4 bytes. */
	module_size = ALIGN_UP(module_size, 4);

	if (module_size > loc_size) {
		printk(BIOS_CRIT, "SIPI module size (%d) > region size (%d).\n",
		       module_size, loc_size);
		return ap_count;
	}

	num_msrs = save_bsp_msrs(&mod_loc[module_size], loc_size - module_size);

	if (num_msrs < 0) {
		printk(BIOS_CRIT, "Error mirroring BSP's msrs.\n");
		return ap_count;
	}

	if (rmodule_load(mod_loc, &sipi_mod)) {
		printk(BIOS_CRIT, "Unable to load SIPI module.\n");
		return ap_count;
	}

	sp = rmodule_parameters(&sipi_mod);

	if (sp == NULL) {
		printk(BIOS_CRIT, "SIPI module has no parameters.\n");
		return ap_count;
	}

	setup_default_sipi_vector_params(sp);
	/* Set up the MSR table. */
	sp->msr_table_ptr = (uintptr_t)&mod_loc[module_size];
	sp->msr_count = num_msrs;
	/* Provide a pointer to the microcode patch. */
	sp->microcode_ptr = (uintptr_t)mp_params->microcode_pointer;
	/* Pass on the ability to load microcode in parallel. */
	if (mp_params->parallel_microcode_load)
		sp->microcode_lock = ~0;
	else
		sp->microcode_lock = 0;
	sp->c_handler = (uintptr_t)&ap_init;
	ap_count = &sp->ap_count;
	atomic_set(ap_count, 0);

	return ap_count;
}

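/*
 * Sketch of the SIPI region at SMM_DEFAULT_BASE after load_sipi_vector(),
 * derived from the code above (an illustrative layout, not a new structure):
 *
 *	mod_loc + 0           : relocated SIPI rmodule, entry at offset 0
 *	mod_loc + module_size : saved BSP MSR table written by
 *	                        save_bsp_msrs(): the 11 fixed MTRRs, then a
 *	                        {PHYS_BASE, PHYS_MASK} pair per variable
 *	                        MTRR, then MTRR_DEF_TYPE_MSR
 *
 * sp->msr_table_ptr and sp->msr_count describe this table to the AP
 * startup assembly.
 */
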
static int allocate_cpu_devices(struct bus *cpu_bus, struct mp_params *p)
{
	int i;
	int max_cpus;
	struct cpu_info *info;

	max_cpus = p->num_cpus;
	if (max_cpus > CONFIG_MAX_CPUS) {
		printk(BIOS_CRIT, "CPU count(%d) exceeds CONFIG_MAX_CPUS(%d)\n",
		       max_cpus, CONFIG_MAX_CPUS);
		max_cpus = CONFIG_MAX_CPUS;
	}

	info = cpu_info();
	for (i = 1; i < max_cpus; i++) {
		struct device_path cpu_path;
		struct device *new;

		/* Build the CPU device path */
		cpu_path.type = DEVICE_PATH_APIC;

		/* Assuming linear APIC space allocation. AP will set its own
		   APIC id in the ap_init() path above. */
		cpu_path.apic.apic_id = info->cpu->path.apic.apic_id + i;

		/* Allocate the new CPU device structure */
		new = alloc_find_dev(cpu_bus, &cpu_path);
		if (new == NULL) {
			printk(BIOS_CRIT, "Could not allocate CPU device\n");
			max_cpus--;
			continue;
		}
		new->name = processor_name;
		cpus_dev[i] = new;
	}

	return max_cpus;
}

/* Returns 1 for timeout. 0 on success. */
static int apic_wait_timeout(int total_delay, int delay_step)
{
	int total = 0;
	int timeout = 0;

	while (lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY) {
		udelay(delay_step);
		total += delay_step;
		if (total >= total_delay) {
			timeout = 1;
			break;
		}
	}

	return timeout;
}

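/*
 * start_aps() below implements the classic INIT-SIPI-SIPI startup sequence:
 * send an INIT IPI to all-but-self, wait 10 ms, send a STARTUP IPI carrying
 * the page number of the SIPI vector, give the APs a short window to check
 * in, then (unless X86_AMD_INIT_SIPI is selected) send a second STARTUP IPI
 * and wait for all APs to increment *num_aps.
 */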
static int start_aps(struct bus *cpu_bus, int ap_count, atomic_t *num_aps)
{
	int sipi_vector;
	/* Max location is 4KiB below 1MiB */
	const int max_vector_loc = ((1 << 20) - (1 << 12)) >> 12;

	if (ap_count == 0)
		return 0;

	/* The vector is sent as a 4k aligned address in one byte. */
	sipi_vector = sipi_vector_location >> 12;

	if (sipi_vector > max_vector_loc) {
		printk(BIOS_CRIT, "SIPI vector too large! 0x%08x\n",
		       sipi_vector);
		return -1;
	}

	printk(BIOS_DEBUG, "Attempting to start %d APs\n", ap_count);

	if (is_x2apic_mode()) {
		x2apic_send_ipi(LAPIC_DM_INIT | LAPIC_INT_LEVELTRIG |
				LAPIC_INT_ASSERT | LAPIC_DEST_ALLBUT, 0);
		mdelay(10);
		x2apic_send_ipi(LAPIC_DM_STARTUP | LAPIC_INT_LEVELTRIG |
				LAPIC_DEST_ALLBUT | sipi_vector, 0);

		/* Wait for CPUs to check in up to 200 us. */
		wait_for_aps(num_aps, ap_count, 200 /* us */, 15 /* us */);

		x2apic_send_ipi(LAPIC_DM_STARTUP | LAPIC_INT_LEVELTRIG |
				LAPIC_DEST_ALLBUT | sipi_vector, 0);

		/* Wait for CPUs to check in. */
		if (wait_for_aps(num_aps, ap_count, 100000 /* 100 ms */, 50 /* us */)) {
			printk(BIOS_ERR, "Not all APs checked in: %d/%d.\n",
			       atomic_read(num_aps), ap_count);
			return -1;
		}
		return 0;
	}

	if ((lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY)) {
		printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
		if (apic_wait_timeout(1000 /* 1 ms */, 50)) {
			printk(BIOS_ERR, "timed out. Aborting.\n");
			return -1;
		}
		printk(BIOS_DEBUG, "done.\n");
	}

	/* Send INIT IPI to all but self. */
	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
	lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
			   LAPIC_DM_INIT);
	printk(BIOS_DEBUG, "Waiting for 10ms after sending INIT.\n");
	mdelay(10);

	/* Send 1st SIPI */
	if ((lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY)) {
		printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
		if (apic_wait_timeout(1000 /* 1 ms */, 50)) {
			printk(BIOS_ERR, "timed out. Aborting.\n");
			return -1;
		}
		printk(BIOS_DEBUG, "done.\n");
	}

	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
	lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
			   LAPIC_DM_STARTUP | sipi_vector);
	printk(BIOS_DEBUG, "Waiting for 1st SIPI to complete...");
	if (apic_wait_timeout(10000 /* 10 ms */, 50 /* us */)) {
		printk(BIOS_ERR, "timed out.\n");
		return -1;
	}
	printk(BIOS_DEBUG, "done.\n");

	/* Wait for CPUs to check in up to 200 us. */
	wait_for_aps(num_aps, ap_count, 200 /* us */, 15 /* us */);

	if (CONFIG(X86_AMD_INIT_SIPI))
		return 0;

	/* Send 2nd SIPI */
	if ((lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY)) {
		printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
		if (apic_wait_timeout(1000 /* 1 ms */, 50)) {
			printk(BIOS_ERR, "timed out. Aborting.\n");
			return -1;
		}
		printk(BIOS_DEBUG, "done.\n");
	}

	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
	lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
			   LAPIC_DM_STARTUP | sipi_vector);
	printk(BIOS_DEBUG, "Waiting for 2nd SIPI to complete...");
	if (apic_wait_timeout(10000 /* 10 ms */, 50 /* us */)) {
		printk(BIOS_ERR, "timed out.\n");
		return -1;
	}
	printk(BIOS_DEBUG, "done.\n");

	/* Wait for CPUs to check in. */
	if (wait_for_aps(num_aps, ap_count, 100000 /* 100 ms */, 50 /* us */)) {
		printk(BIOS_ERR, "Not all APs checked in: %d/%d.\n",
		       atomic_read(num_aps), ap_count);
		return -1;
	}

	return 0;
}

static int bsp_do_flight_plan(struct mp_params *mp_params)
{
	int i;
	int ret = 0;
	/*
	 * Set the timeout for the flight plan to a huge minimum value
	 * (>=1 second). Systems with many APs may take longer if there is
	 * contention for resources such as the UART, so scale the timeout
	 * up by increments of 100ms if needed.
	 */
	const int timeout_us = MAX(1000000, 100000 * mp_params->num_cpus);
	const int step_us = 100;
	int num_aps = mp_params->num_cpus - 1;
	struct stopwatch sw;

	stopwatch_init(&sw);

	for (i = 0; i < mp_params->num_records; i++) {
		struct mp_flight_record *rec = &mp_params->flight_plan[i];

		/* Wait for APs if the record is not released. */
		if (atomic_read(&rec->barrier) == 0) {
			/* Wait for the APs to check in. */
			if (wait_for_aps(&rec->cpus_entered, num_aps,
					 timeout_us, step_us)) {
				printk(BIOS_ERR, "MP record %d timeout.\n", i);
				ret = -1;
			}
		}

		if (rec->bsp_call != NULL)
			rec->bsp_call();

		release_barrier(&rec->barrier);
	}

	printk(BIOS_INFO, "%s done after %ld msecs.\n", __func__,
	       stopwatch_duration_msecs(&sw));
	return ret;
}

static void init_bsp(struct bus *cpu_bus)
{
	struct device_path cpu_path;
	struct cpu_info *info;

	/* Print processor name */
	fill_processor_name(processor_name);
	printk(BIOS_INFO, "CPU: %s.\n", processor_name);

	/* Ensure the local APIC is enabled */
	enable_lapic();

	/* Set the device path of the boot CPU. */
	cpu_path.type = DEVICE_PATH_APIC;
	cpu_path.apic.apic_id = lapicid();

	/* Find the device structure for the boot CPU. */
	info = cpu_info();
	info->cpu = alloc_find_dev(cpu_bus, &cpu_path);
	info->cpu->name = processor_name;

	if (info->index != 0)
		printk(BIOS_CRIT, "BSP index(%d) != 0!\n", info->index);

	/* Track BSP in cpu_map structures. */
	cpu_add_map_entry(info->index);
}

/*
 * mp_init() will set up the SIPI vector and bring up the APs according to
 * mp_params. Each flight record will be executed according to the plan. Note
 * that the MP infrastructure uses the SMM default area without saving it.
 * It's up to the chipset or mainboard to either reserve this area in the
 * e820 map or save the region prior to calling mp_init() and restore it
 * after mp_init() returns.
 *
 * At the time mp_init() is called the MTRR MSRs are mirrored into the APs,
 * then caching is enabled before running the flight plan.
 *
 * The MP initialization has the following properties:
 * 1. APs are brought up in parallel.
 * 2. The ordering of coreboot CPU number and APIC ids is not deterministic.
 *    Therefore, one cannot rely on this property or the order of devices in
 *    the device tree unless the chipset or mainboard knows the APIC ids
 *    a priori.
 *
 * mp_init() returns < 0 on error, 0 on success.
 */
static int mp_init(struct bus *cpu_bus, struct mp_params *p)
{
	int num_cpus;
	atomic_t *ap_count;

	init_bsp(cpu_bus);

	if (p == NULL || p->flight_plan == NULL || p->num_records < 1) {
		printk(BIOS_CRIT, "Invalid MP parameters\n");
		return -1;
	}

	/* Default to currently running CPU. */
	num_cpus = allocate_cpu_devices(cpu_bus, p);

	if (num_cpus < p->num_cpus) {
		printk(BIOS_CRIT,
		       "ERROR: More cpus requested (%d) than supported (%d).\n",
		       p->num_cpus, num_cpus);
		return -1;
	}

	/* Copy needed parameters so that APs have a reference to the plan. */
	mp_info.num_records = p->num_records;
	mp_info.records = p->flight_plan;

	/* Load the SIPI vector. */
	ap_count = load_sipi_vector(p);
	if (ap_count == NULL)
		return -1;

	/* Make sure SIPI data hits RAM so the APs that come up will see
	 * the startup code even if the caches are disabled. */
	wbinvd();

	/* Start the APs providing number of APs and the cpus_entered field. */
	global_num_aps = p->num_cpus - 1;
	if (start_aps(cpu_bus, global_num_aps, ap_count) < 0) {
		mdelay(1000);
		printk(BIOS_DEBUG, "%d/%d eventually checked in?\n",
		       atomic_read(ap_count), global_num_aps);
		return -1;
	}

	/* Walk the flight plan for the BSP. */
	return bsp_do_flight_plan(p);
}

/* Calls cpu_initialize(info->index) which calls the coreboot CPU drivers. */
static void mp_initialize_cpu(void)
{
	/* Call back into driver infrastructure for the AP initialization. */
	struct cpu_info *info = cpu_info();
	cpu_initialize(info->index);
}

void smm_initiate_relocation_parallel(void)
{
	if (is_x2apic_mode()) {
		x2apic_send_ipi(LAPIC_DM_SMI | LAPIC_INT_LEVELTRIG, lapicid());
		return;
	}

	if ((lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY)) {
		printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
		if (apic_wait_timeout(1000 /* 1 ms */, 50)) {
			printk(BIOS_DEBUG, "timed out. Aborting.\n");
			return;
		}
		printk(BIOS_DEBUG, "done.\n");
	}

	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(lapicid()));
	lapic_write_around(LAPIC_ICR, LAPIC_INT_ASSERT | LAPIC_DM_SMI);
	if (apic_wait_timeout(1000 /* 1 ms */, 100 /* us */))
		printk(BIOS_DEBUG, "SMI Relocation timed out.\n");
	else
		printk(BIOS_DEBUG, "Relocation complete.\n");
}

DECLARE_SPIN_LOCK(smm_relocation_lock);

/* Send SMI to self with single user serialization. */
void smm_initiate_relocation(void)
{
	spin_lock(&smm_relocation_lock);
	smm_initiate_relocation_parallel();
	spin_unlock(&smm_relocation_lock);
}

struct mp_state {
	struct mp_ops ops;
	int cpu_count;
	uintptr_t perm_smbase;
	size_t perm_smsize;
	/* Size of the real CPU save state */
	size_t smm_real_save_state_size;
	/* Size of allocated CPU save state, MAX(real save state size, stub size) */
	size_t smm_save_state_size;
	uintptr_t reloc_start32_offset;
	int do_smm;
} mp_state;

static int is_smm_enabled(void)
{
	return CONFIG(HAVE_SMI_HANDLER) && mp_state.do_smm;
}

static void smm_disable(void)
{
	mp_state.do_smm = 0;
}

static void smm_enable(void)
{
	if (CONFIG(HAVE_SMI_HANDLER))
		mp_state.do_smm = 1;
}

static void asmlinkage smm_do_relocation(void *arg)
{
	const struct smm_module_params *p;
	int cpu;
	const uintptr_t curr_smbase = SMM_DEFAULT_BASE;
	uintptr_t perm_smbase;

	p = arg;
	cpu = p->cpu;

	if (cpu >= CONFIG_MAX_CPUS) {
		printk(BIOS_CRIT,
		       "Invalid CPU number assigned in SMM stub: %d\n", cpu);
		return;
	}

	/*
	 * The permanent handler runs with all cpus concurrently. Precalculate
	 * the location of the new SMBASE. If using SMM modules then this
	 * calculation needs to match that of the module loader.
	 */
	perm_smbase = smm_get_cpu_smbase(cpu);
	if (!perm_smbase) {
		printk(BIOS_ERR, "%s: bad SMBASE for CPU %d\n", __func__, cpu);
		return;
	}

	/* Setup code checks this callback for validity. */
	printk(BIOS_INFO, "%s : curr_smbase 0x%x perm_smbase 0x%x, cpu = %d\n",
	       __func__, (int)curr_smbase, (int)perm_smbase, cpu);
	mp_state.ops.relocation_handler(cpu, curr_smbase, perm_smbase);

	if (CONFIG(STM)) {
		uintptr_t mseg;

		mseg = mp_state.perm_smbase +
			(mp_state.perm_smsize - CONFIG_MSEG_SIZE);

		stm_setup(mseg, p->cpu,
			  perm_smbase,
			  mp_state.perm_smbase,
			  mp_state.reloc_start32_offset);
	}
}

static void adjust_smm_apic_id_map(struct smm_loader_params *smm_params)
{
	int i;
	struct smm_stub_params *stub_params = smm_params->stub_params;

	for (i = 0; i < CONFIG_MAX_CPUS; i++)
		stub_params->apic_id_to_cpu[i] = cpu_get_apic_id(i);
}

static int install_relocation_handler(int num_cpus, size_t real_save_state_size,
				      size_t save_state_size, uintptr_t perm_smbase)
{
	struct smm_loader_params smm_params = {
		.per_cpu_stack_size = CONFIG_SMM_STUB_STACK_SIZE,
		.num_concurrent_stacks = num_cpus,
		.real_cpu_save_state_size = real_save_state_size,
		.per_cpu_save_state_size = save_state_size,
		.num_concurrent_save_states = 1,
		.handler = smm_do_relocation,
	};

	/* Allow callback to override parameters. */
	if (mp_state.ops.adjust_smm_params != NULL)
		mp_state.ops.adjust_smm_params(&smm_params, 0);

	if (smm_setup_relocation_handler((void *)perm_smbase, &smm_params)) {
		printk(BIOS_ERR, "%s: smm setup failed\n", __func__);
		return -1;
	}
	adjust_smm_apic_id_map(&smm_params);

	mp_state.reloc_start32_offset = smm_params.stub_params->start32_offset;

	return 0;
}

static int install_permanent_handler(int num_cpus, uintptr_t smbase,
				     size_t smsize, size_t real_save_state_size,
				     size_t save_state_size)
{
	/*
	 * All the CPUs will relocate to the permanent handler now. Set the
	 * parameters needed for all CPUs. The placement of each CPU's entry
	 * point is determined by the loader. This code simply provides the
	 * beginning of the SMRAM region, the number of CPUs that will use the
	 * handler, the stack size and save state size for each CPU.
	 */
	struct smm_loader_params smm_params = {
		.per_cpu_stack_size = CONFIG_SMM_MODULE_STACK_SIZE,
		.num_concurrent_stacks = num_cpus,
		.real_cpu_save_state_size = real_save_state_size,
		.per_cpu_save_state_size = save_state_size,
		.num_concurrent_save_states = num_cpus,
	};

	/* Allow callback to override parameters. */
	if (mp_state.ops.adjust_smm_params != NULL)
		mp_state.ops.adjust_smm_params(&smm_params, 1);

	printk(BIOS_DEBUG, "Installing permanent SMM handler to 0x%08lx\n", smbase);

	if (smm_load_module((void *)smbase, smsize, &smm_params))
		return -1;

	adjust_smm_apic_id_map(&smm_params);

	return 0;
}

/* Load SMM handlers as part of MP flight record. */
static void load_smm_handlers(void)
{
	size_t real_save_state_size = mp_state.smm_real_save_state_size;
	size_t smm_save_state_size = mp_state.smm_save_state_size;

	/* Do nothing if SMM is disabled. */
	if (!is_smm_enabled())
		return;

	/* Install handlers. */
	if (install_relocation_handler(mp_state.cpu_count, real_save_state_size,
				       smm_save_state_size, mp_state.perm_smbase) < 0) {
		printk(BIOS_ERR, "Unable to install SMM relocation handler.\n");
		smm_disable();
	}

	if (install_permanent_handler(mp_state.cpu_count, mp_state.perm_smbase,
				      mp_state.perm_smsize, real_save_state_size,
				      smm_save_state_size) < 0) {
		printk(BIOS_ERR, "Unable to install SMM permanent handler.\n");
		smm_disable();
	}

	/* Ensure the SMM handlers hit DRAM before performing first SMI. */
	wbinvd();

	/*
	 * Indicate that the SMM handlers have been loaded and MP
	 * initialization is about to start.
	 */
	if (is_smm_enabled() && mp_state.ops.pre_mp_smm_init != NULL)
		mp_state.ops.pre_mp_smm_init();
}

/* Trigger SMM as part of MP flight record. */
static void trigger_smm_relocation(void)
{
	/* Do nothing if SMM is disabled. */
	if (!is_smm_enabled() || mp_state.ops.per_cpu_smm_trigger == NULL)
		return;
	/* Trigger SMM mode for the currently running processor. */
	mp_state.ops.per_cpu_smm_trigger();
}

static struct mp_callback *ap_callbacks[CONFIG_MAX_CPUS];

/*
 * The callback slots are read and written through a volatile asm move so
 * the compiler cannot cache or reorder the accesses; the "memory" clobber
 * makes each access act as a compiler barrier.
 */
static struct mp_callback *read_callback(struct mp_callback **slot)
{
	struct mp_callback *ret;

	asm volatile ("mov %1, %0\n"
		: "=r" (ret)
		: "m" (*slot)
		: "memory"
	);
	return ret;
}

static void store_callback(struct mp_callback **slot, struct mp_callback *val)
{
	asm volatile ("mov %1, %0\n"
		: "=m" (*slot)
		: "r" (val)
		: "memory"
	);
}

static int run_ap_work(struct mp_callback *val, long expire_us)
{
	int i;
	int cpus_accepted;
	struct stopwatch sw;
	int cur_cpu;

	if (!CONFIG(PARALLEL_MP_AP_WORK)) {
		printk(BIOS_ERR, "APs already parked. PARALLEL_MP_AP_WORK not selected.\n");
		return -1;
	}

	cur_cpu = cpu_index();

	if (cur_cpu < 0) {
		printk(BIOS_ERR, "Invalid CPU index.\n");
		return -1;
	}

	/* Signal to all the APs to run the func. */
	for (i = 0; i < ARRAY_SIZE(ap_callbacks); i++) {
		if (cur_cpu == i)
			continue;
		store_callback(&ap_callbacks[i], val);
	}
	mfence();

	/* Wait for all the APs to signal back that the call has been accepted. */
	if (expire_us > 0)
		stopwatch_init_usecs_expire(&sw, expire_us);

	do {
		cpus_accepted = 0;

		for (i = 0; i < ARRAY_SIZE(ap_callbacks); i++) {
			if (cur_cpu == i)
				continue;
			if (read_callback(&ap_callbacks[i]) == NULL)
				cpus_accepted++;
		}

		if (cpus_accepted == global_num_aps)
			return 0;
	} while (expire_us <= 0 || !stopwatch_expired(&sw));

	printk(BIOS_CRIT, "CRITICAL ERROR: AP call expired. %d/%d CPUs accepted.\n",
	       cpus_accepted, global_num_aps);
	return -1;
}

static void ap_wait_for_instruction(void)
{
	struct mp_callback lcb;
	struct mp_callback **per_cpu_slot;
	int cur_cpu;

	if (!CONFIG(PARALLEL_MP_AP_WORK))
		return;

	cur_cpu = cpu_index();

	if (cur_cpu < 0) {
		printk(BIOS_ERR, "Invalid CPU index.\n");
		return;
	}

	per_cpu_slot = &ap_callbacks[cur_cpu];

	while (1) {
		struct mp_callback *cb = read_callback(per_cpu_slot);

		if (cb == NULL) {
			asm ("pause");
			continue;
		}

		/* Copy to local variable before signaling consumption. */
		memcpy(&lcb, cb, sizeof(lcb));
		mfence();
		store_callback(per_cpu_slot, NULL);
		if (lcb.logical_cpu_number && (cur_cpu !=
				lcb.logical_cpu_number))
			continue;
		else
			lcb.func(lcb.arg);
	}
}

int mp_run_on_aps(void (*func)(void *), void *arg, int logical_cpu_num,
		  long expire_us)
{
	struct mp_callback lcb = { .func = func, .arg = arg,
				.logical_cpu_number = logical_cpu_num};
	return run_ap_work(&lcb, expire_us);
}

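/*
 * Illustrative usage sketch (assumes PARALLEL_MP_AP_WORK is selected and
 * the APs are parked in ap_wait_for_instruction(); the callback name is
 * hypothetical):
 *
 *	static void bump_counter(void *arg)
 *	{
 *		atomic_inc((atomic_t *)arg);	// runs on each accepting AP
 *	}
 *
 *	// Ask every AP to run bump_counter(), waiting up to 100 ms:
 *	mp_run_on_aps(bump_counter, &counter, MP_RUN_ON_ALL_CPUS,
 *		      100 * USECS_PER_MSEC);
 */
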
int mp_run_on_all_aps(void (*func)(void *), void *arg, long expire_us, bool run_parallel)
{
	int ap_index, bsp_index;

	if (run_parallel)
		return mp_run_on_aps(func, arg, 0, expire_us);

	bsp_index = cpu_index();

	const int total_threads = global_num_aps + 1; /* +1 for BSP */

	for (ap_index = 0; ap_index < total_threads; ap_index++) {
		/* skip if BSP */
		if (ap_index == bsp_index)
			continue;
		if (mp_run_on_aps(func, arg, ap_index, expire_us))
			return CB_ERR;
	}

	return CB_SUCCESS;
}

int mp_run_on_all_cpus(void (*func)(void *), void *arg)
{
	/* Run on BSP first. */
	func(arg);

	/* Wait up to 1 second for the APs to finish their previous work. */
	return mp_run_on_aps(func, arg, MP_RUN_ON_ALL_CPUS, 1000 * USECS_PER_MSEC);
}

int mp_park_aps(void)
{
	struct stopwatch sw;
	int ret;
	long duration_msecs;

	stopwatch_init(&sw);

	ret = mp_run_on_aps(park_this_cpu, NULL, MP_RUN_ON_ALL_CPUS,
			    1000 * USECS_PER_MSEC);

	duration_msecs = stopwatch_duration_msecs(&sw);

	if (!ret)
		printk(BIOS_DEBUG, "%s done after %ld msecs.\n", __func__,
		       duration_msecs);
	else
		printk(BIOS_ERR, "%s failed after %ld msecs.\n", __func__,
		       duration_msecs);

	return ret;
}

static struct mp_flight_record mp_steps[] = {
	/* Once the APs are up load the SMM handlers. */
	MP_FR_BLOCK_APS(NULL, load_smm_handlers),
	/* Perform SMM relocation. */
	MP_FR_NOBLOCK_APS(trigger_smm_relocation, trigger_smm_relocation),
	/* Initialize each CPU through the driver framework. */
	MP_FR_BLOCK_APS(mp_initialize_cpu, mp_initialize_cpu),
	/* Wait for APs to finish then optionally start looking for work. */
	MP_FR_BLOCK_APS(ap_wait_for_instruction, NULL),
};

static size_t smm_stub_size(void)
{
	extern unsigned char _binary_smmstub_start[];
	struct rmodule smm_stub;

	if (rmodule_parse(&_binary_smmstub_start, &smm_stub)) {
		printk(BIOS_ERR, "%s: unable to get SMM module size\n", __func__);
		return 0;
	}

	return rmodule_memory_size(&smm_stub);
}

static void fill_mp_state(struct mp_state *state, const struct mp_ops *ops)
{
	/*
	 * Make copy of the ops so that defaults can be set in the non-const
	 * structure if needed.
	 */
	memcpy(&state->ops, ops, sizeof(*ops));

	if (ops->get_cpu_count != NULL)
		state->cpu_count = ops->get_cpu_count();

	if (ops->get_smm_info != NULL)
		ops->get_smm_info(&state->perm_smbase, &state->perm_smsize,
				  &state->smm_real_save_state_size);

	state->smm_save_state_size = MAX(state->smm_real_save_state_size, smm_stub_size());

	/*
	 * Make sure there is enough room for the SMM descriptor
	 */
	if (CONFIG(STM)) {
		state->smm_save_state_size +=
			ALIGN_UP(sizeof(TXT_PROCESSOR_SMM_DESCRIPTOR), 0x100);
	}

	/*
	 * Default to smm_initiate_relocation() if trigger callback isn't
	 * provided.
	 */
	if (CONFIG(HAVE_SMI_HANDLER) &&
	    ops->per_cpu_smm_trigger == NULL)
		mp_state.ops.per_cpu_smm_trigger = smm_initiate_relocation;
}

int mp_init_with_smm(struct bus *cpu_bus, const struct mp_ops *mp_ops)
{
	int ret;
	void *default_smm_area;
	struct mp_params mp_params;

	if (mp_ops->pre_mp_init != NULL)
		mp_ops->pre_mp_init();

	fill_mp_state(&mp_state, mp_ops);

	memset(&mp_params, 0, sizeof(mp_params));

	if (mp_state.cpu_count <= 0) {
		printk(BIOS_ERR, "Invalid cpu_count: %d\n", mp_state.cpu_count);
		return -1;
	}

	/* Sanity check SMM state. */
	if (mp_state.perm_smsize != 0 && mp_state.smm_save_state_size != 0 &&
	    mp_state.ops.relocation_handler != NULL)
		smm_enable();

	if (is_smm_enabled())
		printk(BIOS_INFO, "Will perform SMM setup.\n");

	mp_params.num_cpus = mp_state.cpu_count;
	/* Gather microcode information. */
	if (mp_state.ops.get_microcode_info != NULL)
		mp_state.ops.get_microcode_info(&mp_params.microcode_pointer,
						&mp_params.parallel_microcode_load);
	mp_params.flight_plan = &mp_steps[0];
	mp_params.num_records = ARRAY_SIZE(mp_steps);

	/* Perform backup of default SMM area. */
	default_smm_area = backup_default_smm_area();

	ret = mp_init(cpu_bus, &mp_params);

	restore_default_smm_area(default_smm_area);

	/* Signal callback on success if it's provided. */
	if (ret == 0 && mp_state.ops.post_mp_init != NULL)
		mp_state.ops.post_mp_init();

	return ret;
}
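
/*
 * Illustrative caller sketch (the names here are hypothetical; real
 * platforms define their own mp_ops in chipset/mainboard code):
 *
 *	static const struct mp_ops example_mp_ops = {
 *		.get_cpu_count = example_get_cpu_count,
 *		// .get_smm_info, .relocation_handler, .per_cpu_smm_trigger,
 *		// etc. are optional; SMM setup is skipped when the SMM
 *		// fields are left unset.
 *	};
 *
 *	void example_init_cpus(struct bus *cpu_bus)
 *	{
 *		if (mp_init_with_smm(cpu_bus, &example_mp_ops) < 0)
 *			printk(BIOS_ERR, "MP initialization failure.\n");
 *	}
 */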