/* SPDX-License-Identifier: GPL-2.0-only */

#include <console/console.h>
#include <string.h>
#include <rmodule.h>
#include <commonlib/helpers.h>
#include <cpu/cpu.h>
#include <cpu/intel/microcode.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/gdt.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/name.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <cpu/x86/mp.h>
#include <delay.h>
#include <device/device.h>
#include <device/path.h>
#include <smp/atomic.h>
#include <smp/spinlock.h>
#include <symbols.h>
#include <timer.h>
#include <thread.h>
#include <types.h>

#include <security/intel/stm/SmmStm.h>

#define MAX_APIC_IDS 256

struct mp_callback {
	void (*func)(void *);
	void *arg;
	int logical_cpu_number;
};

static char processor_name[49];

/*
 * An mp_flight_record details a sequence of calls for the APs to perform
 * along with the BSP to coordinate sequencing. Each flight record either
 * provides a barrier for each AP before calling the callback or the APs
 * are allowed to perform the callback without waiting. Regardless, each
 * record has the cpus_entered field incremented for each record. When
 * the BSP observes that cpus_entered matches the number of APs, the
 * bsp_call is called and, upon returning, releases the barrier allowing
 * the APs to make further progress.
 *
 * Note that ap_call() and bsp_call() can be NULL. In the NULL case the
 * callback will just not be called.
 */
struct mp_flight_record {
	atomic_t barrier;
	atomic_t cpus_entered;
	void (*ap_call)(void);
	void (*bsp_call)(void);
} __aligned(CACHELINE_SIZE);

#define _MP_FLIGHT_RECORD(barrier_, ap_func_, bsp_func_) \
	{ \
		.barrier = ATOMIC_INIT(barrier_), \
		.cpus_entered = ATOMIC_INIT(0), \
		.ap_call = ap_func_, \
		.bsp_call = bsp_func_, \
	}

#define MP_FR_BLOCK_APS(ap_func_, bsp_func_) \
	_MP_FLIGHT_RECORD(0, ap_func_, bsp_func_)

#define MP_FR_NOBLOCK_APS(ap_func_, bsp_func_) \
	_MP_FLIGHT_RECORD(1, ap_func_, bsp_func_)
73/* The mp_params structure provides the arguments to the mp subsystem
74 * for bringing up APs. */
75struct mp_params {
76 int num_cpus; /* Total cpus include BSP */
77 int parallel_microcode_load;
78 const void *microcode_pointer;
Aaron Durbin770d7c72016-05-03 17:49:57 -050079 /* Flight plan for APs and BSP. */
80 struct mp_flight_record *flight_plan;
81 int num_records;
82};
83
/* This needs to match the layout in the .module_parameters section. */
struct sipi_params {
	uint16_t gdtlimit;
	uint32_t gdt;
	uint16_t unused;
	uint32_t idt_ptr;
	uint32_t per_cpu_segment_descriptors;
	uint32_t per_cpu_segment_selector;
	uint32_t stack_top;
	uint32_t stack_size;
	uint32_t microcode_lock; /* 0xffffffff means parallel loading. */
	uint32_t microcode_ptr;
	uint32_t msr_table_ptr;
	uint32_t msr_count;
	uint32_t c_handler;
	atomic_t ap_count;
} __packed;

/* This also needs to match the assembly code for saved MSR encoding. */
struct saved_msr {
	uint32_t index;
	uint32_t lo;
	uint32_t hi;
} __packed;

/* The sipi vector rmodule is included in the ramstage using 'objdump -B'. */
extern char _binary_sipi_vector_start[];

/* The SIPI vector is loaded at the SMM_DEFAULT_BASE. The reason is that the
 * memory range is already reserved so the OS cannot use it. That region is
 * free to use for AP bringup before SMM is initialized. */
static const uintptr_t sipi_vector_location = SMM_DEFAULT_BASE;
static const int sipi_vector_location_size = SMM_DEFAULT_SIZE;

struct mp_flight_plan {
	int num_records;
	struct mp_flight_record *records;
};

static int global_num_aps;
static struct mp_flight_plan mp_info;

/* Keep track of device structure for each CPU. */
static struct device *cpus_dev[CONFIG_MAX_CPUS];

static inline void barrier_wait(atomic_t *b)
{
	while (atomic_read(b) == 0)
		asm ("pause");
	mfence();
}

static inline void release_barrier(atomic_t *b)
{
	mfence();
	atomic_set(b, 1);
}
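
/*
 * Illustrative note: these primitives pair up in the flight plan walk below.
 * Each AP increments a record's cpus_entered and blocks in barrier_wait()
 * (see ap_do_flight_plan()), while the BSP waits for cpus_entered to reach
 * the AP count, runs the record's bsp_call and then calls release_barrier()
 * so the APs may proceed (see bsp_do_flight_plan()).
 */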

static enum cb_err wait_for_aps(atomic_t *val, int target, int total_delay,
				int delay_step)
{
	int delayed = 0;
	while (atomic_read(val) != target) {
		udelay(delay_step);
		delayed += delay_step;
		if (delayed >= total_delay) {
			/* Not all APs ready before timeout */
			return CB_ERR;
		}
	}

	/* APs ready before timeout */
	return CB_SUCCESS;
}

static void ap_do_flight_plan(void)
{
	int i;

	for (i = 0; i < mp_info.num_records; i++) {
		struct mp_flight_record *rec = &mp_info.records[i];

		atomic_inc(&rec->cpus_entered);
		barrier_wait(&rec->barrier);

		if (rec->ap_call != NULL)
			rec->ap_call();
	}
}

static void park_this_cpu(void *unused)
{
	stop_this_cpu();
}

/* By the time APs call ap_init() caching has been set up, and microcode has
 * been loaded. */
static void asmlinkage ap_init(void)
{
	struct cpu_info *info = cpu_info();

	/* Ensure the local APIC is enabled */
	enable_lapic();
	setup_lapic_interrupts();

	info->cpu = cpus_dev[info->index];

	cpu_add_map_entry(info->index);

	/* Fix up APIC id with reality. */
	info->cpu->path.apic.apic_id = lapicid();

	if (cpu_is_intel())
		printk(BIOS_INFO, "AP: slot %zu apic_id %x, MCU rev: 0x%08x\n", info->index,
		       info->cpu->path.apic.apic_id, get_current_microcode_rev());
	else
		printk(BIOS_INFO, "AP: slot %zu apic_id %x\n", info->index,
		       info->cpu->path.apic.apic_id);

	/* Walk the flight plan */
	ap_do_flight_plan();

	/* Park the AP. */
	park_this_cpu(NULL);
}

static void setup_default_sipi_vector_params(struct sipi_params *sp)
{
	sp->gdt = (uintptr_t)&gdt;
	sp->gdtlimit = (uintptr_t)&gdt_end - (uintptr_t)&gdt - 1;
	sp->idt_ptr = (uintptr_t)&idtarg;
	sp->per_cpu_segment_descriptors = (uintptr_t)&per_cpu_segment_descriptors;
	sp->per_cpu_segment_selector = per_cpu_segment_selector;
	sp->stack_size = CONFIG_STACK_SIZE;
	sp->stack_top = ALIGN_DOWN((uintptr_t)&_estack, CONFIG_STACK_SIZE);
}

#define NUM_FIXED_MTRRS 11
static const unsigned int fixed_mtrrs[NUM_FIXED_MTRRS] = {
	MTRR_FIX_64K_00000, MTRR_FIX_16K_80000, MTRR_FIX_16K_A0000,
	MTRR_FIX_4K_C0000, MTRR_FIX_4K_C8000, MTRR_FIX_4K_D0000,
	MTRR_FIX_4K_D8000, MTRR_FIX_4K_E0000, MTRR_FIX_4K_E8000,
	MTRR_FIX_4K_F0000, MTRR_FIX_4K_F8000,
};

static inline struct saved_msr *save_msr(int index, struct saved_msr *entry)
{
	msr_t msr;

	msr = rdmsr(index);
	entry->index = index;
	entry->lo = msr.lo;
	entry->hi = msr.hi;

	/* Return the next entry. */
	entry++;
	return entry;
}

static int save_bsp_msrs(char *start, int size)
{
	int msr_count;
	int num_var_mtrrs;
	struct saved_msr *msr_entry;
	int i;

	/* Determine the number of MTRRs that need to be saved. */
	num_var_mtrrs = get_var_mtrr_count();

	/* 2 * num_var_mtrrs for base and mask. +1 for IA32_MTRR_DEF_TYPE. */
	msr_count = 2 * num_var_mtrrs + NUM_FIXED_MTRRS + 1;

	if ((msr_count * sizeof(struct saved_msr)) > size) {
		printk(BIOS_CRIT, "Cannot mirror all %d msrs.\n", msr_count);
		return -1;
	}

	fixed_mtrrs_expose_amd_rwdram();

	msr_entry = (void *)start;
	for (i = 0; i < NUM_FIXED_MTRRS; i++)
		msr_entry = save_msr(fixed_mtrrs[i], msr_entry);

	for (i = 0; i < num_var_mtrrs; i++) {
		msr_entry = save_msr(MTRR_PHYS_BASE(i), msr_entry);
		msr_entry = save_msr(MTRR_PHYS_MASK(i), msr_entry);
	}

	msr_entry = save_msr(MTRR_DEF_TYPE_MSR, msr_entry);

	fixed_mtrrs_hide_amd_rwdram();

	/* Tell static analysis we know value is left unused. */
	(void)msr_entry;

	return msr_count;
}

static atomic_t *load_sipi_vector(struct mp_params *mp_params)
{
	struct rmodule sipi_mod;
	int module_size;
	int num_msrs;
	struct sipi_params *sp;
	char *mod_loc = (void *)sipi_vector_location;
	const int loc_size = sipi_vector_location_size;
	atomic_t *ap_count = NULL;

	if (rmodule_parse(&_binary_sipi_vector_start, &sipi_mod)) {
		printk(BIOS_CRIT, "Unable to parse sipi module.\n");
		return ap_count;
	}

	if (rmodule_entry_offset(&sipi_mod) != 0) {
		printk(BIOS_CRIT, "SIPI module entry offset is not 0!\n");
		return ap_count;
	}

	if (rmodule_load_alignment(&sipi_mod) != 4096) {
		printk(BIOS_CRIT, "SIPI module load alignment(%d) != 4096.\n",
		       rmodule_load_alignment(&sipi_mod));
		return ap_count;
	}

	module_size = rmodule_memory_size(&sipi_mod);

	/* Align to 4 bytes. */
	module_size = ALIGN_UP(module_size, 4);

	if (module_size > loc_size) {
		printk(BIOS_CRIT, "SIPI module size (%d) > region size (%d).\n",
		       module_size, loc_size);
		return ap_count;
	}

	num_msrs = save_bsp_msrs(&mod_loc[module_size], loc_size - module_size);

	if (num_msrs < 0) {
		printk(BIOS_CRIT, "Error mirroring BSP's msrs.\n");
		return ap_count;
	}

	if (rmodule_load(mod_loc, &sipi_mod)) {
		printk(BIOS_CRIT, "Unable to load SIPI module.\n");
		return ap_count;
	}

	sp = rmodule_parameters(&sipi_mod);

	if (sp == NULL) {
		printk(BIOS_CRIT, "SIPI module has no parameters.\n");
		return ap_count;
	}

	setup_default_sipi_vector_params(sp);
	/* Setup MSR table. */
	sp->msr_table_ptr = (uintptr_t)&mod_loc[module_size];
	sp->msr_count = num_msrs;
	/* Provide pointer to microcode patch. */
	sp->microcode_ptr = (uintptr_t)mp_params->microcode_pointer;
	/* Pass on ability to load microcode in parallel. */
	if (mp_params->parallel_microcode_load)
		sp->microcode_lock = ~0;
	else
		sp->microcode_lock = 0;
	sp->c_handler = (uintptr_t)&ap_init;
	ap_count = &sp->ap_count;
	atomic_set(ap_count, 0);

	return ap_count;
}

static int allocate_cpu_devices(struct bus *cpu_bus, struct mp_params *p)
{
	int i;
	int max_cpus;
	struct cpu_info *info;

	max_cpus = p->num_cpus;
	if (max_cpus > CONFIG_MAX_CPUS) {
		printk(BIOS_CRIT, "CPU count(%d) exceeds CONFIG_MAX_CPUS(%d)\n",
		       max_cpus, CONFIG_MAX_CPUS);
		max_cpus = CONFIG_MAX_CPUS;
	}

	info = cpu_info();
	for (i = 1; i < max_cpus; i++) {
		struct device_path cpu_path;
		struct device *new;

		/* Build the CPU device path */
		cpu_path.type = DEVICE_PATH_APIC;

		/* Assuming linear APIC space allocation. AP will set its own
		   APIC id in the ap_init() path above. */
		cpu_path.apic.apic_id = info->cpu->path.apic.apic_id + i;

		/* Allocate the new CPU device structure */
		new = alloc_find_dev(cpu_bus, &cpu_path);
		if (new == NULL) {
			printk(BIOS_CRIT, "Could not allocate CPU device\n");
			max_cpus--;
			continue;
		}
		new->name = processor_name;
		cpus_dev[i] = new;
	}

	return max_cpus;
}

static enum cb_err apic_wait_timeout(int total_delay, int delay_step)
{
	int total = 0;

	while (lapic_busy()) {
		udelay(delay_step);
		total += delay_step;
		if (total >= total_delay) {
			/* LAPIC not ready before the timeout */
			return CB_ERR;
		}
	}

	/* LAPIC ready before the timeout */
	return CB_SUCCESS;
}

/* Send Startup IPI to APs */
static enum cb_err send_sipi_to_aps(int ap_count, atomic_t *num_aps, int sipi_vector)
{
	if (lapic_busy()) {
		printk(BIOS_DEBUG, "Waiting for ICR not to be busy...\n");
		if (apic_wait_timeout(1000 /* 1 ms */, 50) != CB_SUCCESS) {
			printk(BIOS_ERR, "timed out. Aborting.\n");
			return CB_ERR;
		}
		printk(BIOS_DEBUG, "done.\n");
	}

	lapic_send_ipi_others(LAPIC_INT_ASSERT | LAPIC_DM_STARTUP | sipi_vector);
	printk(BIOS_DEBUG, "Waiting for SIPI to complete...\n");
	if (apic_wait_timeout(10000 /* 10 ms */, 50 /* us */) != CB_SUCCESS) {
		printk(BIOS_ERR, "timed out.\n");
		return CB_ERR;
	}
	printk(BIOS_DEBUG, "done.\n");
	return CB_SUCCESS;
}

static enum cb_err start_aps(struct bus *cpu_bus, int ap_count, atomic_t *num_aps)
{
	int sipi_vector;
	/* Max location is 4KiB below 1MiB */
	const int max_vector_loc = ((1 << 20) - (1 << 12)) >> 12;

	if (ap_count == 0)
		return CB_SUCCESS;

	/* The vector is sent as a 4k aligned address in one byte. */
	sipi_vector = sipi_vector_location >> 12;

	if (sipi_vector > max_vector_loc) {
		printk(BIOS_CRIT, "SIPI vector too large! 0x%08x\n",
		       sipi_vector);
		return CB_ERR;
	}

	printk(BIOS_DEBUG, "Attempting to start %d APs\n", ap_count);

	if (lapic_busy()) {
		printk(BIOS_DEBUG, "Waiting for ICR not to be busy...\n");
		if (apic_wait_timeout(1000 /* 1 ms */, 50) != CB_SUCCESS) {
			printk(BIOS_ERR, "timed out. Aborting.\n");
			return CB_ERR;
		}
		printk(BIOS_DEBUG, "done.\n");
	}

	/* Send INIT IPI to all but self. */
	lapic_send_ipi_others(LAPIC_INT_ASSERT | LAPIC_DM_INIT);

	if (!CONFIG(X86_INIT_NEED_1_SIPI)) {
		printk(BIOS_DEBUG, "Waiting for 10ms after sending INIT.\n");
		mdelay(10);

		/* Send 1st Startup IPI (SIPI) */
		if (send_sipi_to_aps(ap_count, num_aps, sipi_vector) != CB_SUCCESS)
			return CB_ERR;

		/* Wait for CPUs to check in up to 200 us. */
		wait_for_aps(num_aps, ap_count, 200 /* us */, 15 /* us */);
	}

	/* Send final SIPI */
	if (send_sipi_to_aps(ap_count, num_aps, sipi_vector) != CB_SUCCESS)
		return CB_ERR;

	/* Wait for CPUs to check in. */
	if (wait_for_aps(num_aps, ap_count, 100000 /* 100 ms */, 50 /* us */) != CB_SUCCESS) {
		printk(BIOS_ERR, "Not all APs checked in: %d/%d.\n",
		       atomic_read(num_aps), ap_count);
		return CB_ERR;
	}

	return CB_SUCCESS;
}

static enum cb_err bsp_do_flight_plan(struct mp_params *mp_params)
{
	int i;
	enum cb_err ret = CB_SUCCESS;
	/*
	 * Set time out for flight plan to a huge minimum value (>=1 second).
	 * CPUs with many APs may take longer if there is contention for
	 * resources such as UART, so scale the time out up by increments of
	 * 100ms if needed.
	 */
	const int timeout_us = MAX(1000000, 100000 * mp_params->num_cpus);
	const int step_us = 100;
	int num_aps = mp_params->num_cpus - 1;
	struct stopwatch sw;

	stopwatch_init(&sw);

	for (i = 0; i < mp_params->num_records; i++) {
		struct mp_flight_record *rec = &mp_params->flight_plan[i];

		/* Wait for APs if the record is not released. */
		if (atomic_read(&rec->barrier) == 0) {
			/* Wait for the APs to check in. */
			if (wait_for_aps(&rec->cpus_entered, num_aps,
					 timeout_us, step_us) != CB_SUCCESS) {
				printk(BIOS_ERR, "MP record %d timeout.\n", i);
				ret = CB_ERR;
			}
		}

		if (rec->bsp_call != NULL)
			rec->bsp_call();

		release_barrier(&rec->barrier);
	}

	printk(BIOS_INFO, "%s done after %ld msecs.\n", __func__,
	       stopwatch_duration_msecs(&sw));
	return ret;
}

static void init_bsp(struct bus *cpu_bus)
{
	struct device_path cpu_path;
	struct cpu_info *info;

	/* Print processor name */
	fill_processor_name(processor_name);
	printk(BIOS_INFO, "CPU: %s.\n", processor_name);

	/* Ensure the local APIC is enabled */
	enable_lapic();
	setup_lapic_interrupts();

	/* Set the device path of the boot CPU. */
	cpu_path.type = DEVICE_PATH_APIC;
	cpu_path.apic.apic_id = lapicid();

	/* Find the device structure for the boot CPU. */
	info = cpu_info();
	info->cpu = alloc_find_dev(cpu_bus, &cpu_path);
	info->cpu->name = processor_name;

	if (info->index != 0)
		printk(BIOS_CRIT, "BSP index(%zd) != 0!\n", info->index);

	/* Track BSP in cpu_map structures. */
	cpu_add_map_entry(info->index);
}

/*
 * mp_init() will set up the SIPI vector and bring up the APs according to
 * mp_params. Each flight record will be executed according to the plan. Note
 * that the MP infrastructure uses the SMM default area without saving it. It's
 * up to the chipset or mainboard to either e820 reserve this area or save this
 * region prior to calling mp_init() and restore it after mp_init() returns.
 *
 * At the time mp_init() is called the MTRR MSRs are mirrored into the APs,
 * then caching is enabled before running the flight plan.
 *
 * The MP initialization has the following properties:
 * 1. APs are brought up in parallel.
 * 2. The ordering of coreboot CPU number and APIC ids is not deterministic.
 *    Therefore, one cannot rely on this property or the order of devices in
 *    the device tree unless the chipset or mainboard know the APIC ids
 *    a priori.
 */
static enum cb_err mp_init(struct bus *cpu_bus, struct mp_params *p)
{
	int num_cpus;
	atomic_t *ap_count;

	init_bsp(cpu_bus);

	if (p == NULL || p->flight_plan == NULL || p->num_records < 1) {
		printk(BIOS_CRIT, "Invalid MP parameters\n");
		return CB_ERR;
	}

	/* We just need to run things on the BSP */
	if (!CONFIG(SMP))
		return bsp_do_flight_plan(p);

	/* Default to currently running CPU. */
	num_cpus = allocate_cpu_devices(cpu_bus, p);

	if (num_cpus < p->num_cpus) {
		printk(BIOS_CRIT,
		       "ERROR: More cpus requested (%d) than supported (%d).\n",
		       p->num_cpus, num_cpus);
		return CB_ERR;
	}

	/* Copy needed parameters so that APs have a reference to the plan. */
	mp_info.num_records = p->num_records;
	mp_info.records = p->flight_plan;

	/* Load the SIPI vector. */
	ap_count = load_sipi_vector(p);
	if (ap_count == NULL)
		return CB_ERR;

	/* Make sure SIPI data hits RAM so the APs that come up will see
	 * the startup code even if the caches are disabled. */
	wbinvd();

	/* Start the APs providing number of APs and the cpus_entered field. */
	global_num_aps = p->num_cpus - 1;
	if (start_aps(cpu_bus, global_num_aps, ap_count) != CB_SUCCESS) {
		mdelay(1000);
		printk(BIOS_DEBUG, "%d/%d eventually checked in?\n",
		       atomic_read(ap_count), global_num_aps);
		return CB_ERR;
	}

	/* Walk the flight plan for the BSP. */
	return bsp_do_flight_plan(p);
}

/* Calls cpu_initialize(info->index) which calls the coreboot CPU drivers. */
static void mp_initialize_cpu(void)
{
	/* Call back into driver infrastructure for the AP initialization. */
	struct cpu_info *info = cpu_info();
	cpu_initialize(info->index);
}

void smm_initiate_relocation_parallel(void)
{
	if (lapic_busy()) {
		printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
		if (apic_wait_timeout(1000 /* 1 ms */, 50) != CB_SUCCESS) {
			printk(BIOS_DEBUG, "timed out. Aborting.\n");
			return;
		}
		printk(BIOS_DEBUG, "done.\n");
	}

	lapic_send_ipi_self(LAPIC_INT_ASSERT | LAPIC_DM_SMI);

	if (lapic_busy()) {
		if (apic_wait_timeout(1000 /* 1 ms */, 100 /* us */) != CB_SUCCESS) {
			printk(BIOS_DEBUG, "SMI Relocation timed out.\n");
			return;
		}
	}
	printk(BIOS_DEBUG, "Relocation complete.\n");
}

DECLARE_SPIN_LOCK(smm_relocation_lock);

/* Send SMI to self with single user serialization. */
void smm_initiate_relocation(void)
{
	spin_lock(&smm_relocation_lock);
	smm_initiate_relocation_parallel();
	spin_unlock(&smm_relocation_lock);
}

struct mp_state {
	struct mp_ops ops;
	int cpu_count;
	uintptr_t perm_smbase;
	size_t perm_smsize;
	size_t smm_save_state_size;
	uintptr_t reloc_start32_offset;
	int do_smm;
} mp_state;

static int is_smm_enabled(void)
{
	return CONFIG(HAVE_SMI_HANDLER) && mp_state.do_smm;
}

static void smm_disable(void)
{
	mp_state.do_smm = 0;
}

static void smm_enable(void)
{
	if (CONFIG(HAVE_SMI_HANDLER))
		mp_state.do_smm = 1;
}

/*
 * This code is built as part of ramstage, but it actually runs in SMM. This
 * means that ENV_SMM is 0, but we are actually executing in the environment
 * setup by the smm_stub.
 */
static void asmlinkage smm_do_relocation(void *arg)
{
	const struct smm_module_params *p;
	int cpu;
	const uintptr_t curr_smbase = SMM_DEFAULT_BASE;
	uintptr_t perm_smbase;

	p = arg;
	cpu = p->cpu;

	if (cpu >= CONFIG_MAX_CPUS) {
		printk(BIOS_CRIT,
		       "Invalid CPU number assigned in SMM stub: %d\n", cpu);
		return;
	}

	/*
	 * The permanent handler runs with all cpus concurrently. Precalculate
	 * the location of the new SMBASE. If using SMM modules then this
	 * calculation needs to match that of the module loader.
	 */
	perm_smbase = smm_get_cpu_smbase(cpu);
	if (!perm_smbase) {
		printk(BIOS_ERR, "%s: bad SMBASE for CPU %d\n", __func__, cpu);
		return;
	}

	/* Setup code checks this callback for validity. */
	printk(BIOS_INFO, "%s : curr_smbase 0x%x perm_smbase 0x%x, cpu = %d\n",
	       __func__, (int)curr_smbase, (int)perm_smbase, cpu);
	mp_state.ops.relocation_handler(cpu, curr_smbase, perm_smbase);

	if (CONFIG(STM)) {
		uintptr_t mseg;

		mseg = mp_state.perm_smbase +
		       (mp_state.perm_smsize - CONFIG_MSEG_SIZE);

		stm_setup(mseg, p->cpu,
			  perm_smbase,
			  mp_state.perm_smbase,
			  mp_state.reloc_start32_offset);
	}
}

static void adjust_smm_apic_id_map(struct smm_loader_params *smm_params)
{
	int i;
	struct smm_stub_params *stub_params = smm_params->stub_params;

	for (i = 0; i < CONFIG_MAX_CPUS; i++)
		stub_params->apic_id_to_cpu[i] = cpu_get_apic_id(i);
}

static enum cb_err install_relocation_handler(int num_cpus, size_t save_state_size)
{
	struct smm_loader_params smm_params = {
		.num_cpus = num_cpus,
		.cpu_save_state_size = save_state_size,
		.num_concurrent_save_states = 1,
		.handler = smm_do_relocation,
	};

	if (smm_setup_relocation_handler(&smm_params)) {
		printk(BIOS_ERR, "%s: smm setup failed\n", __func__);
		return CB_ERR;
	}
	adjust_smm_apic_id_map(&smm_params);

	mp_state.reloc_start32_offset = smm_params.stub_params->start32_offset;

	return CB_SUCCESS;
}

static enum cb_err install_permanent_handler(int num_cpus, uintptr_t smbase,
					     size_t smsize, size_t save_state_size)
{
	/*
	 * All the CPUs will relocate to the permanent handler now. Set
	 * parameters needed for all CPUs. The placement of each CPU's entry
	 * point is determined by the loader. This code simply provides the
	 * beginning of the SMRAM region, the number of CPUs who will use the
	 * handler, the stack size and save state size for each CPU.
	 */
	struct smm_loader_params smm_params = {
		.num_cpus = num_cpus,
		.cpu_save_state_size = save_state_size,
		.num_concurrent_save_states = num_cpus,
	};

	printk(BIOS_DEBUG, "Installing permanent SMM handler to 0x%08lx\n", smbase);

	if (smm_load_module(smbase, smsize, &smm_params))
		return CB_ERR;

	adjust_smm_apic_id_map(&smm_params);

	return CB_SUCCESS;
}

/* Load SMM handlers as part of MP flight record. */
static void load_smm_handlers(void)
{
	const size_t save_state_size = mp_state.smm_save_state_size;

	/* Do nothing if SMM is disabled. */
	if (!is_smm_enabled())
		return;

	if (smm_setup_stack(mp_state.perm_smbase, mp_state.perm_smsize, mp_state.cpu_count,
			    CONFIG_SMM_MODULE_STACK_SIZE)) {
		printk(BIOS_ERR, "Unable to install SMM relocation handler.\n");
		smm_disable();
	}

	/* Install handlers. */
	if (install_relocation_handler(mp_state.cpu_count, save_state_size) != CB_SUCCESS) {
		printk(BIOS_ERR, "Unable to install SMM relocation handler.\n");
		smm_disable();
	}

	if (install_permanent_handler(mp_state.cpu_count, mp_state.perm_smbase,
				      mp_state.perm_smsize, save_state_size) != CB_SUCCESS) {
		printk(BIOS_ERR, "Unable to install SMM permanent handler.\n");
		smm_disable();
	}

	/* Ensure the SMM handlers hit DRAM before performing first SMI. */
	wbinvd();

	/*
	 * Indicate that the SMM handlers have been loaded and MP
	 * initialization is about to start.
	 */
	if (is_smm_enabled() && mp_state.ops.pre_mp_smm_init != NULL)
		mp_state.ops.pre_mp_smm_init();
}

/* Trigger SMM as part of MP flight record. */
static void trigger_smm_relocation(void)
{
	/* Do nothing if SMM is disabled. */
	if (!is_smm_enabled() || mp_state.ops.per_cpu_smm_trigger == NULL)
		return;
	/* Trigger SMM mode for the currently running processor. */
	mp_state.ops.per_cpu_smm_trigger();
}

static struct mp_callback *ap_callbacks[CONFIG_MAX_CPUS];

enum AP_STATUS {
	/* The AP has taken the task but has not yet finished it */
	AP_BUSY = 1,
	/* The AP has finished the task, or has no task to run yet */
	AP_NOT_BUSY
};

static atomic_t ap_status[CONFIG_MAX_CPUS];

static struct mp_callback *read_callback(struct mp_callback **slot)
{
	struct mp_callback *ret;

	asm volatile ("mov %1, %0\n"
		      : "=r" (ret)
		      : "m" (*slot)
		      : "memory"
	);
	return ret;
}

static void store_callback(struct mp_callback **slot, struct mp_callback *val)
{
	asm volatile ("mov %1, %0\n"
		      : "=m" (*slot)
		      : "r" (val)
		      : "memory"
	);
}

static enum cb_err run_ap_work(struct mp_callback *val, long expire_us, bool wait_ap_finish)
{
	int i;
	int cpus_accepted, cpus_finish;
	struct stopwatch sw;
	int cur_cpu;

	if (!CONFIG(PARALLEL_MP_AP_WORK)) {
		printk(BIOS_ERR, "APs already parked. PARALLEL_MP_AP_WORK not selected.\n");
		return CB_ERR;
	}

	cur_cpu = cpu_index();

	if (cur_cpu < 0) {
		printk(BIOS_ERR, "Invalid CPU index.\n");
		return CB_ERR;
	}

	/* Signal to all the APs to run the func. */
	for (i = 0; i < ARRAY_SIZE(ap_callbacks); i++) {
		if (cur_cpu == i)
			continue;
		store_callback(&ap_callbacks[i], val);
	}
	mfence();

	/* Wait for all the APs to signal back that call has been accepted. */
	if (expire_us > 0)
		stopwatch_init_usecs_expire(&sw, expire_us);

	do {
		cpus_accepted = 0;
		cpus_finish = 0;

		for (i = 0; i < ARRAY_SIZE(ap_callbacks); i++) {
			if (cur_cpu == i)
				continue;

			if (read_callback(&ap_callbacks[i]) == NULL) {
				cpus_accepted++;
				/* Only count an AP as finished once it has accepted the task and is no longer busy */
				if (atomic_read(&ap_status[i]) == AP_NOT_BUSY)
					cpus_finish++;
			}
		}

		/*
		 * If wait_ap_finish is true, wait until every AP has finished
		 * its task and returned; otherwise it is enough that every AP
		 * has accepted its task.
		 */
		if (cpus_accepted == global_num_aps)
			if (!wait_ap_finish || (cpus_finish == global_num_aps))
				return CB_SUCCESS;

	} while (expire_us <= 0 || !stopwatch_expired(&sw));

	printk(BIOS_CRIT, "CRITICAL ERROR: AP call expired. %d/%d CPUs accepted.\n",
	       cpus_accepted, global_num_aps);
	return CB_ERR;
}

static void ap_wait_for_instruction(void)
{
	struct mp_callback lcb;
	struct mp_callback **per_cpu_slot;
	int cur_cpu;

	if (!CONFIG(PARALLEL_MP_AP_WORK))
		return;

	cur_cpu = cpu_index();

	if (cur_cpu < 0) {
		printk(BIOS_ERR, "Invalid CPU index.\n");
		return;
	}

	per_cpu_slot = &ap_callbacks[cur_cpu];

	/* Initialize ap_status[cur_cpu] to AP_NOT_BUSY, i.e. ready to take a job */
	atomic_set(&ap_status[cur_cpu], AP_NOT_BUSY);

	while (1) {
		struct mp_callback *cb = read_callback(per_cpu_slot);

		if (cb == NULL) {
			asm ("pause");
			continue;
		}
		/*
		 * Set ap_status to AP_BUSY before store_callback(per_cpu_slot, NULL)
		 * so the BSP knows the AP has taken the task and is busy,
		 * avoiding a race condition.
		 */
		atomic_set(&ap_status[cur_cpu], AP_BUSY);

		/* Copy to local variable before signaling consumption. */
		memcpy(&lcb, cb, sizeof(lcb));
		mfence();
		store_callback(per_cpu_slot, NULL);

		if (lcb.logical_cpu_number == MP_RUN_ON_ALL_CPUS ||
		    (cur_cpu == lcb.logical_cpu_number))
			lcb.func(lcb.arg);

		atomic_set(&ap_status[cur_cpu], AP_NOT_BUSY);
	}
}

enum cb_err mp_run_on_aps(void (*func)(void *), void *arg, int logical_cpu_num,
			  long expire_us)
{
	struct mp_callback lcb = { .func = func, .arg = arg,
				   .logical_cpu_number = logical_cpu_num};
	return run_ap_work(&lcb, expire_us, false);
}

static enum cb_err mp_run_on_aps_and_wait_for_complete(void (*func)(void *), void *arg,
						       int logical_cpu_num, long expire_us)
{
	struct mp_callback lcb = { .func = func, .arg = arg,
				   .logical_cpu_number = logical_cpu_num};
	return run_ap_work(&lcb, expire_us, true);
}

enum cb_err mp_run_on_all_aps(void (*func)(void *), void *arg, long expire_us,
			      bool run_parallel)
{
	int ap_index, bsp_index;

	if (run_parallel)
		return mp_run_on_aps(func, arg, MP_RUN_ON_ALL_CPUS, expire_us);

	bsp_index = cpu_index();

	const int total_threads = global_num_aps + 1; /* +1 for BSP */

	for (ap_index = 0; ap_index < total_threads; ap_index++) {
		/* skip if BSP */
		if (ap_index == bsp_index)
			continue;
		if (mp_run_on_aps(func, arg, ap_index, expire_us) != CB_SUCCESS)
			return CB_ERR;
	}

	return CB_SUCCESS;
}

enum cb_err mp_run_on_all_cpus(void (*func)(void *), void *arg)
{
	/* Run on BSP first. */
	func(arg);

	/* Wait up to 1 second for the APs to finish their previous work. */
	return mp_run_on_aps(func, arg, MP_RUN_ON_ALL_CPUS, 1000 * USECS_PER_MSEC);
}

enum cb_err mp_run_on_all_cpus_synchronously(void (*func)(void *), void *arg)
{
	/* Run on BSP first. */
	func(arg);

	/* Wait up to 1 second for the APs to finish their previous work. */
	return mp_run_on_aps_and_wait_for_complete(func, arg, MP_RUN_ON_ALL_CPUS,
						   1000 * USECS_PER_MSEC);
}
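
/*
 * Illustrative sketch only: how a caller might use the AP work API above. It
 * assumes PARALLEL_MP_AP_WORK is selected so that the APs are parked in
 * ap_wait_for_instruction() rather than halted. The callback name
 * do_per_cpu_work() is a hypothetical placeholder.
 *
 *   static void do_per_cpu_work(void *arg)
 *   {
 *           // Runs on every CPU targeted by the calls below; anything done
 *           // here must be safe to execute on multiple CPUs concurrently.
 *   }
 *
 *   // BSP first, then all APs in parallel, 1 s timeout:
 *   //   mp_run_on_all_cpus(do_per_cpu_work, NULL);
 *   // A single AP (logical CPU 2), 1 ms timeout:
 *   //   mp_run_on_aps(do_per_cpu_work, NULL, 2, 1000);
 */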

enum cb_err mp_park_aps(void)
{
	struct stopwatch sw;
	enum cb_err ret;
	long duration_msecs;

	stopwatch_init(&sw);

	ret = mp_run_on_aps(park_this_cpu, NULL, MP_RUN_ON_ALL_CPUS,
			    1000 * USECS_PER_MSEC);

	duration_msecs = stopwatch_duration_msecs(&sw);

	if (ret == CB_SUCCESS)
		printk(BIOS_DEBUG, "%s done after %ld msecs.\n", __func__,
		       duration_msecs);
	else
		printk(BIOS_ERR, "%s failed after %ld msecs.\n", __func__,
		       duration_msecs);

	return ret;
}

static struct mp_flight_record mp_steps[] = {
	/* Once the APs are up load the SMM handlers. */
	MP_FR_BLOCK_APS(NULL, load_smm_handlers),
	/* Perform SMM relocation. */
	MP_FR_NOBLOCK_APS(trigger_smm_relocation, trigger_smm_relocation),
	/* Initialize each CPU through the driver framework. */
	MP_FR_BLOCK_APS(mp_initialize_cpu, mp_initialize_cpu),
	/* Wait for APs to finish then optionally start looking for work. */
	MP_FR_BLOCK_APS(ap_wait_for_instruction, NULL),
};

static void fill_mp_state_smm(struct mp_state *state, const struct mp_ops *ops)
{
	if (ops->get_smm_info != NULL)
		ops->get_smm_info(&state->perm_smbase, &state->perm_smsize,
				  &state->smm_save_state_size);

	/*
	 * Make sure there is enough room for the SMM descriptor
	 */
	if (CONFIG(STM)) {
		state->smm_save_state_size +=
			ALIGN_UP(sizeof(TXT_PROCESSOR_SMM_DESCRIPTOR), 0x100);
	}

	/*
	 * Default to smm_initiate_relocation() if trigger callback isn't
	 * provided.
	 */
	if (ops->per_cpu_smm_trigger == NULL)
		mp_state.ops.per_cpu_smm_trigger = smm_initiate_relocation;
}

static void fill_mp_state(struct mp_state *state, const struct mp_ops *ops)
{
	/*
	 * Make copy of the ops so that defaults can be set in the non-const
	 * structure if needed.
	 */
	memcpy(&state->ops, ops, sizeof(*ops));

	if (ops->get_cpu_count != NULL)
		state->cpu_count = ops->get_cpu_count();

	if (CONFIG(HAVE_SMI_HANDLER))
		fill_mp_state_smm(state, ops);
}

static enum cb_err do_mp_init_with_smm(struct bus *cpu_bus, const struct mp_ops *mp_ops)
{
	enum cb_err ret;
	void *default_smm_area;
	struct mp_params mp_params;

	if (mp_ops->pre_mp_init != NULL)
		mp_ops->pre_mp_init();

	fill_mp_state(&mp_state, mp_ops);

	memset(&mp_params, 0, sizeof(mp_params));

	if (mp_state.cpu_count <= 0) {
		printk(BIOS_ERR, "Invalid cpu_count: %d\n", mp_state.cpu_count);
		return CB_ERR;
	}

	/* Sanity check SMM state. */
	if (mp_state.perm_smsize != 0 && mp_state.smm_save_state_size != 0 &&
	    mp_state.ops.relocation_handler != NULL)
		smm_enable();

	if (is_smm_enabled())
		printk(BIOS_INFO, "Will perform SMM setup.\n");

	mp_params.num_cpus = mp_state.cpu_count;
	/* Gather microcode information. */
	if (mp_state.ops.get_microcode_info != NULL)
		mp_state.ops.get_microcode_info(&mp_params.microcode_pointer,
						&mp_params.parallel_microcode_load);
	mp_params.flight_plan = &mp_steps[0];
	mp_params.num_records = ARRAY_SIZE(mp_steps);

	/* Perform backup of default SMM area. */
	default_smm_area = backup_default_smm_area();

	ret = mp_init(cpu_bus, &mp_params);

	restore_default_smm_area(default_smm_area);

	/* Signal callback on success if it's provided. */
	if (ret == CB_SUCCESS && mp_state.ops.post_mp_init != NULL)
		mp_state.ops.post_mp_init();

	return ret;
}

enum cb_err mp_init_with_smm(struct bus *cpu_bus, const struct mp_ops *mp_ops)
{
	enum cb_err ret = do_mp_init_with_smm(cpu_bus, mp_ops);

	if (ret != CB_SUCCESS)
		printk(BIOS_ERR, "MP initialization failure.\n");

	return ret;
}
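
/*
 * Illustrative sketch only: a minimal, hypothetical mp_ops that a platform
 * without SMM might pass to mp_init_with_smm(). Real platforms additionally
 * provide the SMM callbacks consumed by fill_mp_state_smm() and
 * smm_do_relocation() above (get_smm_info, relocation_handler, ...).
 *
 *   static int get_cpu_count(void)
 *   {
 *           return CONFIG_MAX_CPUS;
 *   }
 *
 *   static const struct mp_ops mp_ops_no_smm = {
 *           .get_cpu_count = get_cpu_count,
 *   };
 *
 *   // Typically invoked from the CPU cluster device's init path:
 *   //   mp_init_with_smm(cpu_bus, &mp_ops_no_smm);
 */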