blob: 1b929c7ffa8f9a9d420ca9a1ea000891a5add910 [file] [log] [blame]
Felix Heldbc134812021-02-10 02:26:10 +01001/* SPDX-License-Identifier: GPL-2.0-only */
2
Arthur Heymanse48dcb72022-05-31 21:48:15 +02003#include <cpu/x86/mtrr.h>
4#include <cpu/x86/mp.h>
5#include <amdblocks/cpu.h>
Felix Heldbc134812021-02-10 02:26:10 +01006#include <amdblocks/smm.h>
7#include <console/console.h>
8#include <cpu/amd/amd64_save_state.h>
9#include <cpu/amd/msr.h>
Arthur Heymanse48dcb72022-05-31 21:48:15 +020010#include <cpu/amd/mtrr.h>
Felix Heldbc134812021-02-10 02:26:10 +010011#include <cpu/cpu.h>
12#include <cpu/x86/msr.h>
13#include <cpu/x86/smm.h>
14#include <types.h>
15
Arthur Heymanse48dcb72022-05-31 21:48:15 +020016/* AP MTRRs will be synced to the BSP in the SIPI vector so set them up before MP init. */
17static void pre_mp_init(void)
18{
19 const msr_t syscfg = rdmsr(SYSCFG_MSR);
20 if (syscfg.lo & SYSCFG_MSR_TOM2WB)
21 x86_setup_mtrrs_with_detect_no_above_4gb();
22 else
23 x86_setup_mtrrs_with_detect();
24 x86_mtrr_check();
25}
26
27static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
28 size_t *smm_save_state_size)
Felix Heldbc134812021-02-10 02:26:10 +010029{
Arthur Heymans8cd1dfa2022-05-31 22:00:13 +020030 printk(BIOS_DEBUG, "Setting up SMI for CPU\n");
31
Felix Heldbc134812021-02-10 02:26:10 +010032 uintptr_t tseg_base;
33 size_t tseg_size;
34
35 smm_region(&tseg_base, &tseg_size);
36
Arthur Heymans8cd1dfa2022-05-31 22:00:13 +020037 if (!IS_ALIGNED(tseg_base, tseg_size)) {
38 printk(BIOS_ERR, "TSEG base not aligned to TSEG size\n");
39 return;
40 }
41 /* Minimum granularity for TSEG MSRs */
42 if (tseg_size < 128 * KiB) {
43 printk(BIOS_ERR, "TSEG size (0x%zx) too small\n", tseg_size);
44 return;
45 }
Felix Heldbc134812021-02-10 02:26:10 +010046
Felix Heldbc134812021-02-10 02:26:10 +010047
48 smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);
49 *smm_save_state_size = sizeof(amd64_smm_state_save_area_t);
50}
51
Arthur Heymans43ed5d22022-05-31 21:50:51 +020052static void tseg_valid(void)
53{
54 msr_t mask = rdmsr(SMM_MASK_MSR);
55 mask.lo |= SMM_TSEG_VALID;
56
57 wrmsr(SMM_MASK_MSR, mask);
58}
59
Arthur Heymans56776a12022-05-19 11:31:10 +020060static void smm_relocation_handler(void)
Felix Heldbc134812021-02-10 02:26:10 +010061{
Arthur Heymans8cd1dfa2022-05-31 22:00:13 +020062 uintptr_t tseg_base;
63 size_t tseg_size;
64
65 smm_region(&tseg_base, &tseg_size);
66
67 msr_t msr;
Felix Heldcabf6ea2023-03-09 19:43:18 +010068 msr.raw = tseg_base;
Arthur Heymans8cd1dfa2022-05-31 22:00:13 +020069 wrmsr(SMM_ADDR_MSR, msr);
70
71 msr.lo = ~(tseg_size - 1);
72 msr.lo |= SMM_TSEG_WB;
73 msr.hi = (1 << (cpu_phys_address_size() - 32)) - 1;
74 wrmsr(SMM_MASK_MSR, msr);
Felix Heldbc134812021-02-10 02:26:10 +010075
Arthur Heymans56776a12022-05-19 11:31:10 +020076 uintptr_t smbase = smm_get_cpu_smbase(cpu_index());
77 msr_t smm_base = {
Felix Heldcabf6ea2023-03-09 19:43:18 +010078 .raw = smbase
Arthur Heymans56776a12022-05-19 11:31:10 +020079 };
80 wrmsr(SMM_BASE_MSR, smm_base);
Arthur Heymans43ed5d22022-05-31 21:50:51 +020081
82 tseg_valid();
83 lock_smm();
Felix Heldbc134812021-02-10 02:26:10 +010084}
Arthur Heymanse48dcb72022-05-31 21:48:15 +020085
/* MP init callbacks for AMD platforms that relocate SMM during CPU bring-up. */
const struct mp_ops amd_mp_ops_with_smm = {
	.pre_mp_init = pre_mp_init,			/* sync MTRRs before APs start */
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,			/* report TSEG handler region/save-state size */
	.per_cpu_smm_trigger = smm_relocation_handler,	/* program TSEG + SMBASE on each CPU */
	.post_mp_init = global_smi_enable,
};