/* SPDX-License-Identifier: GPL-2.0-only */

#include <cpu/x86/mtrr.h>
#include <cpu/x86/mp.h>
#include <amdblocks/cpu.h>
#include <amdblocks/smm.h>
#include <console/console.h>
#include <cpu/amd/amd64_save_state.h>
#include <cpu/amd/msr.h>
#include <cpu/amd/mtrr.h>
#include <cpu/cpu.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/smm.h>
#include <types.h>

/* AP MTRRs will be synced to the BSP in the SIPI vector, so set them up before MP init. */
static void pre_mp_init(void)
{
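	/*
	 * If TOM2 memory is forced to the write-back type, DRAM above 4GiB is WB
	 * regardless of MTRR settings, so the MTRRs only need to cover memory below 4GiB.
	 */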
	const msr_t syscfg = rdmsr(SYSCFG_MSR);
	if (syscfg.lo & SYSCFG_MSR_TOM2WB)
		x86_setup_mtrrs_with_detect_no_above_4gb();
	else
		x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}

static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
			 size_t *smm_save_state_size)
{
	printk(BIOS_DEBUG, "Setting up SMI for CPU\n");

	uintptr_t tseg_base;
	size_t tseg_size;

	smm_region(&tseg_base, &tseg_size);

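	/* TSEG is programmed as a base/mask pair, so the base must be aligned to the size. */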
	if (!IS_ALIGNED(tseg_base, tseg_size)) {
		printk(BIOS_ERR, "TSEG base not aligned to TSEG size\n");
		return;
	}
	/* Minimum granularity for TSEG MSRs */
	if (tseg_size < 128 * KiB) {
		printk(BIOS_ERR, "TSEG size (0x%zx) too small\n", tseg_size);
		return;
	}

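	/* The permanent SMM handler lives in the handler subregion of TSEG. */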
	smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);
	*smm_save_state_size = sizeof(amd64_smm_state_save_area_t);
}

static void smm_relocation_handler(int cpu, uintptr_t curr_smbase, uintptr_t staggered_smbase)
{
	amd64_smm_state_save_area_t *smm_state;

	uintptr_t tseg_base;
	size_t tseg_size;

	smm_region(&tseg_base, &tseg_size);

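	/* Program the TSEG base (below 4GiB, so the high half is zero) into SMM_ADDR. */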
	msr_t msr;
	msr.lo = tseg_base;
	msr.hi = 0;
	wrmsr(SMM_ADDR_MSR, msr);

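	/*
	 * SMM_MASK holds the TSEG size mask, extended over all supported physical
	 * address bits above 32, and marks TSEG as write-back cacheable.
	 */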
	msr.lo = ~(tseg_size - 1);
	msr.lo |= SMM_TSEG_WB;
	msr.hi = (1 << (cpu_phys_address_size() - 32)) - 1;
	wrmsr(SMM_MASK_MSR, msr);

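	/* Update SMBASE in the save state so this core enters SMM at its staggered base. */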
	smm_state = (void *)(SMM_AMD64_SAVE_STATE_OFFSET + curr_smbase);
	smm_state->smbase = staggered_smbase;
}

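/* MP init ops with SMM relocation; SMIs are enabled globally once all cores are set up. */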
const struct mp_ops amd_mp_ops_with_smm = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = global_smi_enable,
};