/* SPDX-License-Identifier: GPL-2.0-or-later */
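
/*
 * SMM relocation: move each CPU's SMBASE from the default SMRAM window to a
 * staggered location in TSEG, publish the IED base through the save state and
 * program (and optionally lock) the SMRR to protect TSEG.
 */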

#include <assert.h>
#include <string.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/mp.h>
#include <cpu/intel/em64t101_save_state.h>
#include <cpu/intel/smm_reloc.h>
#include <console/console.h>
#include <device/pci_ids.h>
#include <smp/node.h>
#include <soc/msr.h>
#include <soc/smmrelocate.h>
#include <soc/pci_devs.h>

static void fill_in_relocation_params(struct smm_relocation_params *params)
{
	uintptr_t tseg_base;
	size_t tseg_size;

	smm_region(&tseg_base, &tseg_size);

	if (!IS_ALIGNED(tseg_base, tseg_size)) {
		/*
		 * Note that SMRR2 is also available and might support unaligned
		 * base/size combinations. For now FSP-M appears to always use an
		 * aligned base/size, so don't bother with SMRR2 here.
		 */
		printk(BIOS_WARNING,
		       "TSEG base not aligned with TSEG size! Not setting SMRR\n");
		return;
	}

	/* SMRR holds a 32-bit physical address with a minimum 4KiB granularity. */
	if (!IS_ALIGNED(tseg_size, 4 * KiB)) {
		printk(BIOS_WARNING,
		       "TSEG size not aligned to the minimum 4KiB! Not setting SMRR\n");
		return;
	}

	smm_subregion(SMM_SUBREGION_CHIPSET, &params->ied_base, &params->ied_size);

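	/*
	 * The SMRR pair is encoded like a variable MTRR: the base MSR carries
	 * the TSEG base plus the write-back memory type, while the mask MSR
	 * covers the power-of-two TSEG size and sets the valid bit.
	 */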
	params->smrr_base.lo = tseg_base | MTRR_TYPE_WRBACK;
	params->smrr_base.hi = 0;
	params->smrr_mask.lo = ~(tseg_size - 1) | MTRR_PHYS_MASK_VALID;
	params->smrr_mask.hi = 0;
}

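/*
 * The IED (Intel Enhanced Debug) region occupies the chipset SMM subregion;
 * its header is placed at IEDBASE so SMM debug facilities can locate it.
 */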
static void setup_ied_area(struct smm_relocation_params *params)
{
	char *ied_base;

	const struct ied_header ied = {
		.signature = "INTEL RSVD",
		.size = params->ied_size,
		.reserved = {0},
	};

	ied_base = (void *)params->ied_base;

	printk(BIOS_DEBUG, "IED base = 0x%08x\n", (u32)params->ied_base);
	printk(BIOS_DEBUG, "IED size = 0x%08x\n", (u32)params->ied_size);

	/* Place IED header at IEDBASE. */
	memcpy(ied_base, &ied, sizeof(ied));

	assert(params->ied_size > 1 * MiB + 32 * KiB);

	/* Zero out 32KiB at IEDBASE + 1MiB */
	memset(ied_base + 1 * MiB, 0, 32 * KiB);
}

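/*
 * Called from the MP init flow (via the mp_ops callbacks) to report the
 * permanent SMM handler region and the save state size.
 */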
void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
		  size_t *smm_save_state_size)
{
	fill_in_relocation_params(&smm_reloc_params);

	smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);

	if (smm_reloc_params.ied_size)
		setup_ied_area(&smm_reloc_params);

	*smm_save_state_size = sizeof(em64t101_smm_state_save_area_t);
}

static void update_save_state(int cpu, uintptr_t curr_smbase,
			      uintptr_t staggered_smbase,
			      struct smm_relocation_params *relo_params)
{
	u32 smbase;
	u32 iedbase;
	em64t101_smm_state_save_area_t *save_state;
	/*
	 * The relocated handler runs with all CPUs concurrently. Therefore,
	 * stagger the entry points, adjusting SMBASE downwards by the save
	 * state size times the CPU number.
	 */
	smbase = staggered_smbase;
	iedbase = relo_params->ied_base;

	printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x apic_id=0x%x\n",
	       smbase, iedbase, initial_lapicid());

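	/*
	 * The hardware save state area sits at the top of the default-sized
	 * SMRAM window this CPU entered through, so address it relative to
	 * curr_smbase.
	 */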
	save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE - sizeof(*save_state));

	save_state->smbase = smbase;
	save_state->iedbase = iedbase;
}

/*
 * The relocation work is actually performed in SMM context, but the code
 * resides in the ramstage module. This occurs by trampolining from the default
 * SMRAM entry point to here.
 */
void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
			    uintptr_t staggered_smbase)
{
	msr_t mtrr_cap, msr;
	struct smm_relocation_params *relo_params = &smm_reloc_params;

	printk(BIOS_DEBUG, "%s : CPU %d\n", __func__, cpu);

	/* Make appropriate changes to the save state map. */
	update_save_state(cpu, curr_smbase, staggered_smbase, relo_params);

	/* Write SMRR MSRs based on indicated support. */
	mtrr_cap = rdmsr(MTRR_CAP_MSR);

	/* Set the lock bit if supported. */
	if (mtrr_cap.lo & SMRR_LOCK_SUPPORTED) {
		msr = rdmsr(IA32_SMRR_PHYS_MASK);
		/*
		 * Don't write the same core-scope MSR if another thread has
		 * already locked it, otherwise the system would hang.
		 */
		if (msr.lo & SMRR_PHYS_MASK_LOCK)
			return;
		relo_params->smrr_mask.lo |= SMRR_PHYS_MASK_LOCK;
	}

	if (mtrr_cap.lo & SMRR_SUPPORTED)
		write_smrr(relo_params);
}

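/* Hand all per-socket UBOX DFX devices to the SMM PCI resource store. */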
void soc_ubox_store_resources(struct smm_pci_resource_info *slots, size_t size)
{
	struct device *devices[CONFIG_MAX_SOCKET] = {0};
	size_t devices_count = 0;
	struct device *dev = NULL;

	/*
	 * Collect all UBOX DFX devices. Depending on the actual socket count,
	 * the bus numbers change and the PCI segment group might differ.
	 * Pass all devices to SMM for platform lockdown.
	 */
	while ((dev = dev_find_device(PCI_VID_INTEL, UBOX_DFX_DEVID, dev))) {
		/* One UBOX DFX device is expected per socket; don't overrun the array. */
		if (devices_count >= ARRAY_SIZE(devices))
			break;
		devices[devices_count++] = dev;
	}

	smm_pci_resource_store_fill_resources(slots, size, (const struct device **)devices,
					      devices_count);
}
159}