/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <commonlib/sort.h>
#include <console/console.h>
#include <delay.h>
#include <device/device.h>
#include <device/pci.h>
#include <intelblocks/cpulib.h>
#include <soc/pci_devs.h>
#include <soc/msr.h>
#include <soc/soc_util.h>
#include <soc/util.h>
#include <timer.h>

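/*
 * Return the PCI bus number assigned to the given IIO stack. The UBOX DECS
 * CPUBUSNO CSRs hold one bus number per byte: stacks 0-3 are read from
 * UBOX_DECS_CPUBUSNO_CSR and stacks 4-7 from UBOX_DECS_CPUBUSNO1_CSR.
 */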
uint8_t get_stack_busno(const uint8_t stack)
{
	if (stack >= MAX_IIO_STACK) {
		printk(BIOS_ERR, "%s: Stack %u does not exist!\n", __func__, stack);
		return 0;
	}
	const pci_devfn_t dev = PCI_DEV(UBOX_DECS_BUS, UBOX_DECS_DEV, UBOX_DECS_FUNC);
	const uint16_t offset = stack / 4 ? UBOX_DECS_CPUBUSNO1_CSR : UBOX_DECS_CPUBUSNO_CSR;
	return pci_io_read_config32(dev, offset) >> (8 * (stack % 4)) & 0xff;
}

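/*
 * Open up the PAM (Programmable Attribute Map) regions so the legacy
 * 0xC0000 - 0xFFFFF segments decode to DRAM for both reads and writes;
 * each 0x3 field written below enables read and write for one PAM segment.
 */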
void unlock_pam_regions(void)
{
	uint32_t pam0123_unlock_dram = 0x33333330;
	uint32_t pam456_unlock_dram = 0x00333333;
	uint32_t bus1 = get_stack_busno(1);

	pci_io_write_config32(PCI_DEV(bus1, SAD_ALL_DEV, SAD_ALL_FUNC),
		SAD_ALL_PAM0123_CSR, pam0123_unlock_dram);
	pci_io_write_config32(PCI_DEV(bus1, SAD_ALL_DEV, SAD_ALL_FUNC),
		SAD_ALL_PAM456_CSR, pam456_unlock_dram);

	uint32_t reg1 = pci_io_read_config32(PCI_DEV(bus1, SAD_ALL_DEV,
		SAD_ALL_FUNC), SAD_ALL_PAM0123_CSR);
	uint32_t reg2 = pci_io_read_config32(PCI_DEV(bus1, SAD_ALL_DEV,
		SAD_ALL_FUNC), SAD_ALL_PAM456_CSR);
	printk(BIOS_DEBUG, "%s:%s pam0123_csr: 0x%x, pam456_csr: 0x%x\n",
		__FILE__, __func__, reg1, reg2);
}

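/*
 * Read the PPIN (Protected Processor Inventory Number), a unique per-part
 * serial number exposed through MSR_PPIN on server parts. The MSR is only
 * readable while MSR_PPIN_CTL is unlocked and enabled, so enable it here if
 * needed and disable it again after reading.
 */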
msr_t read_msr_ppin(void)
{
	msr_t ppin = {0};
	msr_t msr;

	/* If MSR_PLATFORM_INFO PPIN_CAP is 0, PPIN capability is not supported */
	msr = rdmsr(MSR_PLATFORM_INFO);
	if ((msr.lo & MSR_PPIN_CAP) == 0) {
		printk(BIOS_ERR, "MSR_PPIN_CAP is 0, PPIN is not supported\n");
		return ppin;
	}

	/* Access to MSR_PPIN is permitted only if MSR_PPIN_CTL LOCK is 0 and ENABLE is 1 */
	msr = rdmsr(MSR_PPIN_CTL);
	if (msr.lo & MSR_PPIN_CTL_LOCK) {
		printk(BIOS_ERR, "MSR_PPIN_CTL_LOCK is 1, PPIN access is not allowed\n");
		return ppin;
	}

	if ((msr.lo & MSR_PPIN_CTL_ENABLE) == 0) {
		/* Set MSR_PPIN_CTL ENABLE to 1 */
		msr.lo |= MSR_PPIN_CTL_ENABLE;
		wrmsr(MSR_PPIN_CTL, msr);
	}
	ppin = rdmsr(MSR_PPIN);
	/* Set ENABLE back to 0 after reading MSR_PPIN */
	msr.lo &= ~MSR_PPIN_CTL_ENABLE;
	wrmsr(MSR_PPIN_CTL, msr);
	return ppin;
}

static unsigned int get_threads_per_package(void)
{
	unsigned int core_count, thread_count;
	cpu_read_topology(&core_count, &thread_count);
	return thread_count;
}

int get_platform_thread_count(void)
{
	return soc_get_num_cpus() * get_threads_per_package();
}

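/*
 * Look up the FSP IIO Universal Data Structure (UDS) HOB and cache the
 * pointer so subsequent callers do not have to search the HOB list again.
 */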
const IIO_UDS *get_iio_uds(void)
{
	size_t hob_size;
	static const IIO_UDS *hob;
	const uint8_t fsp_hob_iio_universal_data_guid[16] = FSP_HOB_IIO_UNIVERSAL_DATA_GUID;

	if (hob != NULL)
		return hob;

	hob = fsp_find_extension_hob_by_guid(fsp_hob_iio_universal_data_guid, &hob_size);
	assert(hob != NULL && hob_size != 0);
	return hob;
}

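/*
 * Collect the valid IIO stack resource descriptors from every socket in the
 * FSP HOB into info->res[] and record how many were found.
 */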
void get_iiostack_info(struct iiostack_resource *info)
{
	const IIO_UDS *hob = get_iio_uds();

	/* Copy IIO stack info from the FSP HOB */
	info->no_of_stacks = 0;
	for (int s = 0; s < hob->PlatformData.numofIIO; ++s) {
		for (int x = 0; x < MAX_IIO_STACK; ++x) {
			const STACK_RES *ri = &hob->PlatformData.IIO_resource[s].StackRes[x];
			if (!is_iio_stack_res(ri))
				continue;
			assert(info->no_of_stacks < (CONFIG_MAX_SOCKET * MAX_IIO_STACK));
			memcpy(&info->res[info->no_of_stacks++], ri, sizeof(STACK_RES));
		}
	}
}

unsigned int soc_get_num_cpus(void)
{
	/* The numCpus field in the FSP IIO UDS HOB is actually the socket count */
	return get_iio_uds()->SystemStatus.numCpus;
}

#if ENV_RAMSTAGE /* Setting devtree variables is only allowed in ramstage. */
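/*
 * Derive the APIC ID bit-field widths from CPUID leaf 0xB: sub-leaf 0 (SMT
 * level) reports in EAX[4:0] the number of low APIC ID bits that select the
 * thread, and sub-leaf 1 (core level) reports the bits up to the package ID,
 * so the core width is the difference between the two.
 */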
static void get_core_thread_bits(uint32_t *core_bits, uint32_t *thread_bits)
{
	register int ecx;
	struct cpuid_result cpuid_regs;

	/* Get the maximum supported CPUID leaf */
	cpuid_regs = cpuid(0);
	assert(cpuid_regs.eax >= 0xb); /* cpuid_regs.eax is the highest input value for CPUID */

	*thread_bits = *core_bits = 0;
	ecx = 0;
	while (1) {
		cpuid_regs = cpuid_ext(0xb, ecx);
		if (ecx == 0) {
			*thread_bits = (cpuid_regs.eax & 0x1f);
		} else {
			*core_bits = (cpuid_regs.eax & 0x1f) - *thread_bits;
			break;
		}
		ecx++;
	}
}

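/*
 * Decompose an APIC ID into package/core/thread using the bit widths from
 * get_core_thread_bits(): [ package | core | thread ] from high to low bits.
 */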
static void get_cpu_info_from_apicid(uint32_t apicid, uint32_t core_bits, uint32_t thread_bits,
		uint8_t *package, uint8_t *core, uint8_t *thread)
{
	if (package != NULL)
		*package = (apicid >> (thread_bits + core_bits));
	if (core != NULL)
		*core = (uint32_t)((apicid >> thread_bits) & ~((~0) << core_bits));
	if (thread != NULL)
		*thread = (uint32_t)(apicid & ~((~0) << thread_bits));
}

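/*
 * Renumber the devicetree APIC devices: gather all enabled APIC IDs, sort
 * them, regroup them so that all thread-0 IDs come before all thread-1 IDs,
 * and then reassign apic_id/node_id to the APIC devices in that order.
 */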
void xeonsp_init_cpu_config(void)
{
	struct device *dev;
	int apic_ids[CONFIG_MAX_CPUS] = {0}, apic_ids_by_thread[CONFIG_MAX_CPUS] = {0};
	int num_apics = 0;
	uint32_t core_bits, thread_bits;
	unsigned int core_count, thread_count;
	unsigned int num_sockets;

	/*
	 * Sort the APIC IDs in ascending order to identify the APIC ID ranges
	 * for each NUMA domain.
	 */
	for (dev = all_devices; dev; dev = dev->next) {
		if ((dev->path.type != DEVICE_PATH_APIC) ||
			(dev->bus->dev->path.type != DEVICE_PATH_CPU_CLUSTER)) {
			continue;
		}
		if (!dev->enabled)
			continue;
		if (num_apics >= ARRAY_SIZE(apic_ids))
			break;
		apic_ids[num_apics++] = dev->path.apic.apic_id;
	}
	if (num_apics > 1)
		bubblesort(apic_ids, num_apics, NUM_ASCENDING);

	num_sockets = soc_get_num_cpus();
	cpu_read_topology(&core_count, &thread_count);
	assert(num_apics == (num_sockets * thread_count));

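	/*
	 * The regrouping below assumes two threads per core and that bit 0 of
	 * the APIC ID selects the thread, so even IDs are thread 0 and odd IDs
	 * are thread 1.
	 */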
	/* Sort them by thread, i.e., all thread-0 APIC IDs first, then all thread-1 IDs */
	int index = 0;
	for (int id = 0; id < num_apics; ++id) {
		int apic_id = apic_ids[id];
		if (apic_id & 0x1) { /* 2nd thread */
			apic_ids_by_thread[index + (num_apics/2) - 1] = apic_id;
		} else { /* 1st thread */
			apic_ids_by_thread[index++] = apic_id;
		}
	}

	/* Update apic_id and node_id in sorted order */
	num_apics = 0;
	get_core_thread_bits(&core_bits, &thread_bits);
	for (dev = all_devices; dev; dev = dev->next) {
		uint8_t package;

		if ((dev->path.type != DEVICE_PATH_APIC) ||
			(dev->bus->dev->path.type != DEVICE_PATH_CPU_CLUSTER)) {
			continue;
		}
		if (!dev->enabled)
			continue;
		if (num_apics >= ARRAY_SIZE(apic_ids))
			break;
		dev->path.apic.apic_id = apic_ids_by_thread[num_apics];
		get_cpu_info_from_apicid(dev->path.apic.apic_id, core_bits, thread_bits,
			&package, NULL, NULL);
		dev->path.apic.node_id = package;
		printk(BIOS_DEBUG, "CPU %d apic_id: 0x%x (%d), node_id: 0x%x\n",
			num_apics, dev->path.apic.apic_id,
			dev->path.apic.apic_id, dev->path.apic.node_id);

		++num_apics;
	}
}

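/*
 * The helpers below implement the BIOS-to-pcode handshake through the PCU
 * (Power Control Unit) PCI device: mailbox commands are issued via the
 * BIOS_MB data/interface registers, and init completion is signaled through
 * the BIOS_RESET_CPL register bits that pcode acknowledges with the matching
 * PCODE_INIT_DONE bits.
 */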
/* Return true if the command timed out, false otherwise */
static bool wait_for_bios_cmd_cpl(pci_devfn_t dev, uint32_t reg, uint32_t mask,
	uint32_t target)
{
	const uint32_t max_delay = 5000; /* 5 seconds max */
	const uint32_t step_delay = 50; /* 50 us */
	struct stopwatch sw;

	stopwatch_init_msecs_expire(&sw, max_delay);
	while ((pci_s_read_config32(dev, reg) & mask) != target) {
		udelay(step_delay);
		if (stopwatch_expired(&sw)) {
			printk(BIOS_ERR, "%s timed out for dev: %x, reg: 0x%x, "
				"mask: 0x%x, target: 0x%x\n", __func__, dev, reg, mask, target);
			return true; /* timed out */
		}
	}
	return false; /* success */
}

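/*
 * Issue one BIOS mailbox command: wait for RUN_BUSY to clear, write the
 * payload to the data register, write the command with RUN_BUSY set, then
 * wait for the PCU to clear RUN_BUSY again to signal completion.
 */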
/* Return true if the command timed out, false otherwise */
static bool write_bios_mailbox_cmd(pci_devfn_t dev, uint32_t command, uint32_t data)
{
	/* Verify the BIOS mailbox is not busy */
	if (wait_for_bios_cmd_cpl(dev, PCU_CR1_BIOS_MB_INTERFACE_REG, BIOS_MB_RUN_BUSY_MASK, 0))
		return true; /* timed out */

	/* Write the payload to the data register */
	printk(BIOS_SPEW, "%s - pci_s_write_config32 reg: 0x%x, data: 0x%x\n", __func__,
		PCU_CR1_BIOS_MB_DATA_REG, data);
	pci_s_write_config32(dev, PCU_CR1_BIOS_MB_DATA_REG, data);

	/* Write the command with RUN_BUSY set to start execution */
	printk(BIOS_SPEW, "%s - pci_s_write_config32 reg: 0x%x, data: 0x%lx\n", __func__,
		PCU_CR1_BIOS_MB_INTERFACE_REG, command | BIOS_MB_RUN_BUSY_MASK);
	pci_s_write_config32(dev, PCU_CR1_BIOS_MB_INTERFACE_REG,
		command | BIOS_MB_RUN_BUSY_MASK);

	/* Wait for completion or time out */
	return wait_for_bios_cmd_cpl(dev, PCU_CR1_BIOS_MB_INTERFACE_REG,
		BIOS_MB_RUN_BUSY_MASK, 0);
}

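/*
 * Set the requested RST_CPLx bit(s) in BIOS_RESET_CPL and wait for pcode to
 * acknowledge by setting the corresponding PCODE_INIT_DONEx bit(s).
 */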
/* Return true if the handshake timed out, false otherwise */
static bool set_bios_reset_cpl_for_package(uint32_t socket, uint32_t rst_cpl_mask,
	uint32_t pcode_init_mask, uint32_t val)
{
	const uint32_t bus = get_socket_stack_busno(socket, PCU_IIO_STACK);
	const pci_devfn_t dev = PCI_DEV(bus, PCU_DEV, PCU_CR1_FUN);

	uint32_t reg = pci_s_read_config32(dev, PCU_CR1_BIOS_RESET_CPL_REG);
	reg &= (uint32_t) ~rst_cpl_mask;
	reg |= val;

	/* Update the BIOS RESET completion bit */
	pci_s_write_config32(dev, PCU_CR1_BIOS_RESET_CPL_REG, reg);

	/* Wait for the PCU (pcode) acknowledgement */
	return wait_for_bios_cmd_cpl(dev, PCU_CR1_BIOS_RESET_CPL_REG, pcode_init_mask,
		pcode_init_mask);
}

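/*
 * Signal BIOS init completion to one package: verify the PCU mailbox is
 * functional by reading (and, after a failed first try, writing back) the
 * PCU misc config, then walk the RST_CPL3/RST_CPL4 handshakes, locking PMAX
 * in between and the desired-cores configuration at the end.
 */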
static void set_bios_init_completion_for_package(uint32_t socket)
{
	uint32_t data;
	bool timedout;
	const uint32_t bus = get_socket_stack_busno(socket, PCU_IIO_STACK);
	const pci_devfn_t dev = PCI_DEV(bus, PCU_DEV, PCU_CR1_FUN);

	/* Read the PCU misc config */
	timedout = write_bios_mailbox_cmd(dev, BIOS_CMD_READ_PCU_MISC_CFG, 0);
	if (timedout) {
		/* 2nd try */
		timedout = write_bios_mailbox_cmd(dev, BIOS_CMD_READ_PCU_MISC_CFG, 0);
		if (timedout)
			die("BIOS PCU Misc Config Read timed out.\n");

		/* Since the 1st try failed, make sure the PCU is in a stable state */
		data = pci_s_read_config32(dev, PCU_CR1_BIOS_MB_DATA_REG);
		printk(BIOS_SPEW, "%s - pci_s_read_config32 reg: 0x%x, data: 0x%x\n",
			__func__, PCU_CR1_BIOS_MB_DATA_REG, data);
		timedout = write_bios_mailbox_cmd(dev, BIOS_CMD_WRITE_PCU_MISC_CFG, data);
		if (timedout)
			die("BIOS PCU Misc Config Write timed out.\n");
	}

	/* Update RST_CPL3, wait for PCODE_INIT_DONE3 */
	timedout = set_bios_reset_cpl_for_package(socket, RST_CPL3_MASK,
		PCODE_INIT_DONE3_MASK, RST_CPL3_MASK);
	if (timedout)
		die("BIOS RESET CPL3 timed out.\n");

	/* Set PMAX_LOCK - must be set before RESET CPL4 */
	pci_or_config32(PCU_DEV_CR0(bus), PCU_CR0_PMAX, PMAX_LOCK);

	/* Update RST_CPL4, wait for PCODE_INIT_DONE4 */
	timedout = set_bios_reset_cpl_for_package(socket, RST_CPL4_MASK,
		PCODE_INIT_DONE4_MASK, RST_CPL4_MASK);
	if (timedout)
		die("BIOS RESET CPL4 timed out.\n");

	/* Set the CSR_DESIRED_CORES_CFG2 lock bit */
	data = pci_s_read_config32(dev, PCU_CR1_DESIRED_CORES_CFG2_REG);
	data |= PCU_CR1_DESIRED_CORES_CFG2_REG_LOCK_MASK;
	printk(BIOS_SPEW, "%s - pci_s_write_config32 PCU_CR1_DESIRED_CORES_CFG2_REG 0x%x, data: 0x%x\n",
		__func__, PCU_CR1_DESIRED_CORES_CFG2_REG, data);
	pci_s_write_config32(dev, PCU_CR1_DESIRED_CORES_CFG2_REG, data);
}

void set_bios_init_completion(void)
{
	/* FIXME: This may need to be changed for multi-socket platforms. */
	uint32_t sbsp_socket_id = 0;

	/*
	 * According to the BIOS Writer's Guide, the SBSP must be the last socket
	 * to receive the BIOS init completion message. So, send it to all non-SBSP
	 * sockets first.
	 */
	for (uint32_t socket = 0; socket < soc_get_num_cpus(); ++socket) {
		if (socket == sbsp_socket_id)
			continue;
		set_bios_init_completion_for_package(socket);
	}

	/* And finally, take care of the SBSP */
	set_bios_init_completion_for_package(sbsp_socket_id);
}
#endif