/* SPDX-License-Identifier: GPL-2.0-only */

#include <amdblocks/cpu.h>
#include <amdblocks/iomap.h>
#include <amdblocks/mca.h>
#include <amdblocks/reset.h>
#include <amdblocks/smm.h>
#include <console/console.h>
#include <cpu/amd/msr.h>
#include <cpu/amd/mtrr.h>
#include <cpu/cpu.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <device/device.h>
#include <device/pci_ops.h>
#include <soc/cpu.h>
#include <soc/iomap.h>
#include <soc/northbridge.h>
#include <soc/pci_devs.h>
#include <soc/smi.h>
#include <types.h>

/*
 * MP and SMM loading initialization.
 */

/*
 * Do essential initialization tasks before APs can be fired up:
 *
 * 1. Prevent a race condition in the MTRR solution. Enable MTRRs on the
 * BSP; this creates the MTRR solution that the APs will use. Otherwise,
 * the APs would try to apply an incomplete solution while the BSP is
 * still calculating it.
 */
static void pre_mp_init(void)
{
	const msr_t syscfg = rdmsr(SYSCFG_MSR);
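	/*
	 * If TOM2 memory is forced to write-back via SYSCFG, the variable
	 * MTRRs do not need to describe memory above 4GB, which conserves
	 * MTRRs.
	 */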
	if (syscfg.lo & SYSCFG_MSR_TOM2WB)
		x86_setup_mtrrs_with_detect_no_above_4gb();
	else
		x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}

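/*
 * Callbacks for the common MP init code. pre_mp_init() runs on the BSP
 * before the APs are started, the SMM callbacks relocate SMM on every
 * core, and SMIs are only enabled globally in post_mp_init() once
 * relocation has completed on all cores.
 */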
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = global_smi_enable,
};

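/*
 * Perform MP init for all cores, including SMM relocation. Without
 * functional APs and SMM there is no point in continuing to boot, hence
 * the die() on failure.
 */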
void mp_init_cpus(struct bus *cpu_bus)
{
	if (mp_init_with_smm(cpu_bus, &mp_ops) != CB_SUCCESS)
		die_with_post_code(POST_HW_INIT_FAILURE,
				"mp_init_with_smm failed. Halting.\n");

	/* The flash is now no longer cacheable. Reset to WP for performance. */
	mtrr_use_temp_range(FLASH_BELOW_4GB_MAPPING_REGION_BASE,
			    FLASH_BELOW_4GB_MAPPING_REGION_SIZE, MTRR_TYPE_WRPROT);

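	/*
	 * Leave a breadcrumb that CPU init has completed, so a subsequent
	 * reset can be recognized as a warm reset.
	 */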
	set_warm_reset_flag();
}

static void model_15_init(struct device *dev)
{
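	/* Report and clear machine check errors left from a previous boot. */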
	check_mca();

	/*
	 * Per AMD, sync an undocumented MSR with the PSP base address.
	 * Experiments showed that if you write to the MSR after it has
	 * been previously programmed, it causes a general protection fault.
	 * Also, the MSR survives warm reset and S3 cycles, so we need to
	 * test if it was previously written before writing to it.
	 */
	msr_t psp_msr;
	uint32_t psp_bar; /* Note: NDA BKDG names this 32-bit register BAR3 */
	psp_bar = pci_read_config32(SOC_PSP_DEV, PCI_BASE_ADDRESS_4);
	psp_bar &= ~PCI_BASE_ADDRESS_MEM_ATTR_MASK;
	psp_msr = rdmsr(PSP_ADDR_MSR);
	if (psp_msr.lo == 0) {
		psp_msr.lo = psp_bar;
		wrmsr(PSP_ADDR_MSR, psp_msr);
	}
}

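/* The .init op runs on each core as it is brought up during MP init. */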
static struct device_operations cpu_dev_ops = {
	.init = model_15_init,
};

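/*
 * Supported CPUIDs: family 15h (base family 0xf plus extended family
 * 0x6), models 0x60 and 0x70.
 */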
static struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_AMD, 0x660f01 },
	{ X86_VENDOR_AMD, 0x670f00 },
	{ 0, 0 },
};

static const struct cpu_driver model_15 __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};