blob: e691a51b3992a87d9f94e909cf2b11464d0416fa [file] [log] [blame]
Angel Ponsae593872020-04-04 18:50:57 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Marshall Dawsona7bfbbe2017-09-13 17:24:53 -06002
Felix Heldf1093af2021-07-13 23:00:26 +02003#include <amdblocks/mca.h>
Felix Helda5cdf752021-03-10 15:47:00 +01004#include <amdblocks/reset.h>
Felix Heldbc134812021-02-10 02:26:10 +01005#include <amdblocks/smm.h>
Arthur Heymans615818f2022-05-31 21:33:43 +02006#include <console/console.h>
Felix Held285dd6e2021-02-17 22:16:40 +01007#include <cpu/amd/msr.h>
Arthur Heymans615818f2022-05-31 21:33:43 +02008#include <cpu/amd/mtrr.h>
Marshall Dawsonb6172112017-09-13 17:47:31 -06009#include <cpu/cpu.h>
Marshall Dawsona7bfbbe2017-09-13 17:24:53 -060010#include <cpu/x86/mp.h>
Marshall Dawsonb6172112017-09-13 17:47:31 -060011#include <cpu/x86/msr.h>
Arthur Heymans615818f2022-05-31 21:33:43 +020012#include <cpu/x86/mtrr.h>
Kyösti Mälkkib2a5f0b2019-08-04 19:54:32 +030013#include <cpu/x86/smm.h>
Marshall Dawsona7bfbbe2017-09-13 17:24:53 -060014#include <device/device.h>
Patrick Rudolphe56189c2018-04-18 10:11:59 +020015#include <device/pci_ops.h>
Marshall Dawsona7bfbbe2017-09-13 17:24:53 -060016#include <soc/cpu.h>
Marshall Dawson0814b122018-01-10 11:35:24 -070017#include <soc/iomap.h>
Arthur Heymans615818f2022-05-31 21:33:43 +020018#include <soc/northbridge.h>
19#include <soc/pci_devs.h>
20#include <soc/smi.h>
Felix Heldd27ef5b2021-10-20 20:18:12 +020021#include <types.h>
Marshall Dawsona7bfbbe2017-09-13 17:24:53 -060022
23/*
Marshall Dawsonb6172112017-09-13 17:47:31 -060024 * MP and SMM loading initialization.
25 */
Marshall Dawsonb6172112017-09-13 17:47:31 -060026
27/*
Marshall Dawsona7bfbbe2017-09-13 17:24:53 -060028 * Do essential initialization tasks before APs can be fired up -
29 *
30 * 1. Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This
31 * creates the MTRR solution that the APs will use. Otherwise APs will try to
32 * apply the incomplete solution as the BSP is calculating it.
33 */
34static void pre_mp_init(void)
35{
Arthur Heymans615818f2022-05-31 21:33:43 +020036 const msr_t syscfg = rdmsr(SYSCFG_MSR);
37 if (syscfg.lo & SYSCFG_MSR_TOM2WB)
38 x86_setup_mtrrs_with_detect_no_above_4gb();
39 else
40 x86_setup_mtrrs_with_detect();
Marshall Dawsona7bfbbe2017-09-13 17:24:53 -060041 x86_mtrr_check();
42}
43
44static int get_cpu_count(void)
45{
Felix Held0a361782021-11-02 17:40:59 +010046 return (pci_read_config16(SOC_HT_DEV, D18F0_CPU_CNT) & CPU_CNT_MASK) + 1;
Marshall Dawsona7bfbbe2017-09-13 17:24:53 -060047}
48
/*
 * Hooks for the common x86 MP init flow: BSP-side MTRR setup before the
 * APs start, CPU count discovery, SMM relocation, and SMI enablement
 * once all cores are up.
 */
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = global_smi_enable,
};
56
Kyösti Mälkki79e12ab2020-05-31 09:21:07 +030057void mp_init_cpus(struct bus *cpu_bus)
Marshall Dawsona7bfbbe2017-09-13 17:24:53 -060058{
Felix Held28a0a142021-11-02 17:15:58 +010059 if (mp_init_with_smm(cpu_bus, &mp_ops) != CB_SUCCESS)
60 die_with_post_code(POST_HW_INIT_FAILURE,
61 "mp_init_with_smm failed. Halting.\n");
Marshall Dawson8f031d82018-04-09 22:15:06 -060062
63 /* The flash is now no longer cacheable. Reset to WP for performance. */
64 mtrr_use_temp_range(FLASH_BASE_ADDR, CONFIG_ROM_SIZE, MTRR_TYPE_WRPROT);
Marshall Dawson2e49cf122018-08-03 17:05:22 -060065
66 set_warm_reset_flag();
Marshall Dawsona7bfbbe2017-09-13 17:24:53 -060067}
Marshall Dawson178e65d2017-10-20 13:20:25 -060068
Marshall Dawson74473ec2018-08-05 10:42:17 -060069static void model_15_init(struct device *dev)
70{
71 check_mca();
Marshall Dawson638bd132018-09-14 10:16:40 -060072
73 /*
74 * Per AMD, sync an undocumented MSR with the PSP base address.
75 * Experiments showed that if you write to the MSR after it has
76 * been previously programmed, it causes a general protection fault.
77 * Also, the MSR survives warm reset and S3 cycles, so we need to
78 * test if it was previously written before writing to it.
79 */
80 msr_t psp_msr;
81 uint32_t psp_bar; /* Note: NDA BKDG names this 32-bit register BAR3 */
82 psp_bar = pci_read_config32(SOC_PSP_DEV, PCI_BASE_ADDRESS_4);
83 psp_bar &= ~PCI_BASE_ADDRESS_MEM_ATTR_MASK;
Felix Helde09294f2021-02-17 22:22:21 +010084 psp_msr = rdmsr(PSP_ADDR_MSR);
Marshall Dawson638bd132018-09-14 10:16:40 -060085 if (psp_msr.lo == 0) {
86 psp_msr.lo = psp_bar;
Felix Helde09294f2021-02-17 22:22:21 +010087 wrmsr(PSP_ADDR_MSR, psp_msr);
Marshall Dawson638bd132018-09-14 10:16:40 -060088 }
Marshall Dawson178e65d2017-10-20 13:20:25 -060089}
90
/* Device operations applied to every CPU matched by this driver. */
static struct device_operations cpu_dev_ops = {
	.init = model_15_init,
};
94
/* CPUID values this driver binds to; the all-zero entry terminates the list. */
static struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_AMD, 0x660f01 },
	{ X86_VENDOR_AMD, 0x670f00 },
	{ 0, 0 },
};
100
/* Register the driver so matching CPUs get model_15_init() during init. */
static const struct cpu_driver model_15 __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};