blob: 7f71703f1cc6c9f5e196832b420942aa16f2bc03 [file] [log] [blame]
Angel Ponsae593872020-04-04 18:50:57 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Marshall Dawsona7bfbbe2017-09-13 17:24:53 -06002
Felix Held199b10f2022-08-13 00:29:23 +02003#include <amdblocks/iomap.h>
Felix Heldf1093af2021-07-13 23:00:26 +02004#include <amdblocks/mca.h>
Felix Helda5cdf752021-03-10 15:47:00 +01005#include <amdblocks/reset.h>
Felix Heldbc134812021-02-10 02:26:10 +01006#include <amdblocks/smm.h>
Arthur Heymans615818f2022-05-31 21:33:43 +02007#include <console/console.h>
Felix Held285dd6e2021-02-17 22:16:40 +01008#include <cpu/amd/msr.h>
Arthur Heymans615818f2022-05-31 21:33:43 +02009#include <cpu/amd/mtrr.h>
Marshall Dawsonb6172112017-09-13 17:47:31 -060010#include <cpu/cpu.h>
Marshall Dawsona7bfbbe2017-09-13 17:24:53 -060011#include <cpu/x86/mp.h>
Marshall Dawsonb6172112017-09-13 17:47:31 -060012#include <cpu/x86/msr.h>
Arthur Heymans615818f2022-05-31 21:33:43 +020013#include <cpu/x86/mtrr.h>
Kyösti Mälkkib2a5f0b2019-08-04 19:54:32 +030014#include <cpu/x86/smm.h>
Marshall Dawsona7bfbbe2017-09-13 17:24:53 -060015#include <device/device.h>
Patrick Rudolphe56189c2018-04-18 10:11:59 +020016#include <device/pci_ops.h>
Marshall Dawsona7bfbbe2017-09-13 17:24:53 -060017#include <soc/cpu.h>
Marshall Dawson0814b122018-01-10 11:35:24 -070018#include <soc/iomap.h>
Arthur Heymans615818f2022-05-31 21:33:43 +020019#include <soc/northbridge.h>
20#include <soc/pci_devs.h>
21#include <soc/smi.h>
Felix Heldd27ef5b2021-10-20 20:18:12 +020022#include <types.h>
Marshall Dawsona7bfbbe2017-09-13 17:24:53 -060023
24/*
Marshall Dawsonb6172112017-09-13 17:47:31 -060025 * MP and SMM loading initialization.
26 */
Marshall Dawsonb6172112017-09-13 17:47:31 -060027
28/*
Marshall Dawsona7bfbbe2017-09-13 17:24:53 -060029 * Do essential initialization tasks before APs can be fired up -
30 *
31 * 1. Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This
32 * creates the MTRR solution that the APs will use. Otherwise APs will try to
33 * apply the incomplete solution as the BSP is calculating it.
34 */
35static void pre_mp_init(void)
36{
Arthur Heymans615818f2022-05-31 21:33:43 +020037 const msr_t syscfg = rdmsr(SYSCFG_MSR);
38 if (syscfg.lo & SYSCFG_MSR_TOM2WB)
39 x86_setup_mtrrs_with_detect_no_above_4gb();
40 else
41 x86_setup_mtrrs_with_detect();
Marshall Dawsona7bfbbe2017-09-13 17:24:53 -060042 x86_mtrr_check();
43}
44
/*
 * Number of cores in the package.
 * CPUID Fn8000_0008 ECX[7:0] (NC) reports the core count minus one.
 */
static int get_cpu_count(void)
{
	const int nc = cpuid_ecx(0x80000008) & 0xff;

	return nc + 1;
}
49
/*
 * Callback set consumed by mp_init_with_smm(): MTRR setup before AP
 * bring-up, core-count discovery, SMM save-state sizing/relocation,
 * and SMI enablement once all cores are up.
 */
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = global_smi_enable,
};
57
Kyösti Mälkki79e12ab2020-05-31 09:21:07 +030058void mp_init_cpus(struct bus *cpu_bus)
Marshall Dawsona7bfbbe2017-09-13 17:24:53 -060059{
Felix Held28a0a142021-11-02 17:15:58 +010060 if (mp_init_with_smm(cpu_bus, &mp_ops) != CB_SUCCESS)
61 die_with_post_code(POST_HW_INIT_FAILURE,
62 "mp_init_with_smm failed. Halting.\n");
Marshall Dawson8f031d82018-04-09 22:15:06 -060063
64 /* The flash is now no longer cacheable. Reset to WP for performance. */
Felix Held199b10f2022-08-13 00:29:23 +020065 mtrr_use_temp_range(FLASH_BELOW_4GB_MAPPING_REGION_BASE,
66 FLASH_BELOW_4GB_MAPPING_REGION_SIZE, MTRR_TYPE_WRPROT);
Marshall Dawson2e49cf122018-08-03 17:05:22 -060067
68 set_warm_reset_flag();
Marshall Dawsona7bfbbe2017-09-13 17:24:53 -060069}
Marshall Dawson178e65d2017-10-20 13:20:25 -060070
Marshall Dawson74473ec2018-08-05 10:42:17 -060071static void model_15_init(struct device *dev)
72{
73 check_mca();
Marshall Dawson638bd132018-09-14 10:16:40 -060074
75 /*
76 * Per AMD, sync an undocumented MSR with the PSP base address.
77 * Experiments showed that if you write to the MSR after it has
78 * been previously programmed, it causes a general protection fault.
79 * Also, the MSR survives warm reset and S3 cycles, so we need to
80 * test if it was previously written before writing to it.
81 */
82 msr_t psp_msr;
83 uint32_t psp_bar; /* Note: NDA BKDG names this 32-bit register BAR3 */
84 psp_bar = pci_read_config32(SOC_PSP_DEV, PCI_BASE_ADDRESS_4);
85 psp_bar &= ~PCI_BASE_ADDRESS_MEM_ATTR_MASK;
Felix Helde09294f2021-02-17 22:22:21 +010086 psp_msr = rdmsr(PSP_ADDR_MSR);
Marshall Dawson638bd132018-09-14 10:16:40 -060087 if (psp_msr.lo == 0) {
88 psp_msr.lo = psp_bar;
Felix Helde09294f2021-02-17 22:22:21 +010089 wrmsr(PSP_ADDR_MSR, psp_msr);
Marshall Dawson638bd132018-09-14 10:16:40 -060090 }
Marshall Dawson178e65d2017-10-20 13:20:25 -060091}
92
/* Device operations for matched CPUs; only .init is required here. */
static struct device_operations cpu_dev_ops = {
	.init = model_15_init,
};
96
/*
 * CPUID signatures this driver binds to. The values decode to family
 * 15h, models 60h/70h — presumably the parts this SoC directory
 * targets; NOTE(review): confirm exact steppings against the BKDG.
 */
static struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_AMD, 0x660f01 },
	{ X86_VENDOR_AMD, 0x670f00 },
	{ 0, 0 },	/* end-of-table sentinel */
};
102
/* Register the driver: devices matching cpu_table get cpu_dev_ops. */
static const struct cpu_driver model_15 __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};