Angel Pons | ae59387 | 2020-04-04 18:50:57 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
Marshall Dawson | a7bfbbe | 2017-09-13 17:24:53 -0600 | [diff] [blame] | 2 | |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 3 | #include <cpu/cpu.h> |
Marshall Dawson | a7bfbbe | 2017-09-13 17:24:53 -0600 | [diff] [blame] | 4 | #include <cpu/x86/mp.h> |
| 5 | #include <cpu/x86/mtrr.h> |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 6 | #include <cpu/x86/msr.h> |
Kyösti Mälkki | b2a5f0b | 2019-08-04 19:54:32 +0300 | [diff] [blame] | 7 | #include <cpu/x86/smm.h> |
Elyes HAOUAS | 400ce55 | 2018-10-12 10:54:30 +0200 | [diff] [blame] | 8 | #include <cpu/amd/msr.h> |
Kyösti Mälkki | e31ec29 | 2019-08-10 17:27:01 +0300 | [diff] [blame] | 9 | #include <cpu/amd/amd64_save_state.h> |
Marshall Dawson | 178e65d | 2017-10-20 13:20:25 -0600 | [diff] [blame] | 10 | #include <cpu/x86/lapic.h> |
Marshall Dawson | a7bfbbe | 2017-09-13 17:24:53 -0600 | [diff] [blame] | 11 | #include <device/device.h> |
Patrick Rudolph | e56189c | 2018-04-18 10:11:59 +0200 | [diff] [blame] | 12 | #include <device/pci_ops.h> |
Marshall Dawson | a7bfbbe | 2017-09-13 17:24:53 -0600 | [diff] [blame] | 13 | #include <soc/pci_devs.h> |
| 14 | #include <soc/cpu.h> |
| 15 | #include <soc/northbridge.h> |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 16 | #include <soc/smi.h> |
Marshall Dawson | 0814b12 | 2018-01-10 11:35:24 -0700 | [diff] [blame] | 17 | #include <soc/iomap.h> |
Marshall Dawson | a7bfbbe | 2017-09-13 17:24:53 -0600 | [diff] [blame] | 18 | #include <console/console.h> |
Felix Held | aecca75 | 2021-02-08 22:14:17 +0100 | [diff] [blame^] | 19 | #include <amdblocks/psp.h> |
Marshall Dawson | a7bfbbe | 2017-09-13 17:24:53 -0600 | [diff] [blame] | 20 | |
| 21 | /* |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 22 | * MP and SMM loading initialization. |
| 23 | */ |
Kyösti Mälkki | 0d4d09c | 2019-08-06 01:44:58 +0300 | [diff] [blame] | 24 | struct smm_relocation_params { |
| 25 | msr_t tseg_base; |
| 26 | msr_t tseg_mask; |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 27 | }; |
| 28 | |
Kyösti Mälkki | 0d4d09c | 2019-08-06 01:44:58 +0300 | [diff] [blame] | 29 | static struct smm_relocation_params smm_reloc_params; |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 30 | |
| 31 | /* |
Marshall Dawson | a7bfbbe | 2017-09-13 17:24:53 -0600 | [diff] [blame] | 32 | * Do essential initialization tasks before APs can be fired up - |
| 33 | * |
| 34 | * 1. Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This |
| 35 | * creates the MTRR solution that the APs will use. Otherwise APs will try to |
| 36 | * apply the incomplete solution as the BSP is calculating it. |
| 37 | */ |
| 38 | static void pre_mp_init(void) |
| 39 | { |
| 40 | x86_setup_mtrrs_with_detect(); |
| 41 | x86_mtrr_check(); |
| 42 | } |
| 43 | |
| 44 | static int get_cpu_count(void) |
| 45 | { |
Martin Roth | 1956a00 | 2018-10-30 22:31:40 -0600 | [diff] [blame] | 46 | return (pci_read_config16(SOC_HT_DEV, D18F0_CPU_CNT) & CPU_CNT_MASK) |
Richard Spiegel | 41baf0c | 2018-10-22 13:57:18 -0700 | [diff] [blame] | 47 | + 1; |
Marshall Dawson | a7bfbbe | 2017-09-13 17:24:53 -0600 | [diff] [blame] | 48 | } |
| 49 | |
Kyösti Mälkki | 0d4d09c | 2019-08-06 01:44:58 +0300 | [diff] [blame] | 50 | static void fill_in_relocation_params(struct smm_relocation_params *params) |
| 51 | { |
| 52 | uintptr_t tseg_base; |
| 53 | size_t tseg_size; |
| 54 | |
| 55 | smm_region(&tseg_base, &tseg_size); |
| 56 | |
| 57 | params->tseg_base.lo = ALIGN_DOWN(tseg_base, 128 * KiB); |
| 58 | params->tseg_base.hi = 0; |
| 59 | params->tseg_mask.lo = ALIGN_DOWN(~(tseg_size - 1), 128 * KiB); |
| 60 | params->tseg_mask.hi = ((1 << (cpu_phys_address_size() - 32)) - 1); |
| 61 | |
| 62 | params->tseg_mask.lo |= SMM_TSEG_WB; |
| 63 | } |
| 64 | |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 65 | static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, |
| 66 | size_t *smm_save_state_size) |
| 67 | { |
Kyösti Mälkki | 0d4d09c | 2019-08-06 01:44:58 +0300 | [diff] [blame] | 68 | printk(BIOS_DEBUG, "Setting up SMI for CPU\n"); |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 69 | |
Kyösti Mälkki | 0d4d09c | 2019-08-06 01:44:58 +0300 | [diff] [blame] | 70 | fill_in_relocation_params(&smm_reloc_params); |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 71 | |
Kyösti Mälkki | 0d4d09c | 2019-08-06 01:44:58 +0300 | [diff] [blame] | 72 | smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize); |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 73 | *smm_save_state_size = sizeof(amd64_smm_state_save_area_t); |
| 74 | } |
| 75 | |
| 76 | static void relocation_handler(int cpu, uintptr_t curr_smbase, |
| 77 | uintptr_t staggered_smbase) |
| 78 | { |
Kyösti Mälkki | 0d4d09c | 2019-08-06 01:44:58 +0300 | [diff] [blame] | 79 | struct smm_relocation_params *relo_params = &smm_reloc_params; |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 80 | amd64_smm_state_save_area_t *smm_state; |
| 81 | |
Kyösti Mälkki | 0d4d09c | 2019-08-06 01:44:58 +0300 | [diff] [blame] | 82 | wrmsr(SMM_ADDR_MSR, relo_params->tseg_base); |
| 83 | wrmsr(SMM_MASK_MSR, relo_params->tseg_mask); |
| 84 | |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 85 | smm_state = (void *)(SMM_AMD64_SAVE_STATE_OFFSET + curr_smbase); |
| 86 | smm_state->smbase = staggered_smbase; |
| 87 | } |
| 88 | |
Marshall Dawson | a7bfbbe | 2017-09-13 17:24:53 -0600 | [diff] [blame] | 89 | static const struct mp_ops mp_ops = { |
| 90 | .pre_mp_init = pre_mp_init, |
| 91 | .get_cpu_count = get_cpu_count, |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 92 | .get_smm_info = get_smm_info, |
| 93 | .relocation_handler = relocation_handler, |
Kyösti Mälkki | 87e6796 | 2020-05-31 09:59:14 +0300 | [diff] [blame] | 94 | .post_mp_init = global_smi_enable, |
Marshall Dawson | a7bfbbe | 2017-09-13 17:24:53 -0600 | [diff] [blame] | 95 | }; |
| 96 | |
Kyösti Mälkki | 79e12ab | 2020-05-31 09:21:07 +0300 | [diff] [blame] | 97 | void mp_init_cpus(struct bus *cpu_bus) |
Marshall Dawson | a7bfbbe | 2017-09-13 17:24:53 -0600 | [diff] [blame] | 98 | { |
| 99 | /* Clear for take-off */ |
Kyösti Mälkki | 79e12ab | 2020-05-31 09:21:07 +0300 | [diff] [blame] | 100 | if (mp_init_with_smm(cpu_bus, &mp_ops) < 0) |
Marshall Dawson | a7bfbbe | 2017-09-13 17:24:53 -0600 | [diff] [blame] | 101 | printk(BIOS_ERR, "MP initialization failure.\n"); |
Marshall Dawson | 8f031d8 | 2018-04-09 22:15:06 -0600 | [diff] [blame] | 102 | |
| 103 | /* The flash is now no longer cacheable. Reset to WP for performance. */ |
| 104 | mtrr_use_temp_range(FLASH_BASE_ADDR, CONFIG_ROM_SIZE, MTRR_TYPE_WRPROT); |
Marshall Dawson | 2e49cf12 | 2018-08-03 17:05:22 -0600 | [diff] [blame] | 105 | |
| 106 | set_warm_reset_flag(); |
Marshall Dawson | a7bfbbe | 2017-09-13 17:24:53 -0600 | [diff] [blame] | 107 | } |
Marshall Dawson | 178e65d | 2017-10-20 13:20:25 -0600 | [diff] [blame] | 108 | |
Marshall Dawson | 74473ec | 2018-08-05 10:42:17 -0600 | [diff] [blame] | 109 | static void model_15_init(struct device *dev) |
| 110 | { |
| 111 | check_mca(); |
Marshall Dawson | 178e65d | 2017-10-20 13:20:25 -0600 | [diff] [blame] | 112 | setup_lapic(); |
Marshall Dawson | 638bd13 | 2018-09-14 10:16:40 -0600 | [diff] [blame] | 113 | |
| 114 | /* |
| 115 | * Per AMD, sync an undocumented MSR with the PSP base address. |
| 116 | * Experiments showed that if you write to the MSR after it has |
| 117 | * been previously programmed, it causes a general protection fault. |
| 118 | * Also, the MSR survives warm reset and S3 cycles, so we need to |
| 119 | * test if it was previously written before writing to it. |
| 120 | */ |
| 121 | msr_t psp_msr; |
| 122 | uint32_t psp_bar; /* Note: NDA BKDG names this 32-bit register BAR3 */ |
| 123 | psp_bar = pci_read_config32(SOC_PSP_DEV, PCI_BASE_ADDRESS_4); |
| 124 | psp_bar &= ~PCI_BASE_ADDRESS_MEM_ATTR_MASK; |
Felix Held | aecca75 | 2021-02-08 22:14:17 +0100 | [diff] [blame^] | 125 | psp_msr = rdmsr(MSR_PSP_ADDR); |
Marshall Dawson | 638bd13 | 2018-09-14 10:16:40 -0600 | [diff] [blame] | 126 | if (psp_msr.lo == 0) { |
| 127 | psp_msr.lo = psp_bar; |
Felix Held | aecca75 | 2021-02-08 22:14:17 +0100 | [diff] [blame^] | 128 | wrmsr(MSR_PSP_ADDR, psp_msr); |
Marshall Dawson | 638bd13 | 2018-09-14 10:16:40 -0600 | [diff] [blame] | 129 | } |
Marshall Dawson | 178e65d | 2017-10-20 13:20:25 -0600 | [diff] [blame] | 130 | } |
| 131 | |
/* Device operations for the CPU cluster devices matched below. */
static struct device_operations cpu_dev_ops = {
	.init = model_15_init,
};
| 135 | |
/* CPUID signatures (family 15h models 60h/70h) this driver binds to. */
static struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_AMD, 0x660f01 },
	{ X86_VENDOR_AMD, 0x670f00 },
	{ 0, 0 },	/* terminator */
};
| 141 | |
/* Register the family 15h CPU driver with the device framework. */
static const struct cpu_driver model_15 __cpu_driver = {
	.ops      = &cpu_dev_ops,
	.id_table = cpu_table,
};