Marshall Dawson | a7bfbbe | 2017-09-13 17:24:53 -0600 | [diff] [blame] | 1 | /* |
| 2 | * This file is part of the coreboot project. |
| 3 | * |
| 4 | * Copyright (C) 2015-2016 Intel Corp. |
| 5 | * Copyright (C) 2017 Advanced Micro Devices, Inc. |
| 6 | * |
| 7 | * This program is free software; you can redistribute it and/or modify |
| 8 | * it under the terms of the GNU General Public License as published by |
| 9 | * the Free Software Foundation; version 2 of the License. |
| 10 | * |
| 11 | * This program is distributed in the hope that it will be useful, |
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 14 | * GNU General Public License for more details. |
| 15 | */ |
| 16 | |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 17 | #include <cpu/cpu.h> |
Marshall Dawson | a7bfbbe | 2017-09-13 17:24:53 -0600 | [diff] [blame] | 18 | #include <cpu/x86/mp.h> |
| 19 | #include <cpu/x86/mtrr.h> |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 20 | #include <cpu/x86/msr.h> |
Kyösti Mälkki | b2a5f0b | 2019-08-04 19:54:32 +0300 | [diff] [blame] | 21 | #include <cpu/x86/smm.h> |
Elyes HAOUAS | 400ce55 | 2018-10-12 10:54:30 +0200 | [diff] [blame] | 22 | #include <cpu/amd/msr.h> |
Kyösti Mälkki | e31ec29 | 2019-08-10 17:27:01 +0300 | [diff] [blame^] | 23 | #include <cpu/amd/amd64_save_state.h> |
Marshall Dawson | 178e65d | 2017-10-20 13:20:25 -0600 | [diff] [blame] | 24 | #include <cpu/x86/lapic.h> |
Marshall Dawson | a7bfbbe | 2017-09-13 17:24:53 -0600 | [diff] [blame] | 25 | #include <device/device.h> |
Patrick Rudolph | e56189c | 2018-04-18 10:11:59 +0200 | [diff] [blame] | 26 | #include <device/pci_ops.h> |
Marshall Dawson | a7bfbbe | 2017-09-13 17:24:53 -0600 | [diff] [blame] | 27 | #include <soc/pci_devs.h> |
| 28 | #include <soc/cpu.h> |
| 29 | #include <soc/northbridge.h> |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 30 | #include <soc/smi.h> |
Marshall Dawson | 0814b12 | 2018-01-10 11:35:24 -0700 | [diff] [blame] | 31 | #include <soc/iomap.h> |
Marshall Dawson | a7bfbbe | 2017-09-13 17:24:53 -0600 | [diff] [blame] | 32 | #include <console/console.h> |
| 33 | |
/*
 * MP and SMM loading initialization.
 */

/*
 * SMM relocation parameters computed once on the BSP in get_smm_info()
 * and then applied to every CPU by relocation_handler().
 */
struct smm_relocation_attrs {
	uint32_t smbase;	/* Base of the SMM (TSEG) region */
	uint32_t tseg_base;	/* Value programmed into SMM_ADDR_MSR */
	uint32_t tseg_mask;	/* Value programmed into SMM_MASK_MSR (incl. WB attr) */
};

static struct smm_relocation_attrs relo_attrs;
| 44 | |
/*
 * Do essential initialization tasks before APs can be fired up -
 *
 * 1. Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This
 * creates the MTRR solution that the APs will use. Otherwise APs will try to
 * apply the incomplete solution as the BSP is calculating it.
 */
static void pre_mp_init(void)
{
	/* Detect the memory map and program the full MTRR solution on the BSP. */
	x86_setup_mtrrs_with_detect();
	/* Sanity-check the variable MTRRs that were just committed. */
	x86_mtrr_check();
}
| 57 | |
| 58 | static int get_cpu_count(void) |
| 59 | { |
Martin Roth | 1956a00 | 2018-10-30 22:31:40 -0600 | [diff] [blame] | 60 | return (pci_read_config16(SOC_HT_DEV, D18F0_CPU_CNT) & CPU_CNT_MASK) |
Richard Spiegel | 41baf0c | 2018-10-22 13:57:18 -0700 | [diff] [blame] | 61 | + 1; |
Marshall Dawson | a7bfbbe | 2017-09-13 17:24:53 -0600 | [diff] [blame] | 62 | } |
| 63 | |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 64 | static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, |
| 65 | size_t *smm_save_state_size) |
| 66 | { |
Kyösti Mälkki | 14222d8 | 2019-08-05 15:10:18 +0300 | [diff] [blame] | 67 | uintptr_t smm_base; |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 68 | size_t smm_size; |
Kyösti Mälkki | 14222d8 | 2019-08-05 15:10:18 +0300 | [diff] [blame] | 69 | uintptr_t handler_base; |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 70 | size_t handler_size; |
| 71 | |
| 72 | /* Initialize global tracking state. */ |
Kyösti Mälkki | 7db852a | 2019-08-04 20:26:53 +0300 | [diff] [blame] | 73 | smm_region(&smm_base, &smm_size); |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 74 | smm_subregion(SMM_SUBREGION_HANDLER, &handler_base, &handler_size); |
| 75 | |
Kyösti Mälkki | 14222d8 | 2019-08-05 15:10:18 +0300 | [diff] [blame] | 76 | relo_attrs.smbase = smm_base; |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 77 | relo_attrs.tseg_base = relo_attrs.smbase; |
| 78 | relo_attrs.tseg_mask = ALIGN_DOWN(~(smm_size - 1), 128 * KiB); |
Marshall Dawson | 2a5e15c | 2018-01-24 12:07:11 -0700 | [diff] [blame] | 79 | relo_attrs.tseg_mask |= SMM_TSEG_WB; |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 80 | |
Kyösti Mälkki | 14222d8 | 2019-08-05 15:10:18 +0300 | [diff] [blame] | 81 | *perm_smbase = handler_base; |
Marshall Dawson | b617211 | 2017-09-13 17:47:31 -0600 | [diff] [blame] | 82 | *perm_smsize = handler_size; |
| 83 | *smm_save_state_size = sizeof(amd64_smm_state_save_area_t); |
| 84 | } |
| 85 | |
/*
 * mp_ops callback, executed on each CPU while it is in SMM: program the
 * TSEG base/mask MSRs from the values captured in get_smm_info(), then
 * point this CPU's save-state SMBASE at its staggered handler location
 * so subsequent SMIs enter the relocated handler.
 */
static void relocation_handler(int cpu, uintptr_t curr_smbase,
			       uintptr_t staggered_smbase)
{
	msr_t tseg_base, tseg_mask;
	amd64_smm_state_save_area_t *smm_state;

	tseg_base.lo = relo_attrs.tseg_base;
	tseg_base.hi = 0;
	wrmsr(SMM_ADDR_MSR, tseg_base);
	tseg_mask.lo = relo_attrs.tseg_mask;
	/* Extend the mask through all implemented physical address bits. */
	tseg_mask.hi = ((1 << (cpu_phys_address_size() - 32)) - 1);
	wrmsr(SMM_MASK_MSR, tseg_mask);
	/* Update SMBASE in the save state at the current (pre-move) SMM base. */
	smm_state = (void *)(SMM_AMD64_SAVE_STATE_OFFSET + curr_smbase);
	smm_state->smbase = staggered_smbase;
}
| 101 | |
/* Callbacks consumed by mp_init_with_smm() for AP bring-up and SMM relocation. */
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.relocation_handler = relocation_handler,
	/* Once all CPUs are relocated, allow SMIs to be generated. */
	.post_mp_init = enable_smi_generation,
};
| 109 | |
| 110 | void stoney_init_cpus(struct device *dev) |
| 111 | { |
| 112 | /* Clear for take-off */ |
| 113 | if (mp_init_with_smm(dev->link_list, &mp_ops) < 0) |
| 114 | printk(BIOS_ERR, "MP initialization failure.\n"); |
Marshall Dawson | 8f031d8 | 2018-04-09 22:15:06 -0600 | [diff] [blame] | 115 | |
| 116 | /* The flash is now no longer cacheable. Reset to WP for performance. */ |
| 117 | mtrr_use_temp_range(FLASH_BASE_ADDR, CONFIG_ROM_SIZE, MTRR_TYPE_WRPROT); |
Marshall Dawson | 2e49cf12 | 2018-08-03 17:05:22 -0600 | [diff] [blame] | 118 | |
| 119 | set_warm_reset_flag(); |
Marshall Dawson | a7bfbbe | 2017-09-13 17:24:53 -0600 | [diff] [blame] | 120 | } |
Marshall Dawson | 178e65d | 2017-10-20 13:20:25 -0600 | [diff] [blame] | 121 | |
Marshall Dawson | 74473ec | 2018-08-05 10:42:17 -0600 | [diff] [blame] | 122 | static void model_15_init(struct device *dev) |
| 123 | { |
| 124 | check_mca(); |
Marshall Dawson | 178e65d | 2017-10-20 13:20:25 -0600 | [diff] [blame] | 125 | setup_lapic(); |
Marshall Dawson | 638bd13 | 2018-09-14 10:16:40 -0600 | [diff] [blame] | 126 | |
| 127 | /* |
| 128 | * Per AMD, sync an undocumented MSR with the PSP base address. |
| 129 | * Experiments showed that if you write to the MSR after it has |
| 130 | * been previously programmed, it causes a general protection fault. |
| 131 | * Also, the MSR survives warm reset and S3 cycles, so we need to |
| 132 | * test if it was previously written before writing to it. |
| 133 | */ |
| 134 | msr_t psp_msr; |
| 135 | uint32_t psp_bar; /* Note: NDA BKDG names this 32-bit register BAR3 */ |
| 136 | psp_bar = pci_read_config32(SOC_PSP_DEV, PCI_BASE_ADDRESS_4); |
| 137 | psp_bar &= ~PCI_BASE_ADDRESS_MEM_ATTR_MASK; |
| 138 | psp_msr = rdmsr(0xc00110a2); |
| 139 | if (psp_msr.lo == 0) { |
| 140 | psp_msr.lo = psp_bar; |
| 141 | wrmsr(0xc00110a2, psp_msr); |
| 142 | } |
Marshall Dawson | 178e65d | 2017-10-20 13:20:25 -0600 | [diff] [blame] | 143 | } |
| 144 | |
static struct device_operations cpu_dev_ops = {
	.init = model_15_init,
};

/* CPUIDs matched by this driver (Family 15h Stoney Ridge steppings). */
static struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_AMD, 0x660f01 },
	{ X86_VENDOR_AMD, 0x670f00 },
	{ 0, 0 },	/* list terminator */
};

/* Register the driver so matching CPU devices get model_15_init(). */
static const struct cpu_driver model_15 __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};