Patrick Georgi | ac95903 | 2020-05-05 22:49:26 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 2 | |
Furquan Shaikh | 76cedd2 | 2020-05-02 10:24:23 -0700 | [diff] [blame] | 3 | #include <acpi/acpi.h> |
Pratik Prajapati | dc194e2 | 2017-08-29 14:27:07 -0700 | [diff] [blame] | 4 | #include <assert.h> |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 5 | #include <console/console.h> |
Pratik Prajapati | dc194e2 | 2017-08-29 14:27:07 -0700 | [diff] [blame] | 6 | #include "chip.h" |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 7 | #include <cpu/cpu.h> |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 8 | #include <cpu/x86/mp.h> |
John Zhao | 3156934 | 2016-08-23 16:38:05 -0700 | [diff] [blame] | 9 | #include <cpu/intel/microcode.h> |
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 10 | #include <cpu/intel/turbo.h> |
Michael Niewöhner | 6303243 | 2020-10-11 17:34:54 +0200 | [diff] [blame] | 11 | #include <cpu/intel/common/common.h> |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 12 | #include <cpu/x86/msr.h> |
| 13 | #include <cpu/x86/mtrr.h> |
Kyösti Mälkki | b2a5f0b | 2019-08-04 19:54:32 +0300 | [diff] [blame] | 14 | #include <cpu/x86/smm.h> |
Kyösti Mälkki | e31ec29 | 2019-08-10 17:27:01 +0300 | [diff] [blame] | 15 | #include <cpu/intel/em64t100_save_state.h> |
Kyösti Mälkki | faf20d3 | 2019-08-14 05:41:41 +0300 | [diff] [blame] | 16 | #include <cpu/intel/smm_reloc.h> |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 17 | #include <device/device.h> |
| 18 | #include <device/pci.h> |
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 19 | #include <fsp/api.h> |
Barnali Sarkar | 66fe0c4 | 2017-05-23 18:17:14 +0530 | [diff] [blame] | 20 | #include <intelblocks/cpulib.h> |
Aaron Durbin | efc92a8 | 2017-06-08 10:54:59 -0500 | [diff] [blame] | 21 | #include <intelblocks/fast_spi.h> |
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 22 | #include <intelblocks/mp_init.h> |
Barnali Sarkar | 66fe0c4 | 2017-05-23 18:17:14 +0530 | [diff] [blame] | 23 | #include <intelblocks/msr.h> |
Pratik Prajapati | dc194e2 | 2017-08-29 14:27:07 -0700 | [diff] [blame] | 24 | #include <intelblocks/sgx.h> |
Ravi Sarawadi | ec729365 | 2016-09-09 14:08:50 -0700 | [diff] [blame] | 25 | #include <reg_script.h> |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 26 | #include <soc/cpu.h> |
Ravi Sarawadi | ec729365 | 2016-09-09 14:08:50 -0700 | [diff] [blame] | 27 | #include <soc/iomap.h> |
Pratik Prajapati | dc194e2 | 2017-08-29 14:27:07 -0700 | [diff] [blame] | 28 | #include <soc/pci_devs.h> |
Andrey Petrov | 3b63753 | 2016-11-30 17:39:16 -0800 | [diff] [blame] | 29 | #include <soc/pm.h> |
Felix Held | d27ef5b | 2021-10-20 20:18:12 +0200 | [diff] [blame] | 30 | #include <types.h> |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 31 | |
Ravi Sarawadi | ec729365 | 2016-09-09 14:08:50 -0700 | [diff] [blame] | 32 | static const struct reg_script core_msr_script[] = { |
Angel Pons | b36100f | 2020-09-07 13:18:10 +0200 | [diff] [blame] | 33 | #if !CONFIG(SOC_INTEL_GEMINILAKE) |
Ravi Sarawadi | ec729365 | 2016-09-09 14:08:50 -0700 | [diff] [blame] | 34 | /* Enable C-state and IO/MWAIT redirect */ |
Elyes HAOUAS | 4e6b790 | 2018-10-02 08:44:47 +0200 | [diff] [blame] | 35 | REG_MSR_WRITE(MSR_PKG_CST_CONFIG_CONTROL, |
Ravi Sarawadi | ec729365 | 2016-09-09 14:08:50 -0700 | [diff] [blame] | 36 | (PKG_C_STATE_LIMIT_C2_MASK | CORE_C_STATE_LIMIT_C10_MASK |
| 37 | | IO_MWAIT_REDIRECT_MASK | CST_CFG_LOCK_MASK)), |
| 38 | /* Power Management I/O base address for I/O trapping to C-states */ |
| 39 | REG_MSR_WRITE(MSR_PMG_IO_CAPTURE_BASE, |
| 40 | (ACPI_PMIO_CST_REG | (PMG_IO_BASE_CST_RNG_BLK_SIZE << 16))), |
Venkateswarlu Vinjamuri | 362180a | 2016-10-31 17:03:55 -0700 | [diff] [blame] | 41 | /* Disable support for MONITOR and MWAIT instructions */ |
Elyes HAOUAS | 419bfbc | 2018-10-01 08:47:51 +0200 | [diff] [blame] | 42 | REG_MSR_RMW(IA32_MISC_ENABLE, ~MONITOR_MWAIT_DIS_MASK, 0), |
Cole Nelson | f357c25 | 2017-05-16 11:38:59 -0700 | [diff] [blame] | 43 | #endif |
Cole Nelson | 9d0950f | 2018-06-12 10:02:49 -0700 | [diff] [blame] | 44 | /* Disable C1E */ |
| 45 | REG_MSR_RMW(MSR_POWER_CTL, ~POWER_CTL_C1E_MASK, 0), |
Ravi Sarawadi | ec729365 | 2016-09-09 14:08:50 -0700 | [diff] [blame] | 46 | REG_SCRIPT_END |
| 47 | }; |
| 48 | |
Subrata Banik | 56ab8e2 | 2022-01-07 13:40:19 +0000 | [diff] [blame] | 49 | bool cpu_soc_is_in_untrusted_mode(void) |
| 50 | { |
| 51 | msr_t msr; |
| 52 | |
| 53 | msr = rdmsr(MSR_POWER_MISC); |
| 54 | return !!(msr.lo & ENABLE_IA_UNTRUSTED); |
| 55 | } |
| 56 | |
Subrata Banik | 37a55d1 | 2022-05-30 18:11:12 +0000 | [diff] [blame] | 57 | void cpu_soc_bios_done(void) |
| 58 | { |
| 59 | msr_t msr; |
| 60 | |
| 61 | msr = rdmsr(MSR_POWER_MISC); |
| 62 | msr.lo |= ENABLE_IA_UNTRUSTED; |
| 63 | wrmsr(MSR_POWER_MISC, msr); |
| 64 | } |
| 65 | |
Elyes HAOUAS | 06e8315 | 2018-05-24 22:48:14 +0200 | [diff] [blame] | 66 | void soc_core_init(struct device *cpu) |
Ravi Sarawadi | ec729365 | 2016-09-09 14:08:50 -0700 | [diff] [blame] | 67 | { |
Patrick Rudolph | fc36e9f | 2021-01-25 10:46:16 +0100 | [diff] [blame] | 68 | /* Configure Core PRMRR for SGX. */ |
| 69 | if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_ENABLE)) |
| 70 | prmrr_core_configure(); |
| 71 | |
Pratik Prajapati | dc194e2 | 2017-08-29 14:27:07 -0700 | [diff] [blame] | 72 | /* Clear out pending MCEs */ |
Nico Huber | aa4d9b9 | 2019-02-01 14:20:37 +0100 | [diff] [blame] | 73 | /* TODO(adurbin): Some of these banks are core vs package |
| 74 | scope. For now every CPU clears every bank. */ |
Michael Niewöhner | 7736bfc | 2019-10-22 23:05:06 +0200 | [diff] [blame] | 75 | if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_ENABLE) || acpi_get_sleep_type() == ACPI_S5) |
Subrata Banik | f91344c | 2019-05-06 19:23:26 +0530 | [diff] [blame] | 76 | mca_configure(); |
Pratik Prajapati | dc194e2 | 2017-08-29 14:27:07 -0700 | [diff] [blame] | 77 | |
Ravi Sarawadi | ec729365 | 2016-09-09 14:08:50 -0700 | [diff] [blame] | 78 | /* Set core MSRs */ |
| 79 | reg_script_run(core_msr_script); |
Michael Niewöhner | 6303243 | 2020-10-11 17:34:54 +0200 | [diff] [blame] | 80 | |
| 81 | set_aesni_lock(); |
| 82 | |
Andrey Petrov | 3b63753 | 2016-11-30 17:39:16 -0800 | [diff] [blame] | 83 | /* |
| 84 | * Enable ACPI PM timer emulation, which also lets microcode know |
Barnali Sarkar | 9e55ff6 | 2017-06-05 20:01:14 +0530 | [diff] [blame] | 85 | * location of ACPI_BASE_ADDRESS. This also enables other features |
Andrey Petrov | 3b63753 | 2016-11-30 17:39:16 -0800 | [diff] [blame] | 86 | * implemented in microcode. |
| 87 | */ |
| 88 | enable_pm_timer_emulation(); |
Pratik Prajapati | dc194e2 | 2017-08-29 14:27:07 -0700 | [diff] [blame] | 89 | |
Mario Scheithauer | d0e5133 | 2017-10-24 16:57:26 +0200 | [diff] [blame] | 90 | /* Set Max Non-Turbo ratio if RAPL is disabled. */ |
Uwe Poeche | 539fd2a | 2022-03-28 12:39:01 +0200 | [diff] [blame] | 91 | if (CONFIG(SOC_INTEL_DISABLE_POWER_LIMITS)) { |
Mario Scheithauer | d0e5133 | 2017-10-24 16:57:26 +0200 | [diff] [blame] | 92 | cpu_set_p_state_to_max_non_turbo_ratio(); |
Subrata Banik | 6d56916 | 2019-04-10 12:19:27 +0530 | [diff] [blame] | 93 | /* Disable speed step */ |
| 94 | cpu_set_eist(false); |
Uwe Poeche | 539fd2a | 2022-03-28 12:39:01 +0200 | [diff] [blame] | 95 | } else if (CONFIG(SOC_INTEL_SET_MIN_CLOCK_RATIO)) { |
Werner Zeh | 2636186 | 2018-11-21 12:36:21 +0100 | [diff] [blame] | 96 | cpu_set_p_state_to_min_clock_ratio(); |
Subrata Banik | 6d56916 | 2019-04-10 12:19:27 +0530 | [diff] [blame] | 97 | /* Disable speed step */ |
| 98 | cpu_set_eist(false); |
Mario Scheithauer | d0e5133 | 2017-10-24 16:57:26 +0200 | [diff] [blame] | 99 | } |
Ravi Sarawadi | ec729365 | 2016-09-09 14:08:50 -0700 | [diff] [blame] | 100 | } |
| 101 | |
Julius Werner | cd49cce | 2019-03-05 16:53:33 -0800 | [diff] [blame] | 102 | #if !CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT) |
Elyes HAOUAS | 06e8315 | 2018-05-24 22:48:14 +0200 | [diff] [blame] | 103 | static void soc_init_core(struct device *cpu) |
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 104 | { |
Pratik Prajapati | 9cd6a26 | 2017-08-14 13:57:46 -0700 | [diff] [blame] | 105 | soc_core_init(cpu); |
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 106 | } |
| 107 | |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 108 | static struct device_operations cpu_dev_ops = { |
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 109 | .init = soc_init_core, |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 110 | }; |
| 111 | |
Jonathan Neuschäfer | 8f06ce3 | 2017-11-20 01:56:44 +0100 | [diff] [blame] | 112 | static const struct cpu_device_id cpu_table[] = { |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 113 | { X86_VENDOR_INTEL, CPUID_APOLLOLAKE_A0 }, |
| 114 | { X86_VENDOR_INTEL, CPUID_APOLLOLAKE_B0 }, |
Mario Scheithauer | 545593d | 2017-10-24 17:41:19 +0200 | [diff] [blame] | 115 | { X86_VENDOR_INTEL, CPUID_APOLLOLAKE_E0 }, |
Hannah Williams | 3ff14a0 | 2017-05-05 16:30:22 -0700 | [diff] [blame] | 116 | { X86_VENDOR_INTEL, CPUID_GLK_A0 }, |
| 117 | { X86_VENDOR_INTEL, CPUID_GLK_B0 }, |
John Zhao | 7528f83 | 2019-05-10 10:51:52 -0700 | [diff] [blame] | 118 | { X86_VENDOR_INTEL, CPUID_GLK_R0 }, |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 119 | { 0, 0 }, |
| 120 | }; |
| 121 | |
/* Register the CPU driver for the IDs listed in cpu_table. */
static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 126 | #endif |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 127 | |
Hannah Williams | d9c84ca | 2016-05-13 00:47:14 -0700 | [diff] [blame] | 128 | /* |
| 129 | * MP and SMM loading initialization. |
| 130 | */ |
/* SMM relocation parameters, computed once in get_smm_info() and then
   consumed by relocation_handler() on every CPU. */
struct smm_relocation_attrs {
	uint32_t smbase;	/* Base address of the SMM region. */
	uint32_t smrr_base;	/* SMRR base value (smbase | cache type). */
	uint32_t smrr_mask;	/* SMRR mask value (size mask | valid bit). */
};

static struct smm_relocation_attrs relo_attrs;
| 138 | |
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 139 | /* |
| 140 | * Do essential initialization tasks before APs can be fired up. |
| 141 | * |
Julius Werner | cd49cce | 2019-03-05 16:53:33 -0800 | [diff] [blame] | 142 | * IF (CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)) - |
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 143 | * Skip Pre MP init MTRR programming, as MTRRs are mirrored from BSP, |
| 144 | * that are set prior to ramstage. |
| 145 | * Real MTRRs are programmed after resource allocation. |
| 146 | * |
| 147 | * Do FSP loading before MP Init to ensure that the FSP component stored in |
| 148 | * external stage cache in TSEG does not flush off due to SMM relocation |
| 149 | * during MP Init stage. |
| 150 | * |
| 151 | * ELSE - |
| 152 | * Enable MTRRs on the BSP. This creates the MTRR solution that the |
| 153 | * APs will use. Otherwise APs will try to apply the incomplete solution |
| 154 | * as the BSP is calculating it. |
| 155 | */ |
| 156 | static void pre_mp_init(void) |
| 157 | { |
Julius Werner | cd49cce | 2019-03-05 16:53:33 -0800 | [diff] [blame] | 158 | if (CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)) { |
Kyösti Mälkki | cc93c6e | 2021-01-09 22:53:52 +0200 | [diff] [blame] | 159 | fsps_load(); |
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 160 | return; |
| 161 | } |
| 162 | x86_setup_mtrrs_with_detect(); |
| 163 | x86_mtrr_check(); |
Mario Scheithauer | e4cb23c | 2019-03-07 09:48:33 +0100 | [diff] [blame] | 164 | |
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 165 | } |
| 166 | |
Julius Werner | cd49cce | 2019-03-05 16:53:33 -0800 | [diff] [blame] | 167 | #if !CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT) |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 168 | static void read_cpu_topology(unsigned int *num_phys, unsigned int *num_virt) |
| 169 | { |
| 170 | msr_t msr; |
| 171 | msr = rdmsr(MSR_CORE_THREAD_COUNT); |
| 172 | *num_virt = (msr.lo >> 0) & 0xffff; |
| 173 | *num_phys = (msr.lo >> 16) & 0xffff; |
| 174 | } |
| 175 | |
Aaron Durbin | e72b9d4 | 2016-05-03 15:56:24 -0500 | [diff] [blame] | 176 | /* Find CPU topology */ |
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 177 | int get_cpu_count(void) |
Aaron Durbin | e72b9d4 | 2016-05-03 15:56:24 -0500 | [diff] [blame] | 178 | { |
| 179 | unsigned int num_virt_cores, num_phys_cores; |
| 180 | |
| 181 | read_cpu_topology(&num_phys_cores, &num_virt_cores); |
| 182 | |
| 183 | printk(BIOS_DEBUG, "Detected %u core, %u thread CPU.\n", |
| 184 | num_phys_cores, num_virt_cores); |
| 185 | |
| 186 | return num_virt_cores; |
| 187 | } |
| 188 | |
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 189 | void get_microcode_info(const void **microcode, int *parallel) |
John Zhao | 3156934 | 2016-08-23 16:38:05 -0700 | [diff] [blame] | 190 | { |
| 191 | *microcode = intel_microcode_find(); |
| 192 | *parallel = 1; |
Barnali Sarkar | 97daf98 | 2017-06-07 13:47:51 +0530 | [diff] [blame] | 193 | |
| 194 | /* Make sure BSP is using the microcode from cbfs */ |
| 195 | intel_microcode_load_unlocked(*microcode); |
John Zhao | 3156934 | 2016-08-23 16:38:05 -0700 | [diff] [blame] | 196 | } |
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 197 | #endif |
John Zhao | 3156934 | 2016-08-23 16:38:05 -0700 | [diff] [blame] | 198 | |
Hannah Williams | d9c84ca | 2016-05-13 00:47:14 -0700 | [diff] [blame] | 199 | static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, |
| 200 | size_t *smm_save_state_size) |
| 201 | { |
Kyösti Mälkki | 14222d8 | 2019-08-05 15:10:18 +0300 | [diff] [blame] | 202 | uintptr_t smm_base; |
Hannah Williams | d9c84ca | 2016-05-13 00:47:14 -0700 | [diff] [blame] | 203 | size_t smm_size; |
Kyösti Mälkki | 14222d8 | 2019-08-05 15:10:18 +0300 | [diff] [blame] | 204 | uintptr_t handler_base; |
Brandon Breitenstein | 135eae9 | 2016-09-30 13:57:12 -0700 | [diff] [blame] | 205 | size_t handler_size; |
Hannah Williams | d9c84ca | 2016-05-13 00:47:14 -0700 | [diff] [blame] | 206 | |
| 207 | /* All range registers are aligned to 4KiB */ |
| 208 | const uint32_t rmask = ~((1 << 12) - 1); |
| 209 | |
| 210 | /* Initialize global tracking state. */ |
Kyösti Mälkki | dc6c322 | 2019-08-04 20:17:28 +0300 | [diff] [blame] | 211 | smm_region(&smm_base, &smm_size); |
Brandon Breitenstein | 135eae9 | 2016-09-30 13:57:12 -0700 | [diff] [blame] | 212 | smm_subregion(SMM_SUBREGION_HANDLER, &handler_base, &handler_size); |
| 213 | |
Kyösti Mälkki | 14222d8 | 2019-08-05 15:10:18 +0300 | [diff] [blame] | 214 | relo_attrs.smbase = smm_base; |
Hannah Williams | d9c84ca | 2016-05-13 00:47:14 -0700 | [diff] [blame] | 215 | relo_attrs.smrr_base = relo_attrs.smbase | MTRR_TYPE_WRBACK; |
| 216 | relo_attrs.smrr_mask = ~(smm_size - 1) & rmask; |
| 217 | relo_attrs.smrr_mask |= MTRR_PHYS_MASK_VALID; |
| 218 | |
Kyösti Mälkki | 14222d8 | 2019-08-05 15:10:18 +0300 | [diff] [blame] | 219 | *perm_smbase = handler_base; |
Brandon Breitenstein | 135eae9 | 2016-09-30 13:57:12 -0700 | [diff] [blame] | 220 | *perm_smsize = handler_size; |
Hannah Williams | d9c84ca | 2016-05-13 00:47:14 -0700 | [diff] [blame] | 221 | *smm_save_state_size = sizeof(em64t100_smm_state_save_area_t); |
| 222 | } |
| 223 | |
/*
 * Per-CPU SMM relocation callback: program this CPU's SMRR from the
 * values cached in relo_attrs, then write the staggered SMBASE into
 * the current save state so the next SMM entry uses it.
 */
static void relocation_handler(int cpu, uintptr_t curr_smbase,
			       uintptr_t staggered_smbase)
{
	msr_t smrr;
	em64t100_smm_state_save_area_t *smm_state;
	/* Set up SMRR. */
	smrr.lo = relo_attrs.smrr_base;
	smrr.hi = 0;
	wrmsr(IA32_SMRR_PHYS_BASE, smrr);
	smrr.lo = relo_attrs.smrr_mask;
	smrr.hi = 0;
	wrmsr(IA32_SMRR_PHYS_MASK, smrr);
	/* Relocate by patching smbase in the em64t100 save state area. */
	smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + curr_smbase);
	smm_state->smbase = staggered_smbase;
}
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 239 | /* |
| 240 | * CPU initialization recipe |
| 241 | * |
| 242 | * Note that no microcode update is passed to the init function. CSE updates |
| 243 | * the microcode on all cores before releasing them from reset. That means that |
| 244 | * the BSP and all APs will come up with the same microcode revision. |
| 245 | */ |
Pratik Prajapati | dc194e2 | 2017-08-29 14:27:07 -0700 | [diff] [blame] | 246 | |
/* Runs after all APs are up and SMM is relocated. */
static void post_mp_init(void)
{
	/* SMIs can only be enabled once every CPU has been relocated. */
	global_smi_enable();

	/* SGX configuration must be applied on every CPU. */
	if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_ENABLE))
		mp_run_on_all_cpus(sgx_configure, NULL);
}
| 254 | |
Aaron Durbin | e72b9d4 | 2016-05-03 15:56:24 -0500 | [diff] [blame] | 255 | static const struct mp_ops mp_ops = { |
| 256 | .pre_mp_init = pre_mp_init, |
| 257 | .get_cpu_count = get_cpu_count, |
Hannah Williams | d9c84ca | 2016-05-13 00:47:14 -0700 | [diff] [blame] | 258 | .get_smm_info = get_smm_info, |
John Zhao | 3156934 | 2016-08-23 16:38:05 -0700 | [diff] [blame] | 259 | .get_microcode_info = get_microcode_info, |
Brandon Breitenstein | a86d1b8 | 2017-06-08 17:32:02 -0700 | [diff] [blame] | 260 | .pre_mp_smm_init = smm_southbridge_clear_state, |
Hannah Williams | d9c84ca | 2016-05-13 00:47:14 -0700 | [diff] [blame] | 261 | .relocation_handler = relocation_handler, |
Pratik Prajapati | dc194e2 | 2017-08-29 14:27:07 -0700 | [diff] [blame] | 262 | .post_mp_init = post_mp_init, |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 263 | }; |
| 264 | |
Pratik Prajapati | 9cd6a26 | 2017-08-14 13:57:46 -0700 | [diff] [blame] | 265 | void soc_init_cpus(struct bus *cpu_bus) |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 266 | { |
Ravi Sarawadi | 9d903a1 | 2016-03-04 21:33:04 -0800 | [diff] [blame] | 267 | /* Clear for take-off */ |
Felix Held | 4dd7d11 | 2021-10-20 23:31:43 +0200 | [diff] [blame] | 268 | /* TODO: Handle mp_init_with_smm failure? */ |
| 269 | mp_init_with_smm(cpu_bus, &mp_ops); |
Barnali Sarkar | 1e6b980 | 2017-08-07 18:26:31 +0530 | [diff] [blame] | 270 | } |
| 271 | |
/* Entry point for CPU init from the cluster device. */
void apollolake_init_cpus(struct device *dev)
{
	/* With FSP-driven MP init, coreboot's own MP init is skipped here. */
	if (CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT))
		return;
	/* Ensure the cluster device has a downstream link to enumerate on. */
	if (!dev->link_list)
		add_more_links(dev, 1);
	soc_init_cpus(dev->link_list);

	/* Temporarily cache the memory-mapped boot media. */
	if (CONFIG(BOOT_DEVICE_MEMORY_MAPPED) &&
	    CONFIG(BOOT_DEVICE_SPI_FLASH))
		fast_spi_cache_bios_region();
}