Angel Pons | c3f58f6 | 2020-04-05 15:46:41 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
Aaron Durbin | 302cbd6 | 2013-10-21 12:36:17 -0500 | [diff] [blame] | 2 | |
Aaron Durbin | 302cbd6 | 2013-10-21 12:36:17 -0500 | [diff] [blame] | 3 | #include <console/console.h> |
| 4 | #include <cpu/cpu.h> |
Matt DeVillier | e5a1a4c | 2017-01-19 21:13:02 -0600 | [diff] [blame] | 5 | #include <cpu/intel/common/common.h> |
Kyösti Mälkki | faf20d3 | 2019-08-14 05:41:41 +0300 | [diff] [blame] | 6 | #include <cpu/intel/em64t100_save_state.h> |
Aaron Durbin | 302cbd6 | 2013-10-21 12:36:17 -0500 | [diff] [blame] | 7 | #include <cpu/intel/microcode.h> |
Kyösti Mälkki | faf20d3 | 2019-08-14 05:41:41 +0300 | [diff] [blame] | 8 | #include <cpu/intel/smm_reloc.h> |
Duncan Laurie | 05a3393 | 2013-11-05 12:59:50 -0800 | [diff] [blame] | 9 | #include <cpu/intel/turbo.h> |
Aaron Durbin | 7837be6 | 2013-10-21 22:32:00 -0500 | [diff] [blame] | 10 | #include <cpu/x86/lapic.h> |
Aaron Durbin | 302cbd6 | 2013-10-21 12:36:17 -0500 | [diff] [blame] | 11 | #include <cpu/x86/mp.h> |
Aaron Durbin | 7837be6 | 2013-10-21 22:32:00 -0500 | [diff] [blame] | 12 | #include <cpu/x86/msr.h> |
| 13 | #include <cpu/x86/mtrr.h> |
| 14 | #include <cpu/x86/smm.h> |
Felix Held | 37e160e | 2021-10-20 23:43:12 +0200 | [diff] [blame] | 15 | #include <device/device.h> |
Duncan Laurie | 05a3393 | 2013-11-05 12:59:50 -0800 | [diff] [blame] | 16 | #include <reg_script.h> |
Julius Werner | 18ea2d3 | 2014-10-07 16:42:17 -0700 | [diff] [blame] | 17 | #include <soc/iosf.h> |
| 18 | #include <soc/msr.h> |
| 19 | #include <soc/pattrs.h> |
| 20 | #include <soc/ramstage.h> |
Felix Held | d27ef5b | 2021-10-20 20:18:12 +0200 | [diff] [blame] | 21 | #include <types.h> |
Aaron Durbin | 7837be6 | 2013-10-21 22:32:00 -0500 | [diff] [blame] | 22 | |
Duncan Laurie | 05a3393 | 2013-11-05 12:59:50 -0800 | [diff] [blame] | 23 | /* Core level MSRs */ |
/* Core level MSRs — run once on every logical CPU by soc_core_init(). */
static const struct reg_script core_msr_script[] = {
	/* Dynamic L2 shrink enable and threshold, clear SINGLE_PCTL bit 11 */
	REG_MSR_RMW(MSR_PKG_CST_CONFIG_CONTROL, ~0x3f080f, 0xe0008),
	/* Disable automatic C-state demotion in ULFM/independent modes. */
	REG_MSR_RMW(MSR_POWER_MISC, ~(ENABLE_ULFM_AUTOCM_MASK | ENABLE_INDP_AUTOCM_MASK), 0),

	/* Disable C1E */
	REG_MSR_RMW(MSR_POWER_CTL, ~0x2, 0),
	/* NOTE(review): 0x44 sets bits 2 and 6 of MSR_POWER_MISC; exact
	   semantics are not visible here — confirm against the SoC EDS. */
	REG_MSR_OR(MSR_POWER_MISC, 0x44),
	REG_SCRIPT_END
};
| 34 | |
Angel Pons | 1fb17d6 | 2020-07-07 18:13:47 +0200 | [diff] [blame] | 35 | static void soc_core_init(struct device *cpu) |
Aaron Durbin | 302cbd6 | 2013-10-21 12:36:17 -0500 | [diff] [blame] | 36 | { |
| 37 | printk(BIOS_DEBUG, "Init BayTrail core.\n"); |
Duncan Laurie | 05a3393 | 2013-11-05 12:59:50 -0800 | [diff] [blame] | 38 | |
Angel Pons | 26b49cc | 2020-07-07 17:17:51 +0200 | [diff] [blame] | 39 | /* |
| 40 | * The turbo disable bit is actually scoped at building block level -- not package. |
| 41 | * For non-BSP cores that are within a building block, enable turbo. The cores within |
| 42 | * the BSP's building block will just see it already enabled and move on. |
| 43 | */ |
Aaron Durbin | 59d1d87 | 2014-01-14 17:34:10 -0600 | [diff] [blame] | 44 | if (lapicid()) |
| 45 | enable_turbo(); |
| 46 | |
Matt DeVillier | e5a1a4c | 2017-01-19 21:13:02 -0600 | [diff] [blame] | 47 | /* Set virtualization based on Kconfig option */ |
Matt DeVillier | f9aed65 | 2018-12-15 15:57:33 -0600 | [diff] [blame] | 48 | set_vmx_and_lock(); |
Matt DeVillier | e5a1a4c | 2017-01-19 21:13:02 -0600 | [diff] [blame] | 49 | |
Duncan Laurie | 05a3393 | 2013-11-05 12:59:50 -0800 | [diff] [blame] | 50 | /* Set core MSRs */ |
| 51 | reg_script_run(core_msr_script); |
| 52 | |
| 53 | /* Set this core to max frequency ratio */ |
| 54 | set_max_freq(); |
Aaron Durbin | 302cbd6 | 2013-10-21 12:36:17 -0500 | [diff] [blame] | 55 | } |
| 56 | |
/* Device operations for CPU devices matched by cpu_table below. */
static struct device_operations cpu_dev_ops = {
	.init = soc_core_init,
};
| 60 | |
/* CPUID signatures handled by this driver, matched exactly (family/
   model/stepping). Presumably these are Bay Trail steppings — confirm
   against Intel documentation. */
static const struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_INTEL, 0x30673, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, 0x30678, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, 0x30679, CPUID_EXACT_MATCH_MASK },
	CPU_TABLE_END
};
| 67 | |
/* Register this CPU driver so matching CPUID devices get soc_core_init. */
static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};
| 72 | |
Aaron Durbin | 7837be6 | 2013-10-21 22:32:00 -0500 | [diff] [blame] | 73 | /* |
Aaron Durbin | b04bb65 | 2016-05-03 11:12:52 -0500 | [diff] [blame] | 74 | * MP and SMM loading initialization. |
Aaron Durbin | 7837be6 | 2013-10-21 22:32:00 -0500 | [diff] [blame] | 75 | */ |
| 76 | |
Aaron Durbin | b04bb65 | 2016-05-03 11:12:52 -0500 | [diff] [blame] | 77 | /* Package level MSRs */ |
/* Package level MSRs — run once on the BSP from pre_mp_init(). */
static const struct reg_script package_msr_script[] = {
	/* Set Package TDP to ~7W */
	REG_MSR_WRITE(MSR_PKG_POWER_LIMIT, 0x3880fa),
	/* Clear the PP1 (graphics) power-limit field in bits 23:17. */
	REG_MSR_RMW(MSR_PP1_POWER_LIMIT, ~(0x7f << 17), 0),
	/* Turbo and workload configuration. NOTE(review): the magic values
	   below are not decodable from this file — verify against the SoC
	   EDS before changing any of them. */
	REG_MSR_WRITE(MSR_PKG_TURBO_CFG1, 0x702),
	REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG1, 0x200b),
	REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG2, 0),
	/* Thermal configuration. */
	REG_MSR_WRITE(MSR_CPU_THERM_CFG1, 0x00000305),
	REG_MSR_WRITE(MSR_CPU_THERM_CFG2, 0x0405500d),
	REG_MSR_WRITE(MSR_CPU_THERM_SENS_CFG, 0x27),
	REG_SCRIPT_END
};
Aaron Durbin | 7837be6 | 2013-10-21 22:32:00 -0500 | [diff] [blame] | 90 | |
Aaron Durbin | b04bb65 | 2016-05-03 11:12:52 -0500 | [diff] [blame] | 91 | static void pre_mp_init(void) |
| 92 | { |
| 93 | uint32_t bsmrwac; |
| 94 | |
| 95 | /* Set up MTRRs based on physical address size. */ |
| 96 | x86_setup_mtrrs_with_detect(); |
| 97 | x86_mtrr_check(); |
| 98 | |
| 99 | /* |
Angel Pons | 26b49cc | 2020-07-07 17:17:51 +0200 | [diff] [blame] | 100 | * Configure the BUNIT to allow dirty cache line evictions in non-SMM mode for lines |
| 101 | * that were dirtied while in SMM mode. Otherwise the writes would be silently dropped. |
Aaron Durbin | b04bb65 | 2016-05-03 11:12:52 -0500 | [diff] [blame] | 102 | */ |
| 103 | bsmrwac = iosf_bunit_read(BUNIT_SMRWAC) | SAI_IA_UNTRUSTED; |
| 104 | iosf_bunit_write(BUNIT_SMRWAC, bsmrwac); |
| 105 | |
| 106 | /* Set package MSRs */ |
| 107 | reg_script_run(package_msr_script); |
| 108 | |
| 109 | /* Enable Turbo Mode on BSP and siblings of the BSP's building block. */ |
| 110 | enable_turbo(); |
Aaron Durbin | 7837be6 | 2013-10-21 22:32:00 -0500 | [diff] [blame] | 111 | } |
| 112 | |
Aaron Durbin | b04bb65 | 2016-05-03 11:12:52 -0500 | [diff] [blame] | 113 | static int get_cpu_count(void) |
| 114 | { |
| 115 | const struct pattrs *pattrs = pattrs_get(); |
| 116 | |
| 117 | return pattrs->num_cpus; |
| 118 | } |
| 119 | |
Kyösti Mälkki | 568a42a | 2019-08-14 06:48:28 +0300 | [diff] [blame] | 120 | static void fill_in_relocation_params(struct smm_relocation_params *params) |
| 121 | { |
| 122 | uintptr_t tseg_base; |
| 123 | size_t tseg_size; |
| 124 | |
| 125 | /* All range registers are aligned to 4KiB */ |
| 126 | const u32 rmask = ~((1 << 12) - 1); |
| 127 | |
| 128 | smm_region(&tseg_base, &tseg_size); |
| 129 | |
| 130 | /* SMRR has 32-bits of valid address aligned to 4KiB. */ |
| 131 | params->smrr_base.lo = (tseg_base & rmask) | MTRR_TYPE_WRBACK; |
| 132 | params->smrr_base.hi = 0; |
| 133 | params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID; |
| 134 | params->smrr_mask.hi = 0; |
| 135 | } |
| 136 | |
Aaron Durbin | b04bb65 | 2016-05-03 11:12:52 -0500 | [diff] [blame] | 137 | static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, |
| 138 | size_t *smm_save_state_size) |
| 139 | { |
Kyösti Mälkki | 568a42a | 2019-08-14 06:48:28 +0300 | [diff] [blame] | 140 | printk(BIOS_DEBUG, "Setting up SMI for CPU\n"); |
Aaron Durbin | b04bb65 | 2016-05-03 11:12:52 -0500 | [diff] [blame] | 141 | |
Kyösti Mälkki | 568a42a | 2019-08-14 06:48:28 +0300 | [diff] [blame] | 142 | fill_in_relocation_params(&smm_reloc_params); |
Aaron Durbin | b04bb65 | 2016-05-03 11:12:52 -0500 | [diff] [blame] | 143 | |
Kyösti Mälkki | 568a42a | 2019-08-14 06:48:28 +0300 | [diff] [blame] | 144 | smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize); |
| 145 | |
Aaron Durbin | b04bb65 | 2016-05-03 11:12:52 -0500 | [diff] [blame] | 146 | *smm_save_state_size = sizeof(em64t100_smm_state_save_area_t); |
| 147 | } |
| 148 | |
Aaron Durbin | b04bb65 | 2016-05-03 11:12:52 -0500 | [diff] [blame] | 149 | static void get_microcode_info(const void **microcode, int *parallel) |
| 150 | { |
| 151 | const struct pattrs *pattrs = pattrs_get(); |
| 152 | |
| 153 | *microcode = pattrs->microcode_patch; |
Patrick Rudolph | ce51b34 | 2021-01-11 09:21:58 +0100 | [diff] [blame] | 154 | *parallel = !intel_ht_supported(); |
Aaron Durbin | b04bb65 | 2016-05-03 11:12:52 -0500 | [diff] [blame] | 155 | } |
| 156 | |
| 157 | static void per_cpu_smm_trigger(void) |
| 158 | { |
| 159 | const struct pattrs *pattrs = pattrs_get(); |
| 160 | |
| 161 | /* Relocate SMM space. */ |
| 162 | smm_initiate_relocation(); |
| 163 | |
| 164 | /* Load microcode after SMM relocation. */ |
| 165 | intel_microcode_load_unlocked(pattrs->microcode_patch); |
| 166 | } |
| 167 | |
Angel Pons | b046bfa | 2020-07-07 17:36:08 +0200 | [diff] [blame] | 168 | static void relocation_handler(int cpu, uintptr_t curr_smbase, uintptr_t staggered_smbase) |
Aaron Durbin | 7837be6 | 2013-10-21 22:32:00 -0500 | [diff] [blame] | 169 | { |
Kyösti Mälkki | 568a42a | 2019-08-14 06:48:28 +0300 | [diff] [blame] | 170 | struct smm_relocation_params *relo_params = &smm_reloc_params; |
Aaron Durbin | 7837be6 | 2013-10-21 22:32:00 -0500 | [diff] [blame] | 171 | em64t100_smm_state_save_area_t *smm_state; |
Aaron Durbin | 7837be6 | 2013-10-21 22:32:00 -0500 | [diff] [blame] | 172 | |
| 173 | /* Set up SMRR. */ |
Kyösti Mälkki | 568a42a | 2019-08-14 06:48:28 +0300 | [diff] [blame] | 174 | wrmsr(IA32_SMRR_PHYS_BASE, relo_params->smrr_base); |
| 175 | wrmsr(IA32_SMRR_PHYS_MASK, relo_params->smrr_mask); |
Aaron Durbin | 7837be6 | 2013-10-21 22:32:00 -0500 | [diff] [blame] | 176 | |
Aaron Durbin | b04bb65 | 2016-05-03 11:12:52 -0500 | [diff] [blame] | 177 | smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + curr_smbase); |
| 178 | smm_state->smbase = staggered_smbase; |
Aaron Durbin | 7837be6 | 2013-10-21 22:32:00 -0500 | [diff] [blame] | 179 | } |
| 180 | |
/* After all CPUs are up and relocated, enable SMIs chipset-wide. */
static void post_mp_init(void)
{
	global_smi_enable();
}
| 185 | |
/* Callbacks driving the generic MP-init/SMM-relocation flow. */
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.get_microcode_info = get_microcode_info,
	.pre_mp_smm_init = smm_southbridge_clear_state,
	.per_cpu_smm_trigger = per_cpu_smm_trigger,
	.relocation_handler = relocation_handler,
	.post_mp_init = post_mp_init,
};
| 196 | |
/* Entry point: bring up all CPUs on @cpu_bus with SMM, using mp_ops. */
void mp_init_cpus(struct bus *cpu_bus)
{
	/* TODO: Handle mp_init_with_smm failure? */
	mp_init_with_smm(cpu_bus, &mp_ops);
}