/* SPDX-License-Identifier: GPL-2.0-only */

#include <console/console.h>
#include <cpu/cpu.h>
#include <cpu/intel/common/common.h>
#include <cpu/intel/em64t100_save_state.h>
#include <cpu/intel/microcode.h>
#include <cpu/intel/smm_reloc.h>
#include <cpu/intel/turbo.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <device/device.h>
#include <reg_script.h>
#include <soc/iosf.h>
#include <soc/msr.h>
#include <soc/pattrs.h>
#include <soc/ramstage.h>
#include <types.h>

/* Core level MSRs */
static const struct reg_script core_msr_script[] = {
	/* Dynamic L2 shrink enable and threshold, clear SINGLE_PCTL bit 11 */
	REG_MSR_RMW(MSR_PKG_CST_CONFIG_CONTROL, ~0x3f080f, 0xe0008),
	REG_MSR_RMW(MSR_POWER_MISC, ~(ENABLE_ULFM_AUTOCM_MASK | ENABLE_INDP_AUTOCM_MASK), 0),

	/* Disable C1E */
	REG_MSR_RMW(MSR_POWER_CTL, ~0x2, 0),
	REG_MSR_OR(MSR_POWER_MISC, 0x44),
	REG_SCRIPT_END
};
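
/*
 * Note: reg_script's REG_MSR_RMW(msr, mask, or) reads the MSR, ANDs it with
 * 'mask' and then ORs in 'or' before writing it back, so each entry above
 * only modifies the bits cleared in its mask.
 */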

static void soc_core_init(struct device *cpu)
{
	printk(BIOS_DEBUG, "Init Braswell core.\n");

	/*
	 * The turbo disable bit is scoped at the building block level, not the
	 * package. Enable turbo on non-BSP cores; the cores within the BSP's
	 * building block will see it already enabled and move on.
	 */
	if (lapicid())
		enable_turbo();

	/* Enable or disable VMX per the Kconfig option and lock the feature control MSR */
	set_vmx_and_lock();

	/* Set core MSRs */
	reg_script_run(core_msr_script);

	/* Set this core to max frequency ratio */
	set_max_freq();
}

static struct device_operations cpu_dev_ops = {
	.init = soc_core_init,
};

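/*
 * The CPUID signatures below decode (per the standard CPUID signature
 * layout) to family 6, model 0x4C, i.e. Airmont cores, steppings 2 to 4.
 */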
static const struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_INTEL, 0x406c4, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, 0x406c3, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, 0x406c2, CPUID_EXACT_MATCH_MASK },
	CPU_TABLE_END
};

static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};

/*
 * MP and SMM loading initialization.
 */

/* Package level MSRs */
static const struct reg_script package_msr_script[] = {
	/* Set Package TDP to ~7W */
	REG_MSR_WRITE(MSR_PKG_POWER_LIMIT, 0x3880fa),
	REG_MSR_RMW(MSR_PP1_POWER_LIMIT, ~(0x7f << 17), 0),
	REG_MSR_WRITE(MSR_PKG_TURBO_CFG1, 0x702),
	REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG1, 0x200b),
	REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG2, 0),
	REG_MSR_WRITE(MSR_CPU_THERM_CFG1, 0x00000305),
	REG_MSR_WRITE(MSR_CPU_THERM_CFG2, 0x0405500d),
	REG_MSR_WRITE(MSR_CPU_THERM_SENS_CFG, 0x27),
	REG_SCRIPT_END
};
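
/*
 * Rough decode of the MSR_PKG_POWER_LIMIT value above, assuming the usual
 * RAPL layout: bits 14:0 = 0xfa (250 power units), bit 15 = limit enable.
 * If the power unit is 1/32 W (an assumption; it is hardware-specific and
 * reported by the RAPL power unit MSR), 250 units is about 7.8 W, which is
 * consistent with the ~7W comment.
 */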

static void pre_mp_init(void)
{
	uint32_t bsmrwac;

	/* Set up MTRRs based on physical address size. */
	x86_setup_mtrrs_with_detect();
	x86_mtrr_check();

	/*
	 * Configure the BUNIT to allow dirty cache line evictions in non-SMM mode for lines
	 * that were dirtied while in SMM mode. Otherwise the writes would be silently dropped.
	 */
	bsmrwac = iosf_bunit_read(BUNIT_SMRWAC) | SAI_IA_UNTRUSTED;
	iosf_bunit_write(BUNIT_SMRWAC, bsmrwac);

	/* Set package MSRs */
	reg_script_run(package_msr_script);

	/* Enable Turbo Mode on BSP and siblings of the BSP's building block. */
	enable_turbo();
}

static int get_cpu_count(void)
{
	const struct pattrs *pattrs = pattrs_get();

	return pattrs->num_cpus;
}

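/*
 * Worked example with hypothetical values: for an 8 MiB TSEG at 0x7f800000,
 * smrr_base.lo becomes 0x7f800006 (base | MTRR_TYPE_WRBACK) and smrr_mask.lo
 * becomes 0xff800800 (~(size - 1), 4KiB-aligned, | MTRR_PHYS_MASK_VALID).
 */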
static void fill_in_relocation_params(struct smm_relocation_params *params)
{
	uintptr_t tseg_base;
	size_t tseg_size;

	/* All range registers are aligned to 4KiB */
	const u32 rmask = ~((1 << 12) - 1);

	smm_region(&tseg_base, &tseg_size);

	/* SMRR has 32 bits of valid address, aligned to 4KiB. */
	params->smrr_base.lo = (tseg_base & rmask) | MTRR_TYPE_WRBACK;
	params->smrr_base.hi = 0;
	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
	params->smrr_mask.hi = 0;
}

static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
			 size_t *smm_save_state_size)
{
	printk(BIOS_DEBUG, "Setting up SMI for CPU\n");

	fill_in_relocation_params(&smm_reloc_params);

	smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);

	*smm_save_state_size = sizeof(em64t100_smm_state_save_area_t);
}

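/*
 * Parallel microcode loading is only reported as safe when each core runs a
 * single thread: on Hyper-Threading parts the sibling threads share the
 * update machinery, so loading must be serialized.
 */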
static void get_microcode_info(const void **microcode, int *parallel)
{
	const struct pattrs *pattrs = pattrs_get();

	*microcode = pattrs->microcode_patch;
	*parallel = !intel_ht_supported();
}

static void per_cpu_smm_trigger(void)
{
	const struct pattrs *pattrs = pattrs_get();
	msr_t msr_value;

	/* Need to make sure that all cores have microcode loaded. */
	msr_value = rdmsr(IA32_BIOS_SIGN_ID);
	if (msr_value.hi == 0)
		intel_microcode_load_unlocked(pattrs->microcode_patch);

	/* Relocate SMM space. */
	smm_initiate_relocation();

	/* Load microcode after SMM relocation. */
	intel_microcode_load_unlocked(pattrs->microcode_patch);
}

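/*
 * Runs on each CPU during its initial SMI: program the SMRR from the
 * precomputed parameters, then rewrite SMBASE in the save state so this
 * CPU re-enters SMM at its own staggered handler address.
 */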
static void relocation_handler(int cpu, uintptr_t curr_smbase, uintptr_t staggered_smbase)
{
	struct smm_relocation_params *relo_params = &smm_reloc_params;
	em64t100_smm_state_save_area_t *smm_state;

	/* Set up SMRR. */
	wrmsr(IA32_SMRR_PHYS_BASE, relo_params->smrr_base);
	wrmsr(IA32_SMRR_PHYS_MASK, relo_params->smrr_mask);

	smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + curr_smbase);
	smm_state->smbase = staggered_smbase;
}

static void post_mp_init(void)
{
	global_smi_enable();
}

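/*
 * mp_init_with_smm() drives these callbacks: pre_mp_init before the APs are
 * started, the get_* hooks to gather parameters, the SMM hooks around SMM
 * relocation, and post_mp_init once all CPUs are up (see <cpu/x86/mp.h>).
 */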
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.get_microcode_info = get_microcode_info,
	.pre_mp_smm_init = smm_southbridge_clear_state,
	.per_cpu_smm_trigger = per_cpu_smm_trigger,
	.relocation_handler = relocation_handler,
	.post_mp_init = post_mp_init,
};

void mp_init_cpus(struct bus *cpu_bus)
{
	/* TODO: Handle mp_init_with_smm failure? */
	mp_init_with_smm(cpu_bus, &mp_ops);
}