/* SPDX-License-Identifier: GPL-2.0-only */

#include <cpu/intel/smm_reloc.h>
#include <cpu/intel/turbo.h>
#include <cpu/intel/common/common.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/msr.h>
#include <device/pci.h>
#include <fsp/api.h>
#include <intelblocks/cpulib.h>
#include <intelblocks/mp_init.h>
#include <intelblocks/msr.h>
#include <soc/cpu.h>
#include <soc/msr.h>
#include <soc/pci_devs.h>
#include <soc/soc_chip.h>
#include <types.h>

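/*
 * MSR_BIOS_DONE with ENABLE_IA_UNTRUSTED set marks the end of the firmware's
 * trusted execution phase ("IA untrusted mode"), after which the CPU
 * restricts access to certain security-sensitive MSRs.
 */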
bool cpu_soc_is_in_untrusted_mode(void)
{
	msr_t msr;

	msr = rdmsr(MSR_BIOS_DONE);
	return !!(msr.lo & ENABLE_IA_UNTRUSTED);
}

void cpu_soc_bios_done(void)
{
	msr_t msr;

	msr = rdmsr(MSR_BIOS_DONE);
	msr.lo |= ENABLE_IA_UNTRUSTED;
	wrmsr(MSR_BIOS_DONE, msr);
}

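/* pre_mp_init hook: load FSP-S from CBFS into memory before the APs are brought up. */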
static void soc_fsp_load(void)
{
	fsps_load();
}

static void configure_misc(void)
{
	msr_t msr;

	config_t *conf = config_of_soc();

	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= (1 << 0);	/* Fast String enable */
	msr.lo |= (1 << 3);	/* TM1/TM2/EMTTM enable */
	wrmsr(IA32_MISC_ENABLE, msr);

	/* Set EIST status */
	cpu_set_eist(conf->eist_enable);

	/* Disable Thermal interrupts */
	msr.lo = 0;
	msr.hi = 0;
	wrmsr(IA32_THERM_INTERRUPT, msr);

	/* Enable package critical interrupt only */
	msr.lo = 1 << 4;
	msr.hi = 0;
	wrmsr(IA32_PACKAGE_THERM_INTERRUPT, msr);

	/* Enable PROCHOT */
	msr = rdmsr(MSR_POWER_CTL);
	msr.lo |= (1 << 0);	/* Enable Bi-directional PROCHOT as an input */
	msr.lo |= (1 << 23);	/* Lock it */
	wrmsr(MSR_POWER_CTL, msr);

	/*
	 * In some cases it is beneficial for performance to disable the L1
	 * prefetcher, as on Elkhart Lake it is set up a bit too aggressively.
	 */
	if (conf->L1_prefetcher_disable) {
		msr = rdmsr(MSR_PREFETCH_CTL);
		msr.lo |= PREFETCH_L1_DISABLE;
		wrmsr(MSR_PREFETCH_CTL, msr);
	}
}

/* All CPUs including BSP will run the following function. */
void soc_core_init(struct device *cpu)
{
	/* Clear out pending MCEs */
	/* TODO(adurbin): This should only be done on a cold boot. Also, some
	 * of these banks are core vs package scope. For now every CPU clears
	 * every bank. */
	mca_configure();

	enable_lapic_tpr();

	/* Configure Enhanced SpeedStep and Thermal Sensors */
	configure_misc();

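	/* Have the CPU emulate ACPI PM timer reads (the chipset PM timer may be disabled) */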
	enable_pm_timer_emulation();

	/* Enable Direct Cache Access */
	configure_dca_cap();

	/* Set energy policy */
	set_energy_perf_bias(ENERGY_POLICY_NORMAL);

	/* Enable Turbo */
	enable_turbo();
}

static void per_cpu_smm_trigger(void)
{
	/* Relocate the SMM handler. */
	smm_relocate();
}

static void post_mp_init(void)
{
	/* Set Max Ratio */
	cpu_set_max_ratio();

	/*
	 * Now that all APs, as well as the BSP, have been relocated, let
	 * SMIs start flowing.
	 */
	global_smi_enable();
}

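/* MP and SMM init hooks, consumed by mp_init_with_smm() in mp_init_cpus() below. */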
static const struct mp_ops mp_ops = {
	/*
	 * Skip pre-MP-init MTRR programming: the MTRRs set up prior to
	 * ramstage are mirrored from the BSP, and the real MTRR programming
	 * is done after resource allocation.
	 */
	.pre_mp_init = soc_fsp_load,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = smm_info,
	.get_microcode_info = get_microcode_info,
	.pre_mp_smm_init = smm_initialize,
	.per_cpu_smm_trigger = per_cpu_smm_trigger,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = post_mp_init,
};

void mp_init_cpus(struct bus *cpu_bus)
{
	/* TODO: Handle mp_init_with_smm failure? */
	mp_init_with_smm(cpu_bus, &mp_ops);

	/* Thermal throttle activation offset */
	configure_tcc_thermal_target();
}