/* SPDX-License-Identifier: GPL-2.0-only */

/*
 * This file is based on the Intel Alder Lake Processor CPU Datasheet.
 * Document number: 619501
 * Chapter number: 14
 */

#include <arch/cpu.h>
#include <console/console.h>
#include <device/pci.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/msr.h>
#include <cpu/intel/smm_reloc.h>
#include <cpu/intel/turbo.h>
#include <fsp/api.h>
#include <intelblocks/cpulib.h>
#include <intelblocks/mp_init.h>
#include <intelblocks/msr.h>
#include <romstage_handoff.h>
#include <soc/cpu.h>
#include <soc/msr.h>
#include <soc/pci_devs.h>
#include <soc/pm.h>
#include <soc/soc_chip.h>

static void soc_fsp_load(void)
{
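	/*
	 * pre_mp_init hook: load FSP-S before the APs are brought up. The
	 * resume argument lets fsps_load() take its S3 path (presumably
	 * reusing the copy saved in the stage cache instead of reloading
	 * it from cbfs).
	 */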
	fsps_load(romstage_handoff_is_resume());
}

static void configure_isst(void)
{
	config_t *conf = config_of_soc();
	msr_t msr;

	if (conf->speed_shift_enable) {
		/*
		 * The kernel driver checks CPUID.06h:EAX[7] to determine
		 * whether HWP is supported. coreboot needs to configure
		 * MSR 0x1AA, which is then reflected in that CPUID register.
		 */
		msr = rdmsr(MSR_MISC_PWR_MGMT);
		msr.lo |= MISC_PWR_MGMT_ISST_EN; /* Enable Speed Shift */
		msr.lo |= MISC_PWR_MGMT_ISST_EN_INT; /* Enable Interrupt */
		msr.lo |= MISC_PWR_MGMT_ISST_EN_EPP; /* Enable EPP */
		wrmsr(MSR_MISC_PWR_MGMT, msr);
	} else {
		msr = rdmsr(MSR_MISC_PWR_MGMT);
		msr.lo &= ~MISC_PWR_MGMT_ISST_EN; /* Disable Speed Shift */
		msr.lo &= ~MISC_PWR_MGMT_ISST_EN_INT; /* Disable Interrupt */
		msr.lo &= ~MISC_PWR_MGMT_ISST_EN_EPP; /* Disable EPP */
		wrmsr(MSR_MISC_PWR_MGMT, msr);
	}
}

static void configure_misc(void)
{
	msr_t msr;

	config_t *conf = config_of_soc();

	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= (1 << 0); /* Fast String enable */
	msr.lo |= (1 << 3); /* TM1/TM2/EMTTM enable */
	wrmsr(IA32_MISC_ENABLE, msr);

	/* Set EIST status */
	cpu_set_eist(conf->eist_enable);

	/* Disable Thermal interrupts */
	msr.lo = 0;
	msr.hi = 0;
	wrmsr(IA32_THERM_INTERRUPT, msr);

	/* Enable package critical interrupt only */
	msr.lo = 1 << 4;
	msr.hi = 0;
	wrmsr(IA32_PACKAGE_THERM_INTERRUPT, msr);

	/* Enable PROCHOT */
	msr = rdmsr(MSR_POWER_CTL);
	msr.lo |= (1 << 0); /* Enable Bi-directional PROCHOT as an input */
	msr.lo |= (1 << 23); /* Lock it */
	wrmsr(MSR_POWER_CTL, msr);
}

static void enable_lapic_tpr(void)
{
	msr_t msr;

	msr = rdmsr(MSR_PIC_MSG_CONTROL);
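	/*
	 * Bit 10 acts as a "disable TPR update messages" control, so
	 * clearing it below is what enables the updates.
	 */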
	msr.lo &= ~(1 << 10); /* Enable APIC TPR updates */
	wrmsr(MSR_PIC_MSG_CONTROL, msr);
}

static void configure_dca_cap(void)
{
	uint32_t feature_flag;
	msr_t msr;

	/* Check feature flag in CPUID.(EAX=1):ECX[18]==1 */
	feature_flag = cpu_get_feature_flags_ecx();
	if (feature_flag & CPUID_DCA) {
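		/*
		 * Setting bit 0 of IA32_PLATFORM_DCA_CAP opts the platform in
		 * to Direct Cache Access, which lets capable I/O devices
		 * deposit data directly into the CPU cache.
		 */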
		msr = rdmsr(IA32_PLATFORM_DCA_CAP);
		msr.lo |= 1;
		wrmsr(IA32_PLATFORM_DCA_CAP, msr);
	}
}

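/*
 * MSR_EMULATE_PM_TIMER makes the CPU emulate reads of the ACPI PM1 timer I/O
 * port from the crystal clock, presumably so the chipset's own ACPI timer can
 * stay gated for power savings without breaking software that polls PM1_TMR.
 */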
static void enable_pm_timer_emulation(void)
{
	msr_t msr;

	if (!CONFIG_CPU_XTAL_HZ)
		return;

	/*
	 * The derived frequency is calculated as follows:
	 *    (clock * msr[63:32]) >> 32 = target frequency.
	 * Back solve the multiplier so the 3.579545MHz ACPI timer frequency is used.
	 */
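	/*
	 * Example, assuming a 38.4 MHz crystal (CONFIG_CPU_XTAL_HZ = 38400000):
	 * the multiplier works out to (3579545 << 32) / 38400000 = 0x17DD1712,
	 * and 38400000 * 0x17DD1712 >> 32 ~= 3579545 Hz.
	 */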
	msr.hi = (3579545ULL << 32) / CONFIG_CPU_XTAL_HZ;
	/* Set PM1 timer IO port and enable */
	msr.lo = (EMULATE_DELAY_VALUE << EMULATE_DELAY_OFFSET_VALUE) |
		 EMULATE_PM_TMR_EN | (ACPI_BASE_ADDRESS + PM1_TMR);
	wrmsr(MSR_EMULATE_PM_TIMER, msr);
}

static void set_energy_perf_bias(u8 policy)
{
	msr_t msr;
	int ecx;

	/* Determine if energy efficient policy is supported. */
	ecx = cpuid_ecx(0x6);
	if (!(ecx & (1 << 3)))
		return;

	/* Energy Policy is bits 3:0 */
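	/* 0 biases fully toward performance, 15 fully toward energy saving. */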
	msr = rdmsr(IA32_ENERGY_PERF_BIAS);
	msr.lo &= ~0xf;
	msr.lo |= policy & 0xf;
	wrmsr(IA32_ENERGY_PERF_BIAS, msr);
}

/* All CPUs, including the BSP, will run the following function. */
void soc_core_init(struct device *cpu)
{
	/* Clear out pending MCEs */
	/* TODO(adurbin): This should only be done on a cold boot. Also, some
	 * of these banks are core vs package scope. For now every CPU clears
	 * every bank. */
	mca_configure();

	/* Enable the local CPU APICs */
	enable_lapic_tpr();
	setup_lapic();

	/* Configure Enhanced SpeedStep and Thermal Sensors */
	configure_misc();

	/* Configure Intel Speed Shift */
	configure_isst();

	/* Enable PM timer emulation */
	enable_pm_timer_emulation();

	/* Enable Direct Cache Access */
	configure_dca_cap();

	/* Set energy policy */
	set_energy_perf_bias(ENERGY_POLICY_NORMAL);

	/* Enable Turbo */
	enable_turbo();
}

static void per_cpu_smm_trigger(void)
{
	/* Relocate the SMM handler. */
	smm_relocate();
}

static void post_mp_init(void)
{
	/* Set Max Ratio */
	cpu_set_max_ratio();

	/*
	 * Now that all APs, as well as the BSP, have been relocated, let SMIs
	 * start flowing.
	 */
	global_smi_enable();
}

static const struct mp_ops mp_ops = {
	/*
	 * Skip pre-MP-init MTRR programming; the APs mirror the BSP's MTRRs,
	 * which were set up prior to ramstage. The real MTRR programming is
	 * done after resource allocation.
	 */
	.pre_mp_init = soc_fsp_load,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = smm_info,
	.get_microcode_info = get_microcode_info,
	.pre_mp_smm_init = smm_initialize,
	.per_cpu_smm_trigger = per_cpu_smm_trigger,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = post_mp_init,
};

void soc_init_cpus(struct bus *cpu_bus)
{
	if (mp_init_with_smm(cpu_bus, &mp_ops))
		printk(BIOS_ERR, "MP initialization failure.\n");

	/* Thermal throttle activation offset */
	configure_tcc_thermal_target();
221}