cpu/intel/haswell: convert to using common MP and SMM init

In order to reduce code duplication, use the common MP and SMM
initialization flow.

Change-Id: I80b5b94b62bdd001581eb56513a0d532fffb64e8
Signed-off-by: Aaron Durbin <adurbin@chromium.org>
Reviewed-on: https://review.coreboot.org/14596
Tested-by: build bot (Jenkins)
Reviewed-by: Duncan Laurie <dlaurie@google.com>
diff --git a/src/cpu/intel/haswell/haswell_init.c b/src/cpu/intel/haswell/haswell_init.c
index f488ef6..74147ab 100644
--- a/src/cpu/intel/haswell/haswell_init.c
+++ b/src/cpu/intel/haswell/haswell_init.c
@@ -714,20 +714,6 @@
 		wrmsr(IA32_MC0_STATUS + (i * 4), msr);
 }
 
-static void bsp_init_before_ap_bringup(struct bus *cpu_bus)
-{
-	/* Setup MTRRs based on physical address size. */
-	x86_setup_mtrrs_with_detect();
-	x86_mtrr_check();
-
-	initialize_vr_config();
-
-	if (haswell_is_ult()) {
-		calibrate_24mhz_bclk();
-		configure_pch_power_sharing();
-	}
-}
-
 /* All CPUs including BSP will run the following function. */
 static void haswell_init(struct device *cpu)
 {
@@ -765,47 +751,27 @@
 
 /* MP initialization support. */
 static const void *microcode_patch;
-int ht_disabled;
+static int ht_disabled;
 
-static int adjust_apic_id_ht_disabled(int index, int apic_id)
+static void pre_mp_init(void)
 {
-	return 2 * index;
+	/* Setup MTRRs based on physical address size. */
+	x86_setup_mtrrs_with_detect();
+	x86_mtrr_check();
+
+	initialize_vr_config();
+
+	if (haswell_is_ult()) {
+		calibrate_24mhz_bclk();
+		configure_pch_power_sharing();
+	}
 }
 
-static void relocate_and_load_microcode(void)
+static int get_cpu_count(void)
 {
-	/* Relocate the SMM handler. */
-	smm_relocate();
-
-	/* After SMM relocation a 2nd microcode load is required. */
-	intel_microcode_load_unlocked(microcode_patch);
-}
-
-static void enable_smis(void)
-{
-	/* Now that all APs have been relocated as well as the BSP let SMIs
-	 * start flowing. */
-	southbridge_smm_enable_smi();
-
-	/* Lock down the SMRAM space. */
-	smm_lock();
-}
-
-static struct mp_flight_record mp_steps[] = {
-	MP_FR_NOBLOCK_APS(relocate_and_load_microcode,
-	                  relocate_and_load_microcode),
-	MP_FR_BLOCK_APS(mp_initialize_cpu, mp_initialize_cpu),
-	/* Wait for APs to finish initialization before proceeding. */
-	MP_FR_BLOCK_APS(NULL, enable_smis),
-};
-
-void bsp_init_and_start_aps(struct bus *cpu_bus)
-{
-	void *smm_save_area;
+	msr_t msr;
 	int num_threads;
 	int num_cores;
-	msr_t msr;
-	struct mp_params mp_params;
 
 	msr = rdmsr(CORE_THREAD_COUNT_MSR);
 	num_threads = (msr.lo >> 0) & 0xffff;
@@ -815,36 +781,60 @@
 
 	ht_disabled = num_threads == num_cores;
 
-	/* Perform any necessary BSP initialization before APs are brought up.
-	 * This call also allows the BSP to prepare for any secondary effects
-	 * from calling cpu_initialize() such as smm_init(). */
-	bsp_init_before_ap_bringup(cpu_bus);
+	return num_threads;
+}
 
+static void get_microcode_info(const void **microcode, int *parallel)
+{
 	microcode_patch = intel_microcode_find();
+	*microcode = microcode_patch;
+	*parallel = 1;
+}
 
-	/* Save default SMM area before relocation occurs. */
-	smm_save_area = backup_default_smm_area();
-
-	mp_params.num_cpus = num_threads;
-	mp_params.parallel_microcode_load = 1;
+static int adjust_apic_id(int index, int apic_id)
+{
 	if (ht_disabled)
-		mp_params.adjust_apic_id = adjust_apic_id_ht_disabled;
+		return 2 * index;
 	else
-		mp_params.adjust_apic_id = NULL;
-	mp_params.flight_plan = &mp_steps[0];
-	mp_params.num_records = ARRAY_SIZE(mp_steps);
-	mp_params.microcode_pointer = microcode_patch;
+		return index;
+}
 
-	/* Load relocation and permeanent handlers. Then initiate relocation. */
-	if (smm_initialize())
-		printk(BIOS_CRIT, "SMM Initialiazation failed...\n");
+static void per_cpu_smm_trigger(void)
+{
+	/* Relocate the SMM handler. */
+	smm_relocate();
 
-	if (mp_init(cpu_bus, &mp_params)) {
+	/* After SMM relocation a 2nd microcode load is required. */
+	intel_microcode_load_unlocked(microcode_patch);
+}
+
+static void post_mp_init(void)
+{
+	/* Now that all APs have been relocated as well as the BSP let SMIs
+	 * start flowing. */
+	southbridge_smm_enable_smi();
+
+	/* Lock down the SMRAM space. */
+	smm_lock();
+}
+
+static const struct mp_ops mp_ops = {
+	.pre_mp_init = pre_mp_init,
+	.get_cpu_count = get_cpu_count,
+	.get_smm_info = smm_info,
+	.get_microcode_info = get_microcode_info,
+	.adjust_cpu_apic_entry = adjust_apic_id,
+	.pre_mp_smm_init = smm_initialize,
+	.per_cpu_smm_trigger = per_cpu_smm_trigger,
+	.relocation_handler = smm_relocation_handler,
+	.post_mp_init = post_mp_init,
+};
+
+void bsp_init_and_start_aps(struct bus *cpu_bus)
+{
+	if (mp_init_with_smm(cpu_bus, &mp_ops)) {
 		printk(BIOS_ERR, "MP initialization failure.\n");
 	}
-
-	/* Restore the default SMM region. */
-	restore_default_smm_area(smm_save_area);
 }
 
 static struct device_operations cpu_dev_ops = {