blob: b9b4d5772cd0fade8ce1c21a6df48d8ed4243750 [file] [log] [blame]
Aaron Durbine0785c02013-10-21 12:15:29 -05001/*
2 * This file is part of the coreboot project.
3 *
4 * Copyright (C) 2013 Google Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
Aaron Durbine0785c02013-10-21 12:15:29 -050014 */
15
16#ifndef _X86_MP_H_
17#define _X86_MP_H_
18
19#include <arch/smp/atomic.h>
Aaron Durbin82501922016-04-29 22:55:49 -050020#include <cpu/x86/smm.h>
Aaron Durbine0785c02013-10-21 12:15:29 -050021
22#define CACHELINE_SIZE 64
23
24struct cpu_info;
25struct bus;
26
/*
 * Full memory fence: MFENCE serializes all earlier loads and stores
 * against all later ones, and the "memory" clobber additionally acts as
 * a compiler barrier so memory accesses are not reordered across the
 * call at compile time.
 */
static inline void mfence(void)
{
	__asm__ __volatile__("mfence\t\n" : : : "memory");
}
31
/* The sequence of the callbacks is in calling order. */
struct mp_ops {
	/*
	 * Optionally provide a callback prior to kicking off MP
	 * startup. This callback is done prior to loading the SIPI
	 * vector but after gathering the MP state information. Please
	 * see the sequence below.
	 */
	void (*pre_mp_init)(void);
	/*
	 * Return the number of logical x86 execution contexts that
	 * need to be brought out of SIPI state as well as have SMM
	 * handlers installed.
	 */
	int (*get_cpu_count)(void);
	/*
	 * Optionally fill in permanent SMM region and save state size. If
	 * this callback is not present no SMM handlers will be installed.
	 * The perm_smsize is the size available to house the permanent SMM
	 * handler.
	 */
	void (*get_smm_info)(uintptr_t *perm_smbase, size_t *perm_smsize,
				size_t *smm_save_state_size);
	/*
	 * Optionally fill in pointer to microcode and indicate if the APs
	 * can load the microcode in parallel.
	 */
	void (*get_microcode_info)(const void **microcode, int *parallel);
	/*
	 * Optionally provide a function which adjusts the APIC id
	 * map to CPU number. By default the CPU number and APIC id
	 * are 1:1. To change the APIC id for a given CPU return the
	 * new APIC id. It's called for each CPU as indicated by
	 * get_cpu_count().
	 */
	int (*adjust_cpu_apic_entry)(int cpu, int cur_apic_id);
	/*
	 * Optionally adjust SMM handler parameters to override the default
	 * values. The is_perm variable indicates if the parameters to adjust
	 * are for the relocation handler or the permanent handler. This
	 * function is therefore called twice -- once for each handler.
	 * By default the parameters for each SMM handler are:
	 *       stack_size    num_concurrent_stacks num_concurrent_save_states
	 * relo: save_state_size get_cpu_count()      1
	 * perm: save_state_size get_cpu_count()      get_cpu_count()
	 */
	void (*adjust_smm_params)(struct smm_loader_params *slp, int is_perm);
	/*
	 * Optionally provide a callback prior to the APs starting SMM
	 * relocation or CPU driver initialization. However, note that
	 * this callback is called after SMM handlers have been loaded.
	 */
	void (*pre_mp_smm_init)(void);
	/*
	 * Optional function to use to trigger SMM to perform relocation. If
	 * not provided, smm_initiate_relocation() is used.
	 */
	void (*per_cpu_smm_trigger)(void);
	/*
	 * This function is called while each CPU is in the SMM relocation
	 * handler. Its primary purpose is to adjust the SMBASE for the
	 * permanent handler. The parameters passed are the current cpu
	 * running the relocation handler, current SMBASE of relocation handler,
	 * and the pre-calculated staggered CPU SMBASE address of the permanent
	 * SMM handler.
	 */
	void (*relocation_handler)(int cpu, uintptr_t curr_smbase,
					uintptr_t staggered_smbase);
	/*
	 * Optionally provide a callback that is called after the APs
	 * and the BSP have gone through the initialization sequence.
	 */
	void (*post_mp_init)(void);
};
106
/*
 * mp_init_with_smm() returns < 0 on failure and 0 on success. The mp_ops
 * argument is used to drive the multiprocessor initialization. Unless
 * otherwise stated each callback is called on the BSP only. The sequence
 * of operations is the following:
 * 1. pre_mp_init()
 * 2. get_cpu_count()
 * 3. get_smm_info()
 * 4. get_microcode_info()
 * 5. adjust_cpu_apic_entry() for each number of get_cpu_count()
 * 6. adjust_smm_params(is_perm = 0)
 * 7. adjust_smm_params(is_perm = 1)
 * 8. pre_mp_smm_init()
 * 9. per_cpu_smm_trigger() in parallel for all cpus which calls
 *    relocation_handler() in SMM.
 * 10. mp_initialize_cpu() for each cpu
 * 11. post_mp_init()
 */
int mp_init_with_smm(struct bus *cpu_bus, const struct mp_ops *mp_ops);
126

/*
 * After APs are up and PARALLEL_MP_AP_WORK is enabled one can issue work
 * to all the APs to perform. Currently the BSP is the only CPU that is allowed
 * to issue work. i.e. the APs should not call any of these functions.
 * All functions return < 0 on error, 0 on success.
 * NOTE(review): expire_us is presumably a timeout in microseconds (name-
 * derived) -- confirm against the implementation.
 */
int mp_run_on_aps(void (*func)(void), long expire_us);

/* Like mp_run_on_aps() but also runs func on BSP. */
int mp_run_on_all_cpus(void (*func)(void), long expire_us);

/*
 * Park all APs to prepare for OS boot. This is handled automatically
 * by the coreboot infrastructure.
 */
int mp_park_aps(void);
144
/*
 * SMM helpers to use with initializing CPUs.
 */

/* Send SMI to self without any serialization. */
void smm_initiate_relocation_parallel(void);
/* Send SMI to self with single execution (serialized across CPUs, per the
 * contrast with the parallel variant above -- confirm in implementation). */
void smm_initiate_relocation(void);
153
Aaron Durbine0785c02013-10-21 12:15:29 -0500154#endif /* _X86_MP_H_ */