blob: f1733ea63772414892dc049f19a9e4986ab56c06 [file] [log] [blame]
/*
 * This file is part of the coreboot project.
 *
 * Copyright 2013 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA, 02110-1301 USA
 */
Aaron Durbin0b0a1e32014-09-06 01:28:54 -050019
Aaron Durbin9fd4dc72014-09-06 02:31:30 -050020#include <stdint.h>
Furquan Shaikh2af76f42014-04-28 16:39:40 -070021#include <stdlib.h>
Aaron Durbin9fd4dc72014-09-06 02:31:30 -050022#include <arch/barrier.h>
23#include <arch/lib_helpers.h>
24#include <cpu/cpu.h>
25#include <console/console.h>
26#include "cpu-internal.h"
Furquan Shaikh2af76f42014-04-28 16:39:40 -070027
/* Per-CPU bookkeeping, indexed by logical CPU id (mapped 1:1 to device
 * tree CPU ids by init_cpu_info() below). */
static struct cpu_info cpu_infos[CONFIG_MAX_CPUS];

Aaron Durbin9fd4dc72014-09-06 02:31:30 -050030static inline struct cpu_info *cpu_info_for_cpu(unsigned int id)
31{
32 return &cpu_infos[id];
33}
34
Furquan Shaikh2af76f42014-04-28 16:39:40 -070035struct cpu_info *cpu_info(void)
36{
Aaron Durbin9fd4dc72014-09-06 02:31:30 -050037 return cpu_info_for_cpu(smp_processor_id());
38}
39
40static int cpu_online(struct cpu_info *ci)
41{
42 return load_acquire(&ci->online) != 0;
43}
44
45static void cpu_mark_online(struct cpu_info *ci)
46{
47 store_release(&ci->online, 1);
48}
49
50static inline void cpu_disable_dev(device_t dev)
51{
52 dev->enabled = 0;
53}
54
55static struct cpu_driver *locate_cpu_driver(uint32_t midr)
56{
57 struct cpu_driver *cur;
58
59 for (cur = cpu_drivers; cur != ecpu_drivers; cur++) {
60 const struct cpu_device_id *id_table = cur->id_table;
61
62 for (; id_table->midr != CPU_ID_END; id_table++) {
63 if (id_table->midr == midr)
64 return cur;
65 }
66 }
67 return NULL;
68}
69
70static int cpu_set_device_operations(device_t dev)
71{
72 uint32_t midr;
73 struct cpu_driver *driver;
74
75 midr = raw_read_midr_el1();
76 driver = locate_cpu_driver(midr);
77
78 if (driver == NULL) {
79 printk(BIOS_WARNING, "No CPU driver for MIDR %08x\n", midr);
80 return -1;
81 }
82 dev->ops = driver->ops;
83 return 0;
84}
85
86static void init_this_cpu(void *arg)
87{
88 struct cpu_info *ci = arg;
89 device_t dev = ci->cpu;
90
91 cpu_set_device_operations(dev);
92
93 if (dev->ops != NULL && dev->ops->init != NULL) {
94 dev->initialized = 1;
95 printk(BIOS_DEBUG, "%s init\n", dev_path(dev));
96 dev->ops->init(dev);
97 }
98}
99
100/* Fill in cpu_info structures according to device tree. */
101static void init_cpu_info(struct bus *bus)
102{
103 device_t cur;
104
105 for (cur = bus->children; cur != NULL; cur = cur->sibling) {
106 struct cpu_info *ci;
107 unsigned int id = cur->path.cpu.id;
108
109 if (cur->path.type != DEVICE_PATH_CPU)
110 continue;
111
112 /* IDs are currently mapped 1:1 with logical CPU numbers. */
113 if (id >= CONFIG_MAX_CPUS) {
114 printk(BIOS_WARNING,
115 "CPU id %x too large. Disabling.\n", id);
116 cpu_disable_dev(cur);
117 continue;
118 }
119
120 ci = cpu_info_for_cpu(id);
121 if (ci->cpu != NULL) {
122 printk(BIOS_WARNING,
123 "Duplicate ID %x in device tree.\n", id);
124 cpu_disable_dev(cur);
125 }
126
127 ci->cpu = cur;
128 ci->id = cur->path.cpu.id;
129 }
130
131 /* Mark current cpu online. */
132 cpu_mark_online(cpu_info());
133}
134
/*
 * The queue holds at most one pending action in the todo slot; NULL
 * means empty. NOTE(review): the exclusive (monitor-arming) load is
 * presumably so a following wfe() in callers is woken when the slot
 * changes — confirm against arch/barrier.h semantics.
 */
static inline int action_queue_empty(struct cpu_action_queue *q)
{
	return load_acquire_exclusive(&q->todo) == NULL;
}
139
/* An action is done once the servicing CPU publishes the poster's
 * original action pointer in q->completed (see action_queue_complete()). */
static inline int action_completed(struct cpu_action_queue *q,
				struct cpu_action *action)
{
	return load_acquire(&q->completed) == action;
}
145
/* Spin (sleeping in wfe between checks) until the todo slot is free. */
static inline void wait_for_action_queue_slot(struct cpu_action_queue *q)
{
	for (;;) {
		if (action_queue_empty(q))
			break;
		wfe();
	}
}
151
/* Spin (sleeping in wfe between checks) until action a is reported done. */
static void wait_for_action_complete(struct cpu_action_queue *q,
				 struct cpu_action *a)
{
	for (;;) {
		if (action_completed(q, a))
			break;
		wfe();
	}
}
158
/*
 * Block until an action is posted to q, then consume it: copy the
 * action into *local for processing and clear the todo slot. Returns
 * the poster's original pointer, which the caller must later pass to
 * action_queue_complete() so the poster can detect completion.
 */
static struct cpu_action *wait_for_action(struct cpu_action_queue *q,
					struct cpu_action *local)
{
	struct cpu_action *action;

	while (action_queue_empty(q))
		wfe();

	/*
	 * Keep original address, but use a local copy for async processing.
	 */
	do {
		/* Exclusive load/store pair: retry if another agent touched
		 * the slot between reading it and clearing it. */
		action = load_acquire_exclusive(&q->todo);
		*local = *action;
	} while (!store_release_exclusive(&q->todo, NULL));

	return action;
}
177
/*
 * Post action into q's single todo slot. Waits for the slot to drain,
 * then claims it with an exclusive store; the loop retries when another
 * CPU raced in between the emptiness check and the store.
 */
static void queue_action(struct cpu_action_queue *q, struct cpu_action *action)
{
	do {
		wait_for_action_queue_slot(q);
		/* Re-check under the exclusive monitor before storing. */
		if (load_acquire_exclusive(&q->todo) != NULL)
			continue;
	} while (!store_release_exclusive(&q->todo, action));
}
186
187static void action_queue_complete(struct cpu_action_queue *q,
188 struct cpu_action *action)
189{
190 /* Mark completion and send events to waiters. */
191 store_release(&q->completed, action);
192 sev();
193}
194
195static void action_run(struct cpu_action *action)
196{
197 action->run(action->arg);
198}
199
200static void action_run_on_cpu(struct cpu_info *ci, struct cpu_action *action,
201 int sync)
202{
203 struct cpu_action_queue *q = &ci->action_queue;
204
205 /* Don't run actions on non-online or enabled devices. */
206 if (!cpu_online(ci) || ci->cpu == NULL || !ci->cpu->enabled)
207 return;
208
209 if (ci->id == smp_processor_id()) {
210 action->run(action->arg);
211 return;
212 }
213
214 queue_action(q, action);
215 /* Wait for CPU to pick it up. Empty slot means it was picked up. */
216 wait_for_action_queue_slot(q);
217 /* Wait for completion if requested. */
218 if (sync)
219 wait_for_action_complete(q, action);
220}
221
222static int __arch_run_on_cpu(unsigned int cpu, struct cpu_action *action,
223 int sync)
224{
225 struct cpu_info *ci;
226
227 if (cpu >= CONFIG_MAX_CPUS)
228 return -1;
229
230 ci = cpu_info_for_cpu(cpu);
231
232 action_run_on_cpu(ci, action, sync);
233
234 return 0;
235}
236
/* Run action on the given CPU, waiting for it to complete. */
int arch_run_on_cpu(unsigned int cpu, struct cpu_action *action)
{
	return __arch_run_on_cpu(cpu, action, /*sync=*/1);
}
241
/* Run action on the given CPU without waiting for completion. */
int arch_run_on_cpu_async(unsigned int cpu, struct cpu_action *action)
{
	return __arch_run_on_cpu(cpu, action, /*sync=*/0);
}
246
247static int __arch_run_on_all_cpus(struct cpu_action *action, int sync)
248{
249 int i;
250
251 for (i = 0; i < CONFIG_MAX_CPUS; i++)
252 action_run_on_cpu(cpu_info_for_cpu(i), action, sync);
253
254 return 0;
255}
256
/* Run action on every CPU, waiting for each to complete. */
int arch_run_on_all_cpus(struct cpu_action *action)
{
	return __arch_run_on_all_cpus(action, /*sync=*/1);
}
261
/* Run action on every CPU without waiting for completion. */
int arch_run_on_all_cpus_async(struct cpu_action *action)
{
	return __arch_run_on_all_cpus(action, /*sync=*/0);
}
266
/*
 * Entry point for secondary CPUs after low-level startup: mark this CPU
 * online, then service its action queue forever. Never returns.
 */
void arch_secondary_cpu_init(void)
{
	struct cpu_info *ci = cpu_info();
	struct cpu_action_queue *q = &ci->action_queue;

	/* Mark this CPU online. */
	cpu_mark_online(ci);

	while (1) {
		struct cpu_action *orig;
		struct cpu_action action;

		/* Action is copied into 'action'; orig is the poster's
		 * pointer, needed to signal completion. */
		orig = wait_for_action(q, &action);

		action_run(&action);
		action_queue_complete(q, orig);
	}
}
285
/*
 * Bring up and initialize every CPU under the given CPU cluster device.
 * cntrl_ops provides the platform hooks for counting (total_cpus) and
 * starting (start_cpu) CPUs. Each enabled CPU present in the device
 * tree is started (if not already online) and then synchronously sent
 * the init_this_cpu action, one CPU at a time.
 */
void arch_initialize_cpus(device_t cluster, struct cpu_control_ops *cntrl_ops)
{
	size_t max_cpus;
	size_t i;
	struct cpu_info *ci;
	void (*entry)(void);
	struct bus *bus;

	if (cluster->path.type != DEVICE_PATH_CPU_CLUSTER) {
		printk(BIOS_ERR,
			"CPU init failed. Device is not a CPU_CLUSTER: %s\n",
			dev_path(cluster));
		return;
	}

	bus = cluster->link_list;
	/* Address secondary CPUs jump to after power-on. */
	entry = prepare_secondary_cpu_startup();

	/* Initialize the cpu_info structures. */
	init_cpu_info(bus);
	max_cpus = cntrl_ops->total_cpus();

	/* Clamp the platform's CPU count to what cpu_infos can hold. */
	if (max_cpus > CONFIG_MAX_CPUS) {
		printk(BIOS_WARNING,
			"max_cpus (%zu) exceeds CONFIG_MAX_CPUS (%zu).\n",
			max_cpus, (size_t)CONFIG_MAX_CPUS);
		max_cpus = CONFIG_MAX_CPUS;
	}

	for (i = 0; i < max_cpus; i++) {
		device_t dev;
		struct cpu_action action;

		ci = cpu_info_for_cpu(i);
		dev = ci->cpu;

		/* Disregard CPUs not in device tree. */
		if (dev == NULL)
			continue;

		/* Skip disabled CPUs. */
		if (!dev->enabled)
			continue;

		if (!cpu_online(ci)) {
			/* Start the CPU. */
			printk(BIOS_DEBUG, "Starting CPU%x\n", ci->id);
			if (cntrl_ops->start_cpu(ci->id, entry)) {
				printk(BIOS_ERR,
					"Failed to start CPU%x\n", ci->id);
				continue;
			}
			/* Wait for CPU to come online.
			 * NOTE(review): unbounded busy-wait — a CPU that
			 * starts but never marks itself online hangs boot
			 * here; confirm start_cpu guarantees progress. */
			while (!cpu_online(ci));
			printk(BIOS_DEBUG, "CPU%x online.\n", ci->id);
		}

		/* Send it the init action. */
		action.run = init_this_cpu;
		action.arg = ci;
		action_run_on_cpu(ci, &action, 1);
	}
}