blob: c0dc3c3fa29344794d3808c70672e256abb3c468 [file] [log] [blame]
Furquan Shaikh2af76f42014-04-28 16:39:40 -07001/*
2 * This file is part of the coreboot project.
3 *
4 * Copyright 2013 Google Inc.
5 *
Aaron Durbin0b0a1e32014-09-06 01:28:54 -05006 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
Furquan Shaikh2af76f42014-04-28 16:39:40 -07009 *
Aaron Durbin0b0a1e32014-09-06 01:28:54 -050010 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
Furquan Shaikh2af76f42014-04-28 16:39:40 -070014 *
Aaron Durbin0b0a1e32014-09-06 01:28:54 -050015 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA, 02110-1301 USA
Furquan Shaikh2af76f42014-04-28 16:39:40 -070018 */
Aaron Durbin0b0a1e32014-09-06 01:28:54 -050019
Aaron Durbin9fd4dc72014-09-06 02:31:30 -050020#include <stdint.h>
Furquan Shaikh2af76f42014-04-28 16:39:40 -070021#include <stdlib.h>
Aaron Durbin9fd4dc72014-09-06 02:31:30 -050022#include <arch/barrier.h>
23#include <arch/lib_helpers.h>
24#include <cpu/cpu.h>
25#include <console/console.h>
Aaron Durbinb9b8ebc2014-09-11 21:57:41 -050026#include <gic.h>
Aaron Durbin9fd4dc72014-09-06 02:31:30 -050027#include "cpu-internal.h"
Furquan Shaikh2af76f42014-04-28 16:39:40 -070028
Aaron Durbin1b315d02014-08-27 10:30:39 -050029static struct cpu_info cpu_infos[CONFIG_MAX_CPUS];
30
Aaron Durbin9fd4dc72014-09-06 02:31:30 -050031static inline struct cpu_info *cpu_info_for_cpu(unsigned int id)
32{
33 return &cpu_infos[id];
34}
35
/* Return the cpu_info structure for the CPU executing this code. */
struct cpu_info *cpu_info(void)
{
	unsigned int self = smp_processor_id();

	return cpu_info_for_cpu(self);
}
40
41static int cpu_online(struct cpu_info *ci)
42{
43 return load_acquire(&ci->online) != 0;
44}
45
46static void cpu_mark_online(struct cpu_info *ci)
47{
48 store_release(&ci->online, 1);
49}
50
51static inline void cpu_disable_dev(device_t dev)
52{
53 dev->enabled = 0;
54}
55
56static struct cpu_driver *locate_cpu_driver(uint32_t midr)
57{
58 struct cpu_driver *cur;
59
60 for (cur = cpu_drivers; cur != ecpu_drivers; cur++) {
61 const struct cpu_device_id *id_table = cur->id_table;
62
63 for (; id_table->midr != CPU_ID_END; id_table++) {
64 if (id_table->midr == midr)
65 return cur;
66 }
67 }
68 return NULL;
69}
70
71static int cpu_set_device_operations(device_t dev)
72{
73 uint32_t midr;
74 struct cpu_driver *driver;
75
76 midr = raw_read_midr_el1();
77 driver = locate_cpu_driver(midr);
78
79 if (driver == NULL) {
80 printk(BIOS_WARNING, "No CPU driver for MIDR %08x\n", midr);
81 return -1;
82 }
83 dev->ops = driver->ops;
84 return 0;
85}
86
/* Set up default SCR values. */
static void el3_init(void)
{
	uint32_t scr;

	/* SCR_EL3 is only writable from EL3; skip when entered lower. */
	if (get_current_el() != EL3)
		return;

	/* Read-modify-write so unrelated SCR bits keep their reset values. */
	scr = raw_read_scr_el3();
	/* Default to non-secure EL1 and EL0. */
	scr &= ~(SCR_NS_MASK);
	scr |= SCR_NS_ENABLE;
	/* Disable IRQ, FIQ, and external abort interrupt routing. */
	scr &= ~(SCR_IRQ_MASK | SCR_FIQ_MASK | SCR_EA_MASK);
	scr |= SCR_IRQ_DISABLE | SCR_FIQ_DISABLE | SCR_EA_DISABLE;
	/* Enable HVC */
	scr &= ~(SCR_HVC_MASK);
	scr |= SCR_HVC_ENABLE;
	/* Disable SMC */
	scr &= ~(SCR_SMC_MASK);
	scr |= SCR_SMC_DISABLE;
	/* Disable secure instruction fetches. */
	scr &= ~(SCR_SIF_MASK);
	scr |= SCR_SIF_DISABLE;
	/* All lower exception levels 64-bit by default. */
	scr &= ~(SCR_RW_MASK);
	scr |= SCR_LOWER_AARCH64;
	/* Disable secure EL1 access to secure timer. */
	scr &= ~(SCR_ST_MASK);
	scr |= SCR_ST_DISABLE;
	/* Don't trap on WFE or WFI instructions. */
	scr &= ~(SCR_TWI_MASK | SCR_TWE_MASK);
	scr |= SCR_TWI_DISABLE | SCR_TWE_DISABLE;
	raw_write_scr_el3(scr);
	/* Ensure the SCR write takes effect before subsequent instructions. */
	isb();
}
123
Aaron Durbin9fd4dc72014-09-06 02:31:30 -0500124static void init_this_cpu(void *arg)
125{
126 struct cpu_info *ci = arg;
127 device_t dev = ci->cpu;
128
129 cpu_set_device_operations(dev);
130
Aaron Durbinf228e8d2014-09-15 14:19:21 -0500131 el3_init();
132
Aaron Durbinb9b8ebc2014-09-11 21:57:41 -0500133 /* Initialize the GIC. */
134 gic_init();
135
Aaron Durbin9fd4dc72014-09-06 02:31:30 -0500136 if (dev->ops != NULL && dev->ops->init != NULL) {
137 dev->initialized = 1;
138 printk(BIOS_DEBUG, "%s init\n", dev_path(dev));
139 dev->ops->init(dev);
140 }
141}
142
143/* Fill in cpu_info structures according to device tree. */
144static void init_cpu_info(struct bus *bus)
145{
146 device_t cur;
147
148 for (cur = bus->children; cur != NULL; cur = cur->sibling) {
149 struct cpu_info *ci;
150 unsigned int id = cur->path.cpu.id;
151
152 if (cur->path.type != DEVICE_PATH_CPU)
153 continue;
154
155 /* IDs are currently mapped 1:1 with logical CPU numbers. */
156 if (id >= CONFIG_MAX_CPUS) {
157 printk(BIOS_WARNING,
158 "CPU id %x too large. Disabling.\n", id);
159 cpu_disable_dev(cur);
160 continue;
161 }
162
163 ci = cpu_info_for_cpu(id);
164 if (ci->cpu != NULL) {
165 printk(BIOS_WARNING,
166 "Duplicate ID %x in device tree.\n", id);
167 cpu_disable_dev(cur);
168 }
169
170 ci->cpu = cur;
171 ci->id = cur->path.cpu.id;
172 }
173
174 /* Mark current cpu online. */
175 cpu_mark_online(cpu_info());
176}
177
178static inline int action_queue_empty(struct cpu_action_queue *q)
179{
180 return load_acquire_exclusive(&q->todo) == NULL;
181}
182
183static inline int action_completed(struct cpu_action_queue *q,
184 struct cpu_action *action)
185{
186 return load_acquire(&q->completed) == action;
187}
188
/* Sleep with wfe until the queue's todo slot becomes free. */
static inline void wait_for_action_queue_slot(struct cpu_action_queue *q)
{
	for (;;) {
		if (action_queue_empty(q))
			return;
		wfe();
	}
}
194
/* Sleep with wfe until the target CPU signals completion of action a. */
static void wait_for_action_complete(struct cpu_action_queue *q,
				struct cpu_action *a)
{
	for (;;) {
		if (action_completed(q, a))
			return;
		wfe();
	}
}
201
/*
 * Block until an action is queued, then atomically claim it by clearing
 * the todo slot with an exclusive load/store pair. The action is copied
 * into *local for processing; the producer's original pointer is
 * returned so completion can later be signalled against it.
 */
static struct cpu_action *wait_for_action(struct cpu_action_queue *q,
					struct cpu_action *local)
{
	struct cpu_action *action;

	while (action_queue_empty(q))
		wfe();

	/*
	 * Keep original address, but use a local copy for async processing.
	 * The loop retries until the exclusive store of NULL succeeds,
	 * guaranteeing exactly one consumer claims the action.
	 */
	do {
		action = load_acquire_exclusive(&q->todo);
		*local = *action;
	} while (!store_release_exclusive(&q->todo, NULL));

	return action;
}
220
/*
 * Publish an action into the queue's todo slot. Loops until an
 * exclusive store into an observed-empty slot succeeds, so concurrent
 * producers cannot overwrite each other's pending action.
 */
static void queue_action(struct cpu_action_queue *q, struct cpu_action *action)
{
	do {
		wait_for_action_queue_slot(q);
		/*
		 * Re-check under the exclusive monitor: another producer
		 * may have taken the slot since the wait returned.
		 */
		if (load_acquire_exclusive(&q->todo) != NULL)
			continue;
	} while (!store_release_exclusive(&q->todo, action));
}
229
/*
 * Record that the given action has finished and wake any CPUs parked
 * in wfe waiting on it. The release store must precede sev so waiters
 * observe the completion when they wake.
 */
static void action_queue_complete(struct cpu_action_queue *q,
				struct cpu_action *action)
{
	/* Mark completion and send events to waiters. */
	store_release(&q->completed, action);
	sev();
}
237
/* Invoke an action's callback with its stored argument. */
static void action_run(struct cpu_action *action)
{
	action->run(action->arg);
}
242
243static void action_run_on_cpu(struct cpu_info *ci, struct cpu_action *action,
244 int sync)
245{
246 struct cpu_action_queue *q = &ci->action_queue;
247
248 /* Don't run actions on non-online or enabled devices. */
249 if (!cpu_online(ci) || ci->cpu == NULL || !ci->cpu->enabled)
250 return;
251
252 if (ci->id == smp_processor_id()) {
253 action->run(action->arg);
254 return;
255 }
256
257 queue_action(q, action);
258 /* Wait for CPU to pick it up. Empty slot means it was picked up. */
259 wait_for_action_queue_slot(q);
260 /* Wait for completion if requested. */
261 if (sync)
262 wait_for_action_complete(q, action);
263}
264
265static int __arch_run_on_cpu(unsigned int cpu, struct cpu_action *action,
266 int sync)
267{
268 struct cpu_info *ci;
269
270 if (cpu >= CONFIG_MAX_CPUS)
271 return -1;
272
273 ci = cpu_info_for_cpu(cpu);
274
275 action_run_on_cpu(ci, action, sync);
276
277 return 0;
278}
279
/* Run an action on the given CPU and wait for it to finish. */
int arch_run_on_cpu(unsigned int cpu, struct cpu_action *action)
{
	const int sync = 1;

	return __arch_run_on_cpu(cpu, action, sync);
}
284
/* Queue an action on the given CPU without waiting for completion. */
int arch_run_on_cpu_async(unsigned int cpu, struct cpu_action *action)
{
	const int sync = 0;

	return __arch_run_on_cpu(cpu, action, sync);
}
289
290static int __arch_run_on_all_cpus(struct cpu_action *action, int sync)
291{
292 int i;
293
294 for (i = 0; i < CONFIG_MAX_CPUS; i++)
295 action_run_on_cpu(cpu_info_for_cpu(i), action, sync);
296
297 return 0;
298}
299
Aaron Durbincf5b6272014-09-17 12:00:57 -0500300static int __arch_run_on_all_cpus_but_self(struct cpu_action *action, int sync)
301{
302 int i;
303 struct cpu_info *me = cpu_info();
304
305 for (i = 0; i < CONFIG_MAX_CPUS; i++) {
306 struct cpu_info *ci = cpu_info_for_cpu(i);
307 if (ci == me)
308 continue;
309 action_run_on_cpu(ci, action, sync);
310 }
311
312 return 0;
313}
314
/* Run an action on all CPUs, waiting for each to complete. */
int arch_run_on_all_cpus(struct cpu_action *action)
{
	const int sync = 1;

	return __arch_run_on_all_cpus(action, sync);
}
319
/* Queue an action on all CPUs without waiting for completion. */
int arch_run_on_all_cpus_async(struct cpu_action *action)
{
	const int sync = 0;

	return __arch_run_on_all_cpus(action, sync);
}
324
/* Run an action on all CPUs except the caller, waiting for each. */
int arch_run_on_all_cpus_but_self(struct cpu_action *action)
{
	const int sync = 1;

	return __arch_run_on_all_cpus_but_self(action, sync);
}
329
/* Queue an action on all CPUs except the caller; don't wait. */
int arch_run_on_all_cpus_but_self_async(struct cpu_action *action)
{
	const int sync = 0;

	return __arch_run_on_all_cpus_but_self(action, sync);
}
334
/*
 * Entry point for secondary CPUs once started: mark this CPU online,
 * then service its action queue forever. This function never returns.
 */
void arch_secondary_cpu_init(void)
{
	struct cpu_info *ci = cpu_info();
	struct cpu_action_queue *q = &ci->action_queue;

	/* Mark this CPU online. */
	cpu_mark_online(ci);

	while (1) {
		struct cpu_action *orig;
		struct cpu_action action;

		/*
		 * Claim the next action into a local copy so the producer's
		 * slot is freed while we run it.
		 */
		orig = wait_for_action(q, &action);

		action_run(&action);
		/* Signal completion against the producer's original pointer. */
		action_queue_complete(q, orig);
	}
}
353
/*
 * Bring up all CPUs under a CPU cluster device and run each one's init
 * action synchronously. cntrl_ops supplies the platform's CPU count and
 * start-cpu hook; secondaries enter via the prepared startup stub and
 * park in arch_secondary_cpu_init() servicing their action queues.
 */
void arch_initialize_cpus(device_t cluster, struct cpu_control_ops *cntrl_ops)
{
	size_t max_cpus;
	size_t i;
	struct cpu_info *ci;
	void (*entry)(void);
	struct bus *bus;

	if (cluster->path.type != DEVICE_PATH_CPU_CLUSTER) {
		printk(BIOS_ERR,
			"CPU init failed. Device is not a CPU_CLUSTER: %s\n",
			dev_path(cluster));
		return;
	}

	bus = cluster->link_list;

	/* Check if no children under this device. */
	if (bus == NULL)
		return;

	entry = prepare_secondary_cpu_startup();

	/* Initialize the cpu_info structures. */
	init_cpu_info(bus);
	/* Platform reports the physically available CPU count. */
	max_cpus = cntrl_ops->total_cpus();

	if (max_cpus > CONFIG_MAX_CPUS) {
		printk(BIOS_WARNING,
			"max_cpus (%zu) exceeds CONFIG_MAX_CPUS (%zu).\n",
			max_cpus, (size_t)CONFIG_MAX_CPUS);
		max_cpus = CONFIG_MAX_CPUS;
	}

	for (i = 0; i < max_cpus; i++) {
		device_t dev;
		struct cpu_action action;

		ci = cpu_info_for_cpu(i);
		dev = ci->cpu;

		/* Disregard CPUs not in device tree. */
		if (dev == NULL)
			continue;

		/* Skip disabled CPUs. */
		if (!dev->enabled)
			continue;

		if (!cpu_online(ci)) {
			/* Start the CPU. */
			printk(BIOS_DEBUG, "Starting CPU%x\n", ci->id);
			if (cntrl_ops->start_cpu(ci->id, entry)) {
				printk(BIOS_ERR,
					"Failed to start CPU%x\n", ci->id);
				continue;
			}
			/* Wait for CPU to come online. */
			/*
			 * NOTE(review): busy-poll without wfe(); since
			 * cpu_mark_online() does not issue sev(), a wfe here
			 * could stall — confirm before "optimizing" this.
			 */
			while (!cpu_online(ci));
			printk(BIOS_DEBUG, "CPU%x online.\n", ci->id);
		}

		/* Send it the init action. */
		action.run = init_this_cpu;
		action.arg = ci;
		/* Synchronous: init completes before the next CPU starts. */
		action_run_on_cpu(ci, &action, 1);
	}
}