blob: a263e1b873e2eadc79314a6bea17bbd268f587da [file] [log] [blame]
Furquan Shaikh2af76f42014-04-28 16:39:40 -07001/*
2 * This file is part of the coreboot project.
3 *
4 * Copyright 2013 Google Inc.
5 *
Aaron Durbin0b0a1e32014-09-06 01:28:54 -05006 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
Furquan Shaikh2af76f42014-04-28 16:39:40 -07009 *
Aaron Durbin0b0a1e32014-09-06 01:28:54 -050010 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
Furquan Shaikh2af76f42014-04-28 16:39:40 -070014 *
Aaron Durbin0b0a1e32014-09-06 01:28:54 -050015 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA, 02110-1301 USA
Furquan Shaikh2af76f42014-04-28 16:39:40 -070018 */
Aaron Durbin0b0a1e32014-09-06 01:28:54 -050019
Aaron Durbin9fd4dc72014-09-06 02:31:30 -050020#include <stdint.h>
Furquan Shaikh2af76f42014-04-28 16:39:40 -070021#include <stdlib.h>
Aaron Durbin9fd4dc72014-09-06 02:31:30 -050022#include <arch/barrier.h>
23#include <arch/lib_helpers.h>
24#include <cpu/cpu.h>
25#include <console/console.h>
Aaron Durbinb9b8ebc2014-09-11 21:57:41 -050026#include <gic.h>
Aaron Durbin9fd4dc72014-09-06 02:31:30 -050027#include "cpu-internal.h"
Furquan Shaikh2af76f42014-04-28 16:39:40 -070028
/* Per-CPU state, indexed by logical CPU id (ids map 1:1 to slots). */
static struct cpu_info cpu_infos[CONFIG_MAX_CPUS];
/* Boot-strap processor's entry; exported. NOTE(review): not assigned in
   this file — presumably set during early bring-up elsewhere; confirm. */
struct cpu_info *bsp_cpu_info;
Aaron Durbin1b315d02014-08-27 10:30:39 -050031
Aaron Durbin9fd4dc72014-09-06 02:31:30 -050032static inline struct cpu_info *cpu_info_for_cpu(unsigned int id)
33{
34 return &cpu_infos[id];
35}
36
/* Return the cpu_info entry of the currently executing CPU. */
struct cpu_info *cpu_info(void)
{
	unsigned int self = smp_processor_id();

	return cpu_info_for_cpu(self);
}
41
42static int cpu_online(struct cpu_info *ci)
43{
44 return load_acquire(&ci->online) != 0;
45}
46
/* Publish this CPU as online. Release ordering makes all prior
   initialization visible to observers that load-acquire ->online. */
static void cpu_mark_online(struct cpu_info *ci)
{
	store_release(&ci->online, 1);
}
51
/* Mark a CPU's device-tree node disabled so it is skipped by later
   action dispatch and initialization loops. */
static inline void cpu_disable_dev(device_t dev)
{
	dev->enabled = 0;
}
56
57static struct cpu_driver *locate_cpu_driver(uint32_t midr)
58{
59 struct cpu_driver *cur;
60
61 for (cur = cpu_drivers; cur != ecpu_drivers; cur++) {
62 const struct cpu_device_id *id_table = cur->id_table;
63
64 for (; id_table->midr != CPU_ID_END; id_table++) {
65 if (id_table->midr == midr)
66 return cur;
67 }
68 }
69 return NULL;
70}
71
72static int cpu_set_device_operations(device_t dev)
73{
74 uint32_t midr;
75 struct cpu_driver *driver;
76
77 midr = raw_read_midr_el1();
78 driver = locate_cpu_driver(midr);
79
80 if (driver == NULL) {
81 printk(BIOS_WARNING, "No CPU driver for MIDR %08x\n", midr);
82 return -1;
83 }
84 dev->ops = driver->ops;
85 return 0;
86}
87
Aaron Durbinf228e8d2014-09-15 14:19:21 -050088/* Set up default SCR values. */
89static void el3_init(void)
90{
91 uint32_t scr;
92
93 if (get_current_el() != EL3)
94 return;
95
96 scr = raw_read_scr_el3();
97 /* Default to non-secure EL1 and EL0. */
98 scr &= ~(SCR_NS_MASK);
99 scr |= SCR_NS_ENABLE;
100 /* Disable IRQ, FIQ, and external abort interrupt routing. */
101 scr &= ~(SCR_IRQ_MASK | SCR_FIQ_MASK | SCR_EA_MASK);
102 scr |= SCR_IRQ_DISABLE | SCR_FIQ_DISABLE | SCR_EA_DISABLE;
103 /* Enable HVC */
104 scr &= ~(SCR_HVC_MASK);
105 scr |= SCR_HVC_ENABLE;
106 /* Disable SMC */
107 scr &= ~(SCR_SMC_MASK);
108 scr |= SCR_SMC_DISABLE;
109 /* Disable secure instruction fetches. */
110 scr &= ~(SCR_SIF_MASK);
111 scr |= SCR_SIF_DISABLE;
112 /* All lower exception levels 64-bit by default. */
113 scr &= ~(SCR_RW_MASK);
114 scr |= SCR_LOWER_AARCH64;
115 /* Disable secure EL1 access to secure timer. */
116 scr &= ~(SCR_ST_MASK);
117 scr |= SCR_ST_DISABLE;
118 /* Don't trap on WFE or WFI instructions. */
119 scr &= ~(SCR_TWI_MASK | SCR_TWE_MASK);
120 scr |= SCR_TWI_DISABLE | SCR_TWE_DISABLE;
121 raw_write_scr_el3(scr);
122 isb();
123}
124
Aaron Durbin9fd4dc72014-09-06 02:31:30 -0500125static void init_this_cpu(void *arg)
126{
127 struct cpu_info *ci = arg;
128 device_t dev = ci->cpu;
129
130 cpu_set_device_operations(dev);
131
Aaron Durbinf228e8d2014-09-15 14:19:21 -0500132 el3_init();
133
Aaron Durbinb9b8ebc2014-09-11 21:57:41 -0500134 /* Initialize the GIC. */
135 gic_init();
136
Aaron Durbin9fd4dc72014-09-06 02:31:30 -0500137 if (dev->ops != NULL && dev->ops->init != NULL) {
138 dev->initialized = 1;
139 printk(BIOS_DEBUG, "%s init\n", dev_path(dev));
140 dev->ops->init(dev);
141 }
142}
143
144/* Fill in cpu_info structures according to device tree. */
145static void init_cpu_info(struct bus *bus)
146{
147 device_t cur;
148
149 for (cur = bus->children; cur != NULL; cur = cur->sibling) {
150 struct cpu_info *ci;
151 unsigned int id = cur->path.cpu.id;
152
153 if (cur->path.type != DEVICE_PATH_CPU)
154 continue;
155
156 /* IDs are currently mapped 1:1 with logical CPU numbers. */
157 if (id >= CONFIG_MAX_CPUS) {
158 printk(BIOS_WARNING,
159 "CPU id %x too large. Disabling.\n", id);
160 cpu_disable_dev(cur);
161 continue;
162 }
163
164 ci = cpu_info_for_cpu(id);
165 if (ci->cpu != NULL) {
166 printk(BIOS_WARNING,
167 "Duplicate ID %x in device tree.\n", id);
168 cpu_disable_dev(cur);
169 }
170
171 ci->cpu = cur;
172 ci->id = cur->path.cpu.id;
173 }
174
175 /* Mark current cpu online. */
176 cpu_mark_online(cpu_info());
177}
178
/* True when no action is pending in the todo slot. The exclusive load
   arms the local exclusive monitor, so a remote store to the slot can
   generate a wake-up event for a subsequent wfe() in the caller. */
static inline int action_queue_empty(struct cpu_action_queue *q)
{
	return load_acquire_exclusive(&q->todo) == NULL;
}
183
184static inline int action_completed(struct cpu_action_queue *q,
185 struct cpu_action *action)
186{
187 return load_acquire(&q->completed) == action;
188}
189
/* Sleep (wfe) until the queue's todo slot is observed empty. */
static inline void wait_for_action_queue_slot(struct cpu_action_queue *q)
{
	for (;;) {
		if (action_queue_empty(q))
			break;
		wfe();
	}
}
195
/* Sleep (wfe) until |a| has been marked complete on |q|. */
static void wait_for_action_complete(struct cpu_action_queue *q,
					struct cpu_action *a)
{
	for (;;) {
		if (action_completed(q, a))
			break;
		wfe();
	}
}
202
/*
 * Block until an action is queued on |q|, then atomically claim it by
 * swapping the todo slot to NULL via an exclusive load/store pair.
 * The action is copied into |local| so it can run after the slot is
 * freed for the next producer; the original queue-entry address is
 * returned so completion can later be signalled against it.
 */
static struct cpu_action *wait_for_action(struct cpu_action_queue *q,
					struct cpu_action *local)
{
	struct cpu_action *action;

	while (action_queue_empty(q))
		wfe();

	/*
	 * Keep original address, but use a local copy for async processing.
	 */
	do {
		/* Retry if another observer broke our exclusive monitor. */
		action = load_acquire_exclusive(&q->todo);
		*local = *action;
	} while (!store_release_exclusive(&q->todo, NULL));

	return action;
}
221
222static void queue_action(struct cpu_action_queue *q, struct cpu_action *action)
223{
224 do {
225 wait_for_action_queue_slot(q);
226 if (load_acquire_exclusive(&q->todo) != NULL)
227 continue;
228 } while (!store_release_exclusive(&q->todo, action));
229}
230
/* Publish |action| as completed on |q| and wake any CPUs sleeping in
   wfe() waiting for it. */
static void action_queue_complete(struct cpu_action_queue *q,
				struct cpu_action *action)
{
	/* Mark completion and send events to waiters. */
	store_release(&q->completed, action);
	sev();
}
238
/* Invoke an action's callback with its bound argument. */
static void action_run(struct cpu_action *action)
{
	action->run(action->arg);
}
243
244static void action_run_on_cpu(struct cpu_info *ci, struct cpu_action *action,
245 int sync)
246{
247 struct cpu_action_queue *q = &ci->action_queue;
248
249 /* Don't run actions on non-online or enabled devices. */
250 if (!cpu_online(ci) || ci->cpu == NULL || !ci->cpu->enabled)
251 return;
252
253 if (ci->id == smp_processor_id()) {
254 action->run(action->arg);
255 return;
256 }
257
258 queue_action(q, action);
259 /* Wait for CPU to pick it up. Empty slot means it was picked up. */
260 wait_for_action_queue_slot(q);
261 /* Wait for completion if requested. */
262 if (sync)
263 wait_for_action_complete(q, action);
264}
265
266static int __arch_run_on_cpu(unsigned int cpu, struct cpu_action *action,
267 int sync)
268{
269 struct cpu_info *ci;
270
271 if (cpu >= CONFIG_MAX_CPUS)
272 return -1;
273
274 ci = cpu_info_for_cpu(cpu);
275
276 action_run_on_cpu(ci, action, sync);
277
278 return 0;
279}
280
/* Run |action| on |cpu|, waiting for it to complete. Returns -1 for an
   invalid CPU id, 0 otherwise. */
int arch_run_on_cpu(unsigned int cpu, struct cpu_action *action)
{
	return __arch_run_on_cpu(cpu, action, 1);
}

/* Run |action| on |cpu| without waiting for completion (only for
   pickup). Returns -1 for an invalid CPU id, 0 otherwise. */
int arch_run_on_cpu_async(unsigned int cpu, struct cpu_action *action)
{
	return __arch_run_on_cpu(cpu, action, 0);
}
290
291static int __arch_run_on_all_cpus(struct cpu_action *action, int sync)
292{
293 int i;
294
295 for (i = 0; i < CONFIG_MAX_CPUS; i++)
296 action_run_on_cpu(cpu_info_for_cpu(i), action, sync);
297
298 return 0;
299}
300
Aaron Durbincf5b6272014-09-17 12:00:57 -0500301static int __arch_run_on_all_cpus_but_self(struct cpu_action *action, int sync)
302{
303 int i;
304 struct cpu_info *me = cpu_info();
305
306 for (i = 0; i < CONFIG_MAX_CPUS; i++) {
307 struct cpu_info *ci = cpu_info_for_cpu(i);
308 if (ci == me)
309 continue;
310 action_run_on_cpu(ci, action, sync);
311 }
312
313 return 0;
314}
315
/* Run |action| on all CPUs, waiting for each to complete. */
int arch_run_on_all_cpus(struct cpu_action *action)
{
	return __arch_run_on_all_cpus(action, 1);
}

/* Run |action| on all CPUs without waiting for completion. */
int arch_run_on_all_cpus_async(struct cpu_action *action)
{
	return __arch_run_on_all_cpus(action, 0);
}

/* Run |action| on all CPUs except the caller, waiting for completion. */
int arch_run_on_all_cpus_but_self(struct cpu_action *action)
{
	return __arch_run_on_all_cpus_but_self(action, 1);
}

/* Run |action| on all CPUs except the caller, without waiting. */
int arch_run_on_all_cpus_but_self_async(struct cpu_action *action)
{
	return __arch_run_on_all_cpus_but_self(action, 0);
}
335
Aaron Durbin9fd4dc72014-09-06 02:31:30 -0500336void arch_secondary_cpu_init(void)
337{
338 struct cpu_info *ci = cpu_info();
339 struct cpu_action_queue *q = &ci->action_queue;
340
341 /* Mark this CPU online. */
342 cpu_mark_online(ci);
343
344 while (1) {
345 struct cpu_action *orig;
346 struct cpu_action action;
347
348 orig = wait_for_action(q, &action);
349
350 action_run(&action);
351 action_queue_complete(q, orig);
352 }
353}
354
/*
 * Bring up and initialize all CPUs under the CPU_CLUSTER device
 * |cluster|, using the SoC-provided |cntrl_ops| to query the CPU count
 * and start secondary cores. For each enabled CPU in the device tree:
 * start it (if not already online), wait for it to report online, then
 * synchronously run init_this_cpu on it.
 */
void arch_initialize_cpus(device_t cluster, struct cpu_control_ops *cntrl_ops)
{
	size_t max_cpus;
	size_t i;
	struct cpu_info *ci;
	void (*entry)(void);
	struct bus *bus;

	if (cluster->path.type != DEVICE_PATH_CPU_CLUSTER) {
		printk(BIOS_ERR,
			"CPU init failed. Device is not a CPU_CLUSTER: %s\n",
			dev_path(cluster));
		return;
	}

	bus = cluster->link_list;

	/* Check if no children under this device. */
	if (bus == NULL)
		return;

	/* Prepare the secondary entry point before any CPU is started. */
	entry = prepare_secondary_cpu_startup();

	/* Initialize the cpu_info structures. */
	init_cpu_info(bus);
	/* SoC reports how many CPUs actually exist; clamp to our table. */
	max_cpus = cntrl_ops->total_cpus();

	if (max_cpus > CONFIG_MAX_CPUS) {
		printk(BIOS_WARNING,
			"max_cpus (%zu) exceeds CONFIG_MAX_CPUS (%zu).\n",
			max_cpus, (size_t)CONFIG_MAX_CPUS);
		max_cpus = CONFIG_MAX_CPUS;
	}

	for (i = 0; i < max_cpus; i++) {
		device_t dev;
		struct cpu_action action;

		ci = cpu_info_for_cpu(i);
		dev = ci->cpu;

		/* Disregard CPUs not in device tree. */
		if (dev == NULL)
			continue;

		/* Skip disabled CPUs. */
		if (!dev->enabled)
			continue;

		if (!cpu_online(ci)) {
			/* Start the CPU. */
			printk(BIOS_DEBUG, "Starting CPU%x\n", ci->id);
			if (cntrl_ops->start_cpu(ci->id, entry)) {
				printk(BIOS_ERR,
					"Failed to start CPU%x\n", ci->id);
				continue;
			}
			/* Wait for CPU to come online.
			   NOTE(review): busy-wait with no wfe/timeout — a
			   CPU that never comes up hangs boot here. */
			while (!cpu_online(ci));
			printk(BIOS_DEBUG, "CPU%x online.\n", ci->id);
		}

		/* Send it the init action (sync: wait for completion). */
		action.run = init_this_cpu;
		action.arg = ci;
		action_run_on_cpu(ci, &action, 1);
	}
}