/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <arch/cpu.h>
#include <bootstate.h>
#include <console/console.h>
#include <thread.h>

static void idle_thread_init(void);

/* There needs to be at least one thread to run the ramstage boot state
 * machine. */
#define TOTAL_NUM_THREADS (CONFIG_NUM_THREADS + 1)

/* Storage space for the thread structs. */
static struct thread all_threads[TOTAL_NUM_THREADS];

/* All runnable (but not running) and free threads are kept on their
 * respective lists. */
static struct thread *runnable_threads;
static struct thread *free_threads;

static inline struct cpu_info *thread_cpu_info(const struct thread *t)
{
	return (void *)(t->stack_orig);
}

static inline int thread_can_yield(const struct thread *t)
{
	return (t != NULL && t->can_yield);
}

/* Assumes current cpu info can switch. */
static inline struct thread *cpu_info_to_thread(const struct cpu_info *ci)
{
	return ci->thread;
}

static inline struct thread *current_thread(void)
{
	return cpu_info_to_thread(cpu_info());
}

static inline int thread_list_empty(struct thread **list)
{
	return *list == NULL;
}

static inline struct thread *pop_thread(struct thread **list)
{
	struct thread *t;

	t = *list;
	*list = t->next;
	t->next = NULL;
	return t;
}

static inline void push_thread(struct thread **list, struct thread *t)
{
	t->next = *list;
	*list = t;
}

static inline void push_runnable(struct thread *t)
{
	push_thread(&runnable_threads, t);
}

static inline struct thread *pop_runnable(void)
{
	return pop_thread(&runnable_threads);
}

static inline struct thread *get_free_thread(void)
{
	struct thread *t;
	struct cpu_info *ci;
	struct cpu_info *new_ci;

	if (thread_list_empty(&free_threads))
		return NULL;

	t = pop_thread(&free_threads);

	ci = cpu_info();

	/* Initialize the cpu_info structure on the new stack. */
	new_ci = thread_cpu_info(t);
	*new_ci = *ci;
	new_ci->thread = t;

	/* Reset the current stack value to the original. */
	t->stack_current = t->stack_orig;

	return t;
}

static inline void free_thread(struct thread *t)
{
	push_thread(&free_threads, t);
}

/* The idle thread is run whenever there isn't anything else that is
 * runnable. Its sole responsibility is to ensure progress is made by
 * running the timer callbacks. */
static void idle_thread(void *unused)
{
	/* This thread never voluntarily yields. */
	thread_prevent_coop();
	while (1) {
		timers_run();
	}
}

static void schedule(struct thread *t)
{
	struct thread *current = current_thread();

	/* If t is NULL, find a new runnable thread. */
	if (t == NULL) {
		if (thread_list_empty(&runnable_threads))
			die("Runnable thread list is empty!\n");
		t = pop_runnable();
	} else {
		/* current is still runnable. */
		push_runnable(current);
	}
	switch_to_thread(t->stack_current, &current->stack_current);
}

static void terminate_thread(struct thread *t)
{
	free_thread(t);
	schedule(NULL);
}

static void asmlinkage call_wrapper(void *unused)
{
	struct thread *current = current_thread();

	current->entry(current->entry_arg);
	terminate_thread(current);
}

/* Block the current state transitions until thread is complete. */
static void asmlinkage call_wrapper_block_current(void *unused)
{
	struct thread *current = current_thread();

	boot_state_current_block();
	current->entry(current->entry_arg);
	boot_state_current_unblock();
	terminate_thread(current);
}

struct block_boot_state {
	boot_state_t state;
	boot_state_sequence_t seq;
};

/* Block the provided state until thread is complete. */
static void asmlinkage call_wrapper_block_state(void *arg)
{
	struct block_boot_state *bbs = arg;
	struct thread *current = current_thread();

	boot_state_block(bbs->state, bbs->seq);
	current->entry(current->entry_arg);
	boot_state_unblock(bbs->state, bbs->seq);
	terminate_thread(current);
}

/* Prepare a thread so that it starts by executing thread_entry(thread_arg).
 * Within thread_entry() it will call func(arg). */
static void prepare_thread(struct thread *t, void *func, void *arg,
			   void asmlinkage (*thread_entry)(void *),
			   void *thread_arg)
{
	/* Stash the function and argument to run. */
	t->entry = func;
	t->entry_arg = arg;

	/* All new threads can yield by default. */
	t->can_yield = 1;

	arch_prepare_thread(t, thread_entry, thread_arg);
}

static void thread_resume_from_timeout(struct timeout_callback *tocb)
{
	struct thread *to;

	to = tocb->priv;
	schedule(to);
}

static void idle_thread_init(void)
{
	struct thread *t;

	t = get_free_thread();

	if (t == NULL) {
		die("No threads available for idle thread!\n");
	}

	/* Queue idle thread to run once all other threads have yielded. */
	prepare_thread(t, idle_thread, NULL, call_wrapper, NULL);
	push_runnable(t);
	/* Mark the currently executing thread to cooperate. */
	thread_cooperate();
}

/* Don't inline this function so the timeout_callback won't have its storage
 * space on the stack cleaned up before the call to schedule(). */
static int __attribute__((noinline))
thread_yield_timed_callback(struct timeout_callback *tocb, unsigned microsecs)
{
	tocb->priv = current_thread();
	tocb->callback = thread_resume_from_timeout;

	if (timer_sched_callback(tocb, microsecs))
		return -1;

	/* The timer callback will wake up the current thread. */
	schedule(NULL);
	return 0;
}

static void *thread_alloc_space(struct thread *t, size_t bytes)
{
	/* Allocate the amount of space on the stack keeping the stack
	 * aligned to the pointer size. */
	t->stack_current -= ALIGN_UP(bytes, sizeof(uintptr_t));

	return (void *)t->stack_current;
}

void threads_initialize(void)
{
	int i;
	struct thread *t;
	u8 *stack_top;
	struct cpu_info *ci;
	u8 *thread_stacks;

	thread_stacks = arch_get_thread_stackbase();

	/* Initialize the BSP thread first. The cpu_info structure is assumed
	 * to be just under the top of the stack. */
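	/*
	 * Illustrative per-thread stack layout implied by the code below
	 * (a sketch; the exact contents beneath cpu_info depend on
	 * arch_prepare_thread() and the architecture):
	 *
	 *	high addresses
	 *	+------------------+ <- stack base + CONFIG_STACK_SIZE
	 *	| struct cpu_info  | <- t->stack_orig (thread_cpu_info(t))
	 *	+------------------+
	 *	|   thread stack   |    grows down from stack_orig
	 *	|       ...        |
	 *	+------------------+
	 *	low addresses
	 */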
	t = &all_threads[0];
	ci = cpu_info();
	ci->thread = t;
	t->stack_orig = (uintptr_t)ci;
	t->id = 0;

	stack_top = &thread_stacks[CONFIG_STACK_SIZE] - sizeof(struct cpu_info);
	for (i = 1; i < TOTAL_NUM_THREADS; i++) {
		t = &all_threads[i];
		t->stack_orig = (uintptr_t)stack_top;
		t->id = i;
		stack_top += CONFIG_STACK_SIZE;
		free_thread(t);
	}

	idle_thread_init();
}

int thread_run(void (*func)(void *), void *arg)
{
	struct thread *current;
	struct thread *t;

	current = current_thread();

	if (!thread_can_yield(current)) {
		printk(BIOS_ERR,
		       "thread_run() called from non-yielding context!\n");
		return -1;
	}

	t = get_free_thread();

	if (t == NULL) {
		printk(BIOS_ERR, "thread_run() No more threads!\n");
		return -1;
	}

	prepare_thread(t, func, arg, call_wrapper_block_current, NULL);
	schedule(t);

	return 0;
}
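
/*
 * Illustrative use of thread_run() (a sketch, not part of this file):
 * spawn a background task from a yield-capable context; the current
 * boot state transition is blocked until the task finishes. The
 * callback name background_init() is hypothetical.
 *
 *	static void background_init(void *arg)
 *	{
 *		... slow hardware init that can overlap other work ...
 *	}
 *
 *	if (thread_run(background_init, NULL) < 0)
 *		background_init(NULL);	(fall back to running it inline)
 */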

int thread_run_until(void (*func)(void *), void *arg,
		     boot_state_t state, boot_state_sequence_t seq)
{
	struct thread *current;
	struct thread *t;
	struct block_boot_state *bbs;

	current = current_thread();

	if (!thread_can_yield(current)) {
		printk(BIOS_ERR,
		       "thread_run_until() called from non-yielding context!\n");
		return -1;
	}

	t = get_free_thread();

	if (t == NULL) {
		printk(BIOS_ERR, "thread_run_until() No more threads!\n");
		return -1;
	}

	bbs = thread_alloc_space(t, sizeof(*bbs));
	bbs->state = state;
	bbs->seq = seq;
	prepare_thread(t, func, arg, call_wrapper_block_state, bbs);
	schedule(t);

	return 0;
}
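
/*
 * Illustrative use of thread_run_until() (a sketch): the spawned thread
 * blocks only the named boot state/sequence point, so unrelated
 * transitions can proceed while it runs. init_storage() is a
 * hypothetical callback; BS_DEV_ENUMERATE and BS_ON_ENTRY come from
 * <bootstate.h>.
 *
 *	(work that must finish before device enumeration starts)
 *	thread_run_until(init_storage, NULL, BS_DEV_ENUMERATE, BS_ON_ENTRY);
 */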

int thread_yield_microseconds(unsigned microsecs)
{
	struct thread *current;
	struct timeout_callback tocb;

	current = current_thread();

	if (!thread_can_yield(current))
		return -1;

	if (thread_yield_timed_callback(&tocb, microsecs))
		return -1;

	return 0;
}
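
/*
 * Illustrative use of thread_yield_microseconds() (a sketch): a polling
 * loop that yields so other threads and timer callbacks make progress
 * instead of busy-waiting. controller_ready() is a hypothetical status
 * check.
 *
 *	while (!controller_ready()) {
 *		if (thread_yield_microseconds(100) < 0)
 *			udelay(100);	(no yielding context; plain delay)
 *	}
 */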

void thread_cooperate(void)
{
	struct thread *current;

	current = current_thread();

	if (current != NULL)
		current->can_yield = 1;
}

void thread_prevent_coop(void)
{
	struct thread *current;

	current = current_thread();

	if (current != NULL)
		current->can_yield = 0;
}
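
/*
 * Illustrative pairing of thread_prevent_coop()/thread_cooperate() (a
 * sketch): mark a region in which the current thread must not be
 * switched away from, e.g. so code called underneath cannot
 * accidentally yield; thread_run() and yield calls fail during it.
 *
 *	thread_prevent_coop();
 *	... section that relies on not being rescheduled ...
 *	thread_cooperate();
 */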