/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <arch/cpu.h>
#include <bootstate.h>
#include <console/console.h>
#include <thread.h>
#include <timer.h>

static void idle_thread_init(void);

/* There needs to be at least one thread to run the ramstage boot state
 * machine. */
#define TOTAL_NUM_THREADS (CONFIG_NUM_THREADS + 1)

/* Storage space for the thread structs. */
static struct thread all_threads[TOTAL_NUM_THREADS];

/* All runnable (but not running) and free threads are kept on their
 * respective lists. */
static struct thread *runnable_threads;
static struct thread *free_threads;

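/* Each thread's cpu_info lives at the top of its stack; stack_orig records
 * that location, so recovering the struct is a simple cast. */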
static inline struct cpu_info *thread_cpu_info(const struct thread *t)
{
	return (void *)(t->stack_orig);
}

static inline int thread_can_yield(const struct thread *t)
{
	return (t != NULL && t->can_yield);
}

/* Assumes current CPU info can switch. */
static inline struct thread *cpu_info_to_thread(const struct cpu_info *ci)
{
	return ci->thread;
}

static inline struct thread *current_thread(void)
{
	return cpu_info_to_thread(cpu_info());
}

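/* The runnable and free lists are LIFO singly linked lists threaded through
 * each thread's next pointer; push and pop are O(1). */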
static inline int thread_list_empty(struct thread **list)
{
	return *list == NULL;
}

static inline struct thread *pop_thread(struct thread **list)
{
	struct thread *t;

	t = *list;
	*list = t->next;
	t->next = NULL;
	return t;
}

static inline void push_thread(struct thread **list, struct thread *t)
{
	t->next = *list;
	*list = t;
}

static inline void push_runnable(struct thread *t)
{
	push_thread(&runnable_threads, t);
}

static inline struct thread *pop_runnable(void)
{
	return pop_thread(&runnable_threads);
}

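/* Grab a thread off the free list and prime it: copy the current cpu_info
 * to the top of the new thread's stack and point it back at the thread. */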
static inline struct thread *get_free_thread(void)
{
	struct thread *t;
	struct cpu_info *ci;
	struct cpu_info *new_ci;

	if (thread_list_empty(&free_threads))
		return NULL;

	t = pop_thread(&free_threads);

	ci = cpu_info();

	/* Initialize the cpu_info structure on the new stack. */
	new_ci = thread_cpu_info(t);
	*new_ci = *ci;
	new_ci->thread = t;

	/* Reset the current stack value to the original. */
	t->stack_current = t->stack_orig;

	return t;
}

static inline void free_thread(struct thread *t)
{
	push_thread(&free_threads, t);
}

/* The idle thread is run whenever there isn't anything else that is
 * runnable. Its sole responsibility is to ensure progress is made by
 * running the timer callbacks. */
static void idle_thread(void *unused)
{
	/* This thread never voluntarily yields. */
	thread_prevent_coop();
	while (1)
		timers_run();
}

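/* Switch execution to thread t. When t is NULL the next runnable thread is
 * popped instead and the caller is not requeued (it is blocking, sleeping
 * on a timer, or terminating). */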
static void schedule(struct thread *t)
{
	struct thread *current = current_thread();

	/* If t is NULL, find a new runnable thread. */
	if (t == NULL) {
		if (thread_list_empty(&runnable_threads))
			die("Runnable thread list is empty!\n");
		t = pop_runnable();
	} else {
		/* current is still runnable. */
		push_runnable(current);
	}
	switch_to_thread(t->stack_current, &current->stack_current);
}

static void terminate_thread(struct thread *t)
{
	free_thread(t);
	schedule(NULL);
}

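/* Entry shim each thread starts in: run the stashed entry function, then
 * terminate. The _block_ variants below additionally hold up the boot state
 * machine while the thread runs. */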
static void asmlinkage call_wrapper(void *unused)
{
	struct thread *current = current_thread();

	current->entry(current->entry_arg);
	terminate_thread(current);
}

/* Block the current state transitions until the thread is complete. */
static void asmlinkage call_wrapper_block_current(void *unused)
{
	struct thread *current = current_thread();

	boot_state_current_block();
	current->entry(current->entry_arg);
	boot_state_current_unblock();
	terminate_thread(current);
}

struct block_boot_state {
	boot_state_t state;
	boot_state_sequence_t seq;
};

/* Block the provided state until the thread is complete. */
static void asmlinkage call_wrapper_block_state(void *arg)
{
	struct block_boot_state *bbs = arg;
	struct thread *current = current_thread();

	boot_state_block(bbs->state, bbs->seq);
	current->entry(current->entry_arg);
	boot_state_unblock(bbs->state, bbs->seq);
	terminate_thread(current);
}

/* Prepare a thread so that it starts by executing thread_entry(thread_arg).
 * Within thread_entry() it will call func(arg). */
static void prepare_thread(struct thread *t, void *func, void *arg,
			   asmlinkage void (*thread_entry)(void *),
			   void *thread_arg)
{
	/* Stash the function and argument to run. */
	t->entry = func;
	t->entry_arg = arg;

	/* All new threads can yield by default. */
	t->can_yield = 1;

	arch_prepare_thread(t, thread_entry, thread_arg);
}

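/* Timer callback used by thread_yield_microseconds(); tocb->priv carries
 * the sleeping thread, which is scheduled directly when the timer fires. */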
static void thread_resume_from_timeout(struct timeout_callback *tocb)
{
	struct thread *to;

	to = tocb->priv;
	schedule(to);
}

static void idle_thread_init(void)
{
	struct thread *t;

	t = get_free_thread();

	if (t == NULL)
		die("No threads available for idle thread!\n");

	/* Queue idle thread to run once all other threads have yielded. */
	prepare_thread(t, idle_thread, NULL, call_wrapper, NULL);
	push_runnable(t);
	/* Mark the currently executing thread to cooperate. */
	thread_cooperate();
}

/* Don't inline this function so the timeout_callback won't have its storage
 * space on the stack cleaned up before the call to schedule(). */
static int __attribute__((noinline))
thread_yield_timed_callback(struct timeout_callback *tocb,
			    unsigned int microsecs)
{
	tocb->priv = current_thread();
	tocb->callback = thread_resume_from_timeout;

	if (timer_sched_callback(tocb, microsecs))
		return -1;

	/* The timer callback will wake up the current thread. */
	schedule(NULL);
	return 0;
}

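/* Carve bytes out of a not-yet-started thread's stack, keeping the stack
 * pointer aligned; used to pass per-thread data such as the block_boot_state
 * argument in thread_run_until(). */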
static void *thread_alloc_space(struct thread *t, size_t bytes)
{
	/* Allocate the amount of space on the stack keeping the stack
	 * aligned to the pointer size. */
	t->stack_current -= ALIGN_UP(bytes, sizeof(uintptr_t));

	return (void *)t->stack_current;
}

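/* Build the BSP thread around the currently running stack, hand every other
 * thread a CONFIG_STACK_SIZE stack with room for its cpu_info reserved at
 * the top, then queue the idle thread. */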
void threads_initialize(void)
{
	int i;
	struct thread *t;
	u8 *stack_top;
	struct cpu_info *ci;
	u8 *thread_stacks;

	thread_stacks = arch_get_thread_stackbase();

	/* Initialize the BSP thread first. The cpu_info structure is assumed
	 * to be just under the top of the stack. */
	t = &all_threads[0];
	ci = cpu_info();
	ci->thread = t;
	t->stack_orig = (uintptr_t)ci;
	t->id = 0;

	stack_top = &thread_stacks[CONFIG_STACK_SIZE] - sizeof(struct cpu_info);
	for (i = 1; i < TOTAL_NUM_THREADS; i++) {
		t = &all_threads[i];
		t->stack_orig = (uintptr_t)stack_top;
		t->id = i;
		stack_top += CONFIG_STACK_SIZE;
		free_thread(t);
	}

	idle_thread_init();
}

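/*
 * Example (hypothetical caller): kick off a worker that runs concurrently
 * with the boot state machine, blocking the current state transition until
 * it completes, and falling back to a direct call if no thread is free:
 *
 *	static void my_worker(void *arg) { ... }
 *
 *	if (thread_run(my_worker, NULL) < 0)
 *		my_worker(NULL);
 */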
int thread_run(void (*func)(void *), void *arg)
{
	struct thread *current;
	struct thread *t;

	current = current_thread();

	if (!thread_can_yield(current)) {
		printk(BIOS_ERR,
		       "thread_run() called from non-yielding context!\n");
		return -1;
	}

	t = get_free_thread();

	if (t == NULL) {
		printk(BIOS_ERR, "thread_run(): no more threads!\n");
		return -1;
	}

	prepare_thread(t, func, arg, call_wrapper_block_current, NULL);
	schedule(t);

	return 0;
}

int thread_run_until(void (*func)(void *), void *arg,
		     boot_state_t state, boot_state_sequence_t seq)
{
	struct thread *current;
	struct thread *t;
	struct block_boot_state *bbs;

	current = current_thread();

	if (!thread_can_yield(current)) {
		printk(BIOS_ERR,
		       "thread_run_until() called from non-yielding context!\n");
		return -1;
	}

	t = get_free_thread();

	if (t == NULL) {
		printk(BIOS_ERR, "thread_run_until(): no more threads!\n");
		return -1;
	}

	bbs = thread_alloc_space(t, sizeof(*bbs));
	bbs->state = state;
	bbs->seq = seq;
	prepare_thread(t, func, arg, call_wrapper_block_state, bbs);
	schedule(t);

	return 0;
}

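/* Yield the CPU to other threads for at least microsecs. Returns -1 if the
 * current context cannot yield or the wake-up callback cannot be scheduled. */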
int thread_yield_microseconds(unsigned int microsecs)
{
	struct thread *current;
	struct timeout_callback tocb;

	current = current_thread();

	if (!thread_can_yield(current))
		return -1;

	if (thread_yield_timed_callback(&tocb, microsecs))
		return -1;

	return 0;
}

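/* Toggle whether the current thread may yield: the idle thread clears the
 * flag because it must never block, and idle_thread_init() sets it for the
 * thread that brings the scheduler up. */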
void thread_cooperate(void)
{
	struct thread *current;

	current = current_thread();

	if (current != NULL)
		current->can_yield = 1;
}

void thread_prevent_coop(void)
{
	struct thread *current;

	current = current_thread();

	if (current != NULL)
		current->can_yield = 0;
}