blob: a1c84dce591ea4de33d5532ceab1927cd63fbe59 [file] [log] [blame]
Angel Pons118a9c72020-04-02 23:48:34 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Elyes HAOUASadd76f92019-03-21 09:55:49 +01002
Raul E Rangelb29f9d42021-07-12 13:49:59 -06003#include <assert.h>
Aaron Durbin4409a5e2013-05-06 12:20:52 -05004#include <stddef.h>
5#include <stdint.h>
6#include <stdlib.h>
Aaron Durbin4409a5e2013-05-06 12:20:52 -05007#include <bootstate.h>
8#include <console/console.h>
Raul E Rangelc2c38f52021-10-08 13:10:38 -06009#include <smp/node.h>
Aaron Durbin4409a5e2013-05-06 12:20:52 -050010#include <thread.h>
Elyes HAOUASadd76f92019-03-21 09:55:49 +010011#include <timer.h>
Aaron Durbin4409a5e2013-05-06 12:20:52 -050012
/* Backing store for the per-thread stacks (one slot per CONFIG_NUM_THREADS). */
static u8 thread_stacks[CONFIG_STACK_SIZE * CONFIG_NUM_THREADS] __aligned(sizeof(uint64_t));
/* Set once threads_initialize() has completed; gates current_thread(). */
static bool initialized;

static void idle_thread_init(void);

/* There needs to be at least one thread to run the ramstage state machine. */
#define TOTAL_NUM_THREADS (CONFIG_NUM_THREADS + 1)

/* Storage space for the thread structs. */
static struct thread all_threads[TOTAL_NUM_THREADS];

/* All runnable (but not running) and free threads are kept on their
 * respective lists. */
static struct thread *runnable_threads;
static struct thread *free_threads;

/* The thread currently executing (boot CPU only). */
static struct thread *active_thread;
30
Aaron Durbin4409a5e2013-05-06 12:20:52 -050031static inline int thread_can_yield(const struct thread *t)
32{
Raul E Rangelbe60a0d2021-07-15 13:52:03 -060033 return (t != NULL && t->can_yield > 0);
Aaron Durbin4409a5e2013-05-06 12:20:52 -050034}
35
/* Record t as the running thread.  Threading only operates on the boot CPU,
 * hence the assert. */
static inline void set_current_thread(struct thread *t)
{
	assert(boot_cpu());
	active_thread = t;
}
41
Aaron Durbin4409a5e2013-05-06 12:20:52 -050042static inline struct thread *current_thread(void)
43{
Raul E Rangelc2c38f52021-10-08 13:10:38 -060044 if (!initialized || !boot_cpu())
Raul E Rangel000138e62021-07-14 11:44:51 -060045 return NULL;
46
Raul E Rangelc2c38f52021-10-08 13:10:38 -060047 return active_thread;
Aaron Durbin4409a5e2013-05-06 12:20:52 -050048}
49
/* Nonzero when the singly-linked list has no entries. */
static inline int thread_list_empty(struct thread **list)
{
	return !*list;
}
54
55static inline struct thread *pop_thread(struct thread **list)
56{
57 struct thread *t;
58
59 t = *list;
60 *list = t->next;
61 t->next = NULL;
62 return t;
63}
64
65static inline void push_thread(struct thread **list, struct thread *t)
66{
67 t->next = *list;
68 *list = t;
69}
70
/* Queue t on the runnable list; schedule() will pick it up later. */
static inline void push_runnable(struct thread *t)
{
	push_thread(&runnable_threads, t);
}
75
/* Take the next runnable thread off the list.  Caller checks for empty. */
static inline struct thread *pop_runnable(void)
{
	return pop_thread(&runnable_threads);
}
80
81static inline struct thread *get_free_thread(void)
82{
83 struct thread *t;
Aaron Durbin4409a5e2013-05-06 12:20:52 -050084
85 if (thread_list_empty(&free_threads))
86 return NULL;
87
88 t = pop_thread(&free_threads);
89
Aaron Durbin4409a5e2013-05-06 12:20:52 -050090 /* Reset the current stack value to the original. */
Raul E Rangeldb16ac952021-09-24 14:00:56 -060091 if (!t->stack_orig)
92 die("%s: Invalid stack value\n", __func__);
93
Aaron Durbin4409a5e2013-05-06 12:20:52 -050094 t->stack_current = t->stack_orig;
95
96 return t;
97}
98
/* Return t to the free pool for reuse by a later thread_run(). */
static inline void free_thread(struct thread *t)
{
	push_thread(&free_threads, t);
}
103
104/* The idle thread is ran whenever there isn't anything else that is runnable.
105 * It's sole responsibility is to ensure progress is made by running the timer
106 * callbacks. */
Raul E Rangelcc01da52021-07-12 13:43:48 -0600107__noreturn static enum cb_err idle_thread(void *unused)
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500108{
109 /* This thread never voluntarily yields. */
Raul E Rangel9ba36ab2021-07-15 17:34:05 -0600110 thread_coop_disable();
Lee Leahy2f919ec2017-03-08 17:37:06 -0800111 while (1)
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500112 timers_run();
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500113}
114
/* Context-switch away from the current thread.  If t is NULL, pull the next
 * runnable thread (the caller is blocking/terminating); otherwise switch to
 * t directly and keep the caller on the runnable list. */
static void schedule(struct thread *t)
{
	struct thread *current = current_thread();

	/* If t is NULL need to find new runnable thread. */
	if (t == NULL) {
		if (thread_list_empty(&runnable_threads))
			die("Runnable thread list is empty!\n");
		t = pop_runnable();
	} else {
		/* current is still runnable. */
		push_runnable(current);
	}

	/* Publish the state change for thread_join() observers. */
	if (t->handle)
		t->handle->state = THREAD_STARTED;

	set_current_thread(t);

	/* Does not return until something schedules `current` again. */
	switch_to_thread(t->stack_current, &current->stack_current);
}
136
Raul E Rangelcc01da52021-07-12 13:43:48 -0600137static void terminate_thread(struct thread *t, enum cb_err error)
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500138{
Raul E Rangelcc01da52021-07-12 13:43:48 -0600139 if (t->handle) {
140 t->handle->error = error;
141 t->handle->state = THREAD_DONE;
142 }
143
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500144 free_thread(t);
145 schedule(NULL);
146}
147
148static void asmlinkage call_wrapper(void *unused)
149{
150 struct thread *current = current_thread();
Raul E Rangelcc01da52021-07-12 13:43:48 -0600151 enum cb_err error;
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500152
Raul E Rangelcc01da52021-07-12 13:43:48 -0600153 error = current->entry(current->entry_arg);
154
155 terminate_thread(current, error);
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500156}
157
/* Describes a boot state transition to hold back while a thread runs. */
struct block_boot_state {
	boot_state_t state;		/* state whose transition is blocked */
	boot_state_sequence_t seq;	/* sequence point within that state */
};
162
163/* Block the provided state until thread is complete. */
164static void asmlinkage call_wrapper_block_state(void *arg)
165{
166 struct block_boot_state *bbs = arg;
167 struct thread *current = current_thread();
Raul E Rangelcc01da52021-07-12 13:43:48 -0600168 enum cb_err error;
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500169
170 boot_state_block(bbs->state, bbs->seq);
Raul E Rangelcc01da52021-07-12 13:43:48 -0600171 error = current->entry(current->entry_arg);
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500172 boot_state_unblock(bbs->state, bbs->seq);
Raul E Rangelcc01da52021-07-12 13:43:48 -0600173 terminate_thread(current, error);
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500174}
175
176/* Prepare a thread so that it starts by executing thread_entry(thread_arg).
177 * Within thread_entry() it will call func(arg). */
Raul E Rangelcc01da52021-07-12 13:43:48 -0600178static void prepare_thread(struct thread *t, struct thread_handle *handle,
179 enum cb_err (*func)(void *), void *arg,
180 asmlinkage void (*thread_entry)(void *), void *thread_arg)
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500181{
182 /* Stash the function and argument to run. */
183 t->entry = func;
184 t->entry_arg = arg;
185
186 /* All new threads can yield by default. */
187 t->can_yield = 1;
188
Raul E Rangelcc01da52021-07-12 13:43:48 -0600189 /* Pointer used to publish the state of thread */
190 t->handle = handle;
191
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500192 arch_prepare_thread(t, thread_entry, thread_arg);
193}
194
195static void thread_resume_from_timeout(struct timeout_callback *tocb)
196{
197 struct thread *to;
198
199 to = tocb->priv;
200 schedule(to);
201}
202
203static void idle_thread_init(void)
204{
205 struct thread *t;
206
207 t = get_free_thread();
208
Lee Leahy2f919ec2017-03-08 17:37:06 -0800209 if (t == NULL)
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500210 die("No threads available for idle thread!\n");
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500211
212 /* Queue idle thread to run once all other threads have yielded. */
Raul E Rangelcc01da52021-07-12 13:43:48 -0600213 prepare_thread(t, NULL, idle_thread, NULL, call_wrapper, NULL);
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500214 push_runnable(t);
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500215}
216
/* Don't inline this function so the timeout_callback won't have its storage
 * space on the stack cleaned up before the call to schedule(). */
static int __attribute__((noinline))
thread_yield_timed_callback(struct timeout_callback *tocb,
			    unsigned int microsecs)
{
	/* Arrange for this thread to be rescheduled once the timer fires. */
	tocb->priv = current_thread();
	tocb->callback = thread_resume_from_timeout;

	/* Nonzero means the callback could not be scheduled; don't block. */
	if (timer_sched_callback(tocb, microsecs))
		return -1;

	/* The timer callback will wake up the current thread. */
	schedule(NULL);
	return 0;
}
233
234static void *thread_alloc_space(struct thread *t, size_t bytes)
235{
236 /* Allocate the amount of space on the stack keeping the stack
237 * aligned to the pointer size. */
238 t->stack_current -= ALIGN_UP(bytes, sizeof(uintptr_t));
239
240 return (void *)t->stack_current;
241}
242
Raul E Rangela2d83c682021-07-22 11:16:19 -0600243static void threads_initialize(void)
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500244{
245 int i;
246 struct thread *t;
Ronald G. Minnich34352d12013-08-21 16:03:32 -0700247 u8 *stack_top;
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500248
Raul E Rangela2d83c682021-07-22 11:16:19 -0600249 if (initialized)
250 return;
251
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500252 t = &all_threads[0];
Raul E Rangelc2c38f52021-10-08 13:10:38 -0600253
254 set_current_thread(t);
255
Raul E Rangeldb16ac952021-09-24 14:00:56 -0600256 t->stack_orig = (uintptr_t)NULL; /* We never free the main thread */
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500257 t->id = 0;
Raul E Rangelb95369c2021-07-15 17:28:13 -0600258 t->can_yield = 1;
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500259
Raul E Rangelc842c592021-09-13 14:24:55 -0600260 stack_top = &thread_stacks[CONFIG_STACK_SIZE];
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500261 for (i = 1; i < TOTAL_NUM_THREADS; i++) {
262 t = &all_threads[i];
263 t->stack_orig = (uintptr_t)stack_top;
264 t->id = i;
265 stack_top += CONFIG_STACK_SIZE;
266 free_thread(t);
267 }
268
269 idle_thread_init();
Raul E Rangelb95369c2021-07-15 17:28:13 -0600270
271 initialized = 1;
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500272}
273
Raul E Rangelcc01da52021-07-12 13:43:48 -0600274int thread_run(struct thread_handle *handle, enum cb_err (*func)(void *), void *arg)
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500275{
276 struct thread *current;
277 struct thread *t;
278
Raul E Rangela2d83c682021-07-22 11:16:19 -0600279 /* Lazy initialization */
280 threads_initialize();
281
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500282 current = current_thread();
283
284 if (!thread_can_yield(current)) {
285 printk(BIOS_ERR,
Raul E Rangel58618c22021-11-02 13:45:16 -0600286 "ERROR: %s() called from non-yielding context!\n", __func__);
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500287 return -1;
288 }
289
290 t = get_free_thread();
291
292 if (t == NULL) {
Raul E Rangel58618c22021-11-02 13:45:16 -0600293 printk(BIOS_ERR, "ERROR: %s: No more threads!\n", __func__);
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500294 return -1;
295 }
296
Raul E Rangel4aec58d2021-07-15 13:20:58 -0600297 prepare_thread(t, handle, func, arg, call_wrapper, NULL);
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500298 schedule(t);
299
300 return 0;
301}
302
Raul E Rangelcc01da52021-07-12 13:43:48 -0600303int thread_run_until(struct thread_handle *handle, enum cb_err (*func)(void *), void *arg,
Lee Leahye20a3192017-03-09 16:21:34 -0800304 boot_state_t state, boot_state_sequence_t seq)
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500305{
306 struct thread *current;
307 struct thread *t;
308 struct block_boot_state *bbs;
309
Raul E Rangel8c892072021-07-22 12:40:26 -0600310 /* This is a ramstage specific API */
311 if (!ENV_RAMSTAGE)
312 dead_code();
313
Raul E Rangela2d83c682021-07-22 11:16:19 -0600314 /* Lazy initialization */
315 threads_initialize();
316
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500317 current = current_thread();
318
319 if (!thread_can_yield(current)) {
320 printk(BIOS_ERR,
Raul E Rangel58618c22021-11-02 13:45:16 -0600321 "ERROR: %s() called from non-yielding context!\n", __func__);
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500322 return -1;
323 }
324
325 t = get_free_thread();
326
327 if (t == NULL) {
Raul E Rangel58618c22021-11-02 13:45:16 -0600328 printk(BIOS_ERR, "ERROR: %s: No more threads!\n", __func__);
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500329 return -1;
330 }
331
332 bbs = thread_alloc_space(t, sizeof(*bbs));
333 bbs->state = state;
334 bbs->seq = seq;
Raul E Rangelcc01da52021-07-12 13:43:48 -0600335 prepare_thread(t, handle, func, arg, call_wrapper_block_state, bbs);
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500336 schedule(t);
337
338 return 0;
339}
340
/* Yield once with no minimum delay.  Returns 0 on success, -1 if the
 * current context may not yield. */
int thread_yield(void)
{
	return thread_yield_microseconds(0);
}
345
Lee Leahy75b85992017-03-08 16:34:12 -0800346int thread_yield_microseconds(unsigned int microsecs)
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500347{
348 struct thread *current;
349 struct timeout_callback tocb;
350
351 current = current_thread();
352
353 if (!thread_can_yield(current))
354 return -1;
355
356 if (thread_yield_timed_callback(&tocb, microsecs))
357 return -1;
358
359 return 0;
360}
361
Raul E Rangel9ba36ab2021-07-15 17:34:05 -0600362void thread_coop_enable(void)
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500363{
364 struct thread *current;
365
366 current = current_thread();
367
Raul E Rangelbe60a0d2021-07-15 13:52:03 -0600368 if (current == NULL)
369 return;
370
371 assert(current->can_yield <= 0);
372
373 current->can_yield++;
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500374}
375
Raul E Rangel9ba36ab2021-07-15 17:34:05 -0600376void thread_coop_disable(void)
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500377{
378 struct thread *current;
379
380 current = current_thread();
381
Raul E Rangelbe60a0d2021-07-15 13:52:03 -0600382 if (current == NULL)
383 return;
384
385 current->can_yield--;
Aaron Durbin4409a5e2013-05-06 12:20:52 -0500386}
Raul E Rangelb29f9d42021-07-12 13:49:59 -0600387
Raul E Rangelcc01da52021-07-12 13:43:48 -0600388enum cb_err thread_join(struct thread_handle *handle)
389{
390 struct stopwatch sw;
391 struct thread *current = current_thread();
392
393 assert(handle);
394 assert(current);
395 assert(current->handle != handle);
396
397 if (handle->state == THREAD_UNINITIALIZED)
398 return CB_ERR_ARG;
399
Raul E Rangelcc01da52021-07-12 13:43:48 -0600400 printk(BIOS_SPEW, "waiting for thread\n");
401
Raul E Rangelfae525f2021-11-04 15:57:00 -0600402 stopwatch_init(&sw);
403
Raul E Rangelcc01da52021-07-12 13:43:48 -0600404 while (handle->state != THREAD_DONE)
405 assert(thread_yield() == 0);
406
407 printk(BIOS_SPEW, "took %lu us\n", stopwatch_duration_usecs(&sw));
408
409 return handle->error;
410}
411
/* Acquire mutex, yielding to other threads while it is held elsewhere.
 * NOTE(review): no atomics — presumably safe because threading is
 * cooperative and runs only on the boot CPU; verify that assumption. */
void thread_mutex_lock(struct thread_mutex *mutex)
{
	struct stopwatch sw;

	stopwatch_init(&sw);

	/* thread_yield() returns nonzero only from a non-yielding context. */
	while (mutex->locked)
		assert(thread_yield() == 0);
	mutex->locked = true;

	printk(BIOS_SPEW, "took %lu us to acquire mutex\n", stopwatch_duration_usecs(&sw));
}
424
425void thread_mutex_unlock(struct thread_mutex *mutex)
426{
427 assert(mutex->locked);
428 mutex->locked = 0;
429}