Angel Pons | 118a9c7 | 2020-04-02 23:48:34 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
Elyes HAOUAS | add76f9 | 2019-03-21 09:55:49 +0100 | [diff] [blame] | 2 | |
Raul E Rangel | b29f9d4 | 2021-07-12 13:49:59 -0600 | [diff] [blame] | 3 | #include <assert.h> |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 4 | #include <stddef.h> |
| 5 | #include <stdint.h> |
| 6 | #include <stdlib.h> |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 7 | #include <bootstate.h> |
| 8 | #include <console/console.h> |
Raul E Rangel | c2c38f5 | 2021-10-08 13:10:38 -0600 | [diff] [blame] | 9 | #include <smp/node.h> |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 10 | #include <thread.h> |
Elyes HAOUAS | add76f9 | 2019-03-21 09:55:49 +0100 | [diff] [blame] | 11 | #include <timer.h> |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 12 | |
/* Backing stacks for the non-main threads: one CONFIG_STACK_SIZE slice per
 * thread, aligned so the initial stack pointer is 64-bit aligned. */
static u8 thread_stacks[CONFIG_STACK_SIZE * CONFIG_NUM_THREADS] __aligned(sizeof(uint64_t));
/* Set once threads_initialize() has completed; guards lazy initialization. */
static bool initialized;

static void idle_thread_init(void);

/* There needs to be at least one thread to run the ramstage boot state machine. */
#define TOTAL_NUM_THREADS (CONFIG_NUM_THREADS + 1)

/* Storage space for the thread structs. */
static struct thread all_threads[TOTAL_NUM_THREADS];

/* All runnable (but not running) and free threads are kept on their
 * respective lists. */
static struct thread *runnable_threads;
static struct thread *free_threads;

/* The thread currently executing on the boot CPU. */
static struct thread *active_thread;
| 30 | |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 31 | static inline int thread_can_yield(const struct thread *t) |
| 32 | { |
Raul E Rangel | be60a0d | 2021-07-15 13:52:03 -0600 | [diff] [blame] | 33 | return (t != NULL && t->can_yield > 0); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 34 | } |
| 35 | |
/* Record t as the thread now executing. Threading only runs on the boot
 * CPU, so this must never be reached from an AP. */
static inline void set_current_thread(struct thread *t)
{
	assert(boot_cpu());
	active_thread = t;
}
| 41 | |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 42 | static inline struct thread *current_thread(void) |
| 43 | { |
Raul E Rangel | c2c38f5 | 2021-10-08 13:10:38 -0600 | [diff] [blame] | 44 | if (!initialized || !boot_cpu()) |
Raul E Rangel | 000138e6 | 2021-07-14 11:44:51 -0600 | [diff] [blame] | 45 | return NULL; |
| 46 | |
Raul E Rangel | c2c38f5 | 2021-10-08 13:10:38 -0600 | [diff] [blame] | 47 | return active_thread; |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 48 | } |
| 49 | |
/* An empty list is one whose head pointer is NULL. */
static inline int thread_list_empty(struct thread **list)
{
	return !*list;
}
| 54 | |
| 55 | static inline struct thread *pop_thread(struct thread **list) |
| 56 | { |
| 57 | struct thread *t; |
| 58 | |
| 59 | t = *list; |
| 60 | *list = t->next; |
| 61 | t->next = NULL; |
| 62 | return t; |
| 63 | } |
| 64 | |
| 65 | static inline void push_thread(struct thread **list, struct thread *t) |
| 66 | { |
| 67 | t->next = *list; |
| 68 | *list = t; |
| 69 | } |
| 70 | |
/* Queue t so a later schedule() can pick it up. */
static inline void push_runnable(struct thread *t)
{
	push_thread(&runnable_threads, t);
}
| 75 | |
/* Take the next runnable thread. Callers (see schedule()) must check the
 * list is non-empty first. */
static inline struct thread *pop_runnable(void)
{
	return pop_thread(&runnable_threads);
}
| 80 | |
| 81 | static inline struct thread *get_free_thread(void) |
| 82 | { |
| 83 | struct thread *t; |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 84 | |
| 85 | if (thread_list_empty(&free_threads)) |
| 86 | return NULL; |
| 87 | |
| 88 | t = pop_thread(&free_threads); |
| 89 | |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 90 | /* Reset the current stack value to the original. */ |
Raul E Rangel | db16ac95 | 2021-09-24 14:00:56 -0600 | [diff] [blame] | 91 | if (!t->stack_orig) |
| 92 | die("%s: Invalid stack value\n", __func__); |
| 93 | |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 94 | t->stack_current = t->stack_orig; |
| 95 | |
| 96 | return t; |
| 97 | } |
| 98 | |
/* Return t to the free pool for reuse by a later thread_run(). */
static inline void free_thread(struct thread *t)
{
	push_thread(&free_threads, t);
}
| 103 | |
| 104 | /* The idle thread is ran whenever there isn't anything else that is runnable. |
| 105 | * It's sole responsibility is to ensure progress is made by running the timer |
| 106 | * callbacks. */ |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 107 | __noreturn static enum cb_err idle_thread(void *unused) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 108 | { |
| 109 | /* This thread never voluntarily yields. */ |
Raul E Rangel | 9ba36ab | 2021-07-15 17:34:05 -0600 | [diff] [blame] | 110 | thread_coop_disable(); |
Lee Leahy | 2f919ec | 2017-03-08 17:37:06 -0800 | [diff] [blame] | 111 | while (1) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 112 | timers_run(); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 113 | } |
| 114 | |
/* Switch execution to thread t, or to the next runnable thread when t is
 * NULL. With a non-NULL t the calling thread remains runnable and is
 * requeued; with NULL the caller is assumed blocked or terminated and is
 * NOT requeued. Control returns to the caller only when something later
 * schedules it again. */
static void schedule(struct thread *t)
{
	struct thread *current = current_thread();

	/* If t is NULL need to find new runnable thread. */
	if (t == NULL) {
		if (thread_list_empty(&runnable_threads))
			die("Runnable thread list is empty!\n");
		t = pop_runnable();
	} else {
		/* current is still runnable. */
		push_runnable(current);
	}

	/* Publish that the target thread has been given CPU time. */
	if (t->handle)
		t->handle->state = THREAD_STARTED;

	set_current_thread(t);

	/* Save our stack pointer and resume on t's stack. */
	switch_to_thread(t->stack_current, &current->stack_current);
}
| 136 | |
/* Publish the thread's result through its handle (if any), recycle the
 * thread, and switch away. Does not return to the caller: the freed thread
 * is never rescheduled. */
static void terminate_thread(struct thread *t, enum cb_err error)
{
	if (t->handle) {
		t->handle->error = error;
		t->handle->state = THREAD_DONE;
	}

	free_thread(t);
	schedule(NULL);
}
| 147 | |
| 148 | static void asmlinkage call_wrapper(void *unused) |
| 149 | { |
| 150 | struct thread *current = current_thread(); |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 151 | enum cb_err error; |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 152 | |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 153 | error = current->entry(current->entry_arg); |
| 154 | |
| 155 | terminate_thread(current, error); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 156 | } |
| 157 | |
/* Argument bundle for call_wrapper_block_state(): which boot state and
 * sequence to block while the wrapped function runs. */
struct block_boot_state {
	boot_state_t state;
	boot_state_sequence_t seq;
};
| 162 | |
/* Block the provided state until thread is complete. The block/unblock pair
 * brackets the stashed entry function so the boot state machine cannot pass
 * bbs->state/seq while the thread is still working. */
static void asmlinkage call_wrapper_block_state(void *arg)
{
	struct block_boot_state *bbs = arg;
	struct thread *current = current_thread();
	enum cb_err error;

	boot_state_block(bbs->state, bbs->seq);
	error = current->entry(current->entry_arg);
	boot_state_unblock(bbs->state, bbs->seq);
	terminate_thread(current, error);
}
| 175 | |
| 176 | /* Prepare a thread so that it starts by executing thread_entry(thread_arg). |
| 177 | * Within thread_entry() it will call func(arg). */ |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 178 | static void prepare_thread(struct thread *t, struct thread_handle *handle, |
| 179 | enum cb_err (*func)(void *), void *arg, |
| 180 | asmlinkage void (*thread_entry)(void *), void *thread_arg) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 181 | { |
| 182 | /* Stash the function and argument to run. */ |
| 183 | t->entry = func; |
| 184 | t->entry_arg = arg; |
| 185 | |
| 186 | /* All new threads can yield by default. */ |
| 187 | t->can_yield = 1; |
| 188 | |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 189 | /* Pointer used to publish the state of thread */ |
| 190 | t->handle = handle; |
| 191 | |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 192 | arch_prepare_thread(t, thread_entry, thread_arg); |
| 193 | } |
| 194 | |
| 195 | static void thread_resume_from_timeout(struct timeout_callback *tocb) |
| 196 | { |
| 197 | struct thread *to; |
| 198 | |
| 199 | to = tocb->priv; |
| 200 | schedule(to); |
| 201 | } |
| 202 | |
| 203 | static void idle_thread_init(void) |
| 204 | { |
| 205 | struct thread *t; |
| 206 | |
| 207 | t = get_free_thread(); |
| 208 | |
Lee Leahy | 2f919ec | 2017-03-08 17:37:06 -0800 | [diff] [blame] | 209 | if (t == NULL) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 210 | die("No threads available for idle thread!\n"); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 211 | |
| 212 | /* Queue idle thread to run once all other threads have yielded. */ |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 213 | prepare_thread(t, NULL, idle_thread, NULL, call_wrapper, NULL); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 214 | push_runnable(t); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 215 | } |
| 216 | |
/* Don't inline this function so the timeout_callback won't have its storage
 * space on the stack cleaned up before the call to schedule(). */
static int __attribute__((noinline))
thread_yield_timed_callback(struct timeout_callback *tocb,
	unsigned int microsecs)
{
	/* Arrange for thread_resume_from_timeout() to wake this thread. */
	tocb->priv = current_thread();
	tocb->callback = thread_resume_from_timeout;

	if (timer_sched_callback(tocb, microsecs))
		return -1;

	/* The timer callback will wake up the current thread. */
	schedule(NULL);
	return 0;
}
| 233 | |
/* Reserve bytes at the top of t's not-yet-started stack and return a
 * pointer to the reserved region (see thread_run_until()). */
static void *thread_alloc_space(struct thread *t, size_t bytes)
{
	/* Allocate the amount of space on the stack keeping the stack
	 * aligned to the pointer size. */
	t->stack_current -= ALIGN_UP(bytes, sizeof(uintptr_t));

	return (void *)t->stack_current;
}
| 242 | |
/* One-time setup: thread 0 becomes the currently running (main) thread and
 * every other slot gets a stack slice out of thread_stacks[] and goes on
 * the free list. Subsequent calls are no-ops. */
static void threads_initialize(void)
{
	int i;
	struct thread *t;
	u8 *stack_top;

	if (initialized)
		return;

	t = &all_threads[0];

	set_current_thread(t);

	t->stack_orig = (uintptr_t)NULL; /* We never free the main thread */
	t->id = 0;
	t->can_yield = 1;

	/* stack_orig is the high end of each slice; presumably stacks grow
	 * down from there (see get_free_thread()) -- confirm per arch. */
	stack_top = &thread_stacks[CONFIG_STACK_SIZE];
	for (i = 1; i < TOTAL_NUM_THREADS; i++) {
		t = &all_threads[i];
		t->stack_orig = (uintptr_t)stack_top;
		t->id = i;
		stack_top += CONFIG_STACK_SIZE;
		free_thread(t);
	}

	idle_thread_init();

	/* Set last so current_thread() stays NULL until setup is complete. */
	initialized = 1;
}
| 273 | |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 274 | int thread_run(struct thread_handle *handle, enum cb_err (*func)(void *), void *arg) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 275 | { |
| 276 | struct thread *current; |
| 277 | struct thread *t; |
| 278 | |
Raul E Rangel | a2d83c68 | 2021-07-22 11:16:19 -0600 | [diff] [blame] | 279 | /* Lazy initialization */ |
| 280 | threads_initialize(); |
| 281 | |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 282 | current = current_thread(); |
| 283 | |
| 284 | if (!thread_can_yield(current)) { |
| 285 | printk(BIOS_ERR, |
Raul E Rangel | 58618c2 | 2021-11-02 13:45:16 -0600 | [diff] [blame] | 286 | "ERROR: %s() called from non-yielding context!\n", __func__); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 287 | return -1; |
| 288 | } |
| 289 | |
| 290 | t = get_free_thread(); |
| 291 | |
| 292 | if (t == NULL) { |
Raul E Rangel | 58618c2 | 2021-11-02 13:45:16 -0600 | [diff] [blame] | 293 | printk(BIOS_ERR, "ERROR: %s: No more threads!\n", __func__); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 294 | return -1; |
| 295 | } |
| 296 | |
Raul E Rangel | 4aec58d | 2021-07-15 13:20:58 -0600 | [diff] [blame] | 297 | prepare_thread(t, handle, func, arg, call_wrapper, NULL); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 298 | schedule(t); |
| 299 | |
| 300 | return 0; |
| 301 | } |
| 302 | |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 303 | int thread_run_until(struct thread_handle *handle, enum cb_err (*func)(void *), void *arg, |
Lee Leahy | e20a319 | 2017-03-09 16:21:34 -0800 | [diff] [blame] | 304 | boot_state_t state, boot_state_sequence_t seq) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 305 | { |
| 306 | struct thread *current; |
| 307 | struct thread *t; |
| 308 | struct block_boot_state *bbs; |
| 309 | |
Raul E Rangel | 8c89207 | 2021-07-22 12:40:26 -0600 | [diff] [blame] | 310 | /* This is a ramstage specific API */ |
| 311 | if (!ENV_RAMSTAGE) |
| 312 | dead_code(); |
| 313 | |
Raul E Rangel | a2d83c68 | 2021-07-22 11:16:19 -0600 | [diff] [blame] | 314 | /* Lazy initialization */ |
| 315 | threads_initialize(); |
| 316 | |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 317 | current = current_thread(); |
| 318 | |
| 319 | if (!thread_can_yield(current)) { |
| 320 | printk(BIOS_ERR, |
Raul E Rangel | 58618c2 | 2021-11-02 13:45:16 -0600 | [diff] [blame] | 321 | "ERROR: %s() called from non-yielding context!\n", __func__); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 322 | return -1; |
| 323 | } |
| 324 | |
| 325 | t = get_free_thread(); |
| 326 | |
| 327 | if (t == NULL) { |
Raul E Rangel | 58618c2 | 2021-11-02 13:45:16 -0600 | [diff] [blame] | 328 | printk(BIOS_ERR, "ERROR: %s: No more threads!\n", __func__); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 329 | return -1; |
| 330 | } |
| 331 | |
| 332 | bbs = thread_alloc_space(t, sizeof(*bbs)); |
| 333 | bbs->state = state; |
| 334 | bbs->seq = seq; |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 335 | prepare_thread(t, handle, func, arg, call_wrapper_block_state, bbs); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 336 | schedule(t); |
| 337 | |
| 338 | return 0; |
| 339 | } |
| 340 | |
/* Yield the CPU immediately (zero-microsecond timed yield). */
int thread_yield(void)
{
	return thread_yield_microseconds(0);
}
| 345 | |
Lee Leahy | 75b8599 | 2017-03-08 16:34:12 -0800 | [diff] [blame] | 346 | int thread_yield_microseconds(unsigned int microsecs) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 347 | { |
| 348 | struct thread *current; |
| 349 | struct timeout_callback tocb; |
| 350 | |
| 351 | current = current_thread(); |
| 352 | |
| 353 | if (!thread_can_yield(current)) |
| 354 | return -1; |
| 355 | |
| 356 | if (thread_yield_timed_callback(&tocb, microsecs)) |
| 357 | return -1; |
| 358 | |
| 359 | return 0; |
| 360 | } |
| 361 | |
Raul E Rangel | 9ba36ab | 2021-07-15 17:34:05 -0600 | [diff] [blame] | 362 | void thread_coop_enable(void) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 363 | { |
| 364 | struct thread *current; |
| 365 | |
| 366 | current = current_thread(); |
| 367 | |
Raul E Rangel | be60a0d | 2021-07-15 13:52:03 -0600 | [diff] [blame] | 368 | if (current == NULL) |
| 369 | return; |
| 370 | |
| 371 | assert(current->can_yield <= 0); |
| 372 | |
| 373 | current->can_yield++; |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 374 | } |
| 375 | |
Raul E Rangel | 9ba36ab | 2021-07-15 17:34:05 -0600 | [diff] [blame] | 376 | void thread_coop_disable(void) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 377 | { |
| 378 | struct thread *current; |
| 379 | |
| 380 | current = current_thread(); |
| 381 | |
Raul E Rangel | be60a0d | 2021-07-15 13:52:03 -0600 | [diff] [blame] | 382 | if (current == NULL) |
| 383 | return; |
| 384 | |
| 385 | current->can_yield--; |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 386 | } |
Raul E Rangel | b29f9d4 | 2021-07-12 13:49:59 -0600 | [diff] [blame] | 387 | |
/* Busy-wait (yielding) until the thread behind handle finishes; return its
 * result. Must be called from a yielding context; a thread must not join
 * its own handle. Returns CB_ERR_ARG if the handle was never started. */
enum cb_err thread_join(struct thread_handle *handle)
{
	struct stopwatch sw;
	struct thread *current = current_thread();

	assert(handle);
	assert(current);
	assert(current->handle != handle);

	if (handle->state == THREAD_UNINITIALIZED)
		return CB_ERR_ARG;

	printk(BIOS_SPEW, "waiting for thread\n");

	stopwatch_init(&sw);

	/* thread_yield() only fails in a non-yielding context, which the
	 * asserts above should have excluded. NOTE(review): assumes this
	 * build's assert() always evaluates its argument -- confirm. */
	while (handle->state != THREAD_DONE)
		assert(thread_yield() == 0);

	printk(BIOS_SPEW, "took %lu us\n", stopwatch_duration_usecs(&sw));

	return handle->error;
}
| 411 | |
/* Acquire mutex, yielding while it is held elsewhere. This is a cooperative
 * spin-and-yield loop, not an atomic lock; it presumably relies on threads
 * only switching at explicit yield points on a single CPU -- do not use
 * across CPUs. */
void thread_mutex_lock(struct thread_mutex *mutex)
{
	struct stopwatch sw;

	stopwatch_init(&sw);

	/* NOTE(review): assumes assert() always evaluates thread_yield(). */
	while (mutex->locked)
		assert(thread_yield() == 0);
	mutex->locked = true;

	printk(BIOS_SPEW, "took %lu us to acquire mutex\n", stopwatch_duration_usecs(&sw));
}
| 424 | |
| 425 | void thread_mutex_unlock(struct thread_mutex *mutex) |
| 426 | { |
| 427 | assert(mutex->locked); |
| 428 | mutex->locked = 0; |
| 429 | } |