Angel Pons | 118a9c7 | 2020-04-02 23:48:34 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
Elyes HAOUAS | add76f9 | 2019-03-21 09:55:49 +0100 | [diff] [blame] | 2 | |
Raul E Rangel | b29f9d4 | 2021-07-12 13:49:59 -0600 | [diff] [blame] | 3 | #include <assert.h> |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 4 | #include <bootstate.h> |
| 5 | #include <console/console.h> |
Raul E Rangel | c2c38f5 | 2021-10-08 13:10:38 -0600 | [diff] [blame] | 6 | #include <smp/node.h> |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 7 | #include <thread.h> |
Elyes HAOUAS | add76f9 | 2019-03-21 09:55:49 +0100 | [diff] [blame] | 8 | #include <timer.h> |
Elyes HAOUAS | 93a195c | 2021-12-31 18:46:13 +0100 | [diff] [blame] | 9 | #include <types.h> |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 10 | |
Raul E Rangel | db16ac95 | 2021-09-24 14:00:56 -0600 | [diff] [blame] | 11 | static u8 thread_stacks[CONFIG_STACK_SIZE * CONFIG_NUM_THREADS] __aligned(sizeof(uint64_t)); |
Raul E Rangel | 000138e6 | 2021-07-14 11:44:51 -0600 | [diff] [blame] | 12 | static bool initialized; |
| 13 | |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 14 | static void idle_thread_init(void); |
| 15 | |
| 16 | /* There needs to be at least one thread to run the ramstate state machine. */ |
| 17 | #define TOTAL_NUM_THREADS (CONFIG_NUM_THREADS + 1) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 18 | |
| 19 | /* Storage space for the thread structs .*/ |
| 20 | static struct thread all_threads[TOTAL_NUM_THREADS]; |
| 21 | |
| 22 | /* All runnable (but not running) and free threads are kept on their |
| 23 | * respective lists. */ |
| 24 | static struct thread *runnable_threads; |
| 25 | static struct thread *free_threads; |
| 26 | |
Raul E Rangel | c2c38f5 | 2021-10-08 13:10:38 -0600 | [diff] [blame] | 27 | static struct thread *active_thread; |
| 28 | |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 29 | static inline int thread_can_yield(const struct thread *t) |
| 30 | { |
Raul E Rangel | be60a0d | 2021-07-15 13:52:03 -0600 | [diff] [blame] | 31 | return (t != NULL && t->can_yield > 0); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 32 | } |
| 33 | |
Raul E Rangel | c2c38f5 | 2021-10-08 13:10:38 -0600 | [diff] [blame] | 34 | static inline void set_current_thread(struct thread *t) |
| 35 | { |
| 36 | assert(boot_cpu()); |
| 37 | active_thread = t; |
| 38 | } |
| 39 | |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 40 | static inline struct thread *current_thread(void) |
| 41 | { |
Raul E Rangel | c2c38f5 | 2021-10-08 13:10:38 -0600 | [diff] [blame] | 42 | if (!initialized || !boot_cpu()) |
Raul E Rangel | 000138e6 | 2021-07-14 11:44:51 -0600 | [diff] [blame] | 43 | return NULL; |
| 44 | |
Raul E Rangel | c2c38f5 | 2021-10-08 13:10:38 -0600 | [diff] [blame] | 45 | return active_thread; |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 46 | } |
| 47 | |
| 48 | static inline int thread_list_empty(struct thread **list) |
| 49 | { |
| 50 | return *list == NULL; |
| 51 | } |
| 52 | |
| 53 | static inline struct thread *pop_thread(struct thread **list) |
| 54 | { |
| 55 | struct thread *t; |
| 56 | |
| 57 | t = *list; |
| 58 | *list = t->next; |
| 59 | t->next = NULL; |
| 60 | return t; |
| 61 | } |
| 62 | |
| 63 | static inline void push_thread(struct thread **list, struct thread *t) |
| 64 | { |
| 65 | t->next = *list; |
| 66 | *list = t; |
| 67 | } |
| 68 | |
/* Add t to the set of threads eligible to be scheduled. */
static inline void push_runnable(struct thread *t)
{
	push_thread(&runnable_threads, t);
}
| 73 | |
/* Take the next runnable thread off the list (caller checks for emptiness). */
static inline struct thread *pop_runnable(void)
{
	return pop_thread(&runnable_threads);
}
| 78 | |
| 79 | static inline struct thread *get_free_thread(void) |
| 80 | { |
| 81 | struct thread *t; |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 82 | |
| 83 | if (thread_list_empty(&free_threads)) |
| 84 | return NULL; |
| 85 | |
| 86 | t = pop_thread(&free_threads); |
| 87 | |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 88 | /* Reset the current stack value to the original. */ |
Raul E Rangel | db16ac95 | 2021-09-24 14:00:56 -0600 | [diff] [blame] | 89 | if (!t->stack_orig) |
| 90 | die("%s: Invalid stack value\n", __func__); |
| 91 | |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 92 | t->stack_current = t->stack_orig; |
| 93 | |
| 94 | return t; |
| 95 | } |
| 96 | |
/* Return t to the free pool for reuse by a later thread_run(). */
static inline void free_thread(struct thread *t)
{
	push_thread(&free_threads, t);
}
| 101 | |
| 102 | /* The idle thread is ran whenever there isn't anything else that is runnable. |
| 103 | * It's sole responsibility is to ensure progress is made by running the timer |
| 104 | * callbacks. */ |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 105 | __noreturn static enum cb_err idle_thread(void *unused) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 106 | { |
| 107 | /* This thread never voluntarily yields. */ |
Raul E Rangel | 9ba36ab | 2021-07-15 17:34:05 -0600 | [diff] [blame] | 108 | thread_coop_disable(); |
Lee Leahy | 2f919ec | 2017-03-08 17:37:06 -0800 | [diff] [blame] | 109 | while (1) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 110 | timers_run(); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 111 | } |
| 112 | |
/* Switch execution to thread t, or to the next runnable thread when t is
 * NULL. When t is given, the caller stays runnable and is queued; when t is
 * NULL the caller is assumed blocked (or terminating) and is NOT requeued. */
static void schedule(struct thread *t)
{
	struct thread *current = current_thread();

	/* If t is NULL need to find new runnable thread. */
	if (t == NULL) {
		if (thread_list_empty(&runnable_threads))
			die("Runnable thread list is empty!\n");
		t = pop_runnable();
	} else {
		/* current is still runnable. */
		push_runnable(current);
	}

	/* Publish that the target thread is now executing. */
	if (t->handle)
		t->handle->state = THREAD_STARTED;

	set_current_thread(t);

	/* Saves the caller's context into current->stack_current and resumes
	 * t from t->stack_current; returns only when someone switches back. */
	switch_to_thread(t->stack_current, &current->stack_current);
}
| 134 | |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 135 | static void terminate_thread(struct thread *t, enum cb_err error) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 136 | { |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 137 | if (t->handle) { |
| 138 | t->handle->error = error; |
| 139 | t->handle->state = THREAD_DONE; |
| 140 | } |
| 141 | |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 142 | free_thread(t); |
| 143 | schedule(NULL); |
| 144 | } |
| 145 | |
Elyes Haouas | e6940c0 | 2024-03-30 09:51:05 +0100 | [diff] [blame] | 146 | static asmlinkage void call_wrapper(void *unused) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 147 | { |
| 148 | struct thread *current = current_thread(); |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 149 | enum cb_err error; |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 150 | |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 151 | error = current->entry(current->entry_arg); |
| 152 | |
| 153 | terminate_thread(current, error); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 154 | } |
| 155 | |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 156 | struct block_boot_state { |
| 157 | boot_state_t state; |
| 158 | boot_state_sequence_t seq; |
| 159 | }; |
| 160 | |
| 161 | /* Block the provided state until thread is complete. */ |
Elyes Haouas | e6940c0 | 2024-03-30 09:51:05 +0100 | [diff] [blame] | 162 | static asmlinkage void call_wrapper_block_state(void *arg) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 163 | { |
| 164 | struct block_boot_state *bbs = arg; |
| 165 | struct thread *current = current_thread(); |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 166 | enum cb_err error; |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 167 | |
| 168 | boot_state_block(bbs->state, bbs->seq); |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 169 | error = current->entry(current->entry_arg); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 170 | boot_state_unblock(bbs->state, bbs->seq); |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 171 | terminate_thread(current, error); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 172 | } |
| 173 | |
| 174 | /* Prepare a thread so that it starts by executing thread_entry(thread_arg). |
| 175 | * Within thread_entry() it will call func(arg). */ |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 176 | static void prepare_thread(struct thread *t, struct thread_handle *handle, |
| 177 | enum cb_err (*func)(void *), void *arg, |
| 178 | asmlinkage void (*thread_entry)(void *), void *thread_arg) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 179 | { |
| 180 | /* Stash the function and argument to run. */ |
| 181 | t->entry = func; |
| 182 | t->entry_arg = arg; |
| 183 | |
| 184 | /* All new threads can yield by default. */ |
| 185 | t->can_yield = 1; |
| 186 | |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 187 | /* Pointer used to publish the state of thread */ |
| 188 | t->handle = handle; |
| 189 | |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 190 | arch_prepare_thread(t, thread_entry, thread_arg); |
| 191 | } |
| 192 | |
| 193 | static void thread_resume_from_timeout(struct timeout_callback *tocb) |
| 194 | { |
| 195 | struct thread *to; |
| 196 | |
| 197 | to = tocb->priv; |
| 198 | schedule(to); |
| 199 | } |
| 200 | |
| 201 | static void idle_thread_init(void) |
| 202 | { |
| 203 | struct thread *t; |
| 204 | |
| 205 | t = get_free_thread(); |
| 206 | |
Lee Leahy | 2f919ec | 2017-03-08 17:37:06 -0800 | [diff] [blame] | 207 | if (t == NULL) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 208 | die("No threads available for idle thread!\n"); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 209 | |
| 210 | /* Queue idle thread to run once all other threads have yielded. */ |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 211 | prepare_thread(t, NULL, idle_thread, NULL, call_wrapper, NULL); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 212 | push_runnable(t); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 213 | } |
| 214 | |
| 215 | /* Don't inline this function so the timeout_callback won't have its storage |
| 216 | * space on the stack cleaned up before the call to schedule(). */ |
/* Don't inline this function so the timeout_callback won't have its storage
 * space on the stack cleaned up before the call to schedule(). */
static int __attribute__((noinline))
thread_yield_timed_callback(struct timeout_callback *tocb,
	unsigned int microsecs)
{
	/* tocb->priv carries the yielding thread to thread_resume_from_timeout. */
	tocb->priv = current_thread();
	tocb->callback = thread_resume_from_timeout;

	/* Non-zero return means no timer slot was available; caller cannot yield. */
	if (timer_sched_callback(tocb, microsecs))
		return -1;

	/* The timer callback will wake up the current thread. */
	schedule(NULL);
	return 0;
}
| 231 | |
| 232 | static void *thread_alloc_space(struct thread *t, size_t bytes) |
| 233 | { |
| 234 | /* Allocate the amount of space on the stack keeping the stack |
| 235 | * aligned to the pointer size. */ |
| 236 | t->stack_current -= ALIGN_UP(bytes, sizeof(uintptr_t)); |
| 237 | |
| 238 | return (void *)t->stack_current; |
| 239 | } |
| 240 | |
Raul E Rangel | a2d83c68 | 2021-07-22 11:16:19 -0600 | [diff] [blame] | 241 | static void threads_initialize(void) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 242 | { |
| 243 | int i; |
| 244 | struct thread *t; |
Ronald G. Minnich | 34352d1 | 2013-08-21 16:03:32 -0700 | [diff] [blame] | 245 | u8 *stack_top; |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 246 | |
Raul E Rangel | a2d83c68 | 2021-07-22 11:16:19 -0600 | [diff] [blame] | 247 | if (initialized) |
| 248 | return; |
| 249 | |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 250 | t = &all_threads[0]; |
Raul E Rangel | c2c38f5 | 2021-10-08 13:10:38 -0600 | [diff] [blame] | 251 | |
| 252 | set_current_thread(t); |
| 253 | |
Raul E Rangel | db16ac95 | 2021-09-24 14:00:56 -0600 | [diff] [blame] | 254 | t->stack_orig = (uintptr_t)NULL; /* We never free the main thread */ |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 255 | t->id = 0; |
Raul E Rangel | b95369c | 2021-07-15 17:28:13 -0600 | [diff] [blame] | 256 | t->can_yield = 1; |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 257 | |
Raul E Rangel | c842c59 | 2021-09-13 14:24:55 -0600 | [diff] [blame] | 258 | stack_top = &thread_stacks[CONFIG_STACK_SIZE]; |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 259 | for (i = 1; i < TOTAL_NUM_THREADS; i++) { |
| 260 | t = &all_threads[i]; |
| 261 | t->stack_orig = (uintptr_t)stack_top; |
| 262 | t->id = i; |
| 263 | stack_top += CONFIG_STACK_SIZE; |
| 264 | free_thread(t); |
| 265 | } |
| 266 | |
| 267 | idle_thread_init(); |
Raul E Rangel | b95369c | 2021-07-15 17:28:13 -0600 | [diff] [blame] | 268 | |
| 269 | initialized = 1; |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 270 | } |
| 271 | |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 272 | int thread_run(struct thread_handle *handle, enum cb_err (*func)(void *), void *arg) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 273 | { |
| 274 | struct thread *current; |
| 275 | struct thread *t; |
| 276 | |
Raul E Rangel | a2d83c68 | 2021-07-22 11:16:19 -0600 | [diff] [blame] | 277 | /* Lazy initialization */ |
| 278 | threads_initialize(); |
| 279 | |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 280 | current = current_thread(); |
| 281 | |
| 282 | if (!thread_can_yield(current)) { |
Julius Werner | e966595 | 2022-01-21 17:06:20 -0800 | [diff] [blame] | 283 | printk(BIOS_ERR, "%s() called from non-yielding context!\n", __func__); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 284 | return -1; |
| 285 | } |
| 286 | |
| 287 | t = get_free_thread(); |
| 288 | |
| 289 | if (t == NULL) { |
Julius Werner | e966595 | 2022-01-21 17:06:20 -0800 | [diff] [blame] | 290 | printk(BIOS_ERR, "%s: No more threads!\n", __func__); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 291 | return -1; |
| 292 | } |
| 293 | |
Raul E Rangel | 4aec58d | 2021-07-15 13:20:58 -0600 | [diff] [blame] | 294 | prepare_thread(t, handle, func, arg, call_wrapper, NULL); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 295 | schedule(t); |
| 296 | |
| 297 | return 0; |
| 298 | } |
| 299 | |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 300 | int thread_run_until(struct thread_handle *handle, enum cb_err (*func)(void *), void *arg, |
Lee Leahy | e20a319 | 2017-03-09 16:21:34 -0800 | [diff] [blame] | 301 | boot_state_t state, boot_state_sequence_t seq) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 302 | { |
| 303 | struct thread *current; |
| 304 | struct thread *t; |
| 305 | struct block_boot_state *bbs; |
| 306 | |
Raul E Rangel | 8c89207 | 2021-07-22 12:40:26 -0600 | [diff] [blame] | 307 | /* This is a ramstage specific API */ |
| 308 | if (!ENV_RAMSTAGE) |
| 309 | dead_code(); |
| 310 | |
Raul E Rangel | a2d83c68 | 2021-07-22 11:16:19 -0600 | [diff] [blame] | 311 | /* Lazy initialization */ |
| 312 | threads_initialize(); |
| 313 | |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 314 | current = current_thread(); |
| 315 | |
| 316 | if (!thread_can_yield(current)) { |
Julius Werner | e966595 | 2022-01-21 17:06:20 -0800 | [diff] [blame] | 317 | printk(BIOS_ERR, "%s() called from non-yielding context!\n", __func__); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 318 | return -1; |
| 319 | } |
| 320 | |
| 321 | t = get_free_thread(); |
| 322 | |
| 323 | if (t == NULL) { |
Julius Werner | e966595 | 2022-01-21 17:06:20 -0800 | [diff] [blame] | 324 | printk(BIOS_ERR, "%s: No more threads!\n", __func__); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 325 | return -1; |
| 326 | } |
| 327 | |
| 328 | bbs = thread_alloc_space(t, sizeof(*bbs)); |
| 329 | bbs->state = state; |
| 330 | bbs->seq = seq; |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 331 | prepare_thread(t, handle, func, arg, call_wrapper_block_state, bbs); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 332 | schedule(t); |
| 333 | |
| 334 | return 0; |
| 335 | } |
| 336 | |
Raul E Rangel | d5dca21 | 2021-07-15 11:48:48 -0600 | [diff] [blame] | 337 | int thread_yield(void) |
| 338 | { |
| 339 | return thread_yield_microseconds(0); |
| 340 | } |
| 341 | |
Lee Leahy | 75b8599 | 2017-03-08 16:34:12 -0800 | [diff] [blame] | 342 | int thread_yield_microseconds(unsigned int microsecs) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 343 | { |
| 344 | struct thread *current; |
| 345 | struct timeout_callback tocb; |
| 346 | |
| 347 | current = current_thread(); |
| 348 | |
| 349 | if (!thread_can_yield(current)) |
| 350 | return -1; |
| 351 | |
| 352 | if (thread_yield_timed_callback(&tocb, microsecs)) |
| 353 | return -1; |
| 354 | |
| 355 | return 0; |
| 356 | } |
| 357 | |
Raul E Rangel | 9ba36ab | 2021-07-15 17:34:05 -0600 | [diff] [blame] | 358 | void thread_coop_enable(void) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 359 | { |
| 360 | struct thread *current; |
| 361 | |
| 362 | current = current_thread(); |
| 363 | |
Raul E Rangel | be60a0d | 2021-07-15 13:52:03 -0600 | [diff] [blame] | 364 | if (current == NULL) |
| 365 | return; |
| 366 | |
| 367 | assert(current->can_yield <= 0); |
| 368 | |
| 369 | current->can_yield++; |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 370 | } |
| 371 | |
Raul E Rangel | 9ba36ab | 2021-07-15 17:34:05 -0600 | [diff] [blame] | 372 | void thread_coop_disable(void) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 373 | { |
| 374 | struct thread *current; |
| 375 | |
| 376 | current = current_thread(); |
| 377 | |
Raul E Rangel | be60a0d | 2021-07-15 13:52:03 -0600 | [diff] [blame] | 378 | if (current == NULL) |
| 379 | return; |
| 380 | |
| 381 | current->can_yield--; |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 382 | } |
Raul E Rangel | b29f9d4 | 2021-07-12 13:49:59 -0600 | [diff] [blame] | 383 | |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 384 | enum cb_err thread_join(struct thread_handle *handle) |
| 385 | { |
| 386 | struct stopwatch sw; |
| 387 | struct thread *current = current_thread(); |
| 388 | |
| 389 | assert(handle); |
| 390 | assert(current); |
| 391 | assert(current->handle != handle); |
| 392 | |
| 393 | if (handle->state == THREAD_UNINITIALIZED) |
| 394 | return CB_ERR_ARG; |
| 395 | |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 396 | printk(BIOS_SPEW, "waiting for thread\n"); |
| 397 | |
Raul E Rangel | fae525f | 2021-11-04 15:57:00 -0600 | [diff] [blame] | 398 | stopwatch_init(&sw); |
| 399 | |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 400 | while (handle->state != THREAD_DONE) |
| 401 | assert(thread_yield() == 0); |
| 402 | |
Rob Barnes | d522f38 | 2022-09-12 06:31:47 -0600 | [diff] [blame] | 403 | printk(BIOS_SPEW, "took %lld us\n", stopwatch_duration_usecs(&sw)); |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 404 | |
| 405 | return handle->error; |
| 406 | } |
| 407 | |
Raul E Rangel | b29f9d4 | 2021-07-12 13:49:59 -0600 | [diff] [blame] | 408 | void thread_mutex_lock(struct thread_mutex *mutex) |
| 409 | { |
| 410 | struct stopwatch sw; |
| 411 | |
| 412 | stopwatch_init(&sw); |
| 413 | |
| 414 | while (mutex->locked) |
| 415 | assert(thread_yield() == 0); |
| 416 | mutex->locked = true; |
| 417 | |
Rob Barnes | d522f38 | 2022-09-12 06:31:47 -0600 | [diff] [blame] | 418 | printk(BIOS_SPEW, "took %lld us to acquire mutex\n", stopwatch_duration_usecs(&sw)); |
Raul E Rangel | b29f9d4 | 2021-07-12 13:49:59 -0600 | [diff] [blame] | 419 | } |
| 420 | |
| 421 | void thread_mutex_unlock(struct thread_mutex *mutex) |
| 422 | { |
| 423 | assert(mutex->locked); |
| 424 | mutex->locked = 0; |
| 425 | } |