Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 1 | /* |
| 2 | * This file is part of the coreboot project. |
| 3 | * |
| 4 | * Copyright (C) 2013 Google, Inc. |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License as published by |
| 8 | * the Free Software Foundation; version 2 of the License. |
| 9 | * |
| 10 | * This program is distributed in the hope that it will be useful, |
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 13 | * GNU General Public License for more details. |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 14 | */ |
| 15 | #include <stddef.h> |
| 16 | #include <stdint.h> |
| 17 | #include <stdlib.h> |
| 18 | #include <arch/cpu.h> |
| 19 | #include <bootstate.h> |
| 20 | #include <console/console.h> |
| 21 | #include <thread.h> |
| 22 | |
| 23 | static void idle_thread_init(void); |
| 24 | |
/* There needs to be at least one thread to run the ramstage state machine. */
| 26 | #define TOTAL_NUM_THREADS (CONFIG_NUM_THREADS + 1) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 27 | |
/* Storage space for the thread structs. */
| 29 | static struct thread all_threads[TOTAL_NUM_THREADS]; |
| 30 | |
| 31 | /* All runnable (but not running) and free threads are kept on their |
| 32 | * respective lists. */ |
| 33 | static struct thread *runnable_threads; |
| 34 | static struct thread *free_threads; |
| 35 | |
| 36 | static inline struct cpu_info *thread_cpu_info(const struct thread *t) |
| 37 | { |
| 38 | return (void *)(t->stack_orig); |
| 39 | } |
| 40 | |
| 41 | static inline int thread_can_yield(const struct thread *t) |
| 42 | { |
| 43 | return (t != NULL && t->can_yield); |
| 44 | } |
| 45 | |
/* Assumes current CPU info can switch. */
/* Map a cpu_info structure back to the thread whose stack hosts it. */
static inline struct thread *cpu_info_to_thread(const struct cpu_info *ci)
{
	return ci->thread;
}
| 51 | |
/* Thread owning the stack we are currently executing on. */
static inline struct thread *current_thread(void)
{
	struct cpu_info *info = cpu_info();

	return cpu_info_to_thread(info);
}
| 56 | |
/* A list is empty when its head pointer is NULL. */
static inline int thread_list_empty(struct thread **list)
{
	return (*list == NULL) ? 1 : 0;
}
| 61 | |
| 62 | static inline struct thread *pop_thread(struct thread **list) |
| 63 | { |
| 64 | struct thread *t; |
| 65 | |
| 66 | t = *list; |
| 67 | *list = t->next; |
| 68 | t->next = NULL; |
| 69 | return t; |
| 70 | } |
| 71 | |
| 72 | static inline void push_thread(struct thread **list, struct thread *t) |
| 73 | { |
| 74 | t->next = *list; |
| 75 | *list = t; |
| 76 | } |
| 77 | |
/* Mark a thread runnable by placing it on the runnable list. */
static inline void push_runnable(struct thread *t)
{
	push_thread(&runnable_threads, t);
}
| 82 | |
/* Take the next runnable thread off the runnable list.
 * Caller must ensure the list is non-empty. */
static inline struct thread *pop_runnable(void)
{
	return pop_thread(&runnable_threads);
}
| 87 | |
| 88 | static inline struct thread *get_free_thread(void) |
| 89 | { |
| 90 | struct thread *t; |
| 91 | struct cpu_info *ci; |
| 92 | struct cpu_info *new_ci; |
| 93 | |
| 94 | if (thread_list_empty(&free_threads)) |
| 95 | return NULL; |
| 96 | |
| 97 | t = pop_thread(&free_threads); |
| 98 | |
| 99 | ci = cpu_info(); |
| 100 | |
| 101 | /* Initialize the cpu_info structure on the new stack. */ |
| 102 | new_ci = thread_cpu_info(t); |
| 103 | *new_ci = *ci; |
| 104 | new_ci->thread = t; |
| 105 | |
| 106 | /* Reset the current stack value to the original. */ |
| 107 | t->stack_current = t->stack_orig; |
| 108 | |
| 109 | return t; |
| 110 | } |
| 111 | |
/* Return a thread struct to the free pool for reuse. */
static inline void free_thread(struct thread *t)
{
	push_thread(&free_threads, t);
}
| 116 | |
/* The idle thread runs whenever nothing else is runnable. Its sole
 * responsibility is to ensure forward progress by servicing the timer
 * callbacks. */
static void idle_thread(void *unused)
{
	/* The idle thread must never give up the CPU voluntarily. */
	thread_prevent_coop();
	for (;;)
		timers_run();
}
| 127 | |
| 128 | static void schedule(struct thread *t) |
| 129 | { |
| 130 | struct thread *current = current_thread(); |
| 131 | |
| 132 | /* If t is NULL need to find new runnable thread. */ |
| 133 | if (t == NULL) { |
| 134 | if (thread_list_empty(&runnable_threads)) |
| 135 | die("Runnable thread list is empty!\n"); |
| 136 | t = pop_runnable(); |
| 137 | } else { |
| 138 | /* current is still runnable. */ |
| 139 | push_runnable(current); |
| 140 | } |
| 141 | switch_to_thread(t->stack_current, ¤t->stack_current); |
| 142 | } |
| 143 | |
/* Return t to the free pool and switch to the next runnable thread.
 * Does not return to the caller. */
static void terminate_thread(struct thread *t)
{
	free_thread(t);
	schedule(NULL);
}
| 149 | |
| 150 | static void asmlinkage call_wrapper(void *unused) |
| 151 | { |
| 152 | struct thread *current = current_thread(); |
| 153 | |
| 154 | current->entry(current->entry_arg); |
| 155 | terminate_thread(current); |
| 156 | } |
| 157 | |
| 158 | /* Block the current state transitions until thread is complete. */ |
| 159 | static void asmlinkage call_wrapper_block_current(void *unused) |
| 160 | { |
| 161 | struct thread *current = current_thread(); |
| 162 | |
| 163 | boot_state_current_block(); |
| 164 | current->entry(current->entry_arg); |
| 165 | boot_state_current_unblock(); |
| 166 | terminate_thread(current); |
| 167 | } |
| 168 | |
/* Argument bundle for call_wrapper_block_state(): the boot state and
 * sequence to keep blocked while the thread runs. */
struct block_boot_state {
	boot_state_t state;
	boot_state_sequence_t seq;
};
| 173 | |
| 174 | /* Block the provided state until thread is complete. */ |
| 175 | static void asmlinkage call_wrapper_block_state(void *arg) |
| 176 | { |
| 177 | struct block_boot_state *bbs = arg; |
| 178 | struct thread *current = current_thread(); |
| 179 | |
| 180 | boot_state_block(bbs->state, bbs->seq); |
| 181 | current->entry(current->entry_arg); |
| 182 | boot_state_unblock(bbs->state, bbs->seq); |
| 183 | terminate_thread(current); |
| 184 | } |
| 185 | |
| 186 | /* Prepare a thread so that it starts by executing thread_entry(thread_arg). |
| 187 | * Within thread_entry() it will call func(arg). */ |
| 188 | static void prepare_thread(struct thread *t, void *func, void *arg, |
Lee Leahy | 38768c3 | 2017-03-09 14:07:18 -0800 | [diff] [blame] | 189 | asmlinkage void(*thread_entry)(void *), |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 190 | void *thread_arg) |
| 191 | { |
| 192 | /* Stash the function and argument to run. */ |
| 193 | t->entry = func; |
| 194 | t->entry_arg = arg; |
| 195 | |
| 196 | /* All new threads can yield by default. */ |
| 197 | t->can_yield = 1; |
| 198 | |
| 199 | arch_prepare_thread(t, thread_entry, thread_arg); |
| 200 | } |
| 201 | |
| 202 | static void thread_resume_from_timeout(struct timeout_callback *tocb) |
| 203 | { |
| 204 | struct thread *to; |
| 205 | |
| 206 | to = tocb->priv; |
| 207 | schedule(to); |
| 208 | } |
| 209 | |
| 210 | static void idle_thread_init(void) |
| 211 | { |
| 212 | struct thread *t; |
| 213 | |
| 214 | t = get_free_thread(); |
| 215 | |
Lee Leahy | 2f919ec | 2017-03-08 17:37:06 -0800 | [diff] [blame] | 216 | if (t == NULL) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 217 | die("No threads available for idle thread!\n"); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 218 | |
| 219 | /* Queue idle thread to run once all other threads have yielded. */ |
| 220 | prepare_thread(t, idle_thread, NULL, call_wrapper, NULL); |
| 221 | push_runnable(t); |
| 222 | /* Mark the currently executing thread to cooperate. */ |
| 223 | thread_cooperate(); |
| 224 | } |
| 225 | |
| 226 | /* Don't inline this function so the timeout_callback won't have its storage |
| 227 | * space on the stack cleaned up before the call to schedule(). */ |
| 228 | static int __attribute__((noinline)) |
Lee Leahy | 75b8599 | 2017-03-08 16:34:12 -0800 | [diff] [blame] | 229 | thread_yield_timed_callback(struct timeout_callback *tocb, unsigned int microsecs) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 230 | { |
| 231 | tocb->priv = current_thread(); |
| 232 | tocb->callback = thread_resume_from_timeout; |
| 233 | |
| 234 | if (timer_sched_callback(tocb, microsecs)) |
| 235 | return -1; |
| 236 | |
| 237 | /* The timer callback will wake up the current thread. */ |
| 238 | schedule(NULL); |
| 239 | return 0; |
| 240 | } |
| 241 | |
| 242 | static void *thread_alloc_space(struct thread *t, size_t bytes) |
| 243 | { |
| 244 | /* Allocate the amount of space on the stack keeping the stack |
| 245 | * aligned to the pointer size. */ |
| 246 | t->stack_current -= ALIGN_UP(bytes, sizeof(uintptr_t)); |
| 247 | |
| 248 | return (void *)t->stack_current; |
| 249 | } |
| 250 | |
| 251 | void threads_initialize(void) |
| 252 | { |
| 253 | int i; |
| 254 | struct thread *t; |
Ronald G. Minnich | 34352d1 | 2013-08-21 16:03:32 -0700 | [diff] [blame] | 255 | u8 *stack_top; |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 256 | struct cpu_info *ci; |
Ronald G. Minnich | 34352d1 | 2013-08-21 16:03:32 -0700 | [diff] [blame] | 257 | u8 *thread_stacks; |
| 258 | |
| 259 | thread_stacks = arch_get_thread_stackbase(); |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 260 | |
| 261 | /* Initialize the BSP thread first. The cpu_info structure is assumed |
| 262 | * to be just under the top of the stack. */ |
| 263 | t = &all_threads[0]; |
| 264 | ci = cpu_info(); |
| 265 | ci->thread = t; |
| 266 | t->stack_orig = (uintptr_t)ci; |
| 267 | t->id = 0; |
| 268 | |
| 269 | stack_top = &thread_stacks[CONFIG_STACK_SIZE] - sizeof(struct cpu_info); |
| 270 | for (i = 1; i < TOTAL_NUM_THREADS; i++) { |
| 271 | t = &all_threads[i]; |
| 272 | t->stack_orig = (uintptr_t)stack_top; |
| 273 | t->id = i; |
| 274 | stack_top += CONFIG_STACK_SIZE; |
| 275 | free_thread(t); |
| 276 | } |
| 277 | |
| 278 | idle_thread_init(); |
| 279 | } |
| 280 | |
| 281 | int thread_run(void (*func)(void *), void *arg) |
| 282 | { |
| 283 | struct thread *current; |
| 284 | struct thread *t; |
| 285 | |
| 286 | current = current_thread(); |
| 287 | |
| 288 | if (!thread_can_yield(current)) { |
| 289 | printk(BIOS_ERR, |
| 290 | "thread_run() called from non-yielding context!\n"); |
| 291 | return -1; |
| 292 | } |
| 293 | |
| 294 | t = get_free_thread(); |
| 295 | |
| 296 | if (t == NULL) { |
| 297 | printk(BIOS_ERR, "thread_run() No more threads!\n"); |
| 298 | return -1; |
| 299 | } |
| 300 | |
| 301 | prepare_thread(t, func, arg, call_wrapper_block_current, NULL); |
| 302 | schedule(t); |
| 303 | |
| 304 | return 0; |
| 305 | } |
| 306 | |
| 307 | int thread_run_until(void (*func)(void *), void *arg, |
| 308 | boot_state_t state, boot_state_sequence_t seq) |
| 309 | { |
| 310 | struct thread *current; |
| 311 | struct thread *t; |
| 312 | struct block_boot_state *bbs; |
| 313 | |
| 314 | current = current_thread(); |
| 315 | |
| 316 | if (!thread_can_yield(current)) { |
| 317 | printk(BIOS_ERR, |
| 318 | "thread_run() called from non-yielding context!\n"); |
| 319 | return -1; |
| 320 | } |
| 321 | |
| 322 | t = get_free_thread(); |
| 323 | |
| 324 | if (t == NULL) { |
| 325 | printk(BIOS_ERR, "thread_run() No more threads!\n"); |
| 326 | return -1; |
| 327 | } |
| 328 | |
| 329 | bbs = thread_alloc_space(t, sizeof(*bbs)); |
| 330 | bbs->state = state; |
| 331 | bbs->seq = seq; |
| 332 | prepare_thread(t, func, arg, call_wrapper_block_state, bbs); |
| 333 | schedule(t); |
| 334 | |
| 335 | return 0; |
| 336 | } |
| 337 | |
Lee Leahy | 75b8599 | 2017-03-08 16:34:12 -0800 | [diff] [blame] | 338 | int thread_yield_microseconds(unsigned int microsecs) |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 339 | { |
| 340 | struct thread *current; |
| 341 | struct timeout_callback tocb; |
| 342 | |
| 343 | current = current_thread(); |
| 344 | |
| 345 | if (!thread_can_yield(current)) |
| 346 | return -1; |
| 347 | |
| 348 | if (thread_yield_timed_callback(&tocb, microsecs)) |
| 349 | return -1; |
| 350 | |
| 351 | return 0; |
| 352 | } |
| 353 | |
| 354 | void thread_cooperate(void) |
| 355 | { |
| 356 | struct thread *current; |
| 357 | |
| 358 | current = current_thread(); |
| 359 | |
| 360 | if (current != NULL) |
| 361 | current->can_yield = 1; |
| 362 | } |
| 363 | |
| 364 | void thread_prevent_coop(void) |
| 365 | { |
| 366 | struct thread *current; |
| 367 | |
| 368 | current = current_thread(); |
| 369 | |
| 370 | if (current != NULL) |
| 371 | current->can_yield = 0; |
| 372 | } |