Angel Pons | 32859fc | 2020-04-02 23:48:27 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 2 | #ifndef THREAD_H_ |
| 3 | #define THREAD_H_ |
| 4 | |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 5 | #include <arch/cpu.h> |
Raul E Rangel | cc01da5 | 2021-07-12 13:43:48 -0600 | [diff] [blame] | 6 | #include <bootstate.h> |
Raul E Rangel | a3b2907 | 2021-11-05 16:58:12 -0600 | [diff] [blame] | 7 | #include <types.h> |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 8 | |
/* Cooperative mutex: thread_mutex_lock() yields until `locked` clears, so it
 * must only be used from contexts where yielding is allowed. Zero-initialize
 * (locked == false) for an unlocked mutex. */
struct thread_mutex {
	bool locked;
};
| 12 | |
/* Lifecycle of a thread as observed through its struct thread_handle. */
enum thread_state {
	THREAD_UNINITIALIZED,	/* Handle not yet associated with a running thread */
	THREAD_STARTED,		/* Thread is running (or runnable) but not finished */
	THREAD_DONE,		/* Thread terminated; handle->error is now valid */
};
| 18 | |
| 19 | struct thread_handle { |
| 20 | enum thread_state state; |
| 21 | /* Only valid when state == THREAD_DONE */ |
| 22 | enum cb_err error; |
| 23 | }; |
| 24 | |
/* Run func(arg) on a new thread. Return 0 on successful start of thread, < 0
 * when thread could not be started. The thread handle, if populated, will
 * reflect the state and return code of the thread.
 * NOTE(review): whether `handle` may be NULL ("if populated" suggests it is
 * optional) cannot be confirmed from this header — verify in the
 * implementation before passing NULL.
 */
int thread_run(struct thread_handle *handle, enum cb_err (*func)(void *), void *arg);

/* thread_run_until is the same as thread_run() except that it blocks state
 * transitions from occurring in the (state, seq) pair of the boot state
 * machine. */
int thread_run_until(struct thread_handle *handle, enum cb_err (*func)(void *), void *arg,
		     boot_state_t state, boot_state_sequence_t seq);

/* Waits until the thread has terminated and returns the error code */
enum cb_err thread_join(struct thread_handle *handle);
| 39 | |
Arthur Heymans | 6acc05e | 2022-05-12 18:01:13 +0200 | [diff] [blame] | 40 | #if ENV_SUPPORTS_COOP |
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 41 | |
/* Per-thread bookkeeping used by the cooperative scheduler. Only available
 * when ENV_SUPPORTS_COOP is set for the current stage. */
struct thread {
	int id;			/* Thread identifier */
	uintptr_t stack_current;	/* Saved stack pointer while switched out */
	uintptr_t stack_orig;	/* Base of the thread's stack allocation */
	struct thread *next;	/* Linked-list chaining (free/runnable lists) */
	enum cb_err (*entry)(void *);	/* Thread entry point */
	void *entry_arg;	/* Argument passed to entry() */
	int can_yield;		/* > 0 when the thread may cooperatively yield */
	struct thread_handle *handle;	/* Caller's handle; updated on completion */
};
| 52 | |
/* Return 0 on successful yield, < 0 when thread did not yield. */
int thread_yield(void);

/* Return 0 on successful yield for the given amount of time, < 0 when thread
 * did not yield. */
int thread_yield_microseconds(unsigned int microsecs);

/* Allow and prevent thread cooperation on current running thread. By default
 * all threads are marked to be cooperative. That means a thread can yield
 * to another thread at a pre-determined switch point. i.e., udelay,
 * thread_yield, or thread_yield_microseconds.
 *
 * These methods should be used to guard critical sections so a dead lock does
 * not occur. The critical sections can be nested. Just make sure the methods
 * are used in pairs.
 */
void thread_coop_enable(void);
void thread_coop_disable(void);

/* Acquire/release a cooperative mutex (see struct thread_mutex above). */
void thread_mutex_lock(struct thread_mutex *mutex);
void thread_mutex_unlock(struct thread_mutex *mutex);

/* Architecture specific thread functions. */
asmlinkage void switch_to_thread(uintptr_t new_stack, uintptr_t *saved_stack);
/* Set up the stack frame for a new thread so that a switch_to_thread() call
 * will enter the thread_entry() function with arg as a parameter. The
 * saved_stack field in the struct thread needs to be updated accordingly. */
void arch_prepare_thread(struct thread *t,
			 asmlinkage void (*thread_entry)(void *), void *arg);
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 82 | #else |
/* Stubs for stages without cooperative multithreading (ENV_SUPPORTS_COOP
 * unset): yields always report failure and mutex operations are no-ops,
 * since there is only a single thread of execution. */
static inline int thread_yield(void)
{
	return -1;
}
static inline int thread_yield_microseconds(unsigned int microsecs)
{
	return -1;
}
static inline void thread_coop_enable(void) {}
static inline void thread_coop_disable(void) {}

static inline void thread_mutex_lock(struct thread_mutex *mutex) {}

static inline void thread_mutex_unlock(struct thread_mutex *mutex) {}
Aaron Durbin | 4409a5e | 2013-05-06 12:20:52 -0500 | [diff] [blame] | 97 | #endif |
| 98 | |
| 99 | #endif /* THREAD_H_ */ |