/* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (c) 2014, STMicroelectronics International N.V.
* Copyright (c) 2016-2017, Linaro Limited
* Copyright (c) 2020-2021, Arm Limited
*/
#ifndef __KERNEL_THREAD_H
#define __KERNEL_THREAD_H
#ifndef __ASSEMBLER__
#include <types_ext.h>
#include <compiler.h>
#include <mm/pgt_cache.h>
#endif
#include <util.h>
#include <kernel/thread_arch.h>
#define THREAD_FLAGS_COPY_ARGS_ON_RETURN BIT(0)
#define THREAD_FLAGS_FOREIGN_INTR_ENABLE BIT(1)
#define THREAD_FLAGS_EXIT_ON_FOREIGN_INTR BIT(2)
#define THREAD_FLAGS_FFA_ONLY BIT(3)
#define THREAD_ID_0 0
#define THREAD_ID_INVALID -1
#define THREAD_RPC_MAX_NUM_PARAMS U(4)
#ifndef __ASSEMBLER__
struct thread_specific_data {
TAILQ_HEAD(, ts_session) sess_stack;
struct ts_ctx *ctx;
#ifdef CFG_CORE_FFA
uint32_t rpc_target_info;
#endif
uint32_t abort_type;
uint32_t abort_descr;
vaddr_t abort_va;
unsigned int abort_core;
struct thread_abort_regs abort_regs;
#ifdef CFG_CORE_DEBUG_CHECK_STACKS
bool stackcheck_recursion;
#endif
unsigned int syscall_recursion;
#ifdef CFG_FAULT_MITIGATION
struct ftmn_func_arg *ftmn_arg;
#endif
};
void thread_init_canaries(void);
void thread_init_primary(void);
void thread_init_per_cpu(void);
#if defined(CFG_WITH_STACK_CANARIES)
void thread_update_canaries(void);
#else
static inline void thread_update_canaries(void) { }
#endif
struct thread_core_local *thread_get_core_local(void);
/*
* thread_init_threads() - Initialize threads
* @thread_count: Number of threads to configure
*
* Initializes thread contexts. Called in thread_init_boot_thread() if
* virtualization is disabled. Virtualization subsystem calls it for every
* new guest otherwise. @thread_count must be <= CFG_NUM_THREADS. With
* CFG_DYN_CONFIG=y the number of initialized threads is set to
* @thread_count, otherwise @thread_count must equal CFG_NUM_THREADS.
*/
void thread_init_threads(size_t thread_count);
vaddr_t thread_get_abt_stack(void);
/*
* thread_init_thread_core_local() - Initialize thread_core_local
* @core_count: Number of cores in the system
*
* Called by the init CPU. Sets temporary stack mode for all CPUs
* (curr_thread = -1 and THREAD_CLF_TMP) and sets the temporary stack limit
* for the init CPU. @core_count must be <= CFG_TEE_CORE_NB_CORE. With
* CFG_DYN_CONFIG=y the number of supported cores is set to @core_count,
* otherwise @core_count must equal CFG_TEE_CORE_NB_CORE.
*/
void thread_init_thread_core_local(size_t core_count);
#if defined(CFG_CORE_PAUTH)
void thread_init_thread_pauth_keys(void);
void thread_init_core_local_pauth_keys(void);
#else
static inline void thread_init_thread_pauth_keys(void) { }
static inline void thread_init_core_local_pauth_keys(void) { }
#endif
/*
* Initializes a thread to be used during boot
*/
void thread_init_boot_thread(void);
/*
* Clears the current thread id
* Only supposed to be used during initialization.
*/
void thread_clr_boot_thread(void);
/*
* Returns current thread id.
*/
short int thread_get_id(void);
/*
* Returns current thread id, or -1 on failure.
*/
short int thread_get_id_may_fail(void);
/* Returns Thread Specific Data (TSD) pointer. */
struct thread_specific_data *thread_get_tsd(void);
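/*
 * A minimal usage sketch: the TSD keeps the stack of active trusted
 * service sessions, so the current session can be peeked at from a
 * thread context roughly like this (TAILQ_FIRST from <sys/queue.h>):
 *
 *    struct thread_specific_data *tsd = thread_get_tsd();
 *    struct ts_session *sess = TAILQ_FIRST(&tsd->sess_stack);
 *
 *    if (!sess)
 *            ... no session is active on this thread ...
 */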
/*
* Sets foreign interrupts status for current thread, must only be called
* from an active thread context.
*
* enable == true -> enable foreign interrupts
* enable == false -> disable foreign interrupts
*/
void thread_set_foreign_intr(bool enable);
/*
* Restores the foreign interrupts status (in CPSR) for current thread, must
* only be called from an active thread context.
*/
void thread_restore_foreign_intr(void);
/*
* thread_get_exceptions() - return current exception mask
*/
uint32_t thread_get_exceptions(void);
/*
* thread_set_exceptions() - set exception mask
* @exceptions: exception mask to set
*
* Any previous exception mask is replaced entirely by this one, that is,
* the old bits are cleared and the new bits take their place.
*/
void thread_set_exceptions(uint32_t exceptions);
/*
* thread_mask_exceptions() - Masks (disables) specified asynchronous exceptions
* @exceptions: exceptions to mask
* @returns old exception state
*/
uint32_t thread_mask_exceptions(uint32_t exceptions);
/*
* thread_unmask_exceptions() - Unmasks asynchronous exceptions
* @state: Old asynchronous exception state to restore (returned by
* thread_mask_exceptions())
*/
void thread_unmask_exceptions(uint32_t state);
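/*
 * A minimal sketch of how the two functions above are typically used as
 * a save/restore pair around a critical section, assuming THREAD_EXCP_ALL
 * from <kernel/thread_arch.h>:
 *
 *    uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
 *
 *    ... critical section, no asynchronous exceptions delivered ...
 *
 *    thread_unmask_exceptions(exceptions);
 */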
static inline bool __nostackcheck thread_foreign_intr_disabled(void)
{
return !!(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
}
/*
* thread_enter_user_mode() - Enters user mode
* @a0: Passed in r/x0 to @entry_func
* @a1: Passed in r/x1 to @entry_func
* @a2: Passed in r/x2 to @entry_func
* @a3: Passed in r/x3 to @entry_func
* @user_sp: Assigned sp value in user mode
* @entry_func: Function to execute in user mode
* @is_32bit: True if the TA should execute in AArch32, false for AArch64
* @exit_status0: Pointer to opaque exit status 0
* @exit_status1: Pointer to opaque exit status 1
*
* This function enters user mode with the arguments described above.
* @exit_status0 and @exit_status1 are filled in by thread_unwind_user_mode()
* when execution returns to the caller of this function through an
* exception handler.
*
* @returns what's passed in "ret" to thread_unwind_user_mode()
*/
uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3, unsigned long user_sp,
unsigned long entry_func, bool is_32bit,
uint32_t *exit_status0, uint32_t *exit_status1);
/*
* thread_unwind_user_mode() - Unwinds kernel stack from user entry
* @ret: Value to return from thread_enter_user_mode()
* @exit_status0: Exit status 0
* @exit_status1: Exit status 1
*
* This is the function that exception handlers can return into
* to resume execution in kernel mode instead of user mode.
*
* This function is closely coupled with thread_enter_user_mode() since it
* needs to restore the registers saved by thread_enter_user_mode() and,
* when it returns, make it look like thread_enter_user_mode() just
* returned. It is expected that the stack pointer is where
* thread_enter_user_mode() left it. The stack will be unwound and the
* function will return to where thread_enter_user_mode() was called from.
* @exit_status0 and @exit_status1 are stored at the corresponding pointers
* supplied to thread_enter_user_mode().
*/
void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
uint32_t exit_status1);
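/*
 * A minimal sketch of how the two functions pair up, as seen from the
 * caller (a0..a3, user_sp and entry_func are placeholder values):
 *
 *    uint32_t exit_status0 = 0;
 *    uint32_t exit_status1 = 0;
 *    uint32_t ret = 0;
 *
 *    ret = thread_enter_user_mode(a0, a1, a2, a3, user_sp, entry_func,
 *                                 false, &exit_status0, &exit_status1);
 *    ... execution resumes here when an exception handler returns
 *        through thread_unwind_user_mode(ret, status0, status1) ...
 */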
/*
* Returns the start address (bottom) of the stack for the current thread,
* zero if there is no current thread.
*/
vaddr_t thread_stack_start(void);
/* Returns the stack size for the current thread */
size_t thread_stack_size(void);
/*
* Returns the start (top, lowest address) and end (bottom, highest address) of
* the current stack (thread, temporary or abort stack).
* When CFG_CORE_DEBUG_CHECK_STACKS=y, the @hard parameter tells whether the
* hard or soft limits are queried. The difference is that the hard stack
* start lies below the soft start, leaving extra space so that a function
* overflowing the soft limit can still print a stack dump.
*/
bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard);
static inline bool __nostackcheck get_stack_soft_limits(vaddr_t *start,
vaddr_t *end)
{
return get_stack_limits(start, end, false);
}
static inline bool __nostackcheck get_stack_hard_limits(vaddr_t *start,
vaddr_t *end)
{
return get_stack_limits(start, end, true);
}
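/*
 * A minimal sketch of estimating the remaining stack headroom, using the
 * address of a local variable as an approximation of the current stack
 * pointer (the stack grows downward, so @start is the lowest address):
 *
 *    vaddr_t start = 0;
 *    vaddr_t end = 0;
 *    vaddr_t sp = (vaddr_t)&start;
 *
 *    if (get_stack_soft_limits(&start, &end) && sp >= start)
 *            ... sp - start bytes remain before the soft limit ...
 */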
bool thread_is_in_normal_mode(void);
/*
* Returns true if the previous exception also occurred in abort mode.
*
* Note: it's only valid to call this function from an abort exception
* handler before interrupts have been re-enabled.
*/
bool thread_is_from_abort_mode(void);
/**
* Allocates data for payload buffers shared with a non-secure user space
* application. Ensure consistency with the enumeration
* THREAD_SHM_TYPE_APPLICATION.
*
* @size: size in bytes of payload buffer
*
* @returns mobj that describes allocated buffer or NULL on error
*/
struct mobj *thread_rpc_alloc_payload(size_t size);
/**
* Free physical memory previously allocated with thread_rpc_alloc_payload()
*
* @mobj: mobj that describes the buffer
*/
void thread_rpc_free_payload(struct mobj *mobj);
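/*
 * A minimal allocate/map/free sketch, assuming mobj_get_va() from
 * <mm/mobj.h>:
 *
 *    struct mobj *mobj = thread_rpc_alloc_payload(size);
 *    void *va = NULL;
 *
 *    if (!mobj)
 *            return TEE_ERROR_OUT_OF_MEMORY;
 *    va = mobj_get_va(mobj, 0, size);
 *    ... fill the buffer at @va, pass @mobj as a memref RPC parameter ...
 *    thread_rpc_free_payload(mobj);
 */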
/**
* Allocate data for payload buffers shared with the non-secure kernel.
* Ensure consistency with the enumeration THREAD_SHM_TYPE_KERNEL_PRIVATE.
*
* @size: size in bytes of payload buffer
*
* @returns mobj that describes allocated buffer or NULL on error
*/
struct mobj *thread_rpc_alloc_kernel_payload(size_t size);
/**
* Free physical memory previously allocated with
* thread_rpc_alloc_kernel_payload()
*
* @mobj: mobj that describes the buffer
*/
void thread_rpc_free_kernel_payload(struct mobj *mobj);
struct thread_param_memref {
size_t offs;
size_t size;
struct mobj *mobj;
};
struct thread_param_value {
uint64_t a;
uint64_t b;
uint64_t c;
};
/*
* Note that arithmetic is done on these values, so it's important to keep
* the IN, OUT, INOUT order (see the sketch after the enum below).
*/
enum thread_param_attr {
THREAD_PARAM_ATTR_NONE = 0,
THREAD_PARAM_ATTR_VALUE_IN,
THREAD_PARAM_ATTR_VALUE_OUT,
THREAD_PARAM_ATTR_VALUE_INOUT,
THREAD_PARAM_ATTR_MEMREF_IN,
THREAD_PARAM_ATTR_MEMREF_OUT,
THREAD_PARAM_ATTR_MEMREF_INOUT,
};
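/*
 * A sketch of why the ordering matters: it allows simple range checks,
 * as in this hypothetical helper (not part of the API):
 *
 *    static bool attr_is_value(enum thread_param_attr attr)
 *    {
 *            return attr >= THREAD_PARAM_ATTR_VALUE_IN &&
 *                   attr <= THREAD_PARAM_ATTR_VALUE_INOUT;
 *    }
 */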
struct thread_param {
enum thread_param_attr attr;
union {
struct thread_param_memref memref;
struct thread_param_value value;
} u;
};
#define THREAD_PARAM_MEMREF(_direction, _mobj, _offs, _size) \
(struct thread_param){ \
.attr = THREAD_PARAM_ATTR_MEMREF_ ## _direction, .u.memref = { \
.mobj = (_mobj), .offs = (_offs), .size = (_size) } \
}
#define THREAD_PARAM_VALUE(_direction, _a, _b, _c) \
(struct thread_param){ \
.attr = THREAD_PARAM_ATTR_VALUE_ ## _direction, .u.value = { \
.a = (_a), .b = (_b), .c = (_c) } \
}
/**
* Does an RPC using a preallocated argument buffer
* @cmd: RPC cmd
* @num_params: number of parameters
* @params: RPC parameters
* @returns RPC return value
*/
uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
struct thread_param *params);
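/*
 * A minimal sketch of an RPC with a single value parameter, modeled on
 * the REE time request and assuming OPTEE_RPC_CMD_GET_TIME from
 * <optee_rpc_cmd.h>:
 *
 *    struct thread_param params = THREAD_PARAM_VALUE(OUT, 0, 0, 0);
 *    uint32_t res = thread_rpc_cmd(OPTEE_RPC_CMD_GET_TIME, 1, &params);
 *
 *    if (res == TEE_SUCCESS)
 *            ... seconds in params.u.value.a, nanoseconds in .b ...
 */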
/**
* Allocate data for payload buffers shared with both user space applications
* and the non-secure kernel. Ensure consistency with the enumeration
* THREAD_SHM_TYPE_GLOBAL.
*
* @size: size in bytes of payload buffer
*
* @returns mobj that describes allocated buffer or NULL on error
*/
struct mobj *thread_rpc_alloc_global_payload(size_t size);
/**
* Free physical memory previously allocated with
* thread_rpc_alloc_global_payload()
*
* @mobj: mobj that describes the buffer
*/
void thread_rpc_free_global_payload(struct mobj *mobj);
/*
* enum thread_shm_type - type of non-secure shared memory
* @THREAD_SHM_TYPE_APPLICATION - user space application shared memory
* @THREAD_SHM_TYPE_KERNEL_PRIVATE - kernel private shared memory
* @THREAD_SHM_TYPE_GLOBAL - user space and kernel shared memory
*/
enum thread_shm_type {
THREAD_SHM_TYPE_APPLICATION,
THREAD_SHM_TYPE_KERNEL_PRIVATE,
THREAD_SHM_TYPE_GLOBAL,
};
/*
* enum thread_shm_cache_user - user of a cache allocation
* @THREAD_SHM_CACHE_USER_SOCKET - socket communication
* @THREAD_SHM_CACHE_USER_FS - filesystem access
* @THREAD_SHM_CACHE_USER_I2C - I2C communication
* @THREAD_SHM_CACHE_USER_RPMB - RPMB communication
*
* To ensure that users of the shared memory cache don't interfere with
* each other, a unique ID per user is used.
*/
enum thread_shm_cache_user {
THREAD_SHM_CACHE_USER_SOCKET,
THREAD_SHM_CACHE_USER_FS,
THREAD_SHM_CACHE_USER_I2C,
THREAD_SHM_CACHE_USER_RPMB,
};
/*
* Returns a pointer to the cached RPC memory. Each (thread, @user) tuple
* has its own cache. The returned pointer is either NULL or points to an
* area large enough to hold @size bytes.
*/
void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
enum thread_shm_type shm_type,
size_t size, struct mobj **mobj);
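/*
 * A minimal sketch of getting a per-thread buffer for filesystem RPC;
 * the allocation is cached per thread and reused across calls, so no
 * explicit free is done here:
 *
 *    struct mobj *mobj = NULL;
 *    void *va = thread_rpc_shm_cache_alloc(THREAD_SHM_CACHE_USER_FS,
 *                                          THREAD_SHM_TYPE_APPLICATION,
 *                                          size, &mobj);
 *
 *    if (!va)
 *            return TEE_ERROR_OUT_OF_MEMORY;
 *    ... use @va / @mobj for the RPC ...
 */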
#endif /*__ASSEMBLER__*/
#endif /*__KERNEL_THREAD_H*/