/* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (c) 2016, Linaro Limited
*/
#ifndef __MM_PGT_CACHE_H
#define __MM_PGT_CACHE_H
#include <assert.h>
#include <kernel/tee_ta_manager.h>
#include <sys/queue.h>
#include <types_ext.h>
#include <util.h>
#ifdef CFG_WITH_LPAE
#define PGT_SIZE (4 * 1024)
#define PGT_NUM_PGT_PER_PAGE 1
#else
#define PGT_SIZE (1 * 1024)
#define PGT_NUM_PGT_PER_PAGE 4
#endif
struct ts_ctx;
/*
 * struct pgt - bookkeeping for one PGT_SIZE-byte translation table
 *
 * Tables are kept on singly-linked lists (see the SLIST_ENTRY link and
 * struct pgt_cache below).
 */
struct pgt {
void *tbl; /* The translation table memory itself */
vaddr_t vabase; /* Base virtual address covered by this table — presumably; confirm against pgt_cache.c */
#if !defined(CFG_CORE_PREALLOC_EL0_TBLS)
struct ts_ctx *ctx; /* TS context this table currently belongs to */
#endif
bool populated; /* NOTE(review): looks like "table holds live entries" — confirm */
#if defined(CFG_PAGED_USER_TA)
/* Count of in-use entries, maintained via pgt_inc/dec/set_used_entries() */
uint16_t num_used_entries;
#endif
#if defined(CFG_CORE_PREALLOC_EL0_TBLS) || \
(defined(CFG_WITH_PAGER) && !defined(CFG_WITH_LPAE))
struct pgt_parent *parent; /* Backing page shared by several tables when PGT_NUM_PGT_PER_PAGE > 1 — TODO confirm */
#endif
SLIST_ENTRY(pgt) link; /* Linkage into a struct pgt_cache list */
};
/* A singly-linked list of struct pgt, e.g. a per-context table cache */
SLIST_HEAD(pgt_cache, pgt);
struct user_mode_ctx;
/*
 * pgt_check_avail() - check translation table availability for a context
 * @uctx: the user mode context needing translation tables
 *
 * Returns true on success, false otherwise — presumably "enough tables can
 * be provided for @uctx"; confirm exact semantics in the implementation.
 */
bool pgt_check_avail(struct user_mode_ctx *uctx);
/*
 * pgt_get_all() - makes all needed translation tables available
 * @uctx: the context to own the tables
 *
 * Guaranteed to succeed, but may need to sleep for a while to get all the
 * needed translation tables.
 */
#if defined(CFG_CORE_PREALLOC_EL0_TBLS)
/* Tables are preallocated per context, so there is nothing to acquire */
static inline void pgt_get_all(struct user_mode_ctx *uctx __unused) { }
#else
void pgt_get_all(struct user_mode_ctx *uctx);
#endif
/*
 * pgt_put_all() - informs the translation table manager that these tables
 * will not be needed for a while
 * @uctx: the context owning the tables to make inactive
 */
#if defined(CFG_CORE_PREALLOC_EL0_TBLS)
/* Tables are preallocated per context, so there is nothing to release */
static inline void pgt_put_all(struct user_mode_ctx *uctx __unused) { }
#else
void pgt_put_all(struct user_mode_ctx *uctx);
#endif
/*
 * pgt_clear_range() - clear table entries for a virtual address range
 * @uctx: the context owning the tables
 * @begin: start of the range
 * @end: end of the range — presumably exclusive, given the name; note the
 * sibling pgt_flush_range() takes an inclusive-sounding @last instead.
 * Confirm both conventions in the implementation.
 */
void pgt_clear_range(struct user_mode_ctx *uctx, vaddr_t begin, vaddr_t end);
/*
 * pgt_flush_range() - flush tables covering a virtual address range
 * @uctx: the context owning the tables
 * @begin: start of the range
 * @last: last address of the range (inclusive — TODO confirm)
 */
void pgt_flush_range(struct user_mode_ctx *uctx, vaddr_t begin, vaddr_t last);
#if defined(CFG_CORE_PREALLOC_EL0_TBLS)
/* No global cache list when tables are preallocated: lookup finds nothing */
static inline struct pgt *pgt_pop_from_cache_list(vaddr_t vabase __unused,
struct ts_ctx *ctx __unused)
{ return NULL; }
/* ...and returning a table to the list is a no-op */
static inline void pgt_push_to_cache_list(struct pgt *pgt __unused) { }
#else
/*
 * pgt_pop_from_cache_list() - remove and return a cached table, or NULL
 * @vabase: virtual address base the table must match
 * @ctx: context the table must belong to
 */
struct pgt *pgt_pop_from_cache_list(vaddr_t vabase, struct ts_ctx *ctx);
/* pgt_push_to_cache_list() - return @pgt to the cache list */
void pgt_push_to_cache_list(struct pgt *pgt);
#endif
#if defined(CFG_CORE_PREALLOC_EL0_TBLS)
/* Preallocated tables need no cache setup at boot */
static inline void pgt_init(void) { }
#else
/* pgt_init() - one-time initialization of the translation table cache */
void pgt_init(void);
#endif
/*
 * pgt_flush() - flush all translation tables of a context
 * @uctx: the context owning the tables — exact flush semantics live in the
 * implementation; confirm there.
 */
void pgt_flush(struct user_mode_ctx *uctx);
#if defined(CFG_PAGED_USER_TA)
/* Account for one more in-use entry in @pgt; traps counter wrap-around. */
static inline void pgt_inc_used_entries(struct pgt *pgt)
{
	pgt->num_used_entries += 1;
	/* If the increment wrapped to zero the counter overflowed */
	assert(pgt->num_used_entries);
}

/* Account for one fewer in-use entry in @pgt; must not already be zero. */
static inline void pgt_dec_used_entries(struct pgt *pgt)
{
	assert(pgt->num_used_entries);
	pgt->num_used_entries -= 1;
}

/* Set the in-use entry count of @pgt to @val. */
static inline void pgt_set_used_entries(struct pgt *pgt, size_t val)
{
	pgt->num_used_entries = val;
}
#else
/* Without paged user TAs the used-entry count is not tracked: all no-ops. */
static inline void pgt_inc_used_entries(struct pgt *pgt __unused)
{
}

static inline void pgt_dec_used_entries(struct pgt *pgt __unused)
{
}

static inline void pgt_set_used_entries(struct pgt *pgt __unused,
					size_t val __unused)
{
}
#endif
#endif /*__MM_PGT_CACHE_H*/