/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARM64_KVM_NESTED_H
#define __ARM64_KVM_NESTED_H

#include <linux/bitfield.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_pgtable.h>
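
/*
 * Is nested virtualisation in use for this vCPU? Requires both the
 * ARM64_HAS_NESTED_VIRT capability and the KVM_ARM_VCPU_HAS_EL2 vCPU
 * feature. Always false when built into the nVHE hypervisor object,
 * which does not support nested virtualisation.
 */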
static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
{
	return (!__is_defined(__KVM_NVHE_HYPERVISOR__) &&
		cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
		vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2));
}

/* Translation helpers from non-VHE EL2 to EL1 */
static inline u64 tcr_el2_ps_to_tcr_el1_ips(u64 tcr_el2)
{
	return (u64)FIELD_GET(TCR_EL2_PS_MASK, tcr_el2) << TCR_IPS_SHIFT;
}
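
/*
 * Illustrative example (hypothetical register value): a guest hypervisor
 * TCR_EL2 with T0SZ=16, a 4K granule (TG0=0) and PS=0b101 (48-bit PA)
 * translates to a TCR_EL1 carrying the same T0SZ/TG0/IRGN0/ORGN0 in the
 * TTBR0 fields, IPS=0b101, and EPD1 set so that the (unused) TTBR1_EL1
 * walk is disabled.
 */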
static inline u64 translate_tcr_el2_to_tcr_el1(u64 tcr)
{
	return TCR_EPD1_MASK |				/* disable TTBR1_EL1 */
	       ((tcr & TCR_EL2_TBI) ? TCR_TBI0 : 0) |
	       tcr_el2_ps_to_tcr_el1_ips(tcr) |
	       (tcr & TCR_EL2_TG0_MASK) |
	       (tcr & TCR_EL2_ORGN0_MASK) |
	       (tcr & TCR_EL2_IRGN0_MASK) |
	       (tcr & TCR_EL2_T0SZ_MASK);
}
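
/*
 * Note the trap-bit polarity inversion: CPTR_EL2.TFP/TZ trap FP/SVE
 * accesses when *set*, while CPACR_EL1.FPEN/ZEN enable them when *set*.
 * TCPAC and TAM sit at the same bit positions in the nVHE CPTR_EL2 and
 * VHE (CPACR) layouts, so they are copied through unchanged.
 */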
static inline u64 translate_cptr_el2_to_cpacr_el1(u64 cptr_el2)
{
	u64 cpacr_el1 = CPACR_ELx_RES1;

	if (cptr_el2 & CPTR_EL2_TTA)
		cpacr_el1 |= CPACR_ELx_TTA;
	if (!(cptr_el2 & CPTR_EL2_TFP))
		cpacr_el1 |= CPACR_ELx_FPEN;
	if (!(cptr_el2 & CPTR_EL2_TZ))
		cpacr_el1 |= CPACR_ELx_ZEN;
	cpacr_el1 |= cptr_el2 & (CPTR_EL2_TCPAC | CPTR_EL2_TAM);

	return cpacr_el1;
}

static inline u64 translate_sctlr_el2_to_sctlr_el1(u64 val)
{
	/* Only preserve the minimal set of bits we support */
	val &= (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | SCTLR_ELx_SA |
		SCTLR_ELx_I | SCTLR_ELx_IESB | SCTLR_ELx_WXN | SCTLR_ELx_EE);
	val |= SCTLR_EL1_RES1;

	return val;
}

static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
{
	/* Clear the ASID field (bits [63:48]) */
	return ttbr0 & ~GENMASK_ULL(63, 48);
}

extern bool forward_smc_trap(struct kvm_vcpu *vcpu);
extern void kvm_init_nested(struct kvm *kvm);
extern int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu);
extern void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu);
extern struct kvm_s2_mmu *lookup_s2_mmu(struct kvm_vcpu *vcpu);

union tlbi_info;

extern void kvm_s2_mmu_iterate_by_vmid(struct kvm *kvm, u16 vmid,
				       const union tlbi_info *info,
				       void (*)(struct kvm_s2_mmu *,
						const union tlbi_info *));
extern void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu);
extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu);

extern void check_nested_vcpu_requests(struct kvm_vcpu *vcpu);
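
/*
 * Result of walking the guest hypervisor's stage-2 page tables for a
 * given IPA: the output address and block size of the mapping, its R/W
 * permissions, the level at which the walk ended, the raw descriptor,
 * and the ESR to inject if the walk faulted.
 */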
struct kvm_s2_trans {
	phys_addr_t output;
	unsigned long block_size;
	bool writable;
	bool readable;
	int level;
	u32 esr;
	u64 desc;
};

static inline phys_addr_t kvm_s2_trans_output(struct kvm_s2_trans *trans)
{
	return trans->output;
}

static inline unsigned long kvm_s2_trans_size(struct kvm_s2_trans *trans)
{
	return trans->block_size;
}

static inline u32 kvm_s2_trans_esr(struct kvm_s2_trans *trans)
{
	return trans->esr;
}

static inline bool kvm_s2_trans_readable(struct kvm_s2_trans *trans)
{
	return trans->readable;
}

static inline bool kvm_s2_trans_writable(struct kvm_s2_trans *trans)
{
	return trans->writable;
}
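
/*
 * Stage-2 descriptors encode execute-never in bit 54 (XN): the
 * translation is executable when the bit is clear.
 */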
static inline bool kvm_s2_trans_executable(struct kvm_s2_trans *trans)
{
	return !(trans->desc & BIT(54));
}

extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
			      struct kvm_s2_trans *result);
extern int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu,
				    struct kvm_s2_trans *trans);
extern int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2);
extern void kvm_nested_s2_wp(struct kvm *kvm);
extern void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block);
extern void kvm_nested_s2_flush(struct kvm *kvm);

unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val);
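
/*
 * Whether a trapped EL1 TLBI instruction is one we know how to emulate:
 * the nXS, outer-shareable and range variants are only accepted when
 * the guest's ID registers advertise the corresponding feature
 * (FEAT_XS, FEAT_TLBIOS, FEAT_TLBIRANGE).
 */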
static inline bool kvm_supported_tlbi_s1e1_op(struct kvm_vcpu *vcpu, u32 instr)
{
	struct kvm *kvm = vcpu->kvm;
	u8 CRm = sys_reg_CRm(instr);

	if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
	      sys_reg_Op1(instr) == TLBI_Op1_EL1))
		return false;

	if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
	      (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
	       kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
		return false;

	if (CRm == TLBI_CRm_nROS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		return false;

	if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
	     CRm == TLBI_CRm_RNS) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		return false;

	return true;
}
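
/*
 * Same checks for the EL2 variants; IPA-based invalidation (the IPAS2*
 * encodings) is never accepted by this helper.
 */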
static inline bool kvm_supported_tlbi_s1e2_op(struct kvm_vcpu *vcpu, u32 instr)
{
	struct kvm *kvm = vcpu->kvm;
	u8 CRm = sys_reg_CRm(instr);

	if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
	      sys_reg_Op1(instr) == TLBI_Op1_EL2))
		return false;

	if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
	      (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
	       kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
		return false;

	if (CRm == TLBI_CRm_IPAIS || CRm == TLBI_CRm_IPAONS)
		return false;

	if (CRm == TLBI_CRm_nROS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		return false;

	if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
	     CRm == TLBI_CRm_RNS) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		return false;

	return true;
}

int kvm_init_nv_sysregs(struct kvm *kvm);

#ifdef CONFIG_ARM64_PTR_AUTH
bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr);
#else
static inline bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
{
	/* We really should never execute this... */
	WARN_ON_ONCE(1);
	*elr = 0xbad9acc0debadbad;
	return false;
}
#endif
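
/*
 * The two software bits of a shadow stage-2 PTE are reused to record
 * the level of the guest hypervisor's own stage-2 mapping, so that it
 * can be recovered later from the shadow page table.
 */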
#define KVM_NV_GUEST_MAP_SZ	(KVM_PGTABLE_PROT_SW1 | KVM_PGTABLE_PROT_SW0)

static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans)
{
	return FIELD_PREP(KVM_NV_GUEST_MAP_SZ, trans->level);
}

/* Adjust alignment for the contiguous bit as per StageOA() */
#define contiguous_bit_shift(d, wi, l)			\
	({						\
		u8 shift = 0;				\
							\
		if ((d) & PTE_CONT) {			\
			switch (BIT((wi)->pgshift)) {	\
			case SZ_4K:			\
				shift = 4;		\
				break;			\
			case SZ_16K:			\
				shift = (l) == 2 ? 5 : 7; \
				break;			\
			case SZ_64K:			\
				shift = 5;		\
				break;			\
			}				\
		}					\
							\
		shift;					\
	})
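
/*
 * Example: with a 4K granule the contiguous bit groups 16 entries, so
 * the output address gains 4 extra alignment bits; a 16K granule
 * groups 32 entries (shift 5) at level 2 and 128 entries (shift 7) at
 * level 3; a 64K granule always groups 32 entries (shift 5).
 */

/*
 * Map the 3-bit PS/IPS field encoding to the corresponding physical
 * address size in bits. Unknown encodings, including the 52-bit one
 * (0b110), fall through to the 48-bit default.
 */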
static inline unsigned int ps_to_output_size(unsigned int ps)
{
	switch (ps) {
	case 0:		return 32;
	case 1:		return 36;
	case 2:		return 40;
	case 3:		return 42;
	case 4:		return 44;
	case 5:
	default:
		return 48;
	}
}

#endif /* __ARM64_KVM_NESTED_H */