// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_sbi.h>
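
/*
 * Handler for the SBI TIME extension. Its only function, set_timer,
 * programs the next timer event for the calling VCPU.
 */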
static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				    unsigned long *out_val,
				    struct kvm_cpu_trap *utrap, bool *exit)
{
	int ret = 0;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	u64 next_cycle;

	if (cp->a6 != SBI_EXT_TIME_SET_TIMER)
		return -EINVAL;
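
	/*
	 * On RV32 the 64-bit next-event value is split across a0 (low
	 * word) and a1 (high word); on RV64 it fits in a0 alone.
	 */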
#if __riscv_xlen == 32
	next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
#else
	next_cycle = (u64)cp->a0;
#endif
	kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);

	return ret;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time = {
	.extid_start = SBI_EXT_TIME,
	.extid_end = SBI_EXT_TIME,
	.handler = kvm_sbi_ext_time_handler,
};
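
/*
 * Handler for the SBI IPI extension. send_ipi injects a virtual
 * supervisor software interrupt into every VCPU selected by the
 * hart mask in a0/a1.
 */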
static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   unsigned long *out_val,
				   struct kvm_cpu_trap *utrap, bool *exit)
{
	int ret = 0;
	unsigned long i;
	struct kvm_vcpu *tmp;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long hmask = cp->a0;
	unsigned long hbase = cp->a1;

	if (cp->a6 != SBI_EXT_IPI_SEND_IPI)
		return -EINVAL;
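
	/*
	 * An hbase of -1UL selects all VCPUs; otherwise hmask is a
	 * bitmap of hart IDs relative to hbase.
	 */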
	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		if (hbase != -1UL) {
			if (tmp->vcpu_id < hbase)
				continue;
			if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
				continue;
		}
		ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
		if (ret < 0)
			break;
	}

	return ret;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi = {
	.extid_start = SBI_EXT_IPI,
	.extid_end = SBI_EXT_IPI,
	.handler = kvm_sbi_ext_ipi_handler,
};
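
/*
 * Handler for the SBI RFENCE extension, which requests remote fence
 * operations on the harts selected by the hart mask in a0/a1.
 */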
static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				      unsigned long *out_val,
				      struct kvm_cpu_trap *utrap, bool *exit)
{
	int ret = 0;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long hmask = cp->a0;
	unsigned long hbase = cp->a1;
	unsigned long funcid = cp->a6;
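
	/*
	 * For the SFENCE.VMA variants, a2/a3 carry the start address
	 * and size of the range; a zero start and size is handled as
	 * a full address-space flush.
	 */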
	switch (funcid) {
	case SBI_EXT_RFENCE_REMOTE_FENCE_I:
		kvm_riscv_fence_i(vcpu->kvm, hbase, hmask);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
		if (cp->a2 == 0 && cp->a3 == 0)
			kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);
		else
			kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
						  cp->a2, cp->a3, PAGE_SHIFT);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
		if (cp->a2 == 0 && cp->a3 == 0)
			kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
						       hbase, hmask, cp->a4);
		else
			kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm,
						       hbase, hmask,
						       cp->a2, cp->a3,
						       PAGE_SHIFT, cp->a4);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
		/*
		 * Until nested virtualization is implemented, the
		 * SBI HFENCE calls should be treated as NOPs
		 */
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence = {
	.extid_start = SBI_EXT_RFENCE,
	.extid_end = SBI_EXT_RFENCE,
	.handler = kvm_sbi_ext_rfence_handler,
};
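
/*
 * Handler for the SBI SRST (System Reset) extension. Shutdown and
 * reboot requests are forwarded to user space as KVM system events,
 * so *exit is set to stop the VCPU run loop.
 */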
static int kvm_sbi_ext_srst_handler(struct kvm_vcpu *vcpu,
				    struct kvm_run *run,
				    unsigned long *out_val,
				    struct kvm_cpu_trap *utrap, bool *exit)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long funcid = cp->a6;
	u32 reason = cp->a1;
	u32 type = cp->a0;
	int ret = 0;

	switch (funcid) {
	case SBI_EXT_SRST_RESET:
		switch (type) {
		case SBI_SRST_RESET_TYPE_SHUTDOWN:
			kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
							KVM_SYSTEM_EVENT_SHUTDOWN,
							reason);
			*exit = true;
			break;
		case SBI_SRST_RESET_TYPE_COLD_REBOOT:
		case SBI_SRST_RESET_TYPE_WARM_REBOOT:
			kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
							KVM_SYSTEM_EVENT_RESET,
							reason);
			*exit = true;
			break;
		default:
			ret = -EOPNOTSUPP;
		}
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst = {
	.extid_start = SBI_EXT_SRST,
	.extid_end = SBI_EXT_SRST,
	.handler = kvm_sbi_ext_srst_handler,
};