// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 Ventana Micro Systems Inc.
 */

#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/sizes.h>

#include <asm/bug.h>
#include <asm/current.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/page.h>
#include <asm/sbi.h>
#include <asm/uaccess.h>
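
/*
 * Reset the vCPU's steal-time accounting state: disable the shared memory
 * region and clear the last recorded run_delay snapshot.
 */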
void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sta.shmem = INVALID_GPA;
	vcpu->arch.sta.last_steal = 0;
}
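
/*
 * Publish accumulated steal time to the guest's SBI STA shared memory
 * region. The sequence field is incremented before and after the steal
 * value is written, seqcount-style, so the guest can detect a concurrent
 * update and retry its read.
 */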
void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
{
	gpa_t shmem = vcpu->arch.sta.shmem;
	u64 last_steal = vcpu->arch.sta.last_steal;
	__le32 __user *sequence_ptr;
	__le64 __user *steal_ptr;
	__le32 sequence_le;
	__le64 steal_le;
	u32 sequence;
	u64 steal;
	unsigned long hva;
	gfn_t gfn;

	if (shmem == INVALID_GPA)
		return;

	/*
	 * shmem is 64-byte aligned (see the enforcement in
	 * kvm_sbi_sta_steal_time_set_shmem()) and the size of sbi_sta_struct
	 * is 64 bytes, so we know all its offsets are in the same page.
	 */
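	/*
	 * (I.e. offset_in_page(shmem) is a multiple of 64 that is at most
	 * PAGE_SIZE - 64, so the get_user()/put_user() accesses below cannot
	 * cross into a different, possibly unmapped, page.)
	 */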
	gfn = shmem >> PAGE_SHIFT;
	hva = kvm_vcpu_gfn_to_hva(vcpu, gfn);

	if (WARN_ON(kvm_is_error_hva(hva))) {
		vcpu->arch.sta.shmem = INVALID_GPA;
		return;
	}

	sequence_ptr = (__le32 __user *)(hva + offset_in_page(shmem) +
			offsetof(struct sbi_sta_struct, sequence));
	steal_ptr = (__le64 __user *)(hva + offset_in_page(shmem) +
			offsetof(struct sbi_sta_struct, steal));

	if (WARN_ON(get_user(sequence_le, sequence_ptr)))
		return;

	sequence = le32_to_cpu(sequence_le);
	sequence += 1;

	if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
		return;

	if (!WARN_ON(get_user(steal_le, steal_ptr))) {
		steal = le64_to_cpu(steal_le);
		vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
		steal += vcpu->arch.sta.last_steal - last_steal;
		WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
	}

	sequence += 1;
	WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr));

	kvm_vcpu_mark_page_dirty(vcpu, gfn);
}
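
/*
 * Handle SBI_EXT_STA_STEAL_TIME_SET_SHMEM: validate the guest-provided
 * shared memory address and record it, or disable steal-time reporting
 * when both address halves are SBI_SHMEM_DISABLE. The region is zeroed
 * before the new address is accepted.
 */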
static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long shmem_phys_lo = cp->a0;
	unsigned long shmem_phys_hi = cp->a1;
	u32 flags = cp->a2;
	struct sbi_sta_struct zero_sta = {0};
	unsigned long hva;
	bool writable;
	gpa_t shmem;
	int ret;

	if (flags != 0)
		return SBI_ERR_INVALID_PARAM;

	if (shmem_phys_lo == SBI_SHMEM_DISABLE &&
	    shmem_phys_hi == SBI_SHMEM_DISABLE) {
		vcpu->arch.sta.shmem = INVALID_GPA;
		return 0;
	}

	if (shmem_phys_lo & (SZ_64 - 1))
		return SBI_ERR_INVALID_PARAM;

	shmem = shmem_phys_lo;

	if (shmem_phys_hi != 0) {
		if (IS_ENABLED(CONFIG_32BIT))
			shmem |= ((gpa_t)shmem_phys_hi << 32);
		else
			return SBI_ERR_INVALID_ADDRESS;
	}

	hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable);
	if (kvm_is_error_hva(hva) || !writable)
		return SBI_ERR_INVALID_ADDRESS;

	ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta));
	if (ret)
		return SBI_ERR_FAILURE;

	vcpu->arch.sta.shmem = shmem;
	vcpu->arch.sta.last_steal = current->sched_info.run_delay;

	return 0;
}
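
/*
 * Top-level dispatcher for the SBI STA extension: route each function ID
 * to its handler and report unknown IDs as not supported. The SBI error
 * code is passed back to the guest via retdata; the SBI call itself is
 * considered handled, so 0 is returned.
 */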
static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   struct kvm_vcpu_sbi_return *retdata)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long funcid = cp->a6;
	int ret;

	switch (funcid) {
	case SBI_EXT_STA_STEAL_TIME_SET_SHMEM:
		ret = kvm_sbi_sta_steal_time_set_shmem(vcpu);
		break;
	default:
		ret = SBI_ERR_NOT_SUPPORTED;
		break;
	}

	retdata->err_val = ret;

	return 0;
}
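
/*
 * Only advertise the extension to the guest when run-delay accounting
 * (sched_info) is available on the host, since that is the source of the
 * steal-time values reported above.
 */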
static unsigned long kvm_sbi_ext_sta_probe(struct kvm_vcpu *vcpu)
{
	return !!sched_info_on();
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {
	.extid_start = SBI_EXT_STA,
	.extid_end = SBI_EXT_STA,
	.handler = kvm_sbi_ext_sta_handler,
	.probe = kvm_sbi_ext_sta_probe,
};
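
/*
 * ONE_REG accessors for the STA shared memory address, exposed to
 * userspace (e.g. for save/restore across migration). On 32-bit hosts
 * the 64-bit GPA is split across the shmem_lo and shmem_hi registers;
 * on 64-bit hosts shmem_hi reads as zero and must be written as zero.
 */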
int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu,
				   unsigned long reg_num,
				   unsigned long *reg_val)
{
	switch (reg_num) {
	case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
		*reg_val = (unsigned long)vcpu->arch.sta.shmem;
		break;
	case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
		if (IS_ENABLED(CONFIG_32BIT))
			*reg_val = upper_32_bits(vcpu->arch.sta.shmem);
		else
			*reg_val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu,
				   unsigned long reg_num,
				   unsigned long reg_val)
{
	switch (reg_num) {
	case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
		if (IS_ENABLED(CONFIG_32BIT)) {
			gpa_t hi = upper_32_bits(vcpu->arch.sta.shmem);

			vcpu->arch.sta.shmem = reg_val;
			vcpu->arch.sta.shmem |= hi << 32;
		} else {
			vcpu->arch.sta.shmem = reg_val;
		}
		break;
	case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
		if (IS_ENABLED(CONFIG_32BIT)) {
			gpa_t lo = lower_32_bits(vcpu->arch.sta.shmem);

			vcpu->arch.sta.shmem = ((gpa_t)reg_val << 32);
			vcpu->arch.sta.shmem |= lo;
		} else if (reg_val != 0) {
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}