// SPDX-License-Identifier: GPL-2.0-only
#include "linux/types.h"
#include "linux/bitmap.h"
#include "linux/atomic.h"
#include "kvm_util.h"
#include "ucall_common.h"
#define GUEST_UCALL_FAILED -1
struct ucall_header {
DECLARE_BITMAP(in_use, KVM_MAX_VCPUS);
struct ucall ucalls[KVM_MAX_VCPUS];
};
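
/*
 * Number of guest pages needed to back the ucall pool: the ucall_header,
 * rounded up to a whole number of pages of @page_size.
 */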
int ucall_nr_pages_required(uint64_t page_size)
{
	return align_up(sizeof(struct ucall_header), page_size) / page_size;
}

/*
 * ucall_pool holds per-VM values (global data is duplicated by each VM), so
 * it must not be accessed from host code.
 */
static struct ucall_header *ucall_pool;
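
/*
 * Allocate the shared (host and guest accessible) ucall pool for @vm, record
 * each slot's host virtual address in uc->hva so the guest can hand the host
 * a directly dereferenceable pointer, publish the pool's guest virtual address
 * via the guest's ucall_pool global, and pass @mmio_gpa on to the arch code.
 */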
void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
{
	struct ucall_header *hdr;
	struct ucall *uc;
	vm_vaddr_t vaddr;
	int i;

	vaddr = vm_vaddr_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR,
				      MEM_REGION_DATA);
	hdr = (struct ucall_header *)addr_gva2hva(vm, vaddr);
	memset(hdr, 0, sizeof(*hdr));

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		uc = &hdr->ucalls[i];
		uc->hva = uc;
	}

	write_guest_global(vm, ucall_pool, (struct ucall_header *)vaddr);

	ucall_arch_init(vm, mmio_gpa);
}
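
/*
 * Guest-only: claim a free ucall slot by atomically setting its in_use bit.
 * If the pool hasn't been initialized or every slot is taken, signal the host
 * with a bare GUEST_UCALL_FAILED ucall and return NULL.
 */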
static struct ucall *ucall_alloc(void)
{
	struct ucall *uc;
	int i;

	if (!ucall_pool)
		goto ucall_failed;

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (!test_and_set_bit(i, ucall_pool->in_use)) {
			uc = &ucall_pool->ucalls[i];
			memset(uc->args, 0, sizeof(uc->args));
			return uc;
		}
	}

ucall_failed:
	/*
	 * If the vCPU cannot grab a ucall structure, make a bare ucall with a
	 * magic value to signal to get_ucall() that things went sideways.
	 * GUEST_ASSERT() depends on ucall_alloc() and so cannot be used here.
	 */
	ucall_arch_do_ucall(GUEST_UCALL_FAILED);
	return NULL;
}
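
/* Guest-only: release the slot claimed by ucall_alloc(). */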
static void ucall_free(struct ucall *uc)
{
	/* Beware, here be pointer arithmetic. */
	clear_bit(uc - ucall_pool->ucalls, ucall_pool->in_use);
}
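
/*
 * Guest-side assertion report: stash the failed expression, file, and line in
 * the ucall args, format the caller's message into uc->buffer, and hand the
 * ucall to the host.
 */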
void ucall_assert(uint64_t cmd, const char *exp, const char *file,
		  unsigned int line, const char *fmt, ...)
{
	struct ucall *uc;
	va_list va;

	uc = ucall_alloc();
	uc->cmd = cmd;

	WRITE_ONCE(uc->args[GUEST_ERROR_STRING], (uint64_t)(exp));
	WRITE_ONCE(uc->args[GUEST_FILE], (uint64_t)(file));
	WRITE_ONCE(uc->args[GUEST_LINE], line);

	va_start(va, fmt);
	guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va);
	va_end(va);

	ucall_arch_do_ucall((vm_vaddr_t)uc->hva);

	ucall_free(uc);
}
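
/* Guest-side printf-style ucall: only the formatted message is passed along. */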
void ucall_fmt(uint64_t cmd, const char *fmt, ...)
{
	struct ucall *uc;
	va_list va;

	uc = ucall_alloc();
	uc->cmd = cmd;

	va_start(va, fmt);
	guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va);
	va_end(va);

	ucall_arch_do_ucall((vm_vaddr_t)uc->hva);

	ucall_free(uc);
}
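
/*
 * Classic guest-side ucall: pass @cmd and up to UCALL_MAX_ARGS raw arguments
 * to the host; any extra arguments are dropped.
 */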
void ucall(uint64_t cmd, int nargs, ...)
{
	struct ucall *uc;
	va_list va;
	int i;

	uc = ucall_alloc();

	WRITE_ONCE(uc->cmd, cmd);

	nargs = min(nargs, UCALL_MAX_ARGS);

	va_start(va, nargs);
	for (i = 0; i < nargs; ++i)
		WRITE_ONCE(uc->args[i], va_arg(va, uint64_t));
	va_end(va);

	ucall_arch_do_ucall((vm_vaddr_t)uc->hva);

	ucall_free(uc);
}
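
/*
 * Host-side: retrieve the ucall, if any, that @vcpu just issued. Returns the
 * ucall command, or UCALL_NONE (0) if the exit was not a ucall; @uc may be
 * NULL when the caller only cares about the command.
 *
 * Typical host-side usage looks roughly like the sketch below; UCALL_SYNC,
 * UCALL_ABORT, UCALL_DONE, and REPORT_GUEST_ASSERT() come from
 * ucall_common.h (REPORT_GUEST_ASSERT() does not return), and the
 * surrounding loop is illustrative rather than required by this file:
 *
 *	struct ucall uc;
 *
 *	for (;;) {
 *		vcpu_run(vcpu);
 *		switch (get_ucall(vcpu, &uc)) {
 *		case UCALL_SYNC:
 *			break;
 *		case UCALL_ABORT:
 *			REPORT_GUEST_ASSERT(uc);
 *		case UCALL_DONE:
 *			return;
 *		}
 *	}
 */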
uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
{
	struct ucall ucall;
	void *addr;

	if (!uc)
		uc = &ucall;

	addr = ucall_arch_get_ucall(vcpu);
	if (addr) {
		TEST_ASSERT(addr != (void *)GUEST_UCALL_FAILED,
			    "Guest failed to allocate ucall struct");

		memcpy(uc, addr, sizeof(*uc));
		vcpu_run_complete_io(vcpu);
	} else {
		memset(uc, 0, sizeof(*uc));
	}

	return uc->cmd;
}