#include "kvm/virtio-console.h"
#include "kvm/virtio-pci-dev.h"
#include "kvm/disk-image.h"
#include "kvm/virtio.h"
#include "kvm/ioport.h"
#include "kvm/util.h"
#include "kvm/term.h"
#include "kvm/mutex.h"
#include "kvm/kvm.h"
#include "kvm/pci.h"
#include "kvm/threadpool.h"
#include "kvm/irq.h"
#include "kvm/guest_compat.h"
#include <linux/virtio_console.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_blk.h>
#include <sys/uio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <termios.h>
#include <unistd.h>
#include <fcntl.h>

#define VIRTIO_CONSOLE_QUEUE_SIZE	128
#define VIRTIO_CONSOLE_NUM_QUEUES	2
#define VIRTIO_CONSOLE_RX_QUEUE		0
#define VIRTIO_CONSOLE_TX_QUEUE		1

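/*
 * State for the single console port: two virtqueues (RX carries host input
 * to the guest, TX carries guest output to the host terminal), the config
 * space exposed to the guest, and one thread-pool job per queue. poll_cond
 * and vq_ready let the RX path wait until the guest has set up its queue.
 */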
struct con_dev {
	struct mutex			mutex;

	struct virtio_device		vdev;
	struct virt_queue		vqs[VIRTIO_CONSOLE_NUM_QUEUES];
	struct virtio_console_config	config;
	u32				features;

	pthread_cond_t			poll_cond;
	int				vq_ready;

	struct thread_pool__job		jobs[VIRTIO_CONSOLE_NUM_QUEUES];
};

static struct con_dev cdev = {
	.mutex			= MUTEX_INITIALIZER,

	.vq_ready		= 0,

	.config = {
		.cols		= 80,
		.rows		= 24,
		.max_nr_ports	= 1,
	},
};

static int compat_id = -1;

/*
 * Interrupts are injected for hvc0 only.
 */
static void virtio_console__inject_interrupt_callback(struct kvm *kvm, void *param)
{
	struct iovec iov[VIRTIO_CONSOLE_QUEUE_SIZE];
	struct virt_queue *vq;
	u16 out, in;
	u16 head;
	int len;

	if (kvm->cfg.active_console != CONSOLE_VIRTIO)
		return;

	mutex_lock(&cdev.mutex);

	vq = param;

	if (!cdev.vq_ready)
		pthread_cond_wait(&cdev.poll_cond, &cdev.mutex.mutex);

	if (term_readable(0) && virt_queue__available(vq)) {
		head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
		len = term_getc_iov(kvm, iov, in, 0);
		virt_queue__set_used_elem(vq, head, len);
		cdev.vdev.ops->signal_vq(kvm, &cdev.vdev, vq - cdev.vqs);
	}

	mutex_unlock(&cdev.mutex);
}

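/*
 * Public entry point (declared in kvm/virtio-console.h): push pending
 * host-side terminal input into the guest's receive queue.
 */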
void virtio_console__inject_interrupt(struct kvm *kvm)
{
	virtio_console__inject_interrupt_callback(kvm, &cdev.vqs[VIRTIO_CONSOLE_RX_QUEUE]);
}

static void virtio_console_handle_callback(struct kvm *kvm, void *param)
{
	struct iovec iov[VIRTIO_CONSOLE_QUEUE_SIZE];
	struct virt_queue *vq;
	u16 out, in;
	u16 head;
	u32 len;

	vq = param;

	/*
	 * The current Linux implementation polls for the buffer
	 * to be used, rather than waiting for an interrupt.
	 * So there is no need to inject an interrupt for the tx path.
	 */
	while (virt_queue__available(vq)) {
		head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
		len = term_putc_iov(iov, out, 0);
		virt_queue__set_used_elem(vq, head, len);
	}
}

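/* Expose struct virtio_console_config as the device's config space. */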
static u8 *get_config(struct kvm *kvm, void *dev)
{
	struct con_dev *cdev = dev;

	return ((u8 *)(&cdev->config));
}

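/* No optional virtio-console feature bits are offered to the guest. */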
static u32 get_host_features(struct kvm *kvm, void *dev)
{
	return 0;
}

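/*
 * The negotiated feature bits themselves are not used here; this hook only
 * converts the config fields to the guest's endianness once feature
 * negotiation is done.
 */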
static void set_guest_features(struct kvm *kvm, void *dev, u32 features)
{
	struct con_dev *cdev = dev;
	struct virtio_console_config *conf = &cdev->config;

	conf->cols = virtio_host_to_guest_u16(&cdev->vdev, conf->cols);
	conf->rows = virtio_host_to_guest_u16(&cdev->vdev, conf->rows);
	conf->max_nr_ports = virtio_host_to_guest_u32(&cdev->vdev, conf->max_nr_ports);
}

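/*
 * The guest has written the queue PFN: map the vring it placed at that
 * guest page, initialize it, and attach a thread-pool job to the queue
 * (TX drains guest output to the terminal, RX feeds terminal input to
 * the guest). Once the RX queue exists, wake any thread waiting in
 * virtio_console__inject_interrupt_callback().
 */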
static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 page_size, u32 align,
		   u32 pfn)
{
	struct virt_queue *queue;
	void *p;

	BUG_ON(vq >= VIRTIO_CONSOLE_NUM_QUEUES);

	compat__remove_message(compat_id);

	queue		= &cdev.vqs[vq];
	queue->pfn	= pfn;
	p		= virtio_get_vq(kvm, queue->pfn, page_size);

	vring_init(&queue->vring, VIRTIO_CONSOLE_QUEUE_SIZE, p, align);
	virtio_init_device_vq(&cdev.vdev, queue);

	if (vq == VIRTIO_CONSOLE_TX_QUEUE) {
		thread_pool__init_job(&cdev.jobs[vq], kvm, virtio_console_handle_callback, queue);
	} else if (vq == VIRTIO_CONSOLE_RX_QUEUE) {
		thread_pool__init_job(&cdev.jobs[vq], kvm, virtio_console__inject_interrupt_callback, queue);
		/* Tell the waiting poll thread that we're ready to go */
		mutex_lock(&cdev.mutex);
		cdev.vq_ready = 1;
		pthread_cond_signal(&cdev.poll_cond);
		mutex_unlock(&cdev.mutex);
	}

	return 0;
}

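/* Queue notification ("kick") from the guest: schedule the queue's job. */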
static int notify_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct con_dev *cdev = dev;

	thread_pool__do_job(&cdev->jobs[vq]);

	return 0;
}

static int get_pfn_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct con_dev *cdev = dev;

	return cdev->vqs[vq].pfn;
}

static int get_size_vq(struct kvm *kvm, void *dev, u32 vq)
{
	return VIRTIO_CONSOLE_QUEUE_SIZE;
}

static int set_size_vq(struct kvm *kvm, void *dev, u32 vq, int size)
{
	/* FIXME: dynamic */
	return size;
}

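/* Callbacks handed to the generic virtio layer for this device. */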
static struct virtio_ops con_dev_virtio_ops = {
	.get_config		= get_config,
	.get_host_features	= get_host_features,
	.set_guest_features	= set_guest_features,
	.init_vq		= init_vq,
	.notify_vq		= notify_vq,
	.get_pfn_vq		= get_pfn_vq,
	.get_size_vq		= get_size_vq,
	.set_size_vq		= set_size_vq,
};

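/*
 * Run at startup via virtio_dev_init(): register the virtio console only
 * when it is the active console, using VIRTIO_DEFAULT_TRANS(kvm) to pick
 * the transport.
 */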
int virtio_console__init(struct kvm *kvm)
{
	if (kvm->cfg.active_console != CONSOLE_VIRTIO)
		return 0;

	pthread_cond_init(&cdev.poll_cond, NULL);

	virtio_init(kvm, &cdev, &cdev.vdev, &con_dev_virtio_ops,
		    VIRTIO_DEFAULT_TRANS(kvm), PCI_DEVICE_ID_VIRTIO_CONSOLE,
		    VIRTIO_ID_CONSOLE, PCI_CLASS_CONSOLE);

	if (compat_id == -1)
		compat_id = virtio_compat_add_message("virtio-console", "CONFIG_VIRTIO_CONSOLE");

	return 0;
}
virtio_dev_init(virtio_console__init);
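
/* Nothing needs to be torn down for the console device. */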
int virtio_console__exit(struct kvm *kvm)
{
	return 0;
}
virtio_dev_exit(virtio_console__exit);