/*--------------------------------------------------------------------*/
/*--- Xen kernel interface: x86-specific types and structures.     ---*/
/*--------------------------------------------------------------------*/
#ifndef __VKI_XEN_X86_H
#define __VKI_XEN_X86_H
#if defined(__i386__)
/* On 32-bit x86 a "64-bit" guest handle carries both the native pointer
 * and an 8-byte-aligned 64-bit view, so the in-memory layout matches the
 * 64-bit hypervisor ABI. */
#define ___DEFINE_VKI_XEN_GUEST_HANDLE(name, type) \
typedef struct { type *p; } \
__vki_xen_guest_handle_ ## name; \
typedef struct { union { type *p; vki_xen_uint64_aligned_t q; }; } \
__vki_xen_guest_handle_64_ ## name
#define vki_xen_uint64_aligned_t vki_uint64_t __attribute__((aligned(8)))
#define __VKI_XEN_GUEST_HANDLE_64(name) __vki_xen_guest_handle_64_ ## name
#define VKI_XEN_GUEST_HANDLE_64(name) __VKI_XEN_GUEST_HANDLE_64(name)
#else
/* On 64-bit builds a guest handle is simply a wrapped native pointer. */
#define ___DEFINE_VKI_XEN_GUEST_HANDLE(name, type) \
typedef struct { type *p; } __vki_xen_guest_handle_ ## name
#define vki_xen_uint64_aligned_t vki_uint64_t
#define VKI_XEN_GUEST_HANDLE_64(name) VKI_XEN_GUEST_HANDLE(name)
#endif
/* These two must be visible on BOTH architectures: DEFINE_VKI_XEN_GUEST_HANDLE
 * is used unconditionally later in this header, so keeping them inside the
 * #else branch (as before) broke i386 builds with an undefined macro. */
#define __DEFINE_VKI_XEN_GUEST_HANDLE(name, type) \
___DEFINE_VKI_XEN_GUEST_HANDLE(name, type); \
___DEFINE_VKI_XEN_GUEST_HANDLE(const_##name, const type)
#define DEFINE_VKI_XEN_GUEST_HANDLE(name) __DEFINE_VKI_XEN_GUEST_HANDLE(name, name)
#define __VKI_XEN_GUEST_HANDLE(name) __vki_xen_guest_handle_ ## name
#define VKI_XEN_GUEST_HANDLE(name) __VKI_XEN_GUEST_HANDLE(name)
typedef unsigned long vki_xen_pfn_t;   /* guest page-frame number */
typedef unsigned long vki_xen_ulong_t; /* guest-width unsigned long */
#if defined(__i386__)
/* Guest user-level CPU register frame as exchanged with the Xen
 * hypervisor on 32-bit x86.  Field order and padding are ABI -- do not
 * reorder or resize members.  (Presumably mirrors Xen's public
 * struct cpu_user_regs; confirm against xen/include/public/arch-x86.) */
struct vki_xen_cpu_user_regs {
vki_uint32_t ebx;
vki_uint32_t ecx;
vki_uint32_t edx;
vki_uint32_t esi;
vki_uint32_t edi;
vki_uint32_t ebp;
vki_uint32_t eax;
vki_uint16_t error_code; /* private */
vki_uint16_t entry_vector; /* private */
vki_uint32_t eip;
vki_uint16_t cs;
vki_uint8_t saved_upcall_mask;
vki_uint8_t _pad0;
vki_uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
vki_uint32_t esp;
/* Segment selectors; each padded to 32 bits. */
vki_uint16_t ss, _pad1;
vki_uint16_t es, _pad2;
vki_uint16_t ds, _pad3;
vki_uint16_t fs, _pad4;
vki_uint16_t gs, _pad5;
};
#else
/* 64-bit (amd64) variant of the same register frame.  Field order and
 * padding are ABI -- do not reorder or resize members. */
struct vki_xen_cpu_user_regs {
vki_uint64_t r15;
vki_uint64_t r14;
vki_uint64_t r13;
vki_uint64_t r12;
vki_uint64_t rbp;
vki_uint64_t rbx;
vki_uint64_t r11;
vki_uint64_t r10;
vki_uint64_t r9;
vki_uint64_t r8;
vki_uint64_t rax;
vki_uint64_t rcx;
vki_uint64_t rdx;
vki_uint64_t rsi;
vki_uint64_t rdi;
vki_uint32_t error_code; /* private */
vki_uint32_t entry_vector; /* private */
vki_uint64_t rip;
vki_uint16_t cs, _pad0[1];
vki_uint8_t saved_upcall_mask;
vki_uint8_t _pad1[3];
vki_uint64_t rflags; /* rflags.IF == !saved_upcall_mask */
vki_uint64_t rsp;
/* Segment selectors; each padded to 64 bits. */
vki_uint16_t ss, _pad2[3];
vki_uint16_t es, _pad3[3];
vki_uint16_t ds, _pad4[3];
vki_uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */
vki_uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */
};
#endif
/* One entry of a guest's virtual IDT (used as the element type of
 * trap_ctxt[] in struct vki_xen_vcpu_guest_context). */
struct vki_xen_trap_info {
vki_uint8_t vector; /* exception vector */
vki_uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
vki_uint16_t cs; /* code selector */
unsigned long address; /* code offset */
};
/* Full architectural state of one guest VCPU.  Layout is ABI -- do not
 * reorder members; the i386 and amd64 variants differ only in the
 * callback fields and the amd64-only segment bases at the end.
 * (Presumably exchanged with the hypervisor via vcpu/domctl context
 * hypercalls -- confirm against the callers of this type.) */
struct vki_xen_vcpu_guest_context {
/* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
unsigned long flags; /* VGCF_* flags */
struct vki_xen_cpu_user_regs user_regs; /* User-level CPU registers */
struct vki_xen_trap_info trap_ctxt[256];/* Virtual IDT */
unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
/* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */
unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
#ifdef __i386__
/* i386 callbacks carry an explicit CS selector alongside the EIP. */
unsigned long event_callback_cs; /* CS:EIP of event callback */
unsigned long event_callback_eip;
unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
unsigned long failsafe_callback_eip;
#else
/* amd64 callbacks are plain RIPs; a syscall callback is added. */
unsigned long event_callback_eip;
unsigned long failsafe_callback_eip;
unsigned long syscall_callback_eip;
#endif
unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
#ifdef __x86_64__
/* Segment base addresses. */
vki_uint64_t fs_base;
vki_uint64_t gs_base_kernel;
vki_uint64_t gs_base_user;
#endif
};
typedef struct vki_xen_vcpu_guest_context vki_xen_vcpu_guest_context_t;
/* Generate the guest-handle wrapper types for this context structure. */
DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_vcpu_guest_context_t);
/* HVM_SAVE types and declarations for getcontext_partial */
/* Declares a marker struct encoding both a save-record's payload type and
 * its numeric type code:
 *   - member 't' carries the payload type (_type),
 *   - member 'c' has _code elements, so sizeof(c) recovers the code. */
# define VKI_DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \
struct __VKI_HVM_SAVE_TYPE_##_x { _type t; char c[_code]; char cpt[1];}
/* Recover the payload type.  The null pointer is only ever used inside
 * typeof/sizeof, so it is never dereferenced at run time. */
#define VKI_HVM_SAVE_TYPE(_x) typeof (((struct __VKI_HVM_SAVE_TYPE_##_x *)(0))->t)
/* Size in bytes of the save-record payload. */
#define VKI_HVM_SAVE_LENGTH(_x) (sizeof (VKI_HVM_SAVE_TYPE(_x)))
/* Numeric save-record type code, recovered as the length of 'c'. */
#define VKI_HVM_SAVE_CODE(_x) (sizeof (((struct __VKI_HVM_SAVE_TYPE_##_x *)(0))->c))
/* Saved CPU state for one HVM vcpu, the payload of HVM_SAVE record code
 * CPU (== 2, see the VKI_DECLARE_HVM_SAVE_TYPE below).  Layout is ABI --
 * do not reorder or resize members.  (Presumably mirrors Xen's public
 * struct hvm_hw_cpu; confirm against xen/include/public/arch-x86/hvm/save.h.) */
struct vki_hvm_hw_cpu {
vki_uint8_t fpu_regs[512];
/* General-purpose registers. */
vki_uint64_t rax;
vki_uint64_t rbx;
vki_uint64_t rcx;
vki_uint64_t rdx;
vki_uint64_t rbp;
vki_uint64_t rsi;
vki_uint64_t rdi;
vki_uint64_t rsp;
vki_uint64_t r8;
vki_uint64_t r9;
vki_uint64_t r10;
vki_uint64_t r11;
vki_uint64_t r12;
vki_uint64_t r13;
vki_uint64_t r14;
vki_uint64_t r15;
vki_uint64_t rip;
vki_uint64_t rflags;
/* Control registers. */
vki_uint64_t cr0;
vki_uint64_t cr2;
vki_uint64_t cr3;
vki_uint64_t cr4;
/* Debug registers (note: no dr4/dr5 slots). */
vki_uint64_t dr0;
vki_uint64_t dr1;
vki_uint64_t dr2;
vki_uint64_t dr3;
vki_uint64_t dr6;
vki_uint64_t dr7;
/* Segment/system-table descriptors, split into selector, limit, base
 * and access-rights bytes. */
vki_uint32_t cs_sel;
vki_uint32_t ds_sel;
vki_uint32_t es_sel;
vki_uint32_t fs_sel;
vki_uint32_t gs_sel;
vki_uint32_t ss_sel;
vki_uint32_t tr_sel;
vki_uint32_t ldtr_sel;
vki_uint32_t cs_limit;
vki_uint32_t ds_limit;
vki_uint32_t es_limit;
vki_uint32_t fs_limit;
vki_uint32_t gs_limit;
vki_uint32_t ss_limit;
vki_uint32_t tr_limit;
vki_uint32_t ldtr_limit;
vki_uint32_t idtr_limit;
vki_uint32_t gdtr_limit;
vki_uint64_t cs_base;
vki_uint64_t ds_base;
vki_uint64_t es_base;
vki_uint64_t fs_base;
vki_uint64_t gs_base;
vki_uint64_t ss_base;
vki_uint64_t tr_base;
vki_uint64_t ldtr_base;
vki_uint64_t idtr_base;
vki_uint64_t gdtr_base;
vki_uint32_t cs_arbytes;
vki_uint32_t ds_arbytes;
vki_uint32_t es_arbytes;
vki_uint32_t fs_arbytes;
vki_uint32_t gs_arbytes;
vki_uint32_t ss_arbytes;
vki_uint32_t tr_arbytes;
vki_uint32_t ldtr_arbytes;
/* SYSENTER MSRs. */
vki_uint64_t sysenter_cs;
vki_uint64_t sysenter_esp;
vki_uint64_t sysenter_eip;
/* msr for em64t */
vki_uint64_t shadow_gs;
/* msr content saved/restored. */
vki_uint64_t msr_flags;
vki_uint64_t msr_lstar;
vki_uint64_t msr_star;
vki_uint64_t msr_cstar;
vki_uint64_t msr_syscall_mask;
vki_uint64_t msr_efer;
vki_uint64_t msr_tsc_aux;
/* guest's idea of what rdtsc() would return */
vki_uint64_t tsc;
/* pending event, if any */
/* NOTE(review): the bitfields overlay the 32-bit pending_event word;
 * widths sum to 32 (8+3+1+19+1).  Mixed base types (uint8/uint32) make
 * the layout implementation-defined in ISO C -- matches the original. */
union {
vki_uint32_t pending_event;
struct {
vki_uint8_t pending_vector:8;
vki_uint8_t pending_type:3;
vki_uint8_t pending_error_valid:1;
vki_uint32_t pending_reserved:19;
vki_uint8_t pending_valid:1;
};
};
/* error code for pending event */
vki_uint32_t error_code;
};
/* Register vki_hvm_hw_cpu as HVM_SAVE record code 2 ("CPU"). */
VKI_DECLARE_HVM_SAVE_TYPE(CPU, 2, struct vki_hvm_hw_cpu);
#endif // __VKI_XEN_X86_H
/*--------------------------------------------------------------------*/
/*--- end ---*/
/*--------------------------------------------------------------------*/