/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#define SHADOW_SLB_ENTRY_LEN	0x10
#define OFFSET_ESID(x)		(SHADOW_SLB_ENTRY_LEN * x)
#define OFFSET_VSID(x)		((SHADOW_SLB_ENTRY_LEN * x) + 8)
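
/* Each shadow SLB entry is 16 bytes: the ESID dword at offset 0 and the
 * matching VSID dword at offset 8, which is what the two macros above encode. */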
/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/
.macro LOAD_GUEST_SEGMENTS
	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = host R2
	 * R3 = shadow vcpu
	 * all other volatile GPRS = free except R4, R6
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 */
BEGIN_FW_FTR_SECTION

	/* Declare SLB shadow as 0 entries big */

	ld	r11, PACA_SLBSHADOWPTR(r13)
	li	r8, 0
	stb	r8, 3(r11)

END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR)
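
	/* With the shadow's persistent entry count zeroed, the LPAR
	 * hypervisor, which replays this buffer when it dispatches the
	 * partition, will not push the host's bolted mappings back into
	 * the SLB while the guest entries set up below are in use. */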
	/* Flush SLB */

	li	r10, 0
	slbmte	r10, r10
	slbia
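	/* slbia leaves SLB entry 0 alone, which is why entry 0 is cleared
	 * explicitly with the zeroed slbmte above. */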
	/* Fill SLB with our shadow */

	lbz	r12, SVCPU_SLB_MAX(r3)
	mulli	r12, r12, 16
	addi	r12, r12, SVCPU_SLB
	add	r12, r12, r3
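	/* r12 = &svcpu->slb[svcpu->slb_max], i.e. one past the last used
	 * guest SLB entry; it serves as the end marker for the loop below. */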
	/* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
	li	r11, SVCPU_SLB
	add	r11, r11, r3
slb_loop_enter:

	ld	r10, 0(r11)
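	/* Only entries whose ESID has the valid (V) bit set are written
	 * into the SLB; empty slots are skipped. */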
	andis.	r9, r10, SLB_ESID_V@h
	beq	slb_loop_enter_skip

	ld	r9, 8(r11)
	slbmte	r9, r10

slb_loop_enter_skip:
	addi	r11, r11, 16
	cmpd	cr0, r11, r12
	blt	slb_loop_enter
slb_do_enter:
.endm
/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/
.macro LOAD_HOST_SEGMENTS
	/* Register usage at this point:
	 *
	 * R1  = host R1
	 * R2  = host R2
	 * R12 = exit handler id
	 * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
	 * SVCPU.*    = guest *
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 *
	 */
	/* Remove all SLB entries that are in use. */

	li	r0, 0
	slbmte	r0, r0
	slbia
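	/* As in the entry path, entry 0 is cleared by hand because slbia
	 * does not invalidate SLB entry 0. */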
	/* Restore bolted entries from the shadow */

	ld	r11, PACA_SLBSHADOWPTR(r13)
BEGIN_FW_FTR_SECTION

	/* Declare SLB shadow as SLB_NUM_BOLTED entries big */

	li	r8, SLB_NUM_BOLTED
	stb	r8, 3(r11)

END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR)
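
	/* The shadow once again advertises SLB_NUM_BOLTED entries, so the
	 * hypervisor resumes restoring the host's bolted mappings, which
	 * are also reloaded by hand below. */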
	/* Manually load all entries from shadow SLB */

	li	r8, SLBSHADOW_SAVEAREA
	li	r7, SLBSHADOW_SAVEAREA + 8
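	/* r8 and r7 walk the ESID and VSID dwords of each save-area entry;
	 * LDX_BE loads them in big-endian byte order, matching the shadow
	 * buffer layout on both big- and little-endian hosts. */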
	.rept SLB_NUM_BOLTED
	LDX_BE	r10, r11, r8
	cmpdi	r10, 0
	beq	1f
	LDX_BE	r9, r11, r7
	slbmte	r9, r10
1:	addi	r7, r7, SHADOW_SLB_ENTRY_LEN
	addi	r8, r8, SHADOW_SLB_ENTRY_LEN
	.endr
	isync
	sync
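	/* Make sure the SLB updates above have taken effect before
	 * execution continues with host translations. */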
slb_do_exit:
.endm