1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2023 ARM Ltd.
*/
#include <linux/jump_label.h>
#include <linux/memblock.h>
#include <linux/psci.h>
#include <linux/swiotlb.h>
#include <linux/cc_platform.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/mem_encrypt.h>
#include <asm/rsi.h>
/* Realm configuration fetched from the RMM in arm64_rsi_init(). */
static struct realm_config config;

/*
 * Top IPA address bit (BIT(config.ipa_bits - 1)) used to mark a mapping as
 * shared with the (non-secure) host; set up in arm64_rsi_init().
 */
unsigned long prot_ns_shared;
EXPORT_SYMBOL(prot_ns_shared);

/* Enabled at the end of arm64_rsi_init() once RSI setup has succeeded. */
DEFINE_STATIC_KEY_FALSE_RO(rsi_present);
EXPORT_SYMBOL(rsi_present);
/*
 * cc_platform_has() - query confidential-computing attributes on arm64.
 *
 * Only CC_ATTR_MEM_ENCRYPT is meaningful here: it is true when running
 * inside a Realm. Every other attribute is reported as unsupported.
 */
bool cc_platform_has(enum cc_attr attr)
{
	if (attr == CC_ATTR_MEM_ENCRYPT)
		return is_realm_world();

	return false;
}
EXPORT_SYMBOL_GPL(cc_platform_has);
/*
 * Negotiate the RSI ABI version with the RMM.
 *
 * Returns true when the RMM accepts the version this kernel was built
 * against (RSI_ABI_VERSION), false when RSI is absent or the supported
 * version range does not include ours.
 */
static bool rsi_version_matches(void)
{
	unsigned long lower, higher;
	unsigned long ret;

	ret = rsi_request_version(RSI_ABI_VERSION, &lower, &higher);

	/* No RSI implementation at all (e.g. not running under an RMM). */
	if (ret == SMCCC_RET_NOT_SUPPORTED)
		return false;

	if (ret != RSI_SUCCESS) {
		pr_err("RME: RMM doesn't support RSI version %lu.%lu. Supported range: %lu.%lu-%lu.%lu\n",
		       RSI_ABI_VERSION_MAJOR, RSI_ABI_VERSION_MINOR,
		       RSI_ABI_VERSION_GET_MAJOR(lower),
		       RSI_ABI_VERSION_GET_MINOR(lower),
		       RSI_ABI_VERSION_GET_MAJOR(higher),
		       RSI_ABI_VERSION_GET_MINOR(higher));
		return false;
	}

	pr_info("RME: Using RSI version %lu.%lu\n",
		RSI_ABI_VERSION_GET_MAJOR(lower),
		RSI_ABI_VERSION_GET_MINOR(lower));

	return true;
}
/*
 * Transition all memblock memory ranges to the protected (RAM) RIPAS state.
 * Called once from arm64_rsi_init(); panics on failure (see below).
 */
static void __init arm64_rsi_setup_memory(void)
{
	u64 i;
	phys_addr_t start, end;

	/*
	 * Iterate over the available memory ranges and convert the state to
	 * protected memory. We should take extra care to ensure that we DO NOT
	 * permit any "DESTROYED" pages to be converted to "RAM".
	 *
	 * panic() is used because if the attempt to switch the memory to
	 * protected has failed here, then future accesses to the memory are
	 * simply going to be reflected as a SEA (Synchronous External Abort)
	 * which we can't handle. Bailing out early prevents the guest limping
	 * on and dying later.
	 */
	for_each_mem_range(i, &start, &end) {
		if (rsi_set_memory_range_protected_safe(start, end)) {
			panic("Failed to set memory range to protected: %pa-%pa",
			      &start, &end);
		}
	}
}
/*
 * Check whether the whole physical range [base, base + size) is protected
 * MMIO, i.e. every granule has RIPAS state RSI_RIPAS_DEV according to the RMM.
 *
 * Returns false on a zero-length or wrapping range, on any RSI query failure,
 * or if any granule in the range is not in the DEV state.
 */
bool __arm64_is_protected_mmio(phys_addr_t base, size_t size)
{
	phys_addr_t next, limit;
	enum ripas state;

	/* Reject empty ranges and address wrap-around. */
	if (WARN_ON(base + size <= base))
		return false;

	limit = ALIGN(base + size, RSI_GRANULE_SIZE);

	for (base = ALIGN_DOWN(base, RSI_GRANULE_SIZE); base < limit; base = next) {
		if (WARN_ON(rsi_ipa_state_get(base, limit, &state, &next)))
			break;
		/* The RMM must report forward progress past the query base. */
		if (WARN_ON(next <= base))
			break;
		/* Any non-DEV granule disqualifies the whole range. */
		if (state != RSI_RIPAS_DEV)
			break;
	}

	/* Only true if the scan walked the entire range without breaking out. */
	return base >= limit;
}
EXPORT_SYMBOL(__arm64_is_protected_mmio);
EXPORT_SYMBOL(__arm64_is_protected_mmio);
/*
 * ioremap() protection hook for Realms: MMIO ranges that the RMM reports as
 * fully protected (RIPAS DEV) are mapped encrypted; everything else is mapped
 * decrypted (shared with the host). Always succeeds.
 */
static int realm_ioremap_hook(phys_addr_t phys, size_t size, pgprot_t *prot)
{
	bool protected_mmio = __arm64_is_protected_mmio(phys, size);

	*prot = protected_mmio ? pgprot_encrypted(*prot) : pgprot_decrypted(*prot);

	return 0;
}
/*
 * Detect and set up RSI (Realm Service Interface) support.
 *
 * Bails out silently when not running under an SMC conduit or when the RMM
 * does not support our RSI ABI version (i.e. not a Realm guest). The
 * rsi_present static key is enabled last, only after the config is read,
 * the ioremap hook and memory-encryption ops are registered, and all
 * memblock memory has been converted to the protected state.
 */
void __init arm64_rsi_init(void)
{
	/* RSI calls are issued via SMC; any other conduit means no RMM. */
	if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_SMC)
		return;
	if (!rsi_version_matches())
		return;
	if (WARN_ON(rsi_get_realm_config(&config)))
		return;
	/* Top IPA bit: set in a PA to share the mapping with the host. */
	prot_ns_shared = BIT(config.ipa_bits - 1);

	if (arm64_ioremap_prot_hook_register(realm_ioremap_hook))
		return;

	if (realm_register_memory_enc_ops())
		return;

	arm64_rsi_setup_memory();

	static_branch_enable(&rsi_present);
}
/* Dummy platform device registered in Realms for drivers to bind against. */
static struct platform_device rsi_dev = {
	.name = RSI_PDEV_NAME,
	.id = PLATFORM_DEVID_NONE
};
/*
 * Register the dummy RSI platform device when running inside a Realm.
 * A registration failure is only logged; boot continues either way
 * (initcall always reports success).
 */
static int __init arm64_create_dummy_rsi_dev(void)
{
	if (!is_realm_world())
		return 0;

	if (platform_device_register(&rsi_dev))
		pr_err("failed to register rsi platform device\n");

	return 0;
}
arch_initcall(arm64_create_dummy_rsi_dev)
|