// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Module strict rwx
*
* Copyright (C) 2015 Rusty Russell
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/execmem.h>
#include "internal.h"
/*
 * Apply @set_memory across every page of one module memory region.
 *
 * A region that was never allocated (NULL base) is skipped and counts
 * as success.  The vmalloc area is flagged so its permissions are
 * flushed/reset when the mapping is eventually freed.
 */
static int module_set_memory(const struct module *mod, enum mod_mem_type type,
			     int (*set_memory)(unsigned long start, int num_pages))
{
	const struct module_memory *region = &mod->mem[type];

	if (!region->base)
		return 0;

	set_vm_flush_reset_perms(region->base);

	return set_memory((unsigned long)region->base,
			  region->size >> PAGE_SHIFT);
}
/*
 * Make every text region read-only + executable.
 *
 * This stays outside CONFIG_STRICT_MODULE_RWX on purpose: some arches
 * allocate module memory as PAGE_KERNEL rather than PAGE_KERNEL_EXEC,
 * so the execute permission must be applied even in non-strict builds.
 */
int module_enable_text_rox(const struct module *mod)
{
	int err = 0;

	for_class_mod_mem_type(type, text) {
		const struct module_memory *mem = &mod->mem[type];

		if (mem->is_rox)
			err = execmem_restore_rox(mem->base, mem->size);
		else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
			err = module_set_memory(mod, type, set_memory_rox);
		else
			err = module_set_memory(mod, type, set_memory_x);

		if (err)
			break;
	}

	return err;
}
/*
 * Write-protect the module's rodata and init-rodata regions.
 *
 * No-op (returns 0) unless strict module RWX is configured and rodata
 * protection was not disabled on the kernel command line.
 */
int module_enable_rodata_ro(const struct module *mod)
{
	int err;

	if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX) || !rodata_enabled)
		return 0;

	err = module_set_memory(mod, MOD_RODATA, set_memory_ro);
	if (!err)
		err = module_set_memory(mod, MOD_INIT_RODATA, set_memory_ro);

	return err;
}
/*
 * Write-protect the __ro_after_init region, same gating as
 * module_enable_rodata_ro().
 */
int module_enable_rodata_ro_after_init(const struct module *mod)
{
	if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && rodata_enabled)
		return module_set_memory(mod, MOD_RO_AFTER_INIT, set_memory_ro);

	return 0;
}
/*
 * Strip execute permission from every data region.  Only meaningful
 * under CONFIG_STRICT_MODULE_RWX; otherwise returns 0 immediately.
 */
int module_enable_data_nx(const struct module *mod)
{
	int err = 0;

	if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		return 0;

	for_class_mod_mem_type(type, data) {
		err = module_set_memory(mod, type, set_memory_nx);
		if (err)
			break;
	}

	return err;
}
/*
 * Reject modules that contain a section marked both writable and
 * executable — W^X must hold under CONFIG_STRICT_MODULE_RWX.
 *
 * Returns 0 on success, -ENOEXEC naming the offending section
 * otherwise.  Always succeeds in non-strict builds.
 */
int module_enforce_rwx_sections(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
				const char *secstrings,
				const struct module *mod)
{
	int i;

	if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		return 0;

	for (i = 0; i < hdr->e_shnum; i++) {
		const Elf_Shdr *shdr = &sechdrs[i];

		if ((shdr->sh_flags & (SHF_WRITE | SHF_EXECINSTR)) !=
		    (SHF_WRITE | SHF_EXECINSTR))
			continue;

		pr_err("%s: section %s (index %d) has invalid WRITE|EXEC flags\n",
		       mod->name, secstrings + shdr->sh_name, i);
		return -ENOEXEC;
	}

	return 0;
}
/*
 * Section names whose contents must become read-only once module init
 * has finished; matched by name in module_mark_ro_after_init() below.
 */
static const char *const ro_after_init[] = {
/*
* Section .data..ro_after_init holds data explicitly annotated by
* __ro_after_init.
*/
".data..ro_after_init",
/*
* Section __jump_table holds data structures that are never modified,
* with the exception of entries that refer to code in the __init
* section, which are marked as such at module load time.
*/
"__jump_table",
#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
/*
* Section .static_call_sites holds data structures that need to be
* sorted and processed at module load time but are never modified
* afterwards.
*/
".static_call_sites",
#endif
};
/*
 * Tag every section listed in ro_after_init[] with SHF_RO_AFTER_INIT
 * so the loader write-protects it after module init completes.
 */
void module_mark_ro_after_init(const Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
			       const char *secstrings)
{
	int i;

	/* Start at 1: ELF index 0 is the reserved null section. */
	for (i = 1; i < hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &sechdrs[i];
		const char *name = secstrings + shdr->sh_name;
		size_t j;

		for (j = 0; j < ARRAY_SIZE(ro_after_init); j++) {
			if (!strcmp(name, ro_after_init[j])) {
				shdr->sh_flags |= SHF_RO_AFTER_INIT;
				break;
			}
		}
	}
}