// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/random.h>
#include <linux/vdso_datastore.h>
#include <vdso/datapage.h>
#include <asm/vdso/vsyscall.h>
#include <asm/alternative.h>
#include <asm/vdso.h>

extern char vdso_start[], vdso_end[];

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}
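
/*
 * Note on vdso_mremap(): mm->context.vdso_base is used e.g. to locate the
 * signal return trampoline inside the vdso, so it has to follow the mapping
 * if user space moves it. Checkpoint/restore tools such as CRIU do exactly
 * that; an illustrative user-space sketch (vdso_old, vdso_len and vdso_new
 * are hypothetical names, not part of this file):
 *
 *	void *new = mremap(vdso_old, vdso_len, vdso_len,
 *			   MREMAP_MAYMOVE | MREMAP_FIXED, vdso_new);
 *
 * The mremap() path invokes the callback above with the moved VMA.
 */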

static struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

int vdso_getcpu_init(void)
{
	set_tod_programmable_field(smp_processor_id());
	return 0;
}
early_initcall(vdso_getcpu_init); /* Must be called before SMP init */
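
/*
 * How getcpu works here: set_tod_programmable_field() stores the CPU number
 * in the TOD clock's programmable field, which user space can then read
 * without a system call via the unprivileged STORE CLOCK EXTENDED
 * instruction. A minimal sketch of the user-side read, assuming the
 * programmable field occupies the last two bytes of the 16-byte result:
 *
 *	struct tod_ext { unsigned char b[16]; } clk;
 *
 *	asm volatile("stcke %0" : "=Q" (clk) : : "cc");
 *	unsigned int cpu = (clk.b[14] << 8) | clk.b[15];
 *
 * Running this as an early_initcall only covers the boot CPU; secondary
 * CPUs set their field when they are brought up.
 */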

static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
{
	unsigned long vvar_start, vdso_text_start, vdso_text_len;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;

	BUILD_BUG_ON(VDSO_NR_PAGES != __VDSO_PAGES);
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	vdso_text_len = vdso_end - vdso_start;
	vvar_start = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0);
	rc = vvar_start;
	if (IS_ERR_VALUE(vvar_start))
		goto out;
	vma = vdso_install_vvar_mapping(mm, vvar_start);
	rc = PTR_ERR(vma);
	if (IS_ERR(vma))
		goto out;
	vdso_text_start = vvar_start + VDSO_NR_PAGES * PAGE_SIZE;
	/* VM_MAYWRITE for COW so gdb can set breakpoints */
	vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
				       VM_READ|VM_EXEC|VM_SEALED_SYSMAP|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);
	if (IS_ERR(vma)) {
		do_munmap(mm, vvar_start, PAGE_SIZE, NULL);
		rc = PTR_ERR(vma);
	} else {
		current->mm->context.vdso_base = vdso_text_start;
		rc = 0;
	}
out:
	mmap_write_unlock(mm);
	return rc;
}
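
/*
 * Resulting layout of a successful map_vdso() call, with the vdso data
 * pages placed directly below the vdso text:
 *
 *	vvar_start:      VDSO_NR_PAGES data pages (vvar mapping)
 *	vdso_text_start: vvar_start + VDSO_NR_PAGES * PAGE_SIZE, the
 *	                 vdso text mapped r-x
 *
 * Only vdso_text_start is remembered in mm->context.vdso_base; the vdso
 * code itself reaches its data pages through linker-provided symbols at
 * fixed negative offsets from the text.
 */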

static unsigned long vdso_addr(unsigned long start, unsigned long len)
{
	unsigned long addr, end, offset;

	/*
	 * Round up the start address. It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= VDSO_BASE)
		end = VDSO_BASE;
	end -= len;

	if (end > start) {
		offset = get_random_u32_below(((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}
	return addr;
}
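
/*
 * Worked example of the randomization arithmetic, assuming 4 KiB pages and
 * the 1 MiB segment (PMD) size of s390: for an aligned start = 0x10000000
 * and len = 0x5000 (5 pages), the lowest possible end rounds up to
 * start + 0x100000; after "end -= len" the highest candidate address is
 * start + 0xfb000, so get_random_u32_below() picks one of
 * ((0xfb000 >> 12) + 1) = 252 equally likely page-aligned slots.
 */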

unsigned long vdso_text_size(void)
{
	return PAGE_ALIGN(vdso_end - vdso_start);
}

unsigned long vdso_size(void)
{
	return vdso_text_size() + VDSO_NR_PAGES * PAGE_SIZE;
}

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	unsigned long addr = VDSO_BASE;
	unsigned long size = vdso_size();

	if (current->flags & PF_RANDOMIZE)
		addr = vdso_addr(current->mm->start_stack + PAGE_SIZE, size);
	return map_vdso(addr, size);
}
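
/*
 * arch_setup_additional_pages() is the per-process entry point, called by
 * the ELF loader during execve(); bprm and uses_interp are unused here.
 * Without PF_RANDOMIZE the vdso is placed at the fixed VDSO_BASE hint,
 * with it at a randomized page-aligned address above the stack. Either
 * way the address is only a hint that get_unmapped_area() is free to
 * ignore if the range is not usable.
 */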

static struct page ** __init vdso_setup_pages(void *start, void *end)
{
	int pages = (end - start) >> PAGE_SHIFT;
	struct page **pagelist;
	int i;

	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!pagelist)
		panic("%s: Cannot allocate page list for VDSO", __func__);
	for (i = 0; i < pages; i++)
		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
	return pagelist;
}
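
/*
 * The extra slot from kcalloc(pages + 1, ...) intentionally stays zeroed:
 * a vm_special_mapping's .pages array is NULL-terminated, which is how the
 * special mapping fault handler finds its end.
 */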

static void vdso_apply_alternatives(void)
{
	const struct elf64_shdr *alt, *shdr;
	struct alt_instr *start, *end;
	const struct elf64_hdr *hdr;

	hdr = (struct elf64_hdr *)vdso_start;
	shdr = (void *)hdr + hdr->e_shoff;
	alt = find_section(hdr, shdr, ".altinstructions");
	if (!alt)
		return;
	start = (void *)hdr + alt->sh_offset;
	end = (void *)hdr + alt->sh_offset + alt->sh_size;
	apply_alternatives(start, end);
}
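
/*
 * The alternatives are patched once, directly in the kernel's copy of the
 * vdso image (the vdso is a complete ELF file linked into the kernel, so
 * its .altinstructions section can be located via the ELF section
 * headers). This happens before vdso_setup_pages() hands the backing
 * pages to the special mapping, so every process maps the already patched
 * text and no per-process fixups are needed.
 */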

static int __init vdso_init(void)
{
	vdso_apply_alternatives();
	vdso_mapping.pages = vdso_setup_pages(vdso_start, vdso_end);
	return 0;
}
arch_initcall(vdso_init);