// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Functions for initialising, allocating, freeing and duplicating VMAs. Shared
* between CONFIG_MMU and non-CONFIG_MMU kernel configurations.
*/
#include "vma_internal.h"
#include "vma.h"
/* SLAB cache for vm_area_struct structures */
static struct kmem_cache *vm_area_cachep;
/*
 * vma_state_init() - create the SLAB cache backing all VMA allocations.
 *
 * Runs once at boot (__init); SLAB_PANIC makes failure fatal here rather
 * than at first allocation.  SLAB_TYPESAFE_BY_RCU means a freed
 * vm_area_struct may be reallocated before an RCU grace period elapses,
 * so lockless readers must revalidate any VMA they find — see the
 * matching notes in vm_area_free().
 */
void __init vma_state_init(void)
{
	struct kmem_cache_args args = {
		.use_freeptr_offset = true,
		/*
		 * Place the slab freelist pointer in a dedicated field
		 * (vm_freeptr) so freeing does not scribble over fields a
		 * concurrent RCU reader may still be inspecting.
		 */
		.freeptr_offset = offsetof(struct vm_area_struct, vm_freeptr),
	};
	vm_area_cachep = kmem_cache_create("vm_area_struct",
			sizeof(struct vm_area_struct), &args,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
			SLAB_ACCOUNT);
}
/*
 * vm_area_alloc() - allocate and initialise a fresh VMA for @mm.
 *
 * Returns the initialised VMA, or NULL if the slab allocation fails.
 * The caller owns the returned VMA and releases it via vm_area_free().
 */
struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
	struct vm_area_struct *vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);

	if (vma)
		vma_init(vma, mm);

	return vma;
}
/*
 * vm_area_init_from() - copy into @dest every field of @src that a
 * duplicated VMA needs.
 *
 * Deliberately a field-by-field copy rather than a struct assignment:
 * lock/refcount state and list linkage are NOT copied — vm_area_dup()
 * reinitialises those on the new object after calling this.
 */
static void vm_area_init_from(const struct vm_area_struct *src,
		struct vm_area_struct *dest)
{
	dest->vm_mm = src->vm_mm;
	dest->vm_ops = src->vm_ops;
	dest->vm_start = src->vm_start;
	dest->vm_end = src->vm_end;
	dest->anon_vma = src->anon_vma;
	dest->vm_pgoff = src->vm_pgoff;
	dest->vm_file = src->vm_file;
	dest->vm_private_data = src->vm_private_data;
	vm_flags_init(dest, src->vm_flags);
	memcpy(&dest->vm_page_prot, &src->vm_page_prot,
			sizeof(dest->vm_page_prot));
	/*
	 * src->shared.rb may be modified concurrently when called from
	 * dup_mmap(), but the clone will reinitialize it.
	 */
	data_race(memcpy(&dest->shared, &src->shared, sizeof(dest->shared)));
	memcpy(&dest->vm_userfaultfd_ctx, &src->vm_userfaultfd_ctx,
			sizeof(dest->vm_userfaultfd_ctx));
#ifdef CONFIG_ANON_VMA_NAME
	/*
	 * NOTE(review): plain pointer copy here — the refcount on the name
	 * is presumably taken by dup_anon_vma_name() in vm_area_dup();
	 * confirm no other caller relies on this copy alone.
	 */
	dest->anon_name = src->anon_name;
#endif
#ifdef CONFIG_SWAP
	memcpy(&dest->swap_readahead_info, &src->swap_readahead_info,
			sizeof(dest->swap_readahead_info));
#endif
#ifndef CONFIG_MMU
	dest->vm_region = src->vm_region;
#endif
#ifdef CONFIG_NUMA
	dest->vm_policy = src->vm_policy;
#endif
#ifdef __HAVE_PFNMAP_TRACKING
	/*
	 * Do not share the tracking ctx implicitly: it is refcounted, and
	 * vm_area_dup() takes the reference explicitly via
	 * vma_pfnmap_track_ctx_dup().
	 */
	dest->pfnmap_track_ctx = NULL;
#endif
}
#ifdef __HAVE_PFNMAP_TRACKING
/*
 * Share @orig's pfnmap tracking ctx with @new by taking an extra kref.
 * Returns 0 on success (including the common no-ctx case), -ENOMEM if
 * the refcount is saturated.
 */
static inline int vma_pfnmap_track_ctx_dup(struct vm_area_struct *orig,
		struct vm_area_struct *new)
{
	struct pfnmap_track_ctx *ctx = orig->pfnmap_track_ctx;
	if (likely(!ctx))
		return 0;
	/*
	 * We don't expect to ever hit this. If ever required, we would have
	 * to duplicate the tracking.
	 */
	if (unlikely(kref_read(&ctx->kref) >= REFCOUNT_MAX))
		return -ENOMEM;
	kref_get(&ctx->kref);
	new->pfnmap_track_ctx = ctx;
	return 0;
}
/*
 * Drop @vma's reference on its pfnmap tracking ctx, if any;
 * pfnmap_track_ctx_release() runs when the last reference goes away.
 * Clears the pointer so the release is idempotent per-VMA.
 */
static inline void vma_pfnmap_track_ctx_release(struct vm_area_struct *vma)
{
	struct pfnmap_track_ctx *ctx = vma->pfnmap_track_ctx;
	if (likely(!ctx))
		return;
	kref_put(&ctx->kref, pfnmap_track_ctx_release);
	vma->pfnmap_track_ctx = NULL;
}
#else
/* No pfnmap tracking on this arch: both operations are no-ops. */
static inline int vma_pfnmap_track_ctx_dup(struct vm_area_struct *orig,
		struct vm_area_struct *new)
{
	return 0;
}
static inline void vma_pfnmap_track_ctx_release(struct vm_area_struct *vma)
{
}
#endif
/*
 * vm_area_dup() - allocate a new VMA that is a copy of @orig.
 *
 * Copies the data fields, takes a reference on the pfnmap tracking ctx
 * (if any), and reinitialises per-object state (lock, anon_vma_chain,
 * NUMA balancing state) on the copy.  Returns NULL on allocation failure
 * or if the pfnmap ctx could not be shared.
 */
struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
{
	struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new)
		return NULL;
	/*
	 * KCSAN assertions: no other thread may be writing these fields of
	 * @orig while we copy them below.
	 */
	ASSERT_EXCLUSIVE_WRITER(orig->vm_flags);
	ASSERT_EXCLUSIVE_WRITER(orig->vm_file);
	vm_area_init_from(orig, new);
	if (vma_pfnmap_track_ctx_dup(orig, new)) {
		kmem_cache_free(vm_area_cachep, new);
		return NULL;
	}
	/* Fresh lock state for the copy; 'true' presumably resets the
	 * refcount on this recycled slab object — TODO confirm. */
	vma_lock_init(new, true);
	INIT_LIST_HEAD(&new->anon_vma_chain);
	vma_numab_state_init(new);
	/* Takes a proper reference on the anon VMA name shared with @orig. */
	dup_anon_vma_name(orig, new);
	return new;
}
/*
 * vm_area_free() - release a VMA previously obtained from vm_area_alloc()
 * or vm_area_dup().
 *
 * Because the cache is SLAB_TYPESAFE_BY_RCU (see vma_state_init()), the
 * object may be reallocated immediately — before an RCU grace period —
 * which is why the VMA must already be detached from all lookup
 * structures when it gets here.
 */
void vm_area_free(struct vm_area_struct *vma)
{
	/* The vma should be detached while being destroyed. */
	vma_assert_detached(vma);
	vma_numab_state_free(vma);
	free_anon_vma_name(vma);
	/* Drop our reference on the shared pfnmap tracking ctx, if any. */
	vma_pfnmap_track_ctx_release(vma);
	kmem_cache_free(vm_area_cachep, vma);
}