/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 - 2001 by Ralf Baechle at alii
* Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
*/
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H
#include <linux/config.h>
/* TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the TLB entries of the specified mm context
 *  - flush_tlb_page(vma, vmaddr) flushes a single page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */
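
/*
 * The flush operations are reached through function pointers so that a
 * CPU-specific implementation can be installed at run time; on UP builds
 * the macros below simply call through those pointers.
 */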
extern void (*_flush_tlb_all)(void);
extern void (*_flush_tlb_mm)(struct mm_struct *mm);
extern void (*_flush_tlb_range)(struct mm_struct *mm, unsigned long start,
	unsigned long end);
extern void (*_flush_tlb_page)(struct vm_area_struct *vma, unsigned long page);
#ifndef CONFIG_SMP
#define flush_tlb_all() _flush_tlb_all()
#define flush_tlb_mm(mm) _flush_tlb_mm(mm)
#define flush_tlb_range(mm,vmaddr,end) _flush_tlb_range(mm, vmaddr, end)
#define flush_tlb_page(vma,page) _flush_tlb_page(vma, page)
#else /* CONFIG_SMP */
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_range(struct mm_struct *, unsigned long, unsigned long);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
#endif /* CONFIG_SMP */

extern inline void flush_tlb_pgtables(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	/* Nothing to do on MIPS. */
}
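
/*
 * Typical use (illustrative sketch, not taken from this file): after the
 * page tables covering a user address range have been modified, the stale
 * translations are dropped with
 *
 *	flush_tlb_range(vma->vm_mm, start, end);
 *
 * before the new mappings are relied upon.
 */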
/*
* Allocate and free page tables.
*/
#define pgd_quicklist (current_cpu_data.pgd_quick)
#define pmd_quicklist (current_cpu_data.pmd_quick)
#define pte_quicklist (current_cpu_data.pte_quick)
#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
#define pmd_populate(mm, pmd, pte) pmd_set(pmd, pte)
#define pgd_populate(mm, pgd, pmd) pgd_set(pgd, pmd)
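
/*
 * Freed page tables are cached on per-CPU quicklists: a freed table is
 * pushed by storing the old list head in its first word, and
 * pgtable_cache_size counts the cached pages so do_check_pgt_cache() can
 * trim the lists when they grow too large.
 */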
extern pgd_t *get_pgd_slow(void);
extern inline pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if ((ret = pgd_quicklist) != NULL) {
		/* Pop the head of the quicklist. */
		pgd_quicklist = (unsigned long *)(*ret);
		/* Slot 0 was overwritten by the freelist link; restore it
		   from slot 1. */
		ret[0] = ret[1];
		pgtable_cache_size--;
		return (pgd_t *)ret;
	}
	ret = (unsigned long *) get_pgd_slow();
	return (pgd_t *)ret;
}

extern inline void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

extern inline void free_pgd_slow(pgd_t *pgd)
{
	free_pages((unsigned long)pgd, 1);
}
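
/*
 * The *_alloc_one() helpers go to the page allocator for a fresh,
 * initialised table; the *_fast() / *_alloc_one_fast() helpers reuse
 * pages cached on the quicklists and return NULL when the list is empty.
 */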
static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;

	pte = (pte_t *) __get_free_page(GFP_KERNEL);
	if (pte)
		clear_page(pte);
	return pte;
}

static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}
extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
extern inline pte_t *get_pte_fast(void)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

extern inline void free_pte_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

extern inline void free_pte_slow(pte_t *pte)
{
	free_pages((unsigned long)pte, 0);
}
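
/*
 * A pmd is an order-1 allocation (two pages) and is initialised so that
 * every entry points at invalid_pte_table.
 */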
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd;

	pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, 1);
	if (pmd)
		pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
	return pmd;
}

static inline pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pmd_quicklist) != NULL) {
		pmd_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	}
	return (pmd_t *)ret;
}
extern pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long address_preadjusted);
extern inline pmd_t *get_pmd_fast(void)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pmd_quicklist) != NULL) {
		pmd_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	}
	return (pmd_t *)ret;
}

extern inline void free_pmd_fast(pmd_t *pmd)
{
	*(unsigned long *)pmd = (unsigned long) pmd_quicklist;
	pmd_quicklist = (unsigned long *) pmd;
	pgtable_cache_size++;
}

extern inline void free_pmd_slow(pmd_t *pmd)
{
	free_pages((unsigned long)pmd, 1);
}
#define pte_free(pte) free_pte_fast(pte)
#define pmd_free(pmd) free_pmd_fast(pmd)
#define pgd_free(pgd) free_pgd_fast(pgd)
#define pgd_alloc(mm) get_pgd_fast()
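
/* Statically sized kernel page table and pmd table (defined in the arch code). */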
extern pte_t kptbl[(PAGE_SIZE<<KPTBL_PAGE_ORDER)/sizeof(pte_t)];
extern pmd_t kpmdtbl[PTRS_PER_PMD];
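
/*
 * do_check_pgt_cache() trims the quicklists back towards the given low
 * watermark once pgtable_cache_size exceeds the high watermark.
 */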
extern int do_check_pgt_cache(int, int);
#endif /* _ASM_PGALLOC_H */