/* $Id: pgalloc.h,v 1.29 2001/10/20 12:38:51 davem Exp $ */
#ifndef _SPARC64_PGALLOC_H
#define _SPARC64_PGALLOC_H
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/pgtable.h>
/* Cache and TLB flush operations. */
/* These are the same regardless of whether this is an SMP kernel or not. */
#define flush_cache_mm(__mm) \
do { if ((__mm) == current->mm) flushw_user(); } while(0)
#define flush_cache_range(mm, start, end) \
flush_cache_mm(mm)
#define flush_cache_page(vma, page) \
flush_cache_mm((vma)->vm_mm)
/* Unnecessary on spitfire: the D-cache is write-through, so main
 * memory is always up to date and there is nothing to push back.
 */
#define flush_page_to_ram(page) do { } while (0)
/*
* On spitfire, the icache doesn't snoop local stores and we don't
* use block commit stores (which invalidate icache lines) during
* module load, so we need this.
*/
extern void flush_icache_range(unsigned long start, unsigned long end);
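/* Illustrative only, not part of this header: a hedged sketch of the
 * kind of caller the comment above has in mind.  After copying
 * instructions with ordinary stores, the I-cache must be flushed by
 * hand before the new code runs; the function name is made up.
 */
#if 0
static void load_module_text(void *dst, const void *src, unsigned long len)
{
	memcpy(dst, src, len);	/* plain stores: I-cache does not snoop these */
	flush_icache_range((unsigned long)dst,
			   (unsigned long)dst + len);
}
#endif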
extern void __flush_dcache_page(void *addr, int flush_icache);
extern void __flush_icache_page(unsigned long);
extern void flush_dcache_page_impl(struct page *page);
#ifdef CONFIG_SMP
extern void smp_flush_dcache_page_impl(struct page *page, int cpu);
#else
#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
#endif
extern void flush_dcache_page(struct page *page);
extern void __flush_dcache_range(unsigned long start, unsigned long end);
extern void __flush_cache_all(void);
extern void __flush_tlb_all(void);
extern void __flush_tlb_mm(unsigned long context, unsigned long r);
extern void __flush_tlb_range(unsigned long context, unsigned long start,
unsigned long r, unsigned long end,
unsigned long pgsz, unsigned long size);
extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r);
#ifndef CONFIG_SMP
#define flush_cache_all() __flush_cache_all()
#define flush_tlb_all() __flush_tlb_all()
#define flush_tlb_mm(__mm) \
do { if(CTX_VALID((__mm)->context)) \
__flush_tlb_mm(CTX_HWBITS((__mm)->context), SECONDARY_CONTEXT); \
} while(0)
#define flush_tlb_range(__mm, start, end) \
do { if(CTX_VALID((__mm)->context)) { \
unsigned long __start = (start)&PAGE_MASK; \
unsigned long __end = PAGE_ALIGN(end); \
__flush_tlb_range(CTX_HWBITS((__mm)->context), __start, \
SECONDARY_CONTEXT, __end, PAGE_SIZE, \
(__end - __start)); \
} \
} while(0)
#define flush_tlb_page(vma, page) \
do { struct mm_struct *__mm = (vma)->vm_mm; \
if(CTX_VALID(__mm->context)) \
__flush_tlb_page(CTX_HWBITS(__mm->context), (page)&PAGE_MASK, \
SECONDARY_CONTEXT); \
} while(0)
#else /* CONFIG_SMP */
extern void smp_flush_cache_all(void);
extern void smp_flush_tlb_all(void);
extern void smp_flush_tlb_mm(struct mm_struct *mm);
extern void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
unsigned long end);
extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);
#define flush_cache_all() smp_flush_cache_all()
#define flush_tlb_all() smp_flush_tlb_all()
#define flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
#define flush_tlb_range(mm, start, end) \
smp_flush_tlb_range(mm, start, end)
#define flush_tlb_page(vma, page) \
smp_flush_tlb_page((vma)->vm_mm, page)
#endif /* ! CONFIG_SMP */
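/* A hedged usage sketch, not from this file: how generic mm code is
 * expected to bracket a page table change (e.g. an unmap) with these.
 * "vma", "start" and "end" are hypothetical.
 */
#if 0
	flush_cache_range(vma->vm_mm, start, end);	/* before the PTEs change */
	/* ... clear the page table entries ... */
	flush_tlb_range(vma->vm_mm, start, end);	/* after the PTEs change */
#endif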
#define VPTE_BASE_SPITFIRE	0xfffffffe00000000
#if 1
/* For now cheetah uses the same VPTE base as spitfire. */
#define VPTE_BASE_CHEETAH	VPTE_BASE_SPITFIRE
#else
#define VPTE_BASE_CHEETAH	0xffe0000000000000
#endif
extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
					  unsigned long end)
{
	/* Signed on purpose: an arithmetic right shift sign-extends
	 * addresses above the VM hole so their VPTE addresses land in
	 * the top half of the VPTE area.
	 */
	long s = start, e = end, vpte_base;

	if (s > e)
		/* No caller should hand us a range which straddles the
		 * VM hole (start below it, end above it).  BUG() so we
		 * find out if that assumption is ever violated.
		 */
		BUG();
#if 0
	/* Currently free_pgtables guarantees this. */
	s &= PMD_MASK;
	e = (e + PMD_SIZE - 1) & PMD_MASK;
#endif
	vpte_base = (tlb_type == spitfire ?
		     VPTE_BASE_SPITFIRE :
		     VPTE_BASE_CHEETAH);

	/* Each PTE is 8 bytes, so the PTE mapping a given user page
	 * lives at vpte_base + (vaddr >> (PAGE_SHIFT - 3)) in the
	 * linear VPTE area; flush that range of PTE mappings.
	 */
	flush_tlb_range(mm,
			vpte_base + (s >> (PAGE_SHIFT - 3)),
			vpte_base + (e >> (PAGE_SHIFT - 3)));
}
#undef VPTE_BASE_SPITFIRE
#undef VPTE_BASE_CHEETAH
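/* Worked example of the shift above, assuming PAGE_SHIFT == 13 (8K
 * pages): the PTE for the page at user address 0x100000 occupies
 * 8 bytes at
 *
 *	vpte_base + (0x100000 >> (13 - 3)) == vpte_base + 0x400
 *
 * i.e. each 1 << 20 bytes of user space takes 1 << 10 bytes of VPTE
 * area.
 */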
/* Page table allocation/freeing. */
#ifdef CONFIG_SMP
/* Sliiiicck: on SMP the quicklists live in each cpu's cpu_data,
 * giving every processor its own lock-free page table cache.
 */
#define pgt_quicklists	cpu_data[smp_processor_id()]
#else
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;	/* pgd quicklist head */
	unsigned long *pte_cache[2];	/* pte/pmd quicklists, one per D-cache color */
	unsigned int pgcache_size;	/* pages on the pte/pmd quicklists */
	unsigned int pgdcache_size;	/* free pgd halves on the pgd quicklist */
} pgt_quicklists;
#endif
#define pgd_quicklist		(pgt_quicklists.pgd_cache)
#define pmd_quicklist		((unsigned long *)0)	/* pmds share the pte quicklists */
#define pte_quicklist		(pgt_quicklists.pte_cache)
#define pgtable_cache_size	(pgt_quicklists.pgcache_size)
#define pgd_cache_size		(pgt_quicklists.pgdcache_size)
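/* The pte/pmd quicklists (and the SMP pgd quicklist) are plain LIFO
 * freelists threaded through the first word of each free page; the
 * non-SMP pgd list instead chains struct page fields, see below.
 * A hedged sketch of the freelist idiom, with a hypothetical head
 * "head" and free page "pg":
 */
#if 0
	*(unsigned long *)pg = (unsigned long)head;	/* push */
	head = (unsigned long *)pg;

	pg = (void *)head;				/* pop */
	head = (unsigned long *)*head;
#endif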
#ifndef CONFIG_SMP
/* A sparc64 pgd needs only half a page, so two pgds share one page.
 * For pages on this quicklist we reuse two struct page fields:
 * ->next_hash chains the pages, and ->pprev_hash abuses the pointer
 * as a bitmask of free halves (1: lower half free, 2: upper half
 * free).
 */
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	if (!page->pprev_hash) {
		/* Both halves were in use; requeue the page. */
		page->next_hash = (struct page *)pgd_quicklist;
		pgd_quicklist = (unsigned long *)page;
	}
	page->pprev_hash = (struct page **)
		((unsigned long)page->pprev_hash |
		 (((unsigned long)pgd & (PAGE_SIZE / 2)) ? 2 : 1));
	pgd_cache_size++;
}

extern __inline__ pgd_t *get_pgd_fast(void)
{
	struct page *ret;

	if ((ret = (struct page *)pgd_quicklist) != NULL) {
		unsigned long mask = (unsigned long)ret->pprev_hash;
		unsigned long off = 0;

		/* Prefer the lower half, else take the upper one. */
		if (mask & 1)
			mask &= ~1;
		else {
			off = PAGE_SIZE / 2;
			mask &= ~2;
		}
		ret->pprev_hash = (struct page **)mask;
		if (!mask)
			/* No free half left, unlink the page. */
			pgd_quicklist = (unsigned long *)ret->next_hash;
		ret = (struct page *)(__page_address(ret) + off);
		pgd_cache_size--;
	} else {
		struct page *page = alloc_page(GFP_KERNEL);

		if (page) {
			/* Hand out the lower half, cache the upper one. */
			ret = (struct page *)page_address(page);
			clear_page(ret);
			page->pprev_hash = (struct page **)2UL;
			page->next_hash = (struct page *)pgd_quicklist;
			pgd_quicklist = (unsigned long *)page;
			pgd_cache_size++;
		}
	}
	return (pgd_t *)ret;
}
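/* A hedged walk-through of the half-page bookkeeping above, assuming
 * the quicklist starts out empty:
 */
#if 0
	pgd_t *a = get_pgd_fast();	/* fresh page: a = lower half, mask -> 2, page queued */
	pgd_t *b = get_pgd_fast();	/* b = upper half, mask -> 0, page unlinked */
	free_pgd_fast(a);		/* mask -> 1, page queued again */
#endif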
#else /* CONFIG_SMP */
/* On SMP a pgd gets a full page, and the quicklist is threaded
 * through the first word of each free page.
 */
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

extern __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if ((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;	/* erase the freelist link */
		pgtable_cache_size--;
	} else {
		ret = (unsigned long *) __get_free_page(GFP_KERNEL);
		if (ret)
			memset(ret, 0, PAGE_SIZE);
	}
	return (pgd_t *)ret;
}

extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}
#endif /* CONFIG_SMP */
#if (L1DCACHE_SIZE > PAGE_SIZE)			/* is there a D$ aliasing problem? */
#define VPTE_COLOR(address)		(((address) >> (PAGE_SHIFT + 10)) & 1UL)
#define DCACHE_COLOR(address)		(((address) >> PAGE_SHIFT) & 1UL)
#else
#define VPTE_COLOR(address)		0
#define DCACHE_COLOR(address)		0
#endif
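/* Why bit PAGE_SHIFT + 10: a user address maps to its PTE at
 * vpte_base + (addr >> (PAGE_SHIFT - 3)), so user address bit
 * PAGE_SHIFT + 10 becomes bit (PAGE_SHIFT + 10) - (PAGE_SHIFT - 3)
 * = 13 of the VPTE address.  Assuming PAGE_SHIFT == 13 (8K pages),
 * that is exactly the D-cache color bit which DCACHE_COLOR() tests.
 */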
#define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
extern __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL);
if (pmd)
memset(pmd, 0, PAGE_SIZE);
return pmd;
}
extern __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	unsigned long *ret;
	int color = 0;

	/* A pmd has no color constraint of its own, so take whichever
	 * colored quicklist happens to be non-empty.
	 */
	if (pte_quicklist[color] == NULL)
		color = 1;
	if ((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
		pte_quicklist[color] = (unsigned long *)(*ret);
		ret[0] = 0;	/* erase the freelist link */
		pgtable_cache_size--;
	}
	return (pmd_t *)ret;
}
extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
	/* Queue the page by its own D-cache color. */
	unsigned long color = DCACHE_COLOR((unsigned long)pmd);

	*(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
	pte_quicklist[color] = (unsigned long *) pmd;
	pgtable_cache_size++;
}
extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
free_page((unsigned long)pmd);
}
#define pmd_populate(MM, PMD, PTE) pmd_set(PMD, PTE)
extern pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address);
extern __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	/* Pick the quicklist whose pages match the D-cache color of
	 * this address's slot in the VPTE area.
	 */
	unsigned long color = VPTE_COLOR(address);
	unsigned long *ret;

	if ((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
		pte_quicklist[color] = (unsigned long *)(*ret);
		ret[0] = 0;	/* erase the freelist link */
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}
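/* A hedged sketch, not part of this header, of how callers are
 * expected to combine the fast and slow paths: try the colored
 * quicklist first, then fall back to a real page allocation.
 */
#if 0
	pte_t *pte = pte_alloc_one_fast(mm, address);
	if (!pte)
		pte = pte_alloc_one(mm, address);
#endif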
extern __inline__ void free_pte_fast(pte_t *pte)
{
unsigned long color = DCACHE_COLOR((unsigned long)pte);
*(unsigned long *)pte = (unsigned long) pte_quicklist[color];
pte_quicklist[color] = (unsigned long *) pte;
pgtable_cache_size++;
}
extern __inline__ void free_pte_slow(pte_t *pte)
{
free_page((unsigned long)pte);
}
#define pte_free(pte) free_pte_fast(pte)
#define pmd_free(pmd) free_pmd_fast(pmd)
#define pgd_free(pgd) free_pgd_fast(pgd)
#define pgd_alloc(mm) get_pgd_fast()
extern int do_check_pgt_cache(int, int);
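/* do_check_pgt_cache() is defined in the sparc64 mm code.  A hedged
 * sketch of the idea, not the real implementation: once the cache
 * grows past "high", pop entries off the quicklists and truly free
 * them until it shrinks back to "low".
 */
#if 0
int do_check_pgt_cache(int low, int high)
{
	int freed = 0;

	if (pgtable_cache_size > high) {
		do {
#ifdef CONFIG_SMP
			if (pgd_quicklist) {
				free_pgd_slow(get_pgd_fast());
				freed++;
			}
#endif
			if (pte_quicklist[0]) {
				/* Address 0 has VPTE color 0. */
				free_pte_slow(pte_alloc_one_fast(NULL, 0));
				freed++;
			}
			if (pte_quicklist[1]) {
				/* Bit PAGE_SHIFT + 10 set gives VPTE color 1. */
				free_pte_slow(pte_alloc_one_fast(NULL,
						1UL << (PAGE_SHIFT + 10)));
				freed++;
			}
		} while (pgtable_cache_size > low);
	}
	/* The !SMP pgd half-page cache would be trimmed separately. */
	return freed;
}
#endif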
#endif /* _SPARC64_PGALLOC_H */