/*
* linux/include/asm-xtensa/page.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_PAGE_H
#define _XTENSA_PAGE_H
#ifdef __KERNEL__
#include <asm/processor.h>
/*
* PAGE_SHIFT determines the page size
* PAGE_ALIGN(x) aligns the pointer to the (next) page boundary
*/
#define PAGE_SHIFT XCHAL_MMU_MIN_PTE_PAGE_SIZE
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE - 1) & PAGE_MASK)
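/*
 * A worked sketch (editorial, assuming a typical PAGE_SHIFT of 12,
 * i.e. 4 KiB pages): PAGE_SIZE is then 0x1000 and PAGE_MASK is
 * 0xfffff000, so PAGE_ALIGN(0x2345) rounds up to 0x3000, while an
 * already aligned 0x2000 is left unchanged.
 */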
#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS)
#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
#ifdef __ASSEMBLY__
#define __pgprot(x) (x)
#else
/*
 * These wrapper structs exist purely for C type-checking: they keep
 * PTE, PGD, and protection values from being mixed up accidentally.
 */
typedef struct { unsigned long pte; } pte_t; /* page table entry */
typedef struct { unsigned long pgd; } pgd_t; /* PGD table entry */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
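/*
 * A minimal usage sketch (editorial): the constructors and accessors
 * round-trip the raw value,
 *
 *	pte_t pte = __pte(0x1000);
 *	unsigned long raw = pte_val(pte);	yielding raw == 0x1000
 *
 * while a bare assignment such as "pte_t pte = 0x1000;" is rejected
 * by the compiler, which is exactly what the struct wrappers buy us.
 */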
/*
 * Pure 2^n version of get_order: the bit-mask variant below is exact
 * only when the size rounds to a power-of-two number of pages.
 */
static inline int get_order(unsigned long size)
{
int order;
#ifndef XCHAL_HAVE_NSU
unsigned long x1, x2, x4, x8, x16;
size = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
x1 = size & 0xAAAAAAAA;
x2 = size & 0xCCCCCCCC;
x4 = size & 0xF0F0F0F0;
x8 = size & 0xFF00FF00;
x16 = size & 0xFFFF0000;
order = x2 ? 2 : 0;
order += (x16 != 0) * 16;
order += (x8 != 0) * 8;
order += (x4 != 0) * 4;
order += (x1 != 0);
return order;
#else
size = (size - 1) >> PAGE_SHIFT;
asm ("nsau %0, %1" : "=r" (order) : "r" (size));
return 32 - order;
#endif
}
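/*
 * Worked examples (editorial, assuming 4 KiB pages): one page gives
 * get_order(0x1000) == 0 and four pages give get_order(0x4000) == 2,
 * i.e. 1 << order pages cover the request.  As noted above, the
 * bit-mask variant is exact only for power-of-two sizes; the nsau
 * variant rounds any size up to the next order.
 */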
struct page;
extern void clear_page(void *page);
extern void copy_page(void *to, void *from);
/*
 * If the data cache can alias (its way size exceeds the page size)
 * and uses writeback, clearing or copying user pages needs extra
 * cache management work.
 */
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *page);
#else
# define clear_user_page(page,vaddr,pg) clear_page(page)
# define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
#endif
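/*
 * A numeric sketch (editorial, assuming a hypothetical 16 KiB, 2-way
 * data cache): DCACHE_WAY_SIZE is 16 KiB / 2 = 8 KiB.  With 4 KiB
 * pages that exceeds PAGE_SIZE, so virtual address bit 12 selects the
 * cache colour and two mappings of the same physical page can alias,
 * which is why the out-of-line helpers above are needed.
 */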
/*
 * This handles the memory map.  Pages are mapped at
 * XCHAL_KSEG_CACHED_VADDR for kernels with a 32-bit address space.
 * These macros convert kernel addresses, not user addresses.
 */
#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
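/*
 * A worked sketch (editorial, assuming KSEG is cached at the common
 * 0xd0000000): __pa(0xd0001000) gives physical 0x1000, and
 * __va(0x1000) maps it back.  Both are plain offset conversions and
 * are valid only for the linearly mapped kernel segment.
 */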
#define pfn_valid(pfn) ((unsigned long)(pfn) < max_mapnr)
#ifdef CONFIG_DISCONTIGMEM
# error CONFIG_DISCONTIGMEM not supported
#endif
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
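/*
 * A consistency sketch (editorial): for any valid kernel address
 * kaddr, page_to_phys(virt_to_page(kaddr)) == (__pa(kaddr) & PAGE_MASK),
 * since both sides reduce to the same page frame number.
 */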
#define WANT_PAGE_VIRTUAL
#endif /* __ASSEMBLY__ */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#include <asm-generic/memory_model.h>
#endif /* __KERNEL__ */
#endif /* _XTENSA_PAGE_H */