/*
 * linux/include/asm-arm/proc-armv/mm-init.h
 *
 * Copyright (C) 1996 Russell King
 *
 * This contains the code to set up the memory map on an ARM v3 or v4
 * machine.  It is both processor- and architecture-specific, and requires
 * some more work to get it to fit into our separate processor and
 * architecture structure.
 */
/*
 * On ebsa, we want the memory map set up so:
 *
 *	PHYS		VIRT
 *	00000000	00000000	Zero page
 *	000003ff	000003ff	Zero page end
 *	00000000	c0000000	Kernel and all physical memory
 *	01ffffff	c1ffffff	End of physical (32MB)
 *	e0000000	e0000000	IO start
 *	ffffffff	ffffffff	IO end
 *
 * On rpc, we want:
 *
 *	PHYS		VIRT
 *	10000000	00000000	Zero page
 *	100003ff	000003ff	Zero page end
 *	10000000	c0000000	Kernel and all physical memory
 *	1fffffff	cfffffff	End of physical (32MB)
 *	02000000	d?000000	Screen memory (first image)
 *	02000000	d8000000	Screen memory (second image)
 *	00000000	df000000	StrongARM cache invalidation area
 *	03000000	e0000000	IO start
 *	03ffffff	e0ffffff	IO end
 *
 * We set it up using the section page table entries.
 */
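/*
 * Background (a sketch based on the ARM v3/v4 MMU architecture rather
 * than on anything defined in this file): a first-level "section" entry
 * maps a whole 1MB of virtual space with a single 32-bit descriptor,
 * roughly laid out as:
 *
 *	bits 31..20	physical section base address (1MB aligned)
 *	bits 11..10	AP - access permissions
 *	bits  8..5	domain number
 *	bit   3		C - cacheable
 *	bit   2		B - bufferable
 *	bits  1..0	descriptor type (10 = section)
 *
 * alloc_init_section() below builds such an entry from PMD_TYPE_SECT,
 * PMD_DOMAIN() and the PMD_SECT_* protection bits.
 */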
#include <asm/pgtable.h>

/* Size in bytes of one second-level page table (PTRS_PER_PTE entries of 4 bytes each) */
#define PTE_SIZE (PTRS_PER_PTE * 4)

extern unsigned long setup_io_pagetables(unsigned long start_mem);
/*
* Add a SECTION mapping between VIRT and PHYS in domain DOMAIN with protection PROT
*/
static inline void
alloc_init_section(unsigned long *mem, unsigned long virt, unsigned long phys, int domain, int prot)
{
	pgd_t *pgdp;
	pmd_t *pmdp, pmd;

	pgdp = pgd_offset_k(virt);
	pmdp = pmd_offset(pgdp, virt);

	pmd_val(pmd) = phys | PMD_TYPE_SECT | PMD_DOMAIN(domain) | prot;
	set_pmd(pmdp, pmd);
}
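/*
 * For illustration only (the addresses and domain below are invented,
 * not taken from a real machine description): mapping one kernel-writable,
 * uncached 1MB section of IO space might look like
 *
 *	alloc_init_section(&start_mem, 0xe0000000, 0x03000000,
 *			   DOMAIN_IO, PMD_SECT_AP_WRITE);
 *
 * Note that alloc_init_section() never touches *mem; the argument is
 * presumably only there so the signature matches alloc_init_page(),
 * which may need to allocate a second-level table from *mem.
 */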
/*
* Clear any mapping
*/
static inline void
free_init_section(unsigned long virt)
{
	pgd_t *pgdp;
	pmd_t *pmdp;

	pgdp = pgd_offset_k(virt);
	pmdp = pmd_offset(pgdp, virt);

	pmd_clear(pmdp);
}
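/*
 * (A cleared first-level entry is a "fault" descriptor on ARM, so the
 * corresponding 1MB of virtual space simply becomes unmapped.)
 */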
/*
* Add a PAGE mapping between VIRT and PHYS in domain DOMAIN with protection PROT
*/
static inline void
alloc_init_page(unsigned long *mem, unsigned long virt, unsigned long phys, int domain, int prot)
{
	pgd_t *pgdp;
	pmd_t *pmdp, pmd;
	pte_t *ptep;

	pgdp = pgd_offset_k(virt);
	pmdp = pmd_offset(pgdp, virt);

	if (pmd_none(*pmdp)) {
		/*
		 * No second-level table yet: carve a PTE_SIZE-aligned,
		 * zeroed one out of *mem and point the pmd at it.
		 */
		unsigned long memory = *mem;

		memory = (memory + PTE_SIZE - 1) & ~(PTE_SIZE - 1);
		ptep = (pte_t *)memory;
		memzero(ptep, PTE_SIZE);

		pmd_val(pmd) = __virt_to_phys(memory) | PMD_TYPE_TABLE | PMD_DOMAIN(domain);
		set_pmd(pmdp, pmd);

		*mem = memory + PTE_SIZE;
	}

	ptep = pte_offset(pmdp, virt);
	pte_val(*ptep) = phys | prot | PTE_TYPE_SMALL;
}
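/*
 * A worked example of the allocation above (numbers invented, and
 * assuming PTRS_PER_PTE is 256, i.e. PTE_SIZE is 1024 bytes): with
 * *mem at 0xc0208123, the first page mapping to hit an empty pmd
 * rounds up to 0xc0208400, uses that 1KB as the new zeroed second-level
 * table, points the pmd at its physical address, and leaves *mem at
 * 0xc0208800.  Later calls hitting the same pmd reuse the table and
 * only fill in their pte.
 */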
static inline unsigned long
setup_pagetables(unsigned long start_mem, unsigned long end_mem)
{
	unsigned long address;

	/*
	 * map in zero page
	 */
	alloc_init_page(&start_mem, 0, __virt_to_phys(PAGE_OFFSET), DOMAIN_USER, PTE_CACHEABLE);

	/*
	 * ensure no mappings in user space
	 */
	for (address = PGDIR_SIZE; address < PAGE_OFFSET; address += PGDIR_SIZE)
		free_init_section(address);

	/*
	 * map in physical ram & kernel
	 */
	for (address = PAGE_OFFSET; address < end_mem; address += PGDIR_SIZE)
		alloc_init_section(&start_mem, address, __virt_to_phys(address), DOMAIN_KERNEL,
				   PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE | PMD_SECT_AP_WRITE);

	/*
	 * unmap everything else: the loop relies on address wrapping past
	 * 0xffffffff back to zero to terminate
	 */
	for (address = end_mem; address; address += PGDIR_SIZE)
		free_init_section(address);

	/*
	 * An area to invalidate the cache
	 */
	alloc_init_section(&start_mem, FLUSH_BASE, FLUSH_BASE_PHYS, DOMAIN_KERNEL,
			   PMD_SECT_CACHEABLE | PMD_SECT_AP_READ);

	/*
	 * Now set up our IO mappings
	 */
	start_mem = setup_io_pagetables(start_mem);

	flush_cache_all();

	return start_mem;
}
/*
 * Mark the pages between start_mem and end_mem as usable by clearing
 * their PG_reserved bits.
 */
static inline void
mark_usable_memory_areas(unsigned long *start_mem, unsigned long end_mem)
{
	unsigned long smem;

	*start_mem = smem = PAGE_ALIGN(*start_mem);

	while (smem < end_mem) {
		clear_bit(PG_reserved, &mem_map[MAP_NR(smem)].flags);
		smem += PAGE_SIZE;
	}
}
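/*
 * How these fit together (a sketch; the actual caller is the arch
 * start-up code, e.g. its paging_init()/mem_init() path, which is not
 * part of this file):
 *
 *	start_mem = setup_pagetables(start_mem, end_mem);
 *	...
 *	mark_usable_memory_areas(&start_mem, end_mem);
 *
 * i.e. the first-level entries and any second-level tables are carved
 * out of start_mem first, and only the memory left above the bumped
 * start_mem is then marked usable.
 */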