/*
 * Creation Date: <1999/11/22 23:32:59 samuel>
 * Time-stamp: <2001/04/15 12:28:31 samuel>
 *
 * <tlbie.c>
 *
 * TLBIE implementation and support functions
 *
 * Copyright (C) 1999, 2000, 2001 Samuel Rydh (samuel@ibrium.se)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation
 *
 */
#include "compat.h"
#include <linux/config.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/string.h>	/* memset */
#include <asm/unistd.h>
#include "mmu.h"
#include "asmfuncs.h"
#define PERFORMANCE_INFO
#include "performance.h"
/* tlbie bits: 14..19 on the 604/G3, 15..19 for the 603(e) */
#define EA_TO_INDEX( ea ) (((ea) >> 12) & 0x3f)
#define HEIGHT 64 /* DON'T TOUCH! */
#define DEPTH 512 /* 2^n */
#define TABLE_SIZE (HEIGHT * DEPTH * sizeof(ulong*) )
#define NEXT_MASK (DEPTH-1)
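/* Shadow bookkeeping for issued mappings: for each of the HEIGHT possible
 * values of the EA index bits, keep a ring of up to DEPTH pointers to PTEs
 * that were inserted for an EA with that index. table_tlbie() walks the
 * ring and clears the recorded PTEs before flushing the hardware TLB.
 */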
typedef struct tlbie_table {
	ulong	**table[ HEIGHT ];
	int	next[ HEIGHT ];
	ulong	*raw_tab;
} tlbie_table_t;
#define MMU (kv->mmu)
#define DECLARE_TT tlbie_table_t *tt = MMU.tlbie_table
/************************************************************************/
/* F U N C T I O N S */
/************************************************************************/
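/* Allocate and zero the tlbie table; each index gets one DEPTH-sized
 * slice of the vmalloc'ed raw area. Returns 0 on success, 1 on failure.
 */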
int
init_tlbie( kernel_vars_t *kv )
{
	DECLARE_TT;
	int i;

	if( !(tt = kmalloc( sizeof(tlbie_table_t), GFP_KERNEL ) ))
		return 1;
	memset( tt, 0, sizeof(tlbie_table_t) );

	if( !(tt->raw_tab = (ulong*)vmalloc( TABLE_SIZE ) )) {
		kfree( tt );
		return 1;
	}
	memset( tt->raw_tab, 0, TABLE_SIZE );

	for( i=0; i<HEIGHT; i++ ) {
		tt->table[i] = (ulong**)(tt->raw_tab + (DEPTH*i));
		tt->next[i] = 0;
	}
	MMU.tlbie_table = tt;
	return 0;
}
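/* Release the table allocated by init_tlbie(). Safe to call even if
 * initialization never ran or failed.
 */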
void
cleanup_tlbie( kernel_vars_t *kv )
{
	DECLARE_TT;

	if( !MMU.tlbie_table )
		return;
	if( tt->raw_tab )
		vfree( tt->raw_tab );
	MMU.tlbie_table = NULL;
	kfree( tt );
}
/* This function invalidates the TLB entries matching EA bits 14..19.
 * Thus, if a particular processor has a larger TLB (indexed by more EA
 * bits, like the 601), multiple tlbie instructions are needed.
 */
static inline void
group_tlbie( ulong ea )
{
	if( !cpu_is_601() ) {
		/* 603(e) uses bits 15..19 */
		/* 750 & 604(e) use bits 14..19 */
		_tlbie( ea );
	} else {
		/* 601 uses bits 13..19 */
		_tlbie( ea | BIT(13) );
		_tlbie( ea & ~BIT(13) );
	}
}
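/* Record the address of a PTE inserted for 'ea' so table_tlbie() can
 * clear it later. If the ring slot is already occupied, the whole index
 * is flushed first and the insertion is retried (see the #else branch).
 */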
void
hash_ea_to_pte( kernel_vars_t *kv, ulong ea, ulong *pte )
{
	DECLARE_TT;
	int ind = EA_TO_INDEX(ea);
	int j = tt->next[ind];
	ulong **t;

	BUMP( hash_ea_to_pte );

	if( !pte )
		return;
	t = tt->table[ind];
#if 0
	if( t[j] ) {
		BUMP( ea_hash_overwrite );
		*t[j] = 0;
		group_tlbie( ea );
	}
#else
	if( t[j] ) {
		BUMP( ea_hash_overwrite );
		table_tlbie( kv, ea );
		hash_ea_to_pte( kv, ea, pte );
		return;
	}
#endif
	t[j] = pte;
	tt->next[ind] = (j+1) & NEXT_MASK;
}
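/* Clear every PTE recorded for the index of 'ea' and invalidate the
 * corresponding hardware TLB entries.
 */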
void
table_tlbie( kernel_vars_t *kv, ulong ea )
{
	DECLARE_TT;
	int ind = EA_TO_INDEX( ea );
	ulong **t = tt->table[ind];
	int i;

	BUMP( table_tlbie );

	for( i=0; i<DEPTH && t[i] != NULL; i++ ) {
		*t[i] = 0;
		t[i] = NULL;
	}
	tt->next[ind] = 0;

	/* finally flush physical TLB cache */
	group_tlbie( ea );
}
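/* Flush all recorded PTEs and the whole EA-indexed TLB by iterating
 * over every index value.
 */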
void
table_tlbia( kernel_vars_t *kv )
{
	int i;

	BUMP( table_tlbia );

	for( i=0; i<HEIGHT; i++ )
		table_tlbie( kv, i << 12 );
}
/* Not optimal, but seldom called (currently only when a BAT register is reloaded) */
void
unmap_ea_range( kernel_vars_t *kv, ulong ea, size_t size )
{
	/* printk("unmap_ea_range %08lX, size %08lX\n", ea, size ); */
	BUMP( unmap_ea_range_ctr );
	table_tlbia( kv );
}