/* rirq.S: Return from an interrupt on SMP without holding
 * or releasing any locks.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/cprefix.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/psr.h>
#include <asm/asi.h>
#include <asm/smp.h>
#include <asm/contregs.h>
#include <asm/winmacro.h>
#include <asm/asmmacro.h>
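/* Local register aliases: saved trap state and scratch temporaries. */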
#define t_psr l0
#define t_pc l1
#define t_npc l2
#define t_wim l3
#define twin_tmp1 l4
#define twin_tmp2 l5
#define twin_tmp3 l6
/* 7 WINDOW SPARC PATCH INSTRUCTIONS */
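/* On CPUs with only seven register windows these five instructions are
 * copied over rirq_patch1..rirq_patch5 below at boot, so the window mask
 * arithmetic rotates within 7 bits instead of 8.
 */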
.globl rirq_7win_patch1, rirq_7win_patch2, rirq_7win_patch3
.globl rirq_7win_patch4, rirq_7win_patch5
rirq_7win_patch1: srl %t_wim, 0x6, %twin_tmp2
rirq_7win_patch2: and %twin_tmp2, 0x7f, %twin_tmp2
rirq_7win_patch3: srl %g1, 7, %g2
rirq_7win_patch4: srl %g2, 6, %g2
rirq_7win_patch5: and %g1, 0x7f, %g1
/* END OF PATCH INSTRUCTIONS */
.globl ret_irq_entry, rirq_patch1, rirq_patch2
.globl rirq_patch3, rirq_patch4, rirq_patch5
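/* Return-from-interrupt entry point.  The PSR_PS bit of the saved %psr
 * tells us whether the interrupt hit kernel or user mode.
 */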
ret_irq_entry:
ld [%sp + REGWIN_SZ + PT_PSR], %t_psr
andcc %t_psr, PSR_PS, %g0
bne ret_irq_kernel
nop
ret_irq_user:
wr %t_psr, 0x0, %psr
WRITE_PAUSE
LOAD_CURRENT(twin_tmp2, twin_tmp1)
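/* THREAD_W_SAVED counts user register windows that had to be stashed in
 * the thread struct because they could not be spilled to the user stack
 * at trap time.
 */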
ld [%twin_tmp2 + THREAD_W_SAVED], %twin_tmp1
orcc %g0, %twin_tmp1, %g0
be ret_irq_nobufwins
nop
/* User has toasty windows, must grab klock. */
ENTER_SYSCALL
wr %t_psr, PSR_ET, %psr
WRITE_PAUSE
mov 1, %o1
call C_LABEL(try_to_clear_window_buffer)
add %sp, REGWIN_SZ, %o0
/* We have klock, so we must return just like a normal trap. */
b ret_trap_entry
nop
ret_irq_nobufwins:
/* Load up the user's out registers so we can pull
* a window from the stack, if necessary.
*/
LOAD_PT_INS(sp)
/* If there are already live user windows in the
 * register set, we can return from the trap safely.
*/
ld [%twin_tmp2 + THREAD_UMASK], %twin_tmp1
orcc %g0, %twin_tmp1, %g0
bne ret_irq_userwins_ok
nop
/* Calculate the new %wim; we have to pull a register
 * window from the user's stack.
*/
ret_irq_pull_one_window:
rd %wim, %t_wim
sll %t_wim, 0x1, %twin_tmp1
rirq_patch1: srl %t_wim, 0x7, %twin_tmp2
or %twin_tmp2, %twin_tmp1, %twin_tmp2
rirq_patch2: and %twin_tmp2, 0xff, %twin_tmp2
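/* %twin_tmp2 is the old %wim rotated by one window: this opens up the
 * window we are about to refill from the user's stack.
 */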
wr %twin_tmp2, 0x0, %wim
WRITE_PAUSE
/* Here comes the architecture-specific branch to the
 * user stack checking routine for returning from traps.
 */
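/* The andcc in the delay slot below tests the user's %fp for doubleword
 * alignment; the stack checking routine branches on those condition
 * codes before anything else.
 */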
.globl C_LABEL(rirq_mmu_patchme)
C_LABEL(rirq_mmu_patchme): b C_LABEL(sun4c_reti_stackchk)
andcc %fp, 0x7, %g0
ret_irq_userwins_ok:
LOAD_PT_PRIV(sp, t_psr, t_pc, t_npc)
or %t_pc, %t_npc, %g2
andcc %g2, 0x3, %g0
bne ret_irq_unaligned_pc
nop
LOAD_PT_YREG(sp, g1)
LOAD_PT_GLOBALS(sp)
wr %t_psr, 0x0, %psr
WRITE_PAUSE
jmp %t_pc
rett %t_npc
ret_irq_unaligned_pc:
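/* Set up the arguments for do_memaccess_unaligned in %o0-%o3:
 * pt_regs pointer, pc, npc and psr.
 */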
add %sp, REGWIN_SZ, %o0
ld [%sp + REGWIN_SZ + PT_PC], %o1
ld [%sp + REGWIN_SZ + PT_NPC], %o2
ld [%sp + REGWIN_SZ + PT_PSR], %o3
wr %t_wim, 0x0, %wim ! or else...
WRITE_PAUSE
/* User has unaligned crap, must grab klock. */
ENTER_SYSCALL
wr %t_psr, PSR_ET, %psr
WRITE_PAUSE
call C_LABEL(do_memaccess_unaligned)
nop
/* We have klock, so we must return just like a normal trap. */
b ret_trap_entry
nop
ret_irq_kernel:
wr %t_psr, 0x0, %psr
WRITE_PAUSE
/* Will the rett land us in the invalid window? */
mov 2, %g1
sll %g1, %t_psr, %g1
rirq_patch3: srl %g1, 8, %g2
or %g1, %g2, %g1
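/* %g1 now has the bit set for the window the rett will restore into,
 * (CWP + 1) modulo the number of windows.
 */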
rd %wim, %g2
andcc %g2, %g1, %g0
be 1f ! Nope, just return from the trap
nop
/* We have to grab a window before returning. */
sll %g2, 0x1, %g1
rirq_patch4: srl %g2, 7, %g2
or %g1, %g2, %g1
rirq_patch5: and %g1, 0xff, %g1
wr %g1, 0x0, %wim
WRITE_PAUSE
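/* Step into the window the rett will return to, reload its locals and
 * ins from its kernel stack frame, then step back.
 */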
restore %g0, %g0, %g0
LOAD_WINDOW(sp)
save %g0, %g0, %g0
/* Reload the entire frame in case this is from a
* kernel system call or whatever...
*/
1:
LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
wr %t_psr, 0x0, %psr
WRITE_PAUSE
jmp %t_pc
rett %t_npc
ret_irq_user_stack_is_bolixed:
wr %t_wim, 0x0, %wim
WRITE_PAUSE
/* User has a toasty window, must grab klock. */
ENTER_SYSCALL
wr %t_psr, PSR_ET, %psr
WRITE_PAUSE
call C_LABEL(window_ret_fault)
add %sp, REGWIN_SZ, %o0
/* We have klock, so we must return just like a normal trap. */
b ret_trap_entry
nop
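/* sun4c_reti_stackchk: make sure the user stack pointed to by %fp is
 * doubleword aligned, not in the sun4c vma hole, and mapped with the
 * needed permissions (probed via ASI_PTE) before pulling a window from
 * it.  Any problem sends us to ret_irq_user_stack_is_bolixed.
 */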
.globl C_LABEL(sun4c_reti_stackchk)
C_LABEL(sun4c_reti_stackchk):
be 1f
and %fp, 0xfff, %g1 ! delay slot
b ret_irq_user_stack_is_bolixed
nop
/* See if we have to check the sanity of one page or two */
1:
add %g1, 0x38, %g1
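/* %g1 = page offset of the window's last doubleword; if it runs past
 * the end of the page, the window spans two pages.
 */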
sra %fp, 29, %g2
add %g2, 0x1, %g2
andncc %g2, 0x1, %g0
be 1f
andncc %g1, 0xff8, %g0
/* %sp is in vma hole, yuck */
b ret_irq_user_stack_is_bolixed
nop
1:
be sun4c_reti_onepage /* Only one page to check */
lda [%fp] ASI_PTE, %g2
sun4c_reti_twopages:
add %fp, 0x38, %g1
sra %g1, 29, %g2
add %g2, 0x1, %g2
andncc %g2, 0x1, %g0
be 1f
lda [%g1] ASI_PTE, %g2
/* Second page is in vma hole */
b ret_irq_user_stack_is_bolixed
nop
1:
srl %g2, 29, %g2
andcc %g2, 0x4, %g0
bne sun4c_reti_onepage
lda [%fp] ASI_PTE, %g2
/* Second page has bad perms */
b ret_irq_user_stack_is_bolixed
nop
sun4c_reti_onepage:
srl %g2, 29, %g2
andcc %g2, 0x4, %g0
bne 1f
nop
/* A page had bad page permissions, losing... */
b ret_irq_user_stack_is_bolixed
nop
/* Whee, things are ok, load the window and continue. */
1:
restore %g0, %g0, %g0
LOAD_WINDOW(sp)
save %g0, %g0, %g0
b ret_irq_userwins_ok
nop
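/* srmmu_reti_stackchk: same job for the SRMMU.  Instead of probing
 * PTEs, clear any stale fault status, set the no-fault bit in the MMU
 * control register, load the window from the user stack, then check the
 * fault status register to see whether the accesses faulted.
 */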
.globl C_LABEL(srmmu_reti_stackchk)
C_LABEL(srmmu_reti_stackchk):
bne ret_irq_user_stack_is_bolixed
sethi %hi(KERNBASE), %g1
cmp %g1, %fp
bleu ret_irq_user_stack_is_bolixed
mov AC_M_SFSR, %g1
lda [%g1] ASI_M_MMUREGS, %g0
lda [%g0] ASI_M_MMUREGS, %g1
or %g1, 0x2, %g1
sta %g1, [%g0] ASI_M_MMUREGS
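/* Faults are now taken silently: a bad user stack just latches the
 * fault status instead of trapping.
 */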
restore %g0, %g0, %g0
LOAD_WINDOW(sp)
save %g0, %g0, %g0
andn %g1, 0x2, %g1
sta %g1, [%g0] ASI_M_MMUREGS
mov AC_M_SFAR, %g2
lda [%g2] ASI_M_MMUREGS, %g2
mov AC_M_SFSR, %g1
lda [%g1] ASI_M_MMUREGS, %g1
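/* The fault-address-valid bit set in the status means one of the
 * window loads above faulted.
 */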
andcc %g1, 0x2, %g0
bne ret_irq_user_stack_is_bolixed
nop
b ret_irq_userwins_ok
nop