/* softirq.h: 64-bit Sparc soft IRQ support.
 *
 * Copyright (C) 1997, 1998 David S. Miller (davem@caip.rutgers.edu)
 */
#ifndef __SPARC64_SOFTIRQ_H
#define __SPARC64_SOFTIRQ_H

#include <asm/atomic.h>
#include <asm/hardirq.h>

#ifndef __SMP__
extern unsigned int local_bh_count;
#else
#define local_bh_count		(cpu_data[smp_processor_id()].bh_count)
#endif
/* The locking mechanism for base handlers, to prevent re-entrancy,
 * is entirely private to an implementation; it should not be
 * referenced at all outside of this file.
 */
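/* clear_active_bhs() atomically clears the given bits in bh_active:
 * it loads the word, clears the bits with andn, and tries to store
 * the result with the V9 casx compare-and-swap, retrying from the
 * load whenever another CPU changed bh_active in between.
 */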
#define get_active_bhs()	(bh_mask & bh_active)
#define clear_active_bhs(mask)				\
	__asm__ __volatile__(				\
"1:	ldx	[%1], %%g7\n"				\
"	andn	%%g7, %0, %%g5\n"			\
"	casx	[%1], %%g7, %%g5\n"			\
"	cmp	%%g7, %%g5\n"				\
"	bne,pn	%%xcc, 1b\n"				\
"	 nop"						\
	: /* no outputs */				\
	: "HIr" (mask), "r" (&bh_active)		\
	: "g5", "g7", "cc", "memory")
extern inline void init_bh(int nr, void (*routine)(void))
{
	bh_base[nr] = routine;
	bh_mask_count[nr] = 0;
	bh_mask |= 1 << nr;
}

extern inline void remove_bh(int nr)
{
	bh_base[nr] = NULL;
	bh_mask &= ~(1 << nr);
}

extern inline void mark_bh(int nr)
{
	set_bit(nr, &bh_active);
}
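/* Usage sketch (illustrative only; MYDEV_BH and mydev_bh() are
 * hypothetical names, not part of this header): a driver registers
 * its bottom half once at init time and marks it pending from its
 * hard IRQ handler:
 *
 *	init_bh(MYDEV_BH, mydev_bh);
 *	...
 *	mark_bh(MYDEV_BH);	called from the interrupt handler
 *
 * The marked handler is then run by the bottom-half dispatcher once
 * the hard IRQ path returns.
 */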
#ifndef __SMP__
extern inline void start_bh_atomic(void)
{
	local_bh_count++;
	barrier();
}

extern inline void end_bh_atomic(void)
{
	barrier();
	local_bh_count--;
}
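/* On UP only this CPU (and its local interrupt handlers) ever
 * touches local_bh_count, so plain increments and decrements
 * suffice; the barrier() just stops the compiler from reordering
 * the count update across the critical section.
 */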
/* These are for the IRQs testing the lock */
#define softirq_trylock(cpu)	(local_bh_count ? 0 : (local_bh_count = 1))
#define softirq_endlock(cpu)	(local_bh_count = 0)
#define synchronize_bh()	barrier()
#else /* (__SMP__) */
extern atomic_t global_bh_lock;
extern spinlock_t global_bh_count;
extern void synchronize_bh(void);
static inline void start_bh_atomic(void)
{
	atomic_inc(&global_bh_lock);
	synchronize_bh();
}

static inline void end_bh_atomic(void)
{
	atomic_dec(&global_bh_lock);
}
/* These are for the IRQs testing the lock */
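/* softirq_trylock() must both acquire the global_bh_count spinlock
 * (so that only one CPU runs bottom halves at a time) and observe
 * global_bh_lock == 0 (no CPU is inside a start_bh_atomic()
 * section) before bottom halves may run; otherwise it backs out
 * and returns 0.
 */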
static inline int softirq_trylock(int cpu)
{
	if (spin_trylock(&global_bh_count)) {
		if (atomic_read(&global_bh_lock) == 0) {
			++(cpu_data[cpu].bh_count);
			return 1;
		}
		spin_unlock(&global_bh_count);
	}
	return 0;
}

static inline void softirq_endlock(int cpu)
{
	(cpu_data[cpu].bh_count)--;
	spin_unlock(&global_bh_count);
}
#endif /* (__SMP__) */
/*
 * These use a mask count to correctly handle
 * nested disable/enable calls
 */
extern inline void disable_bh(int nr)
{
	bh_mask &= ~(1 << nr);
	bh_mask_count[nr]++;
	synchronize_bh();
}

extern inline void enable_bh(int nr)
{
	if (!--bh_mask_count[nr])
		bh_mask |= 1 << nr;
}
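/* Nesting sketch (illustrative only): disable/enable calls must
 * balance before the handler can run again, e.g. with the stock
 * TIMER_BH:
 *
 *	disable_bh(TIMER_BH);
 *	disable_bh(TIMER_BH);	bh_mask_count[TIMER_BH] is now 2
 *	enable_bh(TIMER_BH);	still disabled
 *	enable_bh(TIMER_BH);	count reaches zero, bh unmasked again
 */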
#endif /* !(__SPARC64_SOFTIRQ_H) */