/*
* Copyright 2011, Ben Langmead <blangmea@jhsph.edu>
*
* This file is part of Bowtie 2.
*
* Bowtie 2 is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Bowtie 2 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Bowtie 2. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef SPINLOCK_H_
#define SPINLOCK_H_
/**
* This non-reentrant spinlock implementation for i386 and x86_64 is
* based on free code by Gert Boddaert:
*
* http://www.codeproject.com/KB/threads/spinlocks.aspx
*
* Using spinlocks instead of the heavier pthreads mutexes can, in some
* cases, help Bowtie perform better for large numbers of threads.
*/
// (If the user hasn't enabled BOWTIE_PTHREADS, then there's no need for any
// kind of locking, so we skip the rest of this header)
#ifdef BOWTIE_PTHREADS
#if defined(__GNUC__)
#if defined(__x86_64__) || defined(__i386__)
#define USE_SPINLOCK
#endif
#endif
#ifdef USE_SPINLOCK
#if defined(__x86_64__)
#define SPINLOCK_WORD long
#else
#define SPINLOCK_WORD int
#endif
class SpinLock {
public:
// inlined constructor
inline SpinLock() : m_s(1) {}
// inlined NON-virtual destructor
inline ~SpinLock() {}
// Enters the lock; spins (with or without sleeping between attempts)
// while the mutex is already locked
inline void Enter(void)
{
SPINLOCK_WORD prev_s;
do
{
prev_s = TestAndSet(&m_s, 0);
if (m_s == 0 && prev_s == 1)
{
// The lock was unlocked and we grabbed it
break;
}
// Relinquish the current timeslice (can only
// be used when an OS is available and
// we do NOT want to busy-spin)
// HWSleep(0);
}
while (true);
}
// Tries to enter the lock; returns 0
// when the mutex is already locked,
// returns non-zero on success
inline int TryEnter(void)
{
SPINLOCK_WORD prev_s = TestAndSet(&m_s, 0);
if (m_s == 0 && prev_s == 1)
{
return 1;
}
return 0;
}
// Leaves or unlocks the mutex
// (should only be called by lock owner)
inline void Leave(void)
{
TestAndSet(&m_s, 1);
}
protected:
// Sets the lock word to the given value and returns the previous
// value in one atomic, uninterruptible operation
SPINLOCK_WORD TestAndSet(SPINLOCK_WORD* pTargetAddress, SPINLOCK_WORD nValue);
private:
SPINLOCK_WORD m_s;
};
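// Illustrative usage sketch (not part of Bowtie 2): it shows how a SpinLock
// might guard a short critical section, using a hypothetical shared counter
// updated from several worker threads. Enter() spins until the lock is
// acquired, TryEnter() returns immediately, and Leave() releases the lock
// and should only be called by the current owner.
//
//   static SpinLock countLock;       // protects 'readsProcessed'
//   static long readsProcessed = 0;  // hypothetical shared counter
//
//   void recordRead() {
//       countLock.Enter();           // spin until we own the lock
//       readsProcessed++;            // short critical section
//       countLock.Leave();           // release so other threads can enter
//   }
//
//   bool tryRecordRead() {
//       if(countLock.TryEnter()) {   // non-blocking attempt
//           readsProcessed++;
//           countLock.Leave();
//           return true;
//       }
//       return false;                // lock was busy; caller can do other work
//   }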
// This part is platform-dependent!
/* The following piece of code can be found
in function AtomicExchange of file atomicops-internals-x86.h
under http://google-perftools.googlecode.com/svn/trunk/src/base/
*/
#if defined(__x86_64__)
#define TAS(_lw, _res) \
asm volatile("xchgq %1,%0":"=r"(_res):"m"(*_lw),"0"(_res):"memory")
#elif defined(__i386__)
#define TAS(_lw, _res) \
asm volatile("xchgl %1,%0":"=r"(_res):"m"(*_lw),"0"(_res):"memory")
#else
#error "Architecture is neither x86_64 nor i386 (see spinlock.h)"
#endif
/* TAS is only defined for GNUC on x86_64 and i386,
   since that is where GCC x86 inline assembly can be used */
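/* Illustrative note (not from the original source): an x86 XCHG instruction
   with a memory operand is implicitly locked, so TAS(_lw, _res) atomically
   swaps *_lw and _res. Conceptually, as one indivisible step:

       temp = *_lw;     // previous lock word
       *_lw = _res;     // new value stored
       _res = temp;     // caller receives the previous value

   A roughly comparable (assumed) portable alternative under GCC would be the
   __sync_lock_test_and_set() builtin, though this header keeps the raw
   assembly taken from the original code. */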
inline SPINLOCK_WORD SpinLock::TestAndSet(SPINLOCK_WORD* pTargetAddress, SPINLOCK_WORD nValue) {
#if 0
// MSVC-style inline-assembly equivalent; disabled, kept for reference only
__asm
{
mov edx, dword ptr [pTargetAddress]
mov eax, nValue
lock xchg eax, dword ptr [edx]
}
#else
TAS(pTargetAddress, nValue);
return nValue;
#endif
// mov = 1 CPU cycle
// lock = 1 CPU cycle
// xchg = 3 CPU cycles
}
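/* Illustrative trace (not from the original source), assuming the lock starts
   in the unlocked state (m_s == 1):

       prev = TestAndSet(&m_s, 0);  // m_s: 1 -> 0, prev == 1  => lock acquired
       prev = TestAndSet(&m_s, 0);  // m_s: 0 -> 0, prev == 0  => lock was busy
       TestAndSet(&m_s, 1);         // m_s: 0 -> 1             => Leave()
*/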
#endif /*USE_SPINLOCK*/
#endif /*BOWTIE_PTHREADS*/
#endif /*SPINLOCK_H_*/