From 86d65dc07080c557863e2595be84d6fcbc338166 Mon Sep 17 00:00:00 2001
From: Jeremy Allison
Date: Tue, 9 Dec 2003 22:41:26 +0000
Subject: IRIX spinlock patch from James Peach. Jeremy.

(This used to be commit 1ae1987a002716e8aa0d4bc0dd68f580ad762e47)
---
 source3/tdb/spinlock.c | 45 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 45 insertions(+)

diff --git a/source3/tdb/spinlock.c b/source3/tdb/spinlock.c
index 3fddeafb2c..3b3ebefded 100644
--- a/source3/tdb/spinlock.c
+++ b/source3/tdb/spinlock.c
@@ -143,6 +143,47 @@ static inline int __spin_is_locked(spinlock_t *lock)
 	return (*lock != 1);
 }
 
+#elif defined(MIPS_SPINLOCKS) && defined(sgi) && (_COMPILER_VERSION >= 730)
+
+/* Implement spinlocks on IRIX using the MIPSPro atomic fetch operations. See
+ * sync(3) for the details of the intrinsic operations.
+ *
+ * "sgi" and "_COMPILER_VERSION" are always defined by MIPSPro.
+ */
+
+#if defined(STANDALONE)
+
+/* MIPSPro 7.3 has "__inline" as an extension, but not "inline". */
+#define inline __inline
+
+#endif /* STANDALONE */
+
+/* Returns 0 if the lock is acquired, EBUSY otherwise. */
+static inline int __spin_trylock(spinlock_t *lock)
+{
+	unsigned int val;
+	val = __lock_test_and_set(lock, 1);
+	return val == 0 ? 0 : EBUSY;
+}
+
+static inline void __spin_unlock(spinlock_t *lock)
+{
+	__lock_release(lock);
+}
+
+static inline void __spin_lock_init(spinlock_t *lock)
+{
+	__lock_release(lock);
+}
+
+/* Returns 1 if the lock is held, 0 otherwise. */
+static inline int __spin_is_locked(spinlock_t *lock)
+{
+	unsigned int val;
+	val = __add_and_fetch(lock, 0);
+	return val;
+}
+
 #elif defined(MIPS_SPINLOCKS)
 
 static inline unsigned int load_linked(unsigned long addr)
@@ -221,7 +262,11 @@ static void yield_cpu(void)
 
 static int this_is_smp(void)
 {
+#if defined(HAVE_SYSCONF) && defined(SYSCONF_SC_NPROC_ONLN)
+	return (sysconf(_SC_NPROC_ONLN) > 1) ? 1 : 0;
+#else
 	return 0;
+#endif
 }
 
 /*
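
For context, here is a minimal standalone sketch (not part of the patch) of how a
trylock/unlock interface like the one added above is typically driven by a
spin-with-yield loop, in the spirit of the loop tdb builds on top of these
primitives (cf. yield_cpu() in the surrounding file). Because the MIPSPro
intrinsics (__lock_test_and_set, __lock_release) only exist on IRIX, the sketch
substitutes GCC/Clang __atomic builtins so it can be compiled and run on a
non-IRIX host; the my_spin_* names are hypothetical and do not appear in tdb.

/* Illustrative sketch only -- assumes GCC/Clang __atomic builtins as a
 * stand-in for the IRIX MIPSPro intrinsics used in the patch. */
#include <errno.h>
#include <sched.h>
#include <stdio.h>

typedef unsigned int spinlock_t;

/* Try once to take the lock: 0 on success, EBUSY if already held
 * (same contract as the __spin_trylock() added by the patch). */
static int my_spin_trylock(spinlock_t *lock)
{
	return __atomic_exchange_n(lock, 1, __ATOMIC_ACQUIRE) == 0 ? 0 : EBUSY;
}

static void my_spin_unlock(spinlock_t *lock)
{
	__atomic_store_n(lock, 0, __ATOMIC_RELEASE);
}

/* Spin until the lock is acquired, yielding the CPU between attempts. */
static void my_spin_lock(spinlock_t *lock)
{
	while (my_spin_trylock(lock) == EBUSY)
		sched_yield();
}

int main(void)
{
	spinlock_t lock = 0;

	my_spin_lock(&lock);
	printf("lock value while held: %u\n", lock);   /* prints 1 */
	my_spin_unlock(&lock);
	printf("lock value after unlock: %u\n", lock); /* prints 0 */
	return 0;
}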