summaryrefslogtreecommitdiff
path: root/source4/lib/tdb/spinlock.c
diff options
context:
space:
mode:
Diffstat (limited to 'source4/lib/tdb/spinlock.c')
-rw-r--r--  source4/lib/tdb/spinlock.c | 45
1 files changed, 45 insertions, 0 deletions
diff --git a/source4/lib/tdb/spinlock.c b/source4/lib/tdb/spinlock.c
index dc7fa3b784..1b789d4daa 100644
--- a/source4/lib/tdb/spinlock.c
+++ b/source4/lib/tdb/spinlock.c
@@ -149,6 +149,47 @@ static inline int __spin_is_locked(spinlock_t *lock)
return (*lock != 1);
}
+#elif defined(MIPS_SPINLOCKS) && defined(sgi) && (_COMPILER_VERSION >= 730)
+
+/* Implement spinlocks on IRIX using the MIPSPro atomic fetch operations. See
+ * sync(3) for the details of the intrinsic operations.
+ *
+ * "sgi" and "_COMPILER_VERSION" are always defined by MIPSPro.
+ */
+
+#if defined(STANDALONE)
+
+/* MIPSPro 7.3 has "__inline" as an extension, but not "inline". */
+#define inline __inline
+
+#endif /* STANDALONE */
+
+/* Returns 0 if the lock is acquired, EBUSY otherwise. */
+static inline int __spin_trylock(spinlock_t *lock)
+{
+ unsigned int val;
+ val = __lock_test_and_set(lock, 1);
+ return val == 0 ? 0 : EBUSY;
+}
+
+static inline void __spin_unlock(spinlock_t *lock)
+{
+ __lock_release(lock);
+}
+
+static inline void __spin_lock_init(spinlock_t *lock)
+{
+ __lock_release(lock);
+}
+
+/* Returns 1 if the lock is held, 0 otherwise. */
+static inline int __spin_is_locked(spinlock_t *lock)
+{
+ unsigned int val;
+ val = __add_and_fetch(lock, 0);
+ return val;
+}
+
#elif defined(MIPS_SPINLOCKS)
static inline unsigned int load_linked(unsigned long addr)
@@ -227,7 +268,11 @@ static void yield_cpu(void)
static int this_is_smp(void)
{
+#if defined(HAVE_SYSCONF) && defined(SYSCONF_SC_NPROC_ONLN)
+ return (sysconf(_SC_NPROC_ONLN) > 1) ? 1 : 0;
+#else
return 0;
+#endif
}
/*