author    | Jeremy Allison <jra@samba.org> | 2003-12-09 22:32:55 +0000
committer | Jeremy Allison <jra@samba.org> | 2003-12-09 22:32:55 +0000
commit    | c5914a4b552ba0d6bf09305ae539d469847804a4
tree      | 82c03843cf265ef014b573f3cc20e1d5420b2c6a
parent    | 272be24ffd94f81c273775e154f01d93028d4b28
IRIX spinlock patch from James Peach <jpeach@sgi.com>.
Jeremy.
(This used to be commit 04abff372b8939bd33c6dceb48baac9fdb7e27b6)
-rw-r--r-- | source3/configure.in   |  8
-rw-r--r-- | source3/tdb/spinlock.c | 45
2 files changed, 53 insertions(+), 0 deletions(-)
diff --git a/source3/configure.in b/source3/configure.in
index b5b5a7609f..a5db46bc18 100644
--- a/source3/configure.in
+++ b/source3/configure.in
@@ -2016,6 +2016,14 @@ if test x"$samba_cv_SYSCONF_SC_NGROUPS_MAX" = x"yes"; then
     AC_DEFINE(SYSCONF_SC_NGROUPS_MAX,1,[Whether sysconf(_SC_NGROUPS_MAX) is available])
 fi
 
+AC_CACHE_CHECK([for sysconf(_SC_NPROC_ONLN)],samba_cv_SYSCONF_SC_NPROC_ONLN,[
+AC_TRY_RUN([#include <unistd.h>
+main() { exit(sysconf(_SC_NPROC_ONLN) == -1 ? 1 : 0); }],
+samba_cv_SYSCONF_SC_NPROC_ONLN=yes,samba_cv_SYSCONF_SC_NPROC_ONLN=no,samba_cv_SYSCONF_SC_NPROC_ONLN=cross)])
+if test x"$samba_cv_SYSCONF_SC_NPROC_ONLN" = x"yes"; then
+    AC_DEFINE(SYSCONF_SC_NPROC_ONLN,1,[Whether sysconf(_SC_NPROC_ONLN) is available])
+fi
+
 AC_CACHE_CHECK([for root],samba_cv_HAVE_ROOT,[
 AC_TRY_RUN([main() { exit(getuid() != 0); }],
 samba_cv_HAVE_ROOT=yes,samba_cv_HAVE_ROOT=no,samba_cv_HAVE_ROOT=cross)])
diff --git a/source3/tdb/spinlock.c b/source3/tdb/spinlock.c
index 3fddeafb2c..3b3ebefded 100644
--- a/source3/tdb/spinlock.c
+++ b/source3/tdb/spinlock.c
@@ -143,6 +143,47 @@ static inline int __spin_is_locked(spinlock_t *lock)
 	return (*lock != 1);
 }
 
+#elif defined(MIPS_SPINLOCKS) && defined(sgi) && (_COMPILER_VERSION >= 730)
+
+/* Implement spinlocks on IRIX using the MIPSPro atomic fetch operations. See
+ * sync(3) for the details of the intrinsic operations.
+ *
+ * "sgi" and "_COMPILER_VERSION" are always defined by MIPSPro.
+ */
+
+#if defined(STANDALONE)
+
+/* MIPSPro 7.3 has "__inline" as an extension, but not "inline". */
+#define inline __inline
+
+#endif /* STANDALONE */
+
+/* Returns 0 if the lock is acquired, EBUSY otherwise. */
+static inline int __spin_trylock(spinlock_t *lock)
+{
+	unsigned int val;
+	val = __lock_test_and_set(lock, 1);
+	return val == 0 ? 0 : EBUSY;
+}
+
+static inline void __spin_unlock(spinlock_t *lock)
+{
+	__lock_release(lock);
+}
+
+static inline void __spin_lock_init(spinlock_t *lock)
+{
+	__lock_release(lock);
+}
+
+/* Returns 1 if the lock is held, 0 otherwise. */
+static inline int __spin_is_locked(spinlock_t *lock)
+{
+	unsigned int val;
+	val = __add_and_fetch(lock, 0);
+	return val;
+}
+
 #elif defined(MIPS_SPINLOCKS)
 
 static inline unsigned int load_linked(unsigned long addr)
@@ -221,7 +262,11 @@ static void yield_cpu(void)
 
 static int this_is_smp(void)
 {
+#if defined(HAVE_SYSCONF) && defined(SYSCONF_SC_NPROC_ONLN)
+	return (sysconf(_SC_NPROC_ONLN) > 1) ? 1 : 0;
+#else
 	return 0;
+#endif
 }
 
 /*
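A rough sketch of how the new pieces fit together: a test-and-set trylock, a release, an atomic is-locked read, and the runtime SMP probe that decides whether busy-waiting is worthwhile. This is not the Samba/tdb code. The MIPSPro intrinsics used in the patch (__lock_test_and_set, __lock_release, __add_and_fetch) exist only under MIPSPro cc on IRIX, so the sketch swaps them for the GCC __sync builtins; the spin_lock() loop and the sched_yield() backoff are illustrative stand-ins for tdb's own locking loop and yield_cpu(); and _SC_NPROCESSORS_ONLN is used as the common non-IRIX spelling of _SC_NPROC_ONLN.

```c
/* Illustration only: mirrors the shape of the IRIX section of spinlock.c,
 * with the MIPSPro intrinsics replaced by GCC __sync builtins so it compiles
 * on non-IRIX systems. */
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

typedef unsigned int spinlock_t;

/* 0 on success, EBUSY if the lock was already held
 * (the same contract as __spin_trylock in the patch). */
static int spin_trylock(spinlock_t *lock)
{
	return __sync_lock_test_and_set(lock, 1) == 0 ? 0 : EBUSY;
}

static void spin_unlock(spinlock_t *lock)
{
	__sync_lock_release(lock);
}

/* Atomic read of the lock word: adding 0 returns the current value. */
static int spin_is_locked(spinlock_t *lock)
{
	return __sync_add_and_fetch(lock, 0);
}

/* Runtime SMP probe in the spirit of the this_is_smp() change.
 * _SC_NPROC_ONLN is the IRIX name; most other systems spell it
 * _SC_NPROCESSORS_ONLN. */
static int this_is_smp(void)
{
#if defined(_SC_NPROC_ONLN)
	return sysconf(_SC_NPROC_ONLN) > 1;
#elif defined(_SC_NPROCESSORS_ONLN)
	return sysconf(_SC_NPROCESSORS_ONLN) > 1;
#else
	return 0;
#endif
}

/* Illustrative acquire loop: spinning only pays off when another CPU can
 * release the lock, so on a uniprocessor we yield instead of burning cycles. */
static void spin_lock(spinlock_t *lock)
{
	int smp = this_is_smp();

	while (spin_trylock(lock) == EBUSY) {
		if (!smp)
			sched_yield();
	}
}

int main(void)
{
	spinlock_t lock = 0;

	spin_lock(&lock);
	printf("locked=%d smp=%d\n", spin_is_locked(&lock), this_is_smp());
	spin_unlock(&lock);
	printf("locked=%d\n", spin_is_locked(&lock));
	return 0;
}
```

The __add_and_fetch(lock, 0) idiom in the patch (and its __sync analogue above) is simply an atomic read of the lock word without modifying it, which is why __spin_is_locked() is written that way rather than as a plain dereference.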