author     Rusty Russell <rusty@rustcorp.com.au>  2010-08-14 02:13:26 +0930
committer  Rusty Russell <rusty@rustcorp.com.au>  2010-08-14 02:31:22 +0930
commit     11ab43084b10cf53b530cdc3a6036c898b79ca38 (patch)
tree       04549a9c16f5e68349aded2f8ead56571df01312 /lib/tdb/common
parent     f00b61c7d4611802c66495824c97af6cad69704e (diff)
tdb: workaround starvation problem in locking entire database.
We saw tdb_lockall() take 71 seconds under heavy load; this is because Linux (at least) doesn't prevent new small locks being obtained while we're waiting for a big lock.

The workaround is to do divide and conquer using non-blocking chainlocks: if we get down to a single chain, we block. Using a simple test program where children did "hold lock for 100ms, sleep for 1 second", the time to do tdb_lockall() dropped significantly. There are ln(hashsize) locks taken in the contended case, but that's slow anyway. More analysis is given in my blog at http://rusty.ozlabs.org/?p=120

This may also help transactions, though in that case it's the initial read lock which uses this gradual locking routine; the update-to-write-lock code is separate and still tries to upgrade in one go.

Even though the ABI doesn't change, the minor version is bumped so the behavior change can be easily detected.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
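The divide-and-conquer idea described above is simple enough to sketch outside tdb. Below is a minimal, hypothetical illustration using fcntl() byte-range locks on an ordinary file; the helper names (do_lock, do_unlock, range_lock_gradual), the demo file name and the 4096-byte range are invented for the example and are not part of the patch.

/* Illustrative sketch only: divide-and-conquer locking on a plain file. */
#include <fcntl.h>
#include <stdio.h>

static int do_lock(int fd, off_t off, off_t len, int wait)
{
	struct flock fl = {
		.l_type = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start = off,
		.l_len = len,
	};
	return fcntl(fd, wait ? F_SETLKW : F_SETLK, &fl);
}

static int do_unlock(int fd, off_t off, off_t len)
{
	struct flock fl = {
		.l_type = F_UNLCK,
		.l_whence = SEEK_SET,
		.l_start = off,
		.l_len = len,
	};
	return fcntl(fd, F_SETLK, &fl);
}

/* Lock [off, off+len): try the whole range without blocking first;
 * on failure recurse into halves, blocking only on a single unit. */
static int range_lock_gradual(int fd, off_t off, off_t len)
{
	if (len <= 4)
		return do_lock(fd, off, len, 1);	/* block on one record */

	if (do_lock(fd, off, len, 0) == 0)
		return 0;				/* got it in one go */

	if (range_lock_gradual(fd, off, len / 2) == -1)
		return -1;
	if (range_lock_gradual(fd, off + len / 2, len - len / 2) == -1) {
		do_unlock(fd, off, len / 2);
		return -1;
	}
	return 0;
}

int main(void)
{
	int fd = open("demo.lck", O_RDWR | O_CREAT, 0600);
	if (fd < 0 || range_lock_gradual(fd, 0, 4096) != 0) {
		perror("lock");
		return 1;
	}
	printf("whole range locked\n");
	return 0;
}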
Diffstat (limited to 'lib/tdb/common')
-rw-r--r--  lib/tdb/common/lock.c  86
1 file changed, 69 insertions, 17 deletions
diff --git a/lib/tdb/common/lock.c b/lib/tdb/common/lock.c
index 285b7a34c3..803feeecbb 100644
--- a/lib/tdb/common/lock.c
+++ b/lib/tdb/common/lock.c
@@ -152,14 +152,6 @@ int tdb_brlock(struct tdb_context *tdb,
return -1;
}
- /* Sanity check */
- if (tdb->transaction && offset >= lock_offset(-1) && len != 0) {
- tdb->ecode = TDB_ERR_RDONLY;
- TDB_LOG((tdb, TDB_DEBUG_TRACE, "tdb_brlock attempted in transaction at offset %d rw_type=%d flags=%d len=%d\n",
- offset, rw_type, flags, (int)len));
- return -1;
- }
-
do {
ret = fcntl_lock(tdb, rw_type, offset, len,
flags & TDB_LOCK_WAIT);
@@ -486,11 +478,9 @@ int tdb_transaction_unlock(struct tdb_context *tdb, int ltype)
return tdb_nest_unlock(tdb, TRANSACTION_LOCK, ltype, false);
}
-
-/* lock/unlock entire database. It can only be upgradable if you have some
- * other way of guaranteeing exclusivity (ie. transaction write lock). */
-int tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
- enum tdb_lock_flags flags, bool upgradable)
+/* Returns 0 if all done, -1 if error, 1 if ok. */
+static int tdb_allrecord_check(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags flags, bool upgradable)
{
/* There are no locks on read-only dbs */
if (tdb->read_only || tdb->traverse_read) {
@@ -520,11 +510,73 @@ int tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
tdb->ecode = TDB_ERR_LOCK;
return -1;
}
+ return 1;
+}
- if (tdb_brlock(tdb, ltype, FREELIST_TOP, 0, flags)) {
- if (flags & TDB_LOCK_WAIT) {
- TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_lockall failed (%s)\n", strerror(errno)));
- }
+/* We only need to lock individual bytes, but Linux merges consecutive locks
+ * so we lock in contiguous ranges. */
+static int tdb_chainlock_gradual(struct tdb_context *tdb,
+ int ltype, enum tdb_lock_flags flags,
+ size_t off, size_t len)
+{
+ int ret;
+ enum tdb_lock_flags nb_flags = (flags & ~TDB_LOCK_WAIT);
+
+ if (len <= 4) {
+ /* Single record. Just do blocking lock. */
+ return tdb_brlock(tdb, ltype, off, len, flags);
+ }
+
+ /* First we try non-blocking. */
+ ret = tdb_brlock(tdb, ltype, off, len, nb_flags);
+ if (ret == 0) {
+ return 0;
+ }
+
+ /* Try locking first half, then second. */
+ ret = tdb_chainlock_gradual(tdb, ltype, flags, off, len / 2);
+ if (ret == -1)
+ return -1;
+
+ ret = tdb_chainlock_gradual(tdb, ltype, flags,
+ off + len / 2, len - len / 2);
+ if (ret == -1) {
+ tdb_brunlock(tdb, ltype, off, len / 2);
+ return -1;
+ }
+ return 0;
+}
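To see how this recursion subdivides a contended range, here is a tiny standalone sketch (hypothetical, not part of the patch) that only prints the ranges tdb_chainlock_gradual() would attempt, assuming every non-blocking attempt on a multi-record range fails:

#include <stdio.h>

/* Print the ranges tried when every non-blocking attempt fails. */
static void trace_gradual(size_t off, size_t len)
{
	if (len <= 4) {
		printf("blocking lock at %zu len %zu\n", off, len);
		return;
	}
	printf("non-blocking try at %zu len %zu (fails)\n", off, len);
	trace_gradual(off, len / 2);
	trace_gradual(off + len / 2, len - len / 2);
}

int main(void)
{
	/* e.g. a hash_size of 8 gives an 8 * 4 = 32 byte chain-lock range */
	trace_gradual(0, 32);
	return 0;
}

With len = 32 this prints seven failed non-blocking attempts and eight blocking 4-byte locks: in the fully contended case the recursion bottoms out at individual chains, where blocking on a single chain cannot be starved the way one big range lock can.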
+
+/* lock/unlock entire database. It can only be upgradable if you have some
+ * other way of guaranteeing exclusivity (ie. transaction write lock).
+ * We do the locking gradually to avoid being starved by smaller locks. */
+int tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags flags, bool upgradable)
+{
+ switch (tdb_allrecord_check(tdb, ltype, flags, upgradable)) {
+ case -1:
+ return -1;
+ case 0:
+ return 0;
+ }
+
+ /* We cover two kinds of locks:
+ * 1) Normal chain locks. Taken for almost all operations.
+ * 2) Individual record locks. Taken after normal or free
+ * chain locks.
+ *
+ * It is (1) which causes the starvation problem, so we're only
+ * gradual for that. */
+ if (tdb_chainlock_gradual(tdb, ltype, flags, FREELIST_TOP,
+ tdb->header.hash_size * 4) == -1) {
+ return -1;
+ }
+
+ /* Grab individual record locks. */
+ if (tdb_brlock(tdb, ltype, lock_offset(tdb->header.hash_size), 0,
+ flags) == -1) {
+ tdb_brunlock(tdb, ltype, FREELIST_TOP,
+ tdb->header.hash_size * 4);
return -1;
}
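From a caller's point of view nothing changes: tdb_lockall() keeps its signature and return values and simply degrades more gracefully under contention. A minimal caller might look like the sketch below (the file name "demo.tdb" and the open flags are arbitrary assumptions for the example; link with -ltdb):

#include <tdb.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	/* "demo.tdb" and hash size 0 (library default) are arbitrary here. */
	struct tdb_context *tdb = tdb_open("demo.tdb", 0, TDB_DEFAULT,
					   O_RDWR | O_CREAT, 0600);
	if (tdb == NULL) {
		perror("tdb_open");
		return 1;
	}

	/* Internally this now takes the chain locks gradually, so it is
	 * no longer starved by a steady stream of small chain locks. */
	if (tdb_lockall(tdb) != 0) {
		fprintf(stderr, "tdb_lockall: %s\n", tdb_errorstr(tdb));
		tdb_close(tdb);
		return 1;
	}

	/* ... exclusive access to the whole database here ... */

	tdb_unlockall(tdb);
	tdb_close(tdb);
	return 0;
}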