author    | Volker Lendecke <vlendec@samba.org>     | 2007-02-12 23:16:02 +0000
committer | Gerald (Jerry) Carter <jerry@samba.org> | 2007-10-10 14:48:06 -0500
commit    | 2acd8a9b3e1bd3885c5865e4b44ac3b4d92655d3 (patch)
tree      | 9069bf607da93d19ec8608dd0d1dd41f154ce0ea /source4/lib/tdb/common/lock.c
parent    | 4126b34e1f34a67b1eed06329530c17dfa54fb00 (diff)
r21303: As discussed on samba-technical: Change the static array for the in-memory
mirrors of the hash chain locks to a dynamically allocated one.
Jeremy, I count on you to revert it if the build farm freaks out, it's after
midnight here :-)
Volker
(This used to be commit 7b5db2e472c7e27231fa432d3930789e708abd09)
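
The heart of the change is visible in the diff below: the fixed-size `tdb->locked[]` mirror of the per-chain fcntl locks becomes a `tdb->lockrecs` array that is realloc()ed as chains get locked. Because POSIX record locks don't nest, tdb has to remember each chain's nesting depth in memory. A minimal standalone sketch of that bookkeeping follows; the names (`lock_mirror`, `lock_table`, `table_lock`) are hypothetical and only modelled on the patch, not tdb's real internals:

```c
#include <stdint.h>
#include <stdlib.h>

/*
 * Sketch only: the bookkeeping idea behind the patch, not tdb's real
 * definitions. fcntl() byte-range locks do not nest, so the nesting
 * depth of each hash-chain lock has to be mirrored in memory; the
 * patch keeps those mirrors in a realloc()ed array instead of a
 * fixed-size array indexed by chain number.
 */
struct lock_mirror {
        int list;        /* hash chain this entry mirrors (-1 = freelist) */
        uint32_t count;  /* how often this process holds the chain lock */
        uint32_t ltype;  /* F_RDLCK or F_WRLCK */
};

struct lock_table {
        struct lock_mirror *recs;
        int num_recs;
};

/* Bump the count if the chain is already held, otherwise grow the array. */
static int table_lock(struct lock_table *t, int list, uint32_t ltype)
{
        struct lock_mirror *tmp;
        int i;

        for (i = 0; i < t->num_recs; i++) {
                if (t->recs[i].list == list) {
                        t->recs[i].count++;   /* kernel lock already taken */
                        return 0;
                }
        }

        tmp = realloc(t->recs, sizeof(*t->recs) * (t->num_recs + 1));
        if (tmp == NULL) {
                return -1;
        }
        t->recs = tmp;

        /* ... here the real code takes the fcntl() lock via tdb_brlock ... */

        t->recs[t->num_recs].list = list;
        t->recs[t->num_recs].count = 1;
        t->recs[t->num_recs].ltype = ltype;
        t->num_recs += 1;
        return 0;
}
```

The real code additionally handles the freelist (list -1), read vs. write lock types, and the TDB_NOLOCK flag, all of which the sketch ignores.
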
Diffstat (limited to 'source4/lib/tdb/common/lock.c')
-rw-r--r-- | source4/lib/tdb/common/lock.c | 106 |
1 files changed, 88 insertions, 18 deletions
diff --git a/source4/lib/tdb/common/lock.c b/source4/lib/tdb/common/lock.c
index a5bff2d0b3..8a964371d3 100644
--- a/source4/lib/tdb/common/lock.c
+++ b/source4/lib/tdb/common/lock.c
@@ -107,6 +107,9 @@ int tdb_brlock_upgrade(struct tdb_context *tdb, tdb_off_t offset, size_t len)
 /* lock a list in the database. list -1 is the alloc list */
 int tdb_lock(struct tdb_context *tdb, int list, int ltype)
 {
+        struct tdb_lock_type *new_lck;
+        int i;
+
         /* a global lock allows us to avoid per chain locks */
         if (tdb->global_lock.count &&
             (ltype == tdb->global_lock.ltype || ltype == F_RDLCK)) {
@@ -125,18 +128,50 @@ int tdb_lock(struct tdb_context *tdb, int list, int ltype)
         if (tdb->flags & TDB_NOLOCK)
                 return 0;
 
+        for (i=0; i<tdb->num_lockrecs; i++) {
+                if (tdb->lockrecs[i].list == list) {
+                        if (tdb->lockrecs[i].count == 0) {
+                                /*
+                                 * Can't happen, see tdb_unlock(). It should
+                                 * be an assert.
+                                 */
+                                TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_lock: "
+                                         "lck->count == 0 for list %d", list));
+                        }
+                        /*
+                         * Just increment the in-memory struct, posix locks
+                         * don't stack.
+                         */
+                        tdb->lockrecs[i].count++;
+                        return 0;
+                }
+        }
+
+        new_lck = (struct tdb_lock_type *)realloc(
+                tdb->lockrecs,
+                sizeof(*tdb->lockrecs) * (tdb->num_lockrecs+1));
+        if (new_lck == NULL) {
+                errno = ENOMEM;
+                return -1;
+        }
+        tdb->lockrecs = new_lck;
+
         /* Since fcntl locks don't nest, we do a lock for the first one,
            and simply bump the count for future ones */
-        if (tdb->locked[list+1].count == 0) {
-                if (tdb->methods->tdb_brlock(tdb,FREELIST_TOP+4*list,ltype,F_SETLKW, 0, 1)) {
-                        TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_lock failed on list %d ltype=%d (%s)\n",
-                                 list, ltype, strerror(errno)));
-                        return -1;
-                }
-                tdb->locked[list+1].ltype = ltype;
-                tdb->num_locks++;
+        if (tdb->methods->tdb_brlock(tdb,FREELIST_TOP+4*list,ltype,F_SETLKW,
+                                     0, 1)) {
+                TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_lock failed on list %d "
+                         "ltype=%d (%s)\n", list, ltype, strerror(errno)));
+                return -1;
         }
-        tdb->locked[list+1].count++;
+
+        tdb->num_locks++;
+
+        tdb->lockrecs[tdb->num_lockrecs].list = list;
+        tdb->lockrecs[tdb->num_lockrecs].count = 1;
+        tdb->lockrecs[tdb->num_lockrecs].ltype = ltype;
+        tdb->num_lockrecs += 1;
+
         return 0;
 }
 
@@ -146,6 +181,8 @@ int tdb_lock(struct tdb_context *tdb, int list, int ltype)
 int tdb_unlock(struct tdb_context *tdb, int list, int ltype)
 {
         int ret = -1;
+        int i;
+        struct tdb_lock_type *lck = NULL;
 
         /* a global lock allows us to avoid per chain locks */
         if (tdb->global_lock.count &&
@@ -166,19 +203,52 @@ int tdb_unlock(struct tdb_context *tdb, int list, int ltype)
                 return ret;
         }
 
-        if (tdb->locked[list+1].count==0) {
+        for (i=0; i<tdb->num_lockrecs; i++) {
+                if (tdb->lockrecs[i].list == list) {
+                        lck = &tdb->lockrecs[i];
+                        break;
+                }
+        }
+
+        if ((lck == NULL) || (lck->count == 0)) {
                 TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: count is 0\n"));
-                return ret;
+                return -1;
+        }
+
+        if (lck->count > 1) {
+                lck->count--;
+                return 0;
         }
 
-        if (tdb->locked[list+1].count == 1) {
-                /* Down to last nested lock: unlock underneath */
-                ret = tdb->methods->tdb_brlock(tdb, FREELIST_TOP+4*list, F_UNLCK, F_SETLKW, 0, 1);
-                tdb->num_locks--;
-        } else {
-                ret = 0;
+        /*
+         * This lock has count==1 left, so we need to unlock it in the
+         * kernel. We don't bother with decrementing the in-memory array
+         * element, we're about to overwrite it with the last array element
+         * anyway.
+         */
+
+        ret = tdb->methods->tdb_brlock(tdb, FREELIST_TOP+4*list, F_UNLCK,
+                                       F_SETLKW, 0, 1);
+        tdb->num_locks--;
+
+        /*
+         * Shrink the array by overwriting the element just unlocked with the
+         * last array element.
+         */
+
+        if (tdb->num_lockrecs > 1) {
+                *lck = tdb->lockrecs[tdb->num_lockrecs-1];
+        }
+        tdb->num_lockrecs -= 1;
+
+        /*
+         * We don't bother with realloc when the array shrinks, but if we have
+         * a completely idle tdb we should get rid of the locked array.
+         */
+
+        if (tdb->num_lockrecs == 0) {
+                SAFE_FREE(tdb->lockrecs);
         }
-        tdb->locked[list+1].count--;
 
         if (ret)
                 TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: An error occurred unlocking!\n"));
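
One detail worth noting in the unlock path above: the entry being released is removed by copying the last array element over it, so removal stays O(1), no realloc() is done when the array shrinks, and the allocation is only dropped (SAFE_FREE) once the tdb holds no chain locks at all. A small self-contained sketch of that swap-with-last removal, using hypothetical names rather than tdb's own:

```c
#include <stdlib.h>

/*
 * Sketch only: the "overwrite with the last element" removal used by
 * tdb_unlock() in the diff above. The order of the remaining entries is
 * irrelevant, so removal is O(1); the array is only freed once it is
 * completely empty.
 */
struct rec { int list; unsigned count; unsigned ltype; };

static void table_remove(struct rec **recs, int *num, struct rec *victim)
{
        /* victim must point at an element inside *recs */
        if (*num > 1) {
                *victim = (*recs)[*num - 1];  /* move last entry into the hole */
        }
        *num -= 1;

        if (*num == 0) {
                free(*recs);                  /* idle table: drop the allocation */
                *recs = NULL;
        }
}
```
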