author     Volker Lendecke <vl@samba.org>    2009-11-16 09:40:47 +0100
committer  Volker Lendecke <vl@samba.org>    2009-11-21 11:40:13 +0100
commit     f0a933d1408678fabc856e89a5aaebb792047de3 (patch)
tree       a6740c2d9bcd89d3dbec413d681eaef9561dd014 /source3/locking
parent     bda1c701f418d3263d36714f9b646ab60ea2da0f (diff)
s3: Cache brlock.tdb entries for the fast read&write strict locking code path
For a netbench run this gains around 2% of user-space CPU; fetching a 100MB file takes around 4% less time.
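
The core of the change is a sequence-number check: each fsp keeps a private copy of its brlock.tdb record together with the database seqnum at the time the copy was taken, and reuses that copy as long as no writer has bumped the seqnum since (the trick is skipped with clustering, where ctdb propagates seqnum changes with a delay). The following stand-alone sketch only illustrates the pattern; toy_db, lock_cache, toy_lock and friends are illustrative stand-ins, not the real Samba structures.

/*
 * Minimal sketch of the seqnum caching idea, not Samba code: all
 * names here (toy_db, lock_cache, toy_lock, ...) are illustrative
 * stand-ins for brlock_db, fsp->brlock_rec and struct lock_struct.
 */
#include <stdio.h>
#include <string.h>

struct toy_lock {                       /* stand-in for struct lock_struct */
        unsigned long start;
        unsigned long size;
};

struct toy_db {                         /* stand-in for brlock.tdb */
        int seqnum;                     /* bumped on every write */
        struct toy_lock locks[8];
        int num_locks;
};

struct lock_cache {                     /* stand-in for fsp->brlock_rec */
        int valid;
        int seqnum;                     /* db seqnum when the copy was taken */
        struct toy_lock locks[8];
        int num_locks;
};

/* Every writer bumps the sequence number, invalidating all cached copies. */
static void db_add_lock(struct toy_db *db, unsigned long start, unsigned long size)
{
        db->locks[db->num_locks].start = start;
        db->locks[db->num_locks].size = size;
        db->num_locks += 1;
        db->seqnum += 1;
}

/*
 * Readers reuse their private copy while the seqnum is unchanged and only
 * refetch (and re-copy) after some writer has modified the database.
 */
static const struct lock_cache *get_locks_readonly(struct toy_db *db,
                                                   struct lock_cache *cache)
{
        if (cache->valid && (cache->seqnum == db->seqnum)) {
                return cache;           /* fast path: no db access at all */
        }
        memcpy(cache->locks, db->locks, sizeof(db->locks));
        cache->num_locks = db->num_locks;
        cache->seqnum = db->seqnum;
        cache->valid = 1;
        return cache;
}

int main(void)
{
        struct toy_db db = { .seqnum = 0, .num_locks = 0 };
        struct lock_cache cache = { .valid = 0 };

        db_add_lock(&db, 0, 100);
        printf("first read:  %d locks\n", get_locks_readonly(&db, &cache)->num_locks);
        printf("cached read: %d locks\n", get_locks_readonly(&db, &cache)->num_locks);

        db_add_lock(&db, 200, 50);      /* seqnum bump forces a refetch */
        printf("after write: %d locks\n", get_locks_readonly(&db, &cache)->num_locks);
        return 0;
}
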
Diffstat (limited to 'source3/locking')
-rw-r--r--  source3/locking/brlock.c   | 62
-rw-r--r--  source3/locking/locking.c  | 18
2 files changed, 65 insertions(+), 15 deletions(-)
diff --git a/source3/locking/brlock.c b/source3/locking/brlock.c
index c72fad7f2e..d3f5e61f7d 100644
--- a/source3/locking/brlock.c
+++ b/source3/locking/brlock.c
@@ -264,12 +264,25 @@ NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, bool
void brl_init(bool read_only)
{
+ int tdb_flags;
+
if (brlock_db) {
return;
}
+
+ tdb_flags = TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST;
+
+ if (!lp_clustering()) {
+ /*
+ * We can't use the SEQNUM trick to cache brlock
+ * entries in the clustering case because ctdb seqnum
+ * propagation has a delay.
+ */
+ tdb_flags |= TDB_SEQNUM;
+ }
+
brlock_db = db_open(NULL, lock_path("brlock.tdb"),
- lp_open_files_db_hash_size(),
- TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST,
+ lp_open_files_db_hash_size(), tdb_flags,
read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
if (!brlock_db) {
DEBUG(0,("Failed to open byte range locking database %s\n",
@@ -1890,10 +1903,49 @@ struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
return brl_get_locks_internal(mem_ctx, fsp, False);
}
-struct byte_range_lock *brl_get_locks_readonly(TALLOC_CTX *mem_ctx,
- files_struct *fsp)
+struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
{
- return brl_get_locks_internal(mem_ctx, fsp, True);
+ struct byte_range_lock *br_lock;
+
+ if (lp_clustering()) {
+ return brl_get_locks_internal(talloc_tos(), fsp, true);
+ }
+
+ if ((fsp->brlock_rec != NULL)
+ && (brlock_db->get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
+ return fsp->brlock_rec;
+ }
+
+ TALLOC_FREE(fsp->brlock_rec);
+
+ br_lock = brl_get_locks_internal(talloc_tos(), fsp, false);
+ if (br_lock == NULL) {
+ return NULL;
+ }
+ fsp->brlock_seqnum = brlock_db->get_seqnum(brlock_db);
+
+ fsp->brlock_rec = talloc_zero(fsp, struct byte_range_lock);
+ if (fsp->brlock_rec == NULL) {
+ goto fail;
+ }
+ fsp->brlock_rec->fsp = fsp;
+ fsp->brlock_rec->num_locks = br_lock->num_locks;
+ fsp->brlock_rec->read_only = true;
+ fsp->brlock_rec->key = br_lock->key;
+
+ fsp->brlock_rec->lock_data = (struct lock_struct *)
+ talloc_memdup(fsp->brlock_rec, br_lock->lock_data,
+ sizeof(struct lock_struct) * br_lock->num_locks);
+ if (fsp->brlock_rec->lock_data == NULL) {
+ goto fail;
+ }
+
+ TALLOC_FREE(br_lock);
+ return fsp->brlock_rec;
+fail:
+ TALLOC_FREE(br_lock);
+ TALLOC_FREE(fsp->brlock_rec);
+ return NULL;
}
struct brl_revalidate_state {
diff --git a/source3/locking/locking.c b/source3/locking/locking.c
index cf787d4fac..5a6fdf081e 100644
--- a/source3/locking/locking.c
+++ b/source3/locking/locking.c
@@ -116,7 +116,9 @@ bool strict_lock_default(files_struct *fsp, struct lock_struct *plock)
DEBUG(10,("is_locked: optimisation - level II oplock on file %s\n", fsp_str_dbg(fsp)));
ret = True;
} else {
- struct byte_range_lock *br_lck = brl_get_locks_readonly(talloc_tos(), fsp);
+ struct byte_range_lock *br_lck;
+
+ br_lck = brl_get_locks_readonly(fsp);
if (!br_lck) {
return True;
}
@@ -127,10 +129,11 @@ bool strict_lock_default(files_struct *fsp, struct lock_struct *plock)
plock->size,
plock->lock_type,
plock->lock_flav);
- TALLOC_FREE(br_lck);
}
} else {
- struct byte_range_lock *br_lck = brl_get_locks_readonly(talloc_tos(), fsp);
+ struct byte_range_lock *br_lck;
+
+ br_lck = brl_get_locks_readonly(fsp);
if (!br_lck) {
return True;
}
@@ -141,7 +144,6 @@ bool strict_lock_default(files_struct *fsp, struct lock_struct *plock)
plock->size,
plock->lock_type,
plock->lock_flav);
- TALLOC_FREE(br_lck);
}
DEBUG(10,("strict_lock_default: flavour = %s brl start=%.0f "
@@ -170,7 +172,6 @@ NTSTATUS query_lock(files_struct *fsp,
enum brl_flavour lock_flav)
{
struct byte_range_lock *br_lck = NULL;
- NTSTATUS status = NT_STATUS_LOCK_NOT_GRANTED;
if (!fsp->can_lock) {
return fsp->is_directory ? NT_STATUS_INVALID_DEVICE_REQUEST : NT_STATUS_INVALID_HANDLE;
@@ -180,21 +181,18 @@ NTSTATUS query_lock(files_struct *fsp,
return NT_STATUS_OK;
}
- br_lck = brl_get_locks_readonly(talloc_tos(), fsp);
+ br_lck = brl_get_locks_readonly(fsp);
if (!br_lck) {
return NT_STATUS_NO_MEMORY;
}
- status = brl_lockquery(br_lck,
+ return brl_lockquery(br_lck,
psmbpid,
procid_self(),
poffset,
pcount,
plock_type,
lock_flav);
-
- TALLOC_FREE(br_lck);
- return status;
}
static void increment_current_lock_count(files_struct *fsp,
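
A side effect of the caching is a changed ownership contract for callers such as strict_lock_default() and query_lock(): the byte_range_lock returned by brl_get_locks_readonly() is now owned either by the fsp cache or by the current talloc stackframe, which is why the explicit TALLOC_FREE() calls disappear from the callers. A rough sketch of that contract, using plain talloc with an explicit temporary context standing in for Samba's talloc_tos() stackframe (toy_fsp, toy_locks and this get_locks_readonly are illustrative names, not the real API):

/*
 * Rough sketch of the caller-side contract only, not Samba code: the
 * real brl_get_locks_readonly() takes no context argument and uses
 * the talloc_tos() stackframe internally.
 */
#include <stdio.h>
#include <talloc.h>

struct toy_locks {
        int num_locks;
};

struct toy_fsp {
        struct toy_locks *cached;       /* like fsp->brlock_rec, owned by the fsp */
};

/* Returns the cached copy if present, otherwise allocates on tmp_ctx. */
static struct toy_locks *get_locks_readonly(TALLOC_CTX *tmp_ctx,
                                            struct toy_fsp *fsp)
{
        if (fsp->cached != NULL) {
                return fsp->cached;     /* cache-owned: caller must not free */
        }
        return talloc_zero(tmp_ctx, struct toy_locks);  /* tmp_ctx-owned */
}

int main(void)
{
        struct toy_fsp fsp = { .cached = NULL };
        TALLOC_CTX *tmp_ctx = talloc_new(NULL);
        struct toy_locks *l;

        l = get_locks_readonly(tmp_ctx, &fsp);
        printf("num_locks=%d\n", (l != NULL) ? l->num_locks : -1);

        /* The caller frees only the temporary context, never 'l' itself. */
        talloc_free(tmp_ctx);
        return 0;
}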