path: root/source4/ntvfs/posix/pvfs_lock.c
author    Andrew Tridgell <tridge@samba.org>    2004-10-19 06:39:51 +0000
committer Gerald (Jerry) Carter <jerry@samba.org>    2007-10-10 13:01:54 -0500
commit  cf1b85348a0fc5bf4788291109c9dca9e95eb9c2 (patch)
tree    4ddd852640cc81c255677a7a6057a6d84b692a45  /source4/ntvfs/posix/pvfs_lock.c
parent  2b8aa720f47804f783679388f40a9345eff897b9 (diff)
r3056: added an id -> pointer data structure (a type of radix tree). This is
an extremely efficient way of mapping from an integer handle (such as an open
file handle) to a pointer (such as the structure containing the open file
information). The code is taken from lib/idr.c in the 2.6 Linux kernel, and is
very fast and space efficient. By using talloc it even gets automatic cleanup.

This commit converts the handling of open file handles and open directory
search handles to use the idtree routines. In combination with talloc
destructors, this greatly simplifies the structure handling in the pvfs
backend. For example, we no longer need to keep a linked list of open
directory searches at all, and we no longer need to do linear scans of the
list of open files on most operations. The end result is that the pvfs code
is now extremely scalable: you can have tens of thousands of open files and
open searches and the code still runs very fast.

I have also added a small optimisation to the file close path, to avoid
looking in the byte-range locking database when we know that there are no
locks outstanding.

(This used to be commit 16835a0ef91a16fa01145b773aad8d43da215dbf)
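
For context, the idtree pattern this message describes looks roughly like the
sketch below. The idr_* names follow source4's lib/idtree.c (itself derived
from the kernel's lib/idr.c); struct open_file, open_file_new(),
open_file_find(), and the header path are hypothetical illustrations rather
than code from this commit, and the sketch only builds inside the Samba tree
(it needs talloc and the idtree header).

    #include <talloc.h>
    #include "lib/idtree.h"   /* idr_init, idr_get_new, idr_find, idr_remove
                                 (illustrative path; see source4/lib) */

    struct open_file {
    	struct idr_context *idtree; /* back-pointer so the destructor can find the tree */
    	int fnum;                   /* integer handle allocated by idr_get_new() */
    	/* ... the rest of the per-file state ... */
    };

    /* talloc destructor: freeing the structure automatically releases the
       handle, so there is no separate cleanup path to forget */
    static int open_file_destructor(struct open_file *f)
    {
    	idr_remove(f->idtree, f->fnum);
    	return 0;
    }

    static struct open_file *open_file_new(TALLOC_CTX *mem_ctx,
    				       struct idr_context *idtree)
    {
    	struct open_file *f = talloc_zero(mem_ctx, struct open_file);
    	if (f == NULL) return NULL;

    	f->idtree = idtree;
    	/* allocate the lowest free id; SMB fnums are 16-bit, hence the limit */
    	f->fnum = idr_get_new(idtree, f, 0xFFFF);
    	if (f->fnum == -1) {
    		talloc_free(f);
    		return NULL;
    	}
    	talloc_set_destructor(f, open_file_destructor);
    	return f;
    }

    /* handle -> pointer lookup is a shallow radix-tree walk, not a list scan */
    static struct open_file *open_file_find(struct idr_context *idtree, int fnum)
    {
    	return idr_find(idtree, fnum);
    }

The destructor is the point of the talloc combination: the handle is released
wherever the structure happens to be freed, which is why the pvfs backend can
drop its manual bookkeeping lists.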
Diffstat (limited to 'source4/ntvfs/posix/pvfs_lock.c')
-rw-r--r--  source4/ntvfs/posix/pvfs_lock.c  64
1 file changed, 41 insertions(+), 23 deletions(-)
diff --git a/source4/ntvfs/posix/pvfs_lock.c b/source4/ntvfs/posix/pvfs_lock.c
index e32fcb2e3a..f52e68eeb1 100644
--- a/source4/ntvfs/posix/pvfs_lock.c
+++ b/source4/ntvfs/posix/pvfs_lock.c
@@ -75,6 +75,7 @@ static void pvfs_lock_async_failed(struct pvfs_state *pvfs,
f->fnum,
locks[i].offset,
locks[i].count);
+ f->lock_count--;
}
req->async.status = status;
req->async.send_fn(req);
@@ -117,6 +118,10 @@ static void pvfs_pending_lock_continue(void *private, BOOL timed_out)
locks[pending->pending_lock].count,
rw, NULL);
+ if (NT_STATUS_IS_OK(status)) {
+ f->lock_count++;
+ }
+
/* if we have failed and timed out, or succeeded, then we
don't need the pending lock any more */
if (NT_STATUS_IS_OK(status) || timed_out) {
@@ -182,6 +187,8 @@ static void pvfs_pending_lock_continue(void *private, BOOL timed_out)
pvfs_lock_async_failed(pvfs, req, f, locks, i, status);
return;
}
+
+ f->lock_count++;
}
/* we've managed to get all the locks. Tell the client */
@@ -191,26 +198,28 @@ static void pvfs_pending_lock_continue(void *private, BOOL timed_out)
/*
- called when we close a file that might have pending locks
+ called when we close a file that might have locks
*/
-void pvfs_lock_close_pending(struct pvfs_state *pvfs, struct pvfs_file *f)
+void pvfs_lock_close(struct pvfs_state *pvfs, struct pvfs_file *f)
{
struct pvfs_pending_lock *p, *next;
- NTSTATUS status;
+ if (f->lock_count || f->pending_list) {
+ DEBUG(5,("pvfs_lock: removing %.0f locks on close\n",
+ (double)f->lock_count));
+ brl_close(f->pvfs->brl_context, &f->locking_key, f->fnum);
+ f->lock_count = 0;
+ }
+
+ /* reply to all the pending lock requests, telling them the
+ lock failed */
for (p=f->pending_list;p;p=next) {
next = p->next;
DLIST_REMOVE(f->pending_list, p);
- status = brl_remove_pending(pvfs->brl_context, &f->locking_key, p);
- if (!NT_STATUS_IS_OK(status)) {
- DEBUG(0,("pvfs_lock_close_pending: failed to remove pending lock - %s\n",
- nt_errstr(status)));
- }
talloc_free(p->wait_handle);
p->req->async.status = NT_STATUS_RANGE_NOT_LOCKED;
p->req->async.send_fn(p->req);
}
-
}
@@ -262,6 +271,7 @@ NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
int i;
enum brl_type rw;
struct pvfs_pending_lock *pending = NULL;
+ NTSTATUS status;
f = pvfs_find_fd(pvfs, req, lck->generic.in.fnum);
if (!f) {
@@ -270,21 +280,29 @@ NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
switch (lck->generic.level) {
case RAW_LOCK_LOCK:
- return brl_lock(pvfs->brl_context,
- &f->locking_key,
- req->smbpid,
- f->fnum,
- lck->lock.in.offset,
- lck->lock.in.count,
- WRITE_LOCK, NULL);
-
- case RAW_LOCK_UNLOCK:
- return brl_unlock(pvfs->brl_context,
+ status = brl_lock(pvfs->brl_context,
&f->locking_key,
req->smbpid,
f->fnum,
lck->lock.in.offset,
- lck->lock.in.count);
+ lck->lock.in.count,
+ WRITE_LOCK, NULL);
+ if (NT_STATUS_IS_OK(status)) {
+ f->lock_count++;
+ }
+ return status;
+
+ case RAW_LOCK_UNLOCK:
+ status = brl_unlock(pvfs->brl_context,
+ &f->locking_key,
+ req->smbpid,
+ f->fnum,
+ lck->lock.in.offset,
+ lck->lock.in.count);
+ if (NT_STATUS_IS_OK(status)) {
+ f->lock_count--;
+ }
+ return status;
case RAW_LOCK_GENERIC:
return NT_STATUS_INVALID_LEVEL;
@@ -337,7 +355,6 @@ NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
locks = lck->lockx.in.locks;
for (i=0;i<lck->lockx.in.ulock_cnt;i++) {
- NTSTATUS status;
status = brl_unlock(pvfs->brl_context,
&f->locking_key,
locks[i].pid,
@@ -347,13 +364,12 @@ NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
if (!NT_STATUS_IS_OK(status)) {
return status;
}
+ f->lock_count--;
}
locks += i;
for (i=0;i<lck->lockx.in.lock_cnt;i++) {
- NTSTATUS status;
-
if (pending) {
pending->pending_lock = i;
}
@@ -387,9 +403,11 @@ NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
f->fnum,
locks[i].offset,
locks[i].count);
+ f->lock_count--;
}
return status;
}
+ f->lock_count++;
}
return NT_STATUS_OK;
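
Taken together, the f->lock_count changes above maintain a simple invariant:
the counter goes up on every successful brl_lock() and comes back down on
every successful brl_unlock(), including the unwinding of a partially
acquired lockx set after a failure. That is what lets pvfs_lock_close() skip
the byte-range lock database entirely on the common close of a file with no
outstanding locks, calling brl_close() only when the counter or the pending
list says there is something to clean up.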